public BDB_GIDLookupImpl(String aim) {

    EnvironmentConfig environmentConfig = new EnvironmentConfig();

    // The environment is created if it does not already exist; the Environment
    // constructor still fails if the home directory itself is missing, so the
    // directory is created below.
    environmentConfig.setAllowCreate(true);

    // No transactions yet; they may be enabled later. If so, setTransactional(true)
    // must be applied to both the environment and the database configs.
    environmentConfig.setTransactional(false);

    File file = new File(aim);

    if (!file.exists()) {
      file.mkdirs();
    }

    environment = new Environment(file, environmentConfig);
    DatabaseConfig databaseConfig = new DatabaseConfig();

    // The databases are created if they do not already exist.
    databaseConfig.setAllowCreate(true);

    // No transactions yet; see the note on the environment config above.
    databaseConfig.setTransactional(false);

    // create 2 "tables" one for relations-gid one for node-gid
    RelationBDB = environment.openDatabase(null, "Relation", databaseConfig);
    NodeBDB = environment.openDatabase(null, "Node", databaseConfig);
  }
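
A minimal lookup sketch (not part of the original class) shows how the two databases opened above would typically be read; it assumes RelationBDB and NodeBDB are com.sleepycat.je.Database fields, that GIDs are stored as UTF-8 strings, and that DatabaseEntry, OperationStatus, LockMode and java.nio.charset.StandardCharsets are imported.

  // Hypothetical helper: look up the GID stored for a node id.
  public String lookupNodeGid(String nodeId) {
    DatabaseEntry key = new DatabaseEntry(nodeId.getBytes(StandardCharsets.UTF_8));
    DatabaseEntry value = new DatabaseEntry();
    // Null transaction handle: the databases were opened non-transactional.
    OperationStatus status = NodeBDB.get(null, key, value, LockMode.DEFAULT);
    return status == OperationStatus.SUCCESS
        ? new String(value.getData(), StandardCharsets.UTF_8)
        : null;
  }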
Example #2
  public static void main(String[] args) {

    /*
     * Don't write to System.out in this process because the parent
     * process only reads System.err.
     */
    try {
      EnvironmentConfig envConfig = TestUtils.initEnvConfig();
      envConfig.setTransactional(true);
      envConfig.setReadOnly(true);

      File envHome = new File(System.getProperty(TestUtils.DEST_DIR));
      Environment env = new Environment(envHome, envConfig);

      // System.err.println("Opened read-only: " + envHome);
      // System.err.println(System.getProperty("java.class.path"));

      /* Notify the test that this process has opened the environment. */
      ReadOnlyLockingTest.createProcessFile();

      /* Sleep until the parent process kills me. */
      Thread.sleep(Long.MAX_VALUE);
    } catch (Exception e) {

      e.printStackTrace(System.err);
      System.exit(1);
    }
  }
Example #3
  public AbstractFrontier(String homeDirectory) throws DatabaseException, FileNotFoundException {
    // Open the environment
    File file = new File(homeDirectory);
    if (!file.exists()) {
      file.mkdirs();
    }
    System.out.println("Opening environment in: " + homeDirectory);
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTransactional(true);
    envConfig.setAllowCreate(true);

    env = new Environment(new File(homeDirectory), envConfig);
    // Configure the class-catalog database
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setTransactional(true);
    dbConfig.setAllowCreate(true);
    dbConfig.setSortedDuplicates(false);
    // Open the class catalog
    catalogdatabase = env.openDatabase(null, CLASS_CATALOG, dbConfig);
    javaCatalog = new StoredClassCatalog(catalogdatabase);
    // Configure the URL database
    DatabaseConfig dbConfig0 = new DatabaseConfig();
    dbConfig0.setTransactional(true);
    dbConfig0.setAllowCreate(true);
    // Open the URL database (use dbConfig0 configured above)
    database = env.openDatabase(null, "URL", dbConfig0);
  }
 public ByteStoreBDB(File dir, String dbname, boolean ro) {
   this.dir = Files.initDirectory(dir);
   this.readonly = ro;
   settings = new SettingsJE();
   EnvironmentConfig bdb_eco = new EnvironmentConfig();
   bdb_eco.setReadOnly(ro);
   bdb_eco.setAllowCreate(!ro);
   bdb_eco.setTransactional(false);
   //          bdb_eco.setDurability(Durability.COMMIT_NO_SYNC);
   if (ro) {
     bdb_eco.setConfigParam(
         EnvironmentConfig.ENV_RUN_CLEANER, "false"); // Disable log cleaner thread
     bdb_eco.setCacheMode(CacheMode.EVICT_LN);
   }
   JEUtil.mergeSystemProperties(bdb_eco);
   SettingsJE.updateEnvironmentConfig(settings, bdb_eco);
   bdb_env = new Environment(dir, bdb_eco);
   bdb_cfg = new DatabaseConfig();
   bdb_cfg.setReadOnly(ro);
   bdb_cfg.setAllowCreate(true);
   bdb_cfg.setDeferredWrite(true);
   SettingsJE.updateDatabaseConfig(settings, bdb_cfg);
   bdb = bdb_env.openDatabase(null, dbname, bdb_cfg);
   if (log.isDebugEnabled()) {
     log.debug(SettingsJE.dumpDebug(bdb));
   }
 }
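
Because setDeferredWrite(true) is used, modifications only become durable after Database.sync() or a checkpoint. A close helper along these lines (hypothetical, not shown in the original snippet) would typically flush before closing:

  public void close() {
    if (!readonly) {
      bdb.sync(); // flush deferred-write buffers to the JE log
    }
    bdb.close();
    bdb_env.close();
  }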
Example #5
  public boolean initEngine(File envHome) {
    EnvironmentConfig envConfig = new EnvironmentConfig();
    m_DbConfig = new DatabaseConfig();

    // The environment and its databases are opened read-write.
    envConfig.setReadOnly(false);
    m_DbConfig.setReadOnly(false);

    // If the environment is opened for write, then we want to be
    // able to create the environment and databases if
    // they do not exist.
    envConfig.setAllowCreate(true);
    m_DbConfig.setAllowCreate(true);

    // Transactions are not used; deferred write (below) handles persistence instead.
    envConfig.setTransactional(false);
    m_DbConfig.setTransactional(false);

    m_DbConfig.setDeferredWrite(true);

    envConfig.setLocking(false); // No locking

    m_DbConfig.setBtreeComparator(BtreeKeyComparator.class);

    // Open the environment
    try {
      m_env = new Environment(envHome, envConfig);
      return true;
    } catch (DatabaseException e) {
      return false;
    }
  }
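
setBtreeComparator is passed a class here, so JE instantiates it reflectively: the class must be public, have a no-arg constructor, and implement java.util.Comparator<byte[]> (and should be Serializable). A minimal sketch of what BtreeKeyComparator might look like, since its real implementation is not shown:

  public static class BtreeKeyComparator
      implements java.util.Comparator<byte[]>, java.io.Serializable {

    public BtreeKeyComparator() {}

    @Override
    public int compare(byte[] a, byte[] b) {
      // Unsigned lexicographic comparison of the raw key bytes.
      int len = Math.min(a.length, b.length);
      for (int i = 0; i < len; i++) {
        int cmp = (a[i] & 0xFF) - (b[i] & 0xFF);
        if (cmp != 0) {
          return cmp;
        }
      }
      return a.length - b.length;
    }
  }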
Example #6
  private void openEnvironment(File file) throws DatabaseInitException {
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setCachePercent(30);
    envConfig.setAllowCreate(true);
    envConfig.setTransactional(true);
    envConfig.setTxnTimeout(10, TimeUnit.SECONDS);
    envConfig.setLockTimeout(2000, TimeUnit.MILLISECONDS);
    try {
      environment = new Environment(file, envConfig);
      transactionConfig = new TransactionConfig();
      transactionConfig.setReadCommitted(true);

      cursorConfig = new CursorConfig();
      cursorConfig.setReadCommitted(true);
    } catch (EnvironmentLockedException e) {
      String message =
          "Environment locked exception. Another process is using the same database, or the current user has no write access (database location: \""
              + file.getAbsolutePath()
              + "\")";
      throw new DatabaseInitException(message);
    } catch (DatabaseException e) {
      String message = "A database initialisation error has occured (" + e.getMessage() + ")";
      throw new DatabaseInitException(message);
    }
  }
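
A sketch of how the stored transactionConfig and cursorConfig might then be used for read-committed reads; the method and its parameters are illustrative, not taken from the original class.

  private void readWithReadCommitted(Database db, DatabaseEntry key, DatabaseEntry data)
      throws DatabaseException {
    Transaction txn = environment.beginTransaction(null, transactionConfig);
    Cursor cursor = db.openCursor(txn, cursorConfig);
    try {
      // Read locks are released as the cursor moves on (read-committed isolation).
      cursor.getSearchKey(key, data, LockMode.DEFAULT);
    } finally {
      cursor.close();
      txn.commit();
    }
  }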
  private EnvironmentImpl createEnvImpl(File envDir) {
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);
    envConfig.setTransactional(true);

    Environment backEnv = new Environment(envDir, envConfig);

    return DbInternal.getEnvironmentImpl(backEnv);
  }
  public BdbStorageConfiguration(VoldemortConfig config) {
    this.voldemortConfig = config;
    environmentConfig = new EnvironmentConfig();
    environmentConfig.setTransactional(true);
    environmentConfig.setCacheSize(config.getBdbCacheSize());
    if (config.isBdbWriteTransactionsEnabled() && config.isBdbFlushTransactionsEnabled()) {
      environmentConfig.setTxnNoSync(false);
      environmentConfig.setTxnWriteNoSync(false);
    } else if (config.isBdbWriteTransactionsEnabled() && !config.isBdbFlushTransactionsEnabled()) {
      environmentConfig.setTxnNoSync(false);
      environmentConfig.setTxnWriteNoSync(true);
    } else {
      environmentConfig.setTxnNoSync(true);
    }
    environmentConfig.setAllowCreate(true);
    environmentConfig.setConfigParam(
        EnvironmentConfig.LOG_FILE_MAX, Long.toString(config.getBdbMaxLogFileSize()));
    environmentConfig.setConfigParam(
        EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL,
        Long.toString(config.getBdbCheckpointBytes()));
    environmentConfig.setConfigParam(
        EnvironmentConfig.CHECKPOINTER_WAKEUP_INTERVAL,
        Long.toString(config.getBdbCheckpointMs() * Time.US_PER_MS));
    environmentConfig.setConfigParam(
        EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION,
        Integer.toString(config.getBdbCleanerMinFileUtilization()));
    environmentConfig.setConfigParam(
        EnvironmentConfig.CLEANER_MIN_UTILIZATION,
        Integer.toString(config.getBdbCleanerMinUtilization()));
    environmentConfig.setConfigParam(
        EnvironmentConfig.CLEANER_THREADS, Integer.toString(config.getBdbCleanerThreads()));
    environmentConfig.setConfigParam(
        EnvironmentConfig.CLEANER_LOOK_AHEAD_CACHE_SIZE,
        Integer.toString(config.getBdbCleanerLookAheadCacheSize()));
    environmentConfig.setConfigParam(
        EnvironmentConfig.LOCK_N_LOCK_TABLES, Integer.toString(config.getBdbLockNLockTables()));
    environmentConfig.setConfigParam(
        EnvironmentConfig.ENV_FAIR_LATCHES, Boolean.toString(config.getBdbFairLatches()));
    environmentConfig.setConfigParam(
        EnvironmentConfig.CHECKPOINTER_HIGH_PRIORITY,
        Boolean.toString(config.getBdbCheckpointerHighPriority()));
    environmentConfig.setConfigParam(
        EnvironmentConfig.CLEANER_MAX_BATCH_FILES,
        Integer.toString(config.getBdbCleanerMaxBatchFiles()));

    environmentConfig.setLockTimeout(config.getBdbLockTimeoutMs(), TimeUnit.MILLISECONDS);
    databaseConfig = new DatabaseConfig();
    databaseConfig.setAllowCreate(true);
    databaseConfig.setSortedDuplicates(config.isBdbSortedDuplicatesEnabled());
    databaseConfig.setNodeMaxEntries(config.getBdbBtreeFanout());
    databaseConfig.setTransactional(true);
    bdbMasterDir = config.getBdbDataDirectory();
    useOneEnvPerStore = config.isBdbOneEnvPerStore();
    if (useOneEnvPerStore) environmentConfig.setSharedCache(true);
  }
  /**
   * Returns a new read-only handle to the JE root container for this backend. The caller is
   * responsible for closing the root container after use.
   *
   * @return The read-only RootContainer object for this backend.
   * @throws ConfigException If an unrecoverable problem arises during initialization.
   * @throws InitializationException If a problem occurs during initialization that is not related
   *     to the server configuration.
   */
  public RootContainer getReadOnlyRootContainer() throws ConfigException, InitializationException {
    EnvironmentConfig envConfig = ConfigurableEnvironment.parseConfigEntry(cfg);

    envConfig.setReadOnly(true);
    envConfig.setAllowCreate(false);
    envConfig.setTransactional(false);
    envConfig.setConfigParam("je.env.isLocking", "true");
    envConfig.setConfigParam("je.env.runCheckpointer", "true");

    return initializeRootContainer(envConfig);
  }
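
Since the javadoc makes the caller responsible for closing the container, a typical caller would follow a try/finally pattern like this (hypothetical method, not part of the backend class):

  private void withReadOnlyRootContainer() throws ConfigException, InitializationException {
    RootContainer rc = getReadOnlyRootContainer();
    try {
      // ... read-only operations against rc ...
    } finally {
      try {
        rc.close();
      } catch (DatabaseException ignored) {
        // best-effort close, mirroring the cleanup in verifyBackend() below
      }
    }
  }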
  private EnvironmentConfig makeEnvConfig() {

    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);
    envConfig.setTransactional(true);
    DbInternal.disableParameterValidation(envConfig);
    envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), "10000");
    /* Control cleaning explicitly. */
    envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
    return envConfig;
  }
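
With je.env.runCleaner disabled above, cleaning has to be driven explicitly by the test. A sketch of how that is usually done (env being any environment opened with this config; the helper method is hypothetical):

  private void cleanManually(Environment env) throws DatabaseException {
    // cleanLog() returns the number of files cleaned in one pass; loop until nothing is left.
    while (env.cleanLog() > 0) {
      // keep cleaning
    }
    // A forced checkpoint makes the cleaned files eligible for deletion.
    CheckpointConfig force = new CheckpointConfig();
    force.setForce(true);
    env.checkpoint(force);
  }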
Example #11
  /**
   * Set an attribute value for the given environment.
   *
   * @param targetEnv The target JE environment. May be null if the environment is not open.
   * @param attribute name/value pair
   */
  public void setAttribute(Environment targetEnv, Attribute attribute)
      throws AttributeNotFoundException, InvalidAttributeValueException {

    if (attribute == null) {
      throw new AttributeNotFoundException("Attribute cannot be null");
    }

    /* Sanity check parameters. */
    String name = attribute.getName();
    Object value = attribute.getValue();

    if (name == null) {
      throw new AttributeNotFoundException("Attribute name cannot be null");
    }

    if (value == null) {
      throw new InvalidAttributeValueException(
          "Attribute value for attribute " + name + " cannot be null");
    }

    try {
      if (name.equals(ATT_SET_READ_ONLY)) {
        openConfig.setReadOnly(((Boolean) value).booleanValue());
      } else if (name.equals(ATT_SET_TRANSACTIONAL)) {
        openConfig.setTransactional(((Boolean) value).booleanValue());
      } else if (name.equals(ATT_SET_SERIALIZABLE)) {
        openConfig.setTxnSerializableIsolation(((Boolean) value).booleanValue());
      } else {
        /* Set the specified attribute if the environment is open. */
        if (targetEnv != null) {

          EnvironmentMutableConfig config = targetEnv.getMutableConfig();

          if (name.equals(ATT_CACHE_SIZE)) {
            config.setCacheSize(((Long) value).longValue());
            targetEnv.setMutableConfig(config);
          } else if (name.equals(ATT_CACHE_PERCENT)) {
            config.setCachePercent(((Integer) value).intValue());
            targetEnv.setMutableConfig(config);
          } else {
            throw new AttributeNotFoundException("attribute " + name + " is not valid.");
          }
        } else {
          throw new AttributeNotFoundException("attribute " + name + " is not valid.");
        }
      }
    } catch (NumberFormatException e) {
      throw new InvalidAttributeValueException("attribute name=" + name);
    } catch (DatabaseException e) {
      throw new InvalidAttributeValueException("attribute name=" + name + e.getMessage());
    }
  }
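
A hedged caller sketch: the helper method below is illustrative, Attribute is javax.management.Attribute, and ATT_CACHE_PERCENT is assumed to be the String attribute name used in the code above; the value 25 is an arbitrary example.

  void exampleSetCachePercent(Environment env)
      throws AttributeNotFoundException, InvalidAttributeValueException {
    // Adjust the JE cache to 25% of the JVM heap on an open environment.
    setAttribute(env, new Attribute(ATT_CACHE_PERCENT, Integer.valueOf(25)));
  }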
 /**
  * @param dbEnvPath path to db environment
  * @param isReadOnly If true, then all databases opened in this environment must be opened as
  *     read-only. If you are writing a multi-process application, then all but one of your
  *     processes must set this value to true.
  * @param allowCreateNew If true, the database environment is created when it is opened. If false,
  *     environment open fails if the environment does not exist. This property has no meaning if
  *     the database environment already exists.
  * @param cacheSizeInMB JE cache size in megabytes; pass null or -1 to keep the JE default
  * @throws DatabaseException if the environment cannot be opened
  */
 public BerkeleyDbEnvironment(
     String dbEnvPath, boolean isReadOnly, boolean allowCreateNew, Long cacheSizeInMB)
     throws DatabaseException {
   EnvironmentConfig envConfig = new EnvironmentConfig();
   envConfig.setAllowCreate(allowCreateNew);
   envConfig.setReadOnly(isReadOnly);
   envConfig.setTransactional(false);
   if (cacheSizeInMB != null && cacheSizeInMB != -1) {
     long cacheSizeInByte = cacheSizeInMB * 1024 * 1024;
     envConfig.setCacheSize(cacheSizeInByte);
   }
   environment = new Environment(new File(dbEnvPath), envConfig);
 }
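
Illustrative usage (the path and cache size are arbitrary examples, not taken from the original code): open an existing environment read-only with a 256 MB JE cache.

  static BerkeleyDbEnvironment openReadOnly() throws DatabaseException {
    return new BerkeleyDbEnvironment(
        "/data/bdb-env", true /* readOnly */, false /* allowCreateNew */, 256L /* cacheSizeInMB */);
  }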
Example #13
  /**
   * Verify the integrity of the backend instance.
   *
   * @param verifyConfig The verify configuration.
   * @param statEntry Optional entry to save stats into.
   * @return The error count.
   * @throws ConfigException If an unrecoverable problem arises during initialization.
   * @throws InitializationException If a problem occurs during initialization that is not related
   *     to the server configuration.
   * @throws DirectoryException If a Directory Server error occurs.
   */
  public long verifyBackend(VerifyConfig verifyConfig, Entry statEntry)
      throws InitializationException, ConfigException, DirectoryException {
    // If the backend already has the root container open, we must use the same
    // underlying root container
    boolean openRootContainer = rootContainer == null;
    long errorCount = 0;

    try {
      if (openRootContainer) {
        EnvironmentConfig envConfig = ConfigurableEnvironment.parseConfigEntry(cfg);

        envConfig.setReadOnly(true);
        envConfig.setAllowCreate(false);
        envConfig.setTransactional(false);
        envConfig.setConfigParam("je.env.isLocking", "true");
        envConfig.setConfigParam("je.env.runCheckpointer", "true");

        rootContainer = initializeRootContainer(envConfig);
      }

      VerifyJob verifyJob = new VerifyJob(verifyConfig);
      errorCount = verifyJob.verifyBackend(rootContainer, statEntry);
    } catch (DatabaseException e) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, e);
      }
      throw createDirectoryException(e);
    } catch (JebException e) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, e);
      }
      throw new DirectoryException(
          DirectoryServer.getServerErrorResultCode(), e.getMessageObject());
    } finally {
      // If a root container was opened in this method as read only, close it
      // to leave the backend in the same state.
      if (openRootContainer && rootContainer != null) {
        try {
          rootContainer.close();
          rootContainer = null;
        } catch (DatabaseException e) {
          if (debugEnabled()) {
            TRACER.debugCaught(DebugLogLevel.ERROR, e);
          }
        }
      }
    }
    return errorCount;
  }
  public BerkeleyStorage(File file, boolean readonly, boolean async) {
    if (file == null) {
      int pid = 0;
      try {
        pid = Integer.parseInt((new File("/proc/self")).getCanonicalFile().getName());
      } catch (NumberFormatException | IOException e) {
        // Could not determine the pid; fall back to 0.
      }
      String path = "/tmp";
      String db_path = System.getenv("DB");
      if (db_path != null) {
        path = db_path;
      }
      file = new File(path + "/ringpaxos-db/" + pid);
      file.mkdirs();
    }
    EnvironmentConfig envConfig = new EnvironmentConfig();
    DatabaseConfig dbConfig = new DatabaseConfig();
    envConfig.setReadOnly(readonly);
    dbConfig.setReadOnly(readonly);
    envConfig.setAllowCreate(!readonly);
    dbConfig.setAllowCreate(!readonly);

    // performance settings
    envConfig.setTransactional(true);
    envConfig.setCacheMode(CacheMode.DEFAULT);
    // envConfig.setCacheSize(1000000*800); // 800M
    if (async) {
      dbConfig.setTransactional(false);
      envConfig.setDurability(Durability.COMMIT_NO_SYNC);
      dbConfig.setDeferredWrite(true);
    } else {
      dbConfig.setTransactional(true);
      envConfig.setDurability(Durability.COMMIT_SYNC);
      dbConfig.setDeferredWrite(false);
    }

    env = new Environment(file, envConfig);
    db = env.openDatabase(null, "paxosDB", dbConfig);
    classCatalogDb = env.openDatabase(null, "ClassCatalogDB", dbConfig);
    classCatalog = new StoredClassCatalog(classCatalogDb);
    keyBinding = TupleBinding.getPrimitiveBinding(Long.class);
    dataBinding = new SerialBinding<Decision>(classCatalog, Decision.class);
    ballotBinding = TupleBinding.getPrimitiveBinding(Integer.class);

    logger.info("BerkeleyStorage cache size: " + env.getMutableConfig().getCacheSize());
    logger.info(
        "BerkeleyStorage durability: " + env.getMutableConfig().getDurability().getLocalSync());
    logger.info("BerkeleyStorage deferred write: " + db.getConfig().getDeferredWrite());
  }
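
A sketch of how the bindings configured above would typically be used to store and fetch a Decision by instance id; the method names are hypothetical, and the real class presumably has its own accessors.

  public void putDecision(Long instance, Decision decision) {
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry value = new DatabaseEntry();
    keyBinding.objectToEntry(instance, key);
    dataBinding.objectToEntry(decision, value);
    db.put(null, key, value); // auto-commit, or buffered when deferred write is enabled
  }

  public Decision getDecision(Long instance) {
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry value = new DatabaseEntry();
    keyBinding.objectToEntry(instance, key);
    if (db.get(null, key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
      return dataBinding.entryToObject(value);
    }
    return null;
  }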
  /**
   * Tests that when a file is opened with a lesser version than the current version, a new log file
   * is started for writing new log entries. This is important so that the new header version is
   * written even if no new log file is needed. If the new version were not written, an older
   * version of JE would not recognize that there had been a version change.
   */
  public void testLesserVersionNotUpdated() throws DatabaseException, IOException {

    TestUtils.loadLog(getClass(), Utils.MIN_VERSION_NAME, envHome);
    File logFile = new File(envHome, TestUtils.LOG_FILE_NAME);
    long origFileSize = logFile.length();

    EnvironmentConfig envConfig = TestUtils.initEnvConfig();
    envConfig.setAllowCreate(false);
    envConfig.setTransactional(true);

    Environment env = new Environment(envHome, envConfig);
    env.sync();
    env.close();

    assertEquals(origFileSize, logFile.length());
  }
  private void openGroup() throws IOException {

    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTransactional(true);
    envConfig.setAllowCreate(true);

    ReplicationConfig repConfig = new ReplicationConfig();
    repConfig.setConfigParam(RepParams.VLSN_LOG_CACHE_SIZE.getName(), "2");

    repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, nNodes, envConfig, repConfig);
    master = RepTestUtils.joinGroup(repEnvInfo);

    StoreConfig config = new StoreConfig();
    config.setAllowCreate(true);
    config.setTransactional(true);
    store = new EntityStore(master, "test", config);
    primaryIndex = store.getPrimaryIndex(Integer.class, AppData.class);
  }
  private static void openEnv() throws DatabaseException {
    System.out.println("opening env");

    // Set up the environment.
    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
    myEnvConfig.setAllowCreate(true);
    myEnvConfig.setTransactional(true);
    // Environment handles are free-threaded in JE,
    // so we do not have to do anything to cause the
    // environment handle to be free-threaded.

    // Set up the database
    DatabaseConfig myDbConfig = new DatabaseConfig();
    myDbConfig.setAllowCreate(true);
    myDbConfig.setTransactional(true);
    myDbConfig.setSortedDuplicates(true);
    // no DatabaseConfig.setThreaded() method available.
    // db handles in java are free-threaded so long as the
    // env is also free-threaded.

    // Open the environment
    myEnv =
        new Environment(
            new File(myEnvPath), // Env home
            myEnvConfig);

    // Open the database. Do not provide a txn handle. This open
    // is autocommitted because DatabaseConfig.setTransactional()
    // is true.
    myDb =
        myEnv.openDatabase(
            null, // txn handle
            dbName, // Database file name
            myDbConfig);

    // Used by the bind API for serializing objects
    // Class database must not support duplicates
    myDbConfig.setSortedDuplicates(false);
    myClassDb =
        myEnv.openDatabase(
            null, // txn handle
            cdbName, // Database file name
            myDbConfig);
  }
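
Because myDbConfig.setTransactional(true) is used, writes made with a null transaction handle are auto-committed. A small illustrative helper, not part of the original snippet:

  private static void putAutoCommitted(String keyText, String dataText) throws DatabaseException {
    DatabaseEntry key = new DatabaseEntry(keyText.getBytes());
    DatabaseEntry data = new DatabaseEntry(dataText.getBytes());
    myDb.put(null, key, data); // null txn handle => auto-committed by JE
  }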
  /**
   * Tests that an exception is thrown when a log header is read with a newer version than the
   * current version. The maxversion.jdb log file is loaded as a resource by this test and written
   * as a regular log file. When the environment is opened, we expect a LogException.
   */
  public void testGreaterVersionNotAllowed() throws DatabaseException, IOException {

    TestUtils.loadLog(getClass(), Utils.MAX_VERSION_NAME, envHome);

    EnvironmentConfig envConfig = TestUtils.initEnvConfig();
    envConfig.setAllowCreate(false);
    envConfig.setTransactional(true);

    try {
      Environment env = new Environment(envHome, envConfig);
      try {
        env.close();
      } catch (Exception ignore) {
      }
    } catch (DatabaseException e) {
      if (e.getCause() instanceof LogException) {
        /* Got LogException as expected. */
        return;
      }
    }
    fail("Expected LogException");
  }
Example #19
  static {
    long stime = System.currentTimeMillis();

    File f = new File(FILE_PATH);
    if (!f.exists()) {
      f.mkdirs();
    }
    /* ===================== Environment configuration ===================== */
    envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);
    envConfig.setTransactional(false);
    /* 40 MB cache; maximum log file size 20 MB (the default is 10 MB) */
    envConfig.setCacheSize(1024 * 1024 * 40);
    envConfig.setConfigParam("je.log.fileMax", String.valueOf(1024 * 1024 * 20));

    /* Adjust the write (log) buffer sizes: 2 MB per buffer, 6 MB in total */
    envConfig.setConfigParam("je.log.bufferSize", String.valueOf(1024 * 1024 * 2));
    envConfig.setConfigParam("je.log.totalBufferBytes", String.valueOf(1024 * 1024 * 6));
    env = new Environment(new File(FILE_PATH), envConfig);
    env.cleanLog();
    /* Database configuration */
    dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    // dbConfig.setDeferredWrite(true);

    database = env.openDatabase(null, SOURCE_DB, dbConfig);

    Runtime.getRuntime()
        .addShutdownHook(
            new Thread() {
              public void run() {
                close();
                logger.warn("close db and env");
              }
            });

    logger.warn("init bdb success " + (System.currentTimeMillis() - stime));
  }
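
The shutdown hook above calls a close() that is not shown. A plausible sketch (hypothetical, assuming database and env are the fields opened in the static block) closes the Database before the Environment, as JE requires:

  public static void close() {
    try {
      if (database != null) {
        database.close();
      }
      if (env != null) {
        env.cleanLog(); // opportunistic final cleaning pass
        env.close();
      }
    } catch (DatabaseException e) {
      logger.warn("error closing bdb: " + e);
    }
  }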
  /*
   * See SR11455 for details.
   *
   * This test is checking that the maxTxnId gets recovered properly during
   * recovery.  The SR has to do with the INFileReader not including
   * DupCountLN_TX and DelDupLN_TX's in its txnIdTrackingMap.  When these
   * were not included, it was possible for a transaction to consist solely
   * of DupCountLN_TX/DelDupLN_TX pairs.  The "deleteData" transaction below
   * does just this.  If no checkpoint occurred following such a transaction,
   * then the correct current txnid would not be written to the log and
   * determining this value during recovery would be left up to the
   * INFileReader.  However, without reading the DupCountLN_TX/DelDupLN_TX
   * records, it would not recover the correct value.
   *
   * We take the poor man's way out of creating this situation by just
   * manually asserting the txn id is correct post-recovery.  The expected txn id
   * was determined by looking through logs before and after the fix.
   */
  public void testSR11455() throws Throwable {

    createEnvAndDbs(1 << 20, true, 1);
    int numRecs = 1;
    int nDups = 3;

    try {
      /* Set up an repository of expected data. */
      Map<TestData, Set<TestData>> expectedData = new HashMap<TestData, Set<TestData>>();

      /* Insert all the data. */
      Transaction txn = env.beginTransaction(null, null);
      insertData(txn, 0, numRecs - 1, expectedData, nDups, true, 1);
      txn.commit();

      txn = env.beginTransaction(null, null);
      /* Delete all the even records. */
      deleteData(txn, expectedData, false, false, 1);
      txn.abort();
      closeEnv();

      /* Open it again, which will run recovery. */
      EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig();
      recoveryConfig.setTransactional(true);
      recoveryConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
      recoveryConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
      env = new Environment(envHome, recoveryConfig);

      txn = env.beginTransaction(null, null);
      assertEquals(6, txn.getId());
      txn.commit();
      env.close();

    } catch (Throwable t) {
      t.printStackTrace();
      throw t;
    }
  }
Example #21
  public static void envconfig() {
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTransactional(false);
    envConfig.setAllowCreate(true);
    File envDir = new File("d://");

    try {
      // Create the environment
      Environment exampleEnv = new Environment(envDir, envConfig);
      String databaseName = "ToDoTaskList.db";
      DatabaseConfig dbConfig = new DatabaseConfig();
      dbConfig.setAllowCreate(true);
      dbConfig.setTransactional(false);
      // Open the database used to store class information.
      // The class-catalog database does not need to store duplicate keys.
      dbConfig.setSortedDuplicates(false);

      Database myClassDb = exampleEnv.openDatabase(null, databaseName, dbConfig);
      // Initialize the class catalog
      StoredClassCatalog catalog = new StoredClassCatalog(myClassDb);
      TupleBinding<String> keyBinding = TupleBinding.getPrimitiveBinding(String.class);
      // Store values as serialized objects via a SerialBinding
      SerialBinding<String> valueBinding = new SerialBinding<String>(catalog, String.class);
      Database store = exampleEnv.openDatabase(null, databaseName, dbConfig);
      // Create the stored map over the database (note that this example reuses
      // the same database for the class catalog and the data)
      StoredSortedMap<String, String> map =
          new StoredSortedMap<String, String>(store, keyBinding, valueBinding, true);
      // Close the databases and the environment
      // exampleEnv.syncReplication();
      store.close();
      myClassDb.close();
      exampleEnv.close();
      exampleEnv = null;
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
Example #22
  public void testFlipFile() throws Throwable {

    /*
     * The setUp() method opens a standalone FileManager, but in this test
     * case we need a regular Environment.  On Windows, we can't lock the
     * file range twice in FileManager.lockEnvironment, so we must close
     * the standalone FileManager here before opening a regular
     * environment.
     */
    fileManager.clear();
    fileManager.close();
    fileManager = null;

    EnvironmentConfig envConfig = TestUtils.initEnvConfig();
    envConfig.setAllowCreate(true);
    envConfig.setTransactional(true);
    Environment env = new Environment(envHome, envConfig);
    EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
    FileManager fileManager = envImpl.getFileManager();

    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    Database exampleDb = env.openDatabase(null, "simpleDb", dbConfig);

    assertEquals("Should have 0 as current file", 0L, fileManager.getCurrentFileNum());
    long flipLsn = envImpl.forceLogFileFlip();
    assertEquals("LSN should be 1 post-flip", 1L, DbLsn.getFileNumber(flipLsn));
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    key.setData("key".getBytes());
    data.setData("data".getBytes());
    exampleDb.put(null, key, data);
    assertEquals("Should have 1 as last file", 1L, fileManager.getCurrentFileNum());
    exampleDb.close();
    env.close();
  }
  public AbstractBerkeleyDB(String homeDirectory, String dbname) {
    // Open the environment
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTransactional(true);
    envConfig.setAllowCreate(true);
    File bdbHomeFile = new File(homeDirectory);
    if (!bdbHomeFile.exists()) bdbHomeFile.mkdirs();
    env = new Environment(bdbHomeFile, envConfig);

    if (database != null) return;
    // Configure the class-catalog database
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setTransactional(true);
    dbConfig.setAllowCreate(true);
    // Open the class catalog
    catalogdatabase = env.openDatabase(null, CLASS_CATALOG, dbConfig);
    javaCatalog = new StoredClassCatalog(catalogdatabase);
    // Configure the data database
    DatabaseConfig dbConfig0 = new DatabaseConfig();
    dbConfig0.setTransactional(true);
    dbConfig0.setAllowCreate(true);
    // Open the data database (use dbConfig0 configured above)
    database = env.openDatabase(null, dbname, dbConfig0);
  }
  /**
   * Create a JE environment configuration with default values.
   *
   * @return A JE environment config containing default values.
   */
  public static EnvironmentConfig defaultConfig() {
    EnvironmentConfig envConfig = new EnvironmentConfig();

    envConfig.setTransactional(true);
    envConfig.setAllowCreate(true);

    // "je.env.sharedLatches" is "true" by default since JE #12136 (3.3.62?)

    // This parameter was set to false while diagnosing a Berkeley DB JE bug.
    // Normally cleansed log files are deleted, but if this is set false
    // they are instead renamed from .jdb to .del.
    envConfig.setConfigParam(CLEANER_EXPUNGE, "true");

    // Under heavy write load the check point can fall behind causing
    // uncontrolled DB growth over time. This parameter makes the out of
    // the box configuration more robust at the cost of a slight
    // reduction in maximum write throughput. Experiments have shown
    // that response time predictability is not impacted negatively.
    envConfig.setConfigParam(CHECKPOINTER_HIGH_PRIORITY, "true");

    // If the JVM is reasonably large then we can safely default to
    // bigger read buffers. This will result in more scalable checkpointer
    // and cleaner performance.
    if (Runtime.getRuntime().maxMemory() > 256 * 1024 * 1024) {
      envConfig.setConfigParam(CLEANER_LOOK_AHEAD_CACHE_SIZE, String.valueOf(2 * 1024 * 1024));
      envConfig.setConfigParam(LOG_ITERATOR_READ_SIZE, String.valueOf(2 * 1024 * 1024));
      envConfig.setConfigParam(LOG_FAULT_READ_SIZE, String.valueOf(4 * 1024));
    }

    // Disable lock timeouts, meaning that no lock wait
    // time limit is enforced and a deadlocked operation
    // will block indefinitely.
    envConfig.setLockTimeout(0, TimeUnit.MICROSECONDS);

    return envConfig;
  }
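
Illustrative usage of defaultConfig(); the helper method is hypothetical, and depending on the JE version DatabaseException may be unchecked.

  public static Environment openWithDefaults(File envHome) throws DatabaseException {
    return new Environment(envHome, defaultConfig());
  }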
  /** Insert or retrieve data */
  public void run() throws DatabaseException {
    /* Create a new, transactional database environment */
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTransactional(true);
    envConfig.setAllowCreate(true);
    Environment exampleEnv = new Environment(envDir, envConfig);

    /* Make a database within that environment */
    Transaction txn = exampleEnv.beginTransaction(null, null);
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setTransactional(true);
    dbConfig.setAllowCreate(true);
    dbConfig.setSortedDuplicates(true);
    Database exampleDb = exampleEnv.openDatabase(txn, "bindingsDb", dbConfig);

    /*
     * In our example, the database record is composed of an integer
     * key and an instance of the MyData class as data.
     *
     * A class catalog database is needed for storing class descriptions
     * for the serial binding used below.  This avoids storing class
     * descriptions redundantly in each record.
     */
    DatabaseConfig catalogConfig = new DatabaseConfig();
    catalogConfig.setTransactional(true);
    catalogConfig.setAllowCreate(true);
    Database catalogDb = exampleEnv.openDatabase(txn, "catalogDb", catalogConfig);
    StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);

    /*
     * Create a serial binding for MyData data objects.  Serial bindings
     * can be used to store any Serializable object.
     */
    EntryBinding<MyData> dataBinding = new SerialBinding<MyData>(catalog, MyData.class);

    txn.commit();

    /*
     * Further below we'll use a tuple binding (IntegerBinding
     * specifically) for integer keys.  Tuples, unlike serialized Java
     * objects, have a well defined sort order.
     */

    /* DatabaseEntry represents the key and data of each record */
    DatabaseEntry keyEntry = new DatabaseEntry();
    DatabaseEntry dataEntry = new DatabaseEntry();

    if (doInsert) {

      /* put some data in */
      for (int i = offset; i < numRecords + offset; i++) {

        StringBuilder stars = new StringBuilder();
        for (int j = 0; j < i; j++) {
          stars.append('*');
        }
        MyData data = new MyData(i, stars.toString());

        IntegerBinding.intToEntry(i, keyEntry);
        dataBinding.objectToEntry(data, dataEntry);

        txn = exampleEnv.beginTransaction(null, null);
        OperationStatus status = exampleDb.put(txn, keyEntry, dataEntry);

        /*
         * Note that put will throw a DatabaseException when
         * error conditions are found such as deadlock.
         * However, the status return conveys a variety of
         * information. For example, the put might succeed,
         * or it might not succeed if the record exists
         * and duplicates were not allowed.
         */
        if (status != OperationStatus.SUCCESS) {
          throw new RuntimeException("Data insertion got status " + status);
        }
        txn.commit();
      }
    } else {

      /* retrieve the data */
      Cursor cursor = exampleDb.openCursor(null, null);

      while (cursor.getNext(keyEntry, dataEntry, LockMode.DEFAULT) == OperationStatus.SUCCESS) {

        int key = IntegerBinding.entryToInt(keyEntry);
        MyData data = dataBinding.entryToObject(dataEntry);

        System.out.println("key=" + key + " data=" + data);
      }
      cursor.close();
    }

    catalogDb.close();
    exampleDb.close();
    exampleEnv.close();
  }
Example #26
  /** {@inheritDoc} */
  @Override()
  public LDIFImportResult importLDIF(LDIFImportConfig importConfig) throws DirectoryException {
    RuntimeInformation.logInfo();

    // If the backend already has the root container open, we must use the same
    // underlying root container
    boolean openRootContainer = rootContainer == null;

    // If the rootContainer is open, the backend was initialized by something
    // else. We can't do an import while the backend is online.
    if (!openRootContainer) {
      Message message = ERR_JEB_IMPORT_BACKEND_ONLINE.get();
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message);
    }

    try {
      EnvironmentConfig envConfig = new EnvironmentConfig();

      envConfig.setAllowCreate(true);
      envConfig.setTransactional(false);
      envConfig.setDurability(Durability.COMMIT_NO_SYNC);
      envConfig.setLockTimeout(0, TimeUnit.SECONDS);
      envConfig.setTxnTimeout(0, TimeUnit.SECONDS);
      envConfig.setConfigParam(
          EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION,
          String.valueOf(cfg.getDBCleanerMinUtilization()));
      envConfig.setConfigParam(
          EnvironmentConfig.LOG_FILE_MAX, String.valueOf(cfg.getDBLogFileMax()));

      if (!importConfig.appendToExistingData()) {
        if (importConfig.clearBackend() || cfg.getBaseDN().size() <= 1) {
          // We have the writer lock on the environment, now delete the
          // environment and re-open it. Only do this when we are
          // importing to all the base DNs in the backend or if the backend only
          // has one base DN.
          File parentDirectory = getFileForPath(cfg.getDBDirectory());
          File backendDirectory = new File(parentDirectory, cfg.getBackendId());
          // If the backend does not exist the import will create it.
          if (backendDirectory.exists()) {
            EnvManager.removeFiles(backendDirectory.getPath());
          }
        }
      }

      Importer importer = new Importer(importConfig, cfg, envConfig);
      rootContainer = initializeRootContainer(envConfig);
      return importer.processImport(rootContainer);
    } catch (ExecutionException execEx) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, execEx);
      }
      if (execEx.getCause() instanceof DirectoryException) {
        throw ((DirectoryException) execEx.getCause());
      } else {
        Message message = ERR_EXECUTION_ERROR.get(execEx.getMessage());
        throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message);
      }
    } catch (InterruptedException intEx) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, intEx);
      }
      Message message = ERR_INTERRUPTED_ERROR.get(intEx.getMessage());
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message);
    } catch (JebException je) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, je);
      }
      throw new DirectoryException(
          DirectoryServer.getServerErrorResultCode(), je.getMessageObject());
    } catch (InitializationException ie) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, ie);
      }
      throw new DirectoryException(
          DirectoryServer.getServerErrorResultCode(), ie.getMessageObject());
    } catch (ConfigException ce) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, ce);
      }
      throw new DirectoryException(
          DirectoryServer.getServerErrorResultCode(), ce.getMessageObject());
    } finally {
      // Close the root container so the backend is left in the same state as before the import.
      try {
        if (rootContainer != null) {
          long startTime = System.currentTimeMillis();
          rootContainer.close();
          long finishTime = System.currentTimeMillis();
          long closeTime = (finishTime - startTime) / 1000;
          Message msg = NOTE_JEB_IMPORT_LDIF_ROOTCONTAINER_CLOSE.get(closeTime);
          logError(msg);
          rootContainer = null;
        }

        // Sync the environment to disk.
        if (debugEnabled()) {
          Message message = NOTE_JEB_IMPORT_CLOSING_DATABASE.get();
          TRACER.debugInfo(message.toString());
        }
      } catch (DatabaseException de) {
        if (debugEnabled()) {
          TRACER.debugCaught(DebugLogLevel.ERROR, de);
        }
      }
    }
  }
  private void experienceLogFlushTask(String sleepTime, boolean flushBeforeCrash) throws Throwable {

    try {
      createRepEnvInfo(sleepTime);

      ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
      long startTime = System.currentTimeMillis();

      StatsConfig stConfig = new StatsConfig();
      stConfig.setClear(true);

      /* Flush the existing dirty data before we do writes. */
      for (int i = 0; i < repEnvInfo.length; i++) {
        repEnvInfo[i].getEnv().sync();
        repEnvInfo[i].getEnv().getStats(stConfig);
      }

      DatabaseConfig dbConfig = new DatabaseConfig();
      dbConfig.setAllowCreate(true);
      dbConfig.setTransactional(true);

      Database db = master.openDatabase(null, dbName, dbConfig);

      DatabaseEntry key = new DatabaseEntry();
      DatabaseEntry data = new DatabaseEntry();
      for (int i = 1; i <= 100; i++) {
        IntegerBinding.intToEntry(i, key);
        StringBinding.stringToEntry(value, data);
        db.put(null, key, data);
      }

      assertTrue(System.currentTimeMillis() - startTime < 15000);

      Thread.sleep(15000);

      long endTime = System.currentTimeMillis();

      for (int i = 0; i < repEnvInfo.length; i++) {
        EnvironmentStats envStats = repEnvInfo[i].getEnv().getStats(stConfig);
        LogFlusher flusher = repEnvInfo[i].getRepNode().getLogFlusher();
        if (flushBeforeCrash) {
          /* Make sure the LogFlushTask has been invoked. */
          assertTrue(flusher.getFlushTask().scheduledExecutionTime() > startTime);
          assertTrue(flusher.getFlushTask().scheduledExecutionTime() < endTime);

          /*
           * Since the log file size is not very big, we can't be sure that
           * all the data will be written to the same log file, but we can
           * be sure that a flush does happen.
           */
          assertTrue(envStats.getNSequentialWrites() >= 1);
          assertTrue(envStats.getNLogFSyncs() == 1);
        } else {

          /*
           * Make sure the LogFlushTask is not invoked after making
           * the changes.
           */
          assertTrue(flusher.getFlushTask().scheduledExecutionTime() < startTime);
          assertTrue(envStats.getNSequentialWrites() == 0);
          assertTrue(envStats.getNLogFSyncs() == 0);
        }
        assertTrue(envStats.getNFSyncs() == 0);
      }

      File[] envHomes = new File[3];
      /* Close the replicas without doing a checkpoint. */
      for (int i = 0; i < repEnvInfo.length; i++) {
        envHomes[i] = repEnvInfo[i].getEnvHome();
        repEnvInfo[i].getRepImpl().abnormalClose();
      }

      /*
       * Open a read only standalone Environment on the replicas to see
       * whether the data has been synced to the disk.
       */
      EnvironmentConfig newConfig = new EnvironmentConfig();
      newConfig.setAllowCreate(false);
      newConfig.setReadOnly(true);
      newConfig.setTransactional(true);

      for (int i = 0; i < repEnvInfo.length; i++) {
        Environment env = new Environment(envHomes[i], newConfig);

        dbConfig.setAllowCreate(false);
        dbConfig.setReadOnly(true);

        try {
          db = env.openDatabase(null, dbName, dbConfig);
        } catch (DatabaseNotFoundException e) {

          /*
           * If the system crashes before the flush, the database was
           * not synced to disk, so the database cannot be found at
           * all; this is expected.
           */
          assertFalse(flushBeforeCrash);
        }

        if (flushBeforeCrash) {
          assertTrue(db.count() == 100);
          for (int index = 1; index <= 100; index++) {
            IntegerBinding.intToEntry(index, key);
            OperationStatus status = db.get(null, key, data, null);
            if (flushBeforeCrash) {
              assertTrue(status == OperationStatus.SUCCESS);
              assertEquals(value, StringBinding.entryToString(data));
            }
          }
        }

        if (flushBeforeCrash) {
          db.close();
        }
        env.close();
      }
    } catch (Throwable t) {
      t.printStackTrace();
      throw t;
    }
  }
  /**
   * Synchronize publications with pubmed using pmid
   *
   * @throws Exception if an error occurs
   */
  public void execute() throws Exception {
    // Needed so that StAX can find its implementation classes
    ClassLoader cl = Thread.currentThread().getContextClassLoader();

    Thread.currentThread().setContextClassLoader(getClass().getClassLoader());

    Database db = null;
    Transaction txn = null;
    try {
      if (osAlias == null) {
        throw new BuildException("osAlias attribute is not set");
      }
      if (outputFile == null) {
        throw new BuildException("outputFile attribute is not set");
      }

      // environment is transactional
      EnvironmentConfig envConfig = new EnvironmentConfig();
      envConfig.setTransactional(true);
      envConfig.setAllowCreate(true);

      Environment env = new Environment(new File(cacheDirName), envConfig);

      DatabaseConfig dbConfig = new DatabaseConfig();
      dbConfig.setTransactional(true);
      dbConfig.setAllowCreate(true);
      dbConfig.setSortedDuplicates(true);

      db = env.openDatabase(null, "publications_db", dbConfig);

      txn = env.beginTransaction(null, null);

      LOG.info("Starting EntrezPublicationsRetriever");

      Writer writer = new FileWriter(outputFile);
      ObjectStore os = ObjectStoreFactory.getObjectStore(osAlias);

      Set<Integer> idsToFetch = new HashSet<Integer>();
      itemFactory = new ItemFactory(os.getModel(), "-1_");
      writer.write(FullRenderer.getHeader() + ENDL);
      for (Iterator<Publication> iter = getPublications(os).iterator(); iter.hasNext(); ) {
        String pubMedId = iter.next().getPubMedId();
        Integer pubMedIdInteger;
        try {
          pubMedIdInteger = Integer.valueOf(pubMedId);
        } catch (NumberFormatException e) {
          // not a pubmed id
          continue;
        }

        if (seenPubMeds.contains(pubMedIdInteger)) {
          continue;
        }
        DatabaseEntry key = new DatabaseEntry(pubMedId.getBytes());
        DatabaseEntry data = new DatabaseEntry();
        if (db.get(txn, key, data, null).equals(OperationStatus.SUCCESS)) {
          try {
            ByteArrayInputStream mapInputStream = new ByteArrayInputStream(data.getData());
            ObjectInputStream deserializer = new ObjectInputStream(mapInputStream);
            Map<String, Object> pubMap = (Map) deserializer.readObject();
            writeItems(writer, mapToItems(itemFactory, pubMap));
            seenPubMeds.add(pubMedIdInteger);
          } catch (EOFException e) {
            // ignore and fetch it again
            System.err.println(
                "found in cache, but igored due to cache problem: " + pubMedIdInteger);
          }
        } else {
          idsToFetch.add(pubMedIdInteger);
        }
      }

      Iterator<Integer> idIter = idsToFetch.iterator();
      Set<Integer> thisBatch = new HashSet<Integer>();
      while (idIter.hasNext()) {
        Integer pubMedIdInteger = idIter.next();
        thisBatch.add(pubMedIdInteger);
        if (thisBatch.size() == BATCH_SIZE || !idIter.hasNext() && thisBatch.size() > 0) {
          try {
            // the server may return less publications than we ask for, so keep a Map
            Map<String, Map<String, Object>> fromServerMap = null;

            for (int i = 0; i < MAX_TRIES; i++) {
              BufferedReader br = new BufferedReader(getReader(thisBatch));
              StringBuffer buf = new StringBuffer();
              String line;
              while ((line = br.readLine()) != null) {
                buf.append(line + "\n");
              }
              fromServerMap = new HashMap<String, Map<String, Object>>();
              Throwable throwable = null;
              try {
                if (loadFullRecord) {
                  SAXParser.parse(
                      new InputSource(new StringReader(buf.toString())),
                      new FullRecordHandler(fromServerMap),
                      false);
                } else {
                  SAXParser.parse(
                      new InputSource(new StringReader(buf.toString())),
                      new SummaryRecordHandler(fromServerMap),
                      false);
                }
              } catch (Throwable e) {
                LOG.error("Couldn't parse PubMed XML", e);
                // try again or re-throw the Throwable
                throwable = e;
              }
              if (throwable != null) {
                if (i == MAX_TRIES - 1) {
                  throw new RuntimeException(
                      "failed to parse: " + buf.toString() + " - tried " + MAX_TRIES + " times",
                      throwable);
                }
                // try again
                continue;
              }

              for (String id : fromServerMap.keySet()) {
                writeItems(writer, mapToItems(itemFactory, fromServerMap.get(id)));
              }
              addToDb(txn, db, fromServerMap);
              break;
            }
            thisBatch.clear();
          } finally {
            txn.commit();
            // start a new transaction in case there is an exception while parsing
            txn = env.beginTransaction(null, null);
          }
        }
      }
      writeItems(writer, authorMap.values());
      writeItems(writer, meshTerms.values());
      writer.write(FullRenderer.getFooter() + ENDL);
      writer.flush();
      writer.close();
    } catch (Throwable e) {
      throw new RuntimeException("failed to get all publications", e);
    } finally {
      if (txn != null) {
        txn.commit();
      }
      if (db != null) {
        db.close();
      }
      Thread.currentThread().setContextClassLoader(cl);
    }
  }
Example #29
  /**
   * Rebuild index(es) in the backend instance. Note that the server will not explicitly initialize
   * this backend before calling this method.
   *
   * @param rebuildConfig The rebuild configuration.
   * @throws ConfigException If an unrecoverable problem arises during initialization.
   * @throws InitializationException If a problem occurs during initialization that is not related
   *     to the server configuration.
   * @throws DirectoryException If a Directory Server error occurs.
   */
  public void rebuildBackend(RebuildConfig rebuildConfig)
      throws InitializationException, ConfigException, DirectoryException {
    // If the backend already has the root container open, we must use the same
    // underlying root container
    boolean openRootContainer = rootContainer == null;

    /*
     * If the rootContainer is open, the backend is initialized by something
     * else. We can't do any rebuild of system indexes while others are using
     * this backend.
     */
    if (!openRootContainer && rebuildConfig.includesSystemIndex()) {
      Message message = ERR_JEB_REBUILD_BACKEND_ONLINE.get();
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message);
    }

    try {
      EnvironmentConfig envConfig;
      if (openRootContainer) {
        envConfig = new EnvironmentConfig();
        envConfig.setAllowCreate(true);
        envConfig.setTransactional(false);
        envConfig.setDurability(Durability.COMMIT_NO_SYNC);
        envConfig.setLockTimeout(0, TimeUnit.SECONDS);
        envConfig.setTxnTimeout(0, TimeUnit.SECONDS);
        envConfig.setConfigParam(
            EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION,
            String.valueOf(cfg.getDBCleanerMinUtilization()));
        envConfig.setConfigParam(
            EnvironmentConfig.LOG_FILE_MAX, String.valueOf(cfg.getDBLogFileMax()));

        Importer importer = new Importer(rebuildConfig, cfg, envConfig);
        rootContainer = initializeRootContainer(envConfig);
        importer.rebuildIndexes(rootContainer);
      } else {
        envConfig = ConfigurableEnvironment.parseConfigEntry(cfg);

        Importer importer = new Importer(rebuildConfig, cfg, envConfig);
        importer.rebuildIndexes(rootContainer);
      }
    } catch (ExecutionException execEx) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, execEx);
      }
      Message message = ERR_EXECUTION_ERROR.get(execEx.getMessage());
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message);
    } catch (InterruptedException intEx) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, intEx);
      }
      Message message = ERR_INTERRUPTED_ERROR.get(intEx.getMessage());
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message);
    } catch (ConfigException ce) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, ce);
      }
      throw new DirectoryException(
          DirectoryServer.getServerErrorResultCode(), ce.getMessageObject());
    } catch (JebException e) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, e);
      }
      throw new DirectoryException(
          DirectoryServer.getServerErrorResultCode(), e.getMessageObject());
    } catch (InitializationException e) {
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, e);
      }
      throw new InitializationException(e.getMessageObject());
    } finally {
      // If a root container was opened in this method as read only, close it
      // to leave the backend in the same state.
      if (openRootContainer && rootContainer != null) {
        try {
          rootContainer.close();
          rootContainer = null;
        } catch (DatabaseException e) {
          if (debugEnabled()) {
            TRACER.debugCaught(DebugLogLevel.ERROR, e);
          }
        }
      }
    }
  }
Example #30
 private EnvironmentConfig envConfig() {
   EnvironmentConfig environmentConfig = new EnvironmentConfig();
   environmentConfig.setAllowCreate(true);
   environmentConfig.setTransactional(false);
   return environmentConfig;
 }