/** Close all stores (closing a store automatically closes its indices). */
  public void close() throws DatabaseException {

    // Close secondary databases, then primary databases.
    supplierByCityDb.close();
    shipmentByPartDb.close();
    shipmentBySupplierDb.close();
    partDb.close();
    supplierDb.close();
    shipmentDb.close();
    // And don't forget to close the catalog and the environment.
    javaCatalog.close();
    env.close();
  }
  public void runTest(DatabaseType type) throws DatabaseException, FileNotFoundException {
    int i;
    DatabaseConfig conf = new DatabaseConfig();
    conf.setErrorStream(TestUtils.getErrorStream());
    conf.setErrorPrefix("HashCompareTest");
    conf.setType(type);
    if (type == DatabaseType.HASH) {
      conf.setHashComparator(new HashComparator());
    } else {
      conf.setBtreeComparator(new BtreeComparator());
    }
    conf.setAllowCreate(true);

    Database db = new Database(HASHCOMPARETEST_DBNAME, null, conf);

    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry("world".getBytes());
    for (i = 0; i < 100; i++) {
      key.setData(("key" + i).getBytes());
      db.put(null, key, data);
    }
    i = 0;
    Cursor dbc = db.openCursor(null, CursorConfig.DEFAULT);
    while (dbc.getNext(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
      ++i;
    }
    //		System.out.println("retrieved " + i + " entries");
    dbc.close();
    db.close();
  }
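The HashComparator and BtreeComparator classes used above are defined elsewhere in the test. A minimal sketch of what they might look like, assuming both setHashComparator and setBtreeComparator accept a java.util.Comparator over key byte arrays and that a plain lexicographic byte ordering is sufficient:

import java.io.Serializable;
import java.util.Comparator;

/* Hypothetical comparator: orders keys by unsigned byte value, shorter keys first on ties. */
class BtreeComparator implements Comparator<byte[]>, Serializable {
  public int compare(byte[] a, byte[] b) {
    int min = Math.min(a.length, b.length);
    for (int i = 0; i < min; i++) {
      int diff = (a[i] & 0xff) - (b[i] & 0xff);
      if (diff != 0) {
        return diff;
      }
    }
    return a.length - b.length;
  }
}

/* The hash comparator can reuse the same ordering. */
class HashComparator extends BtreeComparator {}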
Example #3
    public void run() {
      double gtps, itps;
      int n, ret;
      long start_time, end_time;

      //
      // Open the database files.
      //
      int err;
      try {
        DatabaseConfig config = new DatabaseConfig();
        config.setTransactional(true);
        adb = dbenv.openDatabase(null, "account", null, config);
        bdb = dbenv.openDatabase(null, "branch", null, config);
        tdb = dbenv.openDatabase(null, "teller", null, config);
        hdb = dbenv.openDatabase(null, "history", null, config);
      } catch (DatabaseException dbe) {
        TpcbExample.errExit(dbe, "Open of db files failed");
      } catch (FileNotFoundException fnfe) {
        TpcbExample.errExit(fnfe, "Open of db files failed, missing file");
      }

      start_time = (new Date()).getTime();
      for (txns = n = ntxns, failed = 0; n-- > 0; ) {
        if ((ret = txn()) != 0) failed++;
      }
      end_time = (new Date()).getTime();
      if (end_time == start_time) end_time++;

      System.out.println(
          getName()
              + ": "
              + (long) txns
              + " txns: "
              + failed
              + " failed, "
              + TpcbExample.showRounded((txns - failed) / (double) (end_time - start_time), 2)
              + " TPS");

      try {
        adb.close();
        bdb.close();
        tdb.close();
        hdb.close();
      } catch (DatabaseException dbe2) {
        TpcbExample.errExit(dbe2, "Close of db files failed");
      }
    }
  @After
  public void tearDown() {

    try {
      if (index1 != null) {
        index1.close();
      }
      if (index2 != null) {
        index2.close();
      }
      if (store1 != null) {
        store1.close();
      }
      if (store2 != null) {
        store2.close();
      }
      if (catalog != null) {
        catalog.close();
      }
      if (env != null) {
        env.close();
      }
    } catch (Exception e) {
      System.out.println("Ignored exception during tearDown: " + e);
    } finally {
      /* Ensure that GC can cleanup. */
      env = null;
      testEnv = null;
      catalog = null;
      store1 = null;
      store2 = null;
      index1 = null;
      index2 = null;
      factory = null;
      storeMap1 = null;
      storeMap2 = null;
      indexMap1 = null;
      indexMap2 = null;
    }
  }
  /** Closes the database. */
  private void close() throws Exception {

    if (catalog != null) {
      catalog.close();
      catalog = null;
    }
    if (db != null) {
      db.close();
      db = null;
    }
    if (env != null) {
      env.close();
      env = null;
    }
  }
  public void testReadOnlyEmptyCatalog() throws Exception {

    String file = "catalog.db";

    /* Create an empty database. */
    DatabaseConfig config = new DatabaseConfig();
    config.setAllowCreate(true);
    DbCompat.setTypeBtree(config);
    Database db = DbCompat.testOpenDatabase(env, null, file, null, config);
    db.close();

    /* Open the empty database read-only. */
    config.setAllowCreate(false);
    config.setReadOnly(true);
    db = DbCompat.testOpenDatabase(env, null, file, null, config);

    /* Expect exception when creating the catalog. */
    try {
      new StoredClassCatalog(db);
      fail();
    } catch (IllegalStateException e) {
      /* expected: the catalog cannot be created on a read-only, empty database */
    }
    db.close();
  }
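For contrast with the read-only failure above, a minimal sketch of the normal, writable catalog setup, assuming the same DbCompat test helpers and the com.sleepycat.bind.serial classes (the method name is hypothetical):

  /* Hypothetical sketch: the normal, writable catalog setup for comparison. */
  private StoredClassCatalog openWritableCatalog() throws Exception {
    DatabaseConfig catConfig = new DatabaseConfig();
    catConfig.setAllowCreate(true);
    DbCompat.setTypeBtree(catConfig);
    Database catalogDb = DbCompat.testOpenDatabase(env, null, "catalog.db", null, catConfig);
    StoredClassCatalog classCatalog = new StoredClassCatalog(catalogDb);
    // A binding such as new SerialBinding<String>(classCatalog, String.class) can now be built.
    return classCatalog; // classCatalog.close() also closes the underlying database
  }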
Example #7
  /**
   * Runs a generic query against a single index database.
   *
   * @param query the parsed query term to look up
   * @param db_name the name of the database file to query
   * @param indices collects the record indices that match the query
   * @throws DatabaseException if Berkeley DB reports an error
   * @throws FileNotFoundException if the corresponding .idx or .txt file is not found
   */
  private void queryDB(String query, String db_name, ArrayList<Integer> indices)
      throws DatabaseException, FileNotFoundException {
    OperationStatus oprStatus;
    Database std_db = new Database(db_name, null, null);
    Cursor std_cursor = std_db.openCursor(null, null); // Create new cursor object
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();

    key.setData(query.getBytes());
    key.setSize(query.length());

    // Returns OperationStatus
    oprStatus = std_cursor.getSearchKey(key, data, LockMode.DEFAULT);
    while (oprStatus == OperationStatus.SUCCESS) {
      int recordIndex = Integer.parseInt(new String(data.getData()));
      if (!indices.contains(recordIndex)) {
        indices.add(recordIndex);
      }
      oprStatus = std_cursor.getNextDup(key, data, LockMode.DEFAULT);
    }

    std_cursor.close();
    std_db.close();
  }
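The getNextDup loop above only returns more than one posting per term if the underlying index database was created with sorted duplicates enabled. A minimal sketch of such a creation step, assuming a BTREE index keyed by term (the helper name is hypothetical):

  /* Hypothetical helper: open (or create) an index database that permits duplicate keys. */
  private static Database openIndex(String fileName)
      throws DatabaseException, FileNotFoundException {
    DatabaseConfig cfg = new DatabaseConfig();
    cfg.setType(DatabaseType.BTREE);
    cfg.setAllowCreate(true);
    cfg.setSortedDuplicates(true); // one term may map to many record indices
    return new Database(fileName, null, cfg);
  }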
Example #8
  /**
   * Prints the results obtained from Berkeley DB.
   *
   * @throws DatabaseException if Berkeley DB reports an error
   * @throws FileNotFoundException if the .idx or .txt files are not found
   * @throws ParseException if a review date cannot be parsed
   */
  private void printResults() throws DatabaseException, FileNotFoundException, ParseException {

    // System.out.println("Num of indices before pprice rdate constraints: " + indices.size());
    if (indices.isEmpty()) {
      System.out.println("+=-=-=-=-=-=-=-=-=-=-=-=-=+");
      System.out.println("No results matching given query.");
      System.out.println("+=-=-=-=-=-=-=-=-=-=-=-=-=+");
    }

    Integer counter = 0;
    for (Integer index : indices) {

      OperationStatus oprStatus;
      Database std_db = new Database("rw.idx", null, null);
      Cursor std_cursor = std_db.openCursor(null, null); // Create new cursor object
      DatabaseEntry key = new DatabaseEntry();
      DatabaseEntry data = new DatabaseEntry();
      Product product = new Product();
      Review review = new Review();

      String searchkey = index.toString().toLowerCase();
      key.setData(searchkey.getBytes());
      key.setSize(searchkey.length());

      // Returns OperationStatus
      oprStatus = std_cursor.getSearchKey(key, data, LockMode.DEFAULT);
      Bill:
      {
        if (oprStatus == OperationStatus.SUCCESS) {
          String s = new String(data.getData());

          load_data(product, review, s);

          // Filter the low-priority queue for pprice / rdate constraints.
          GenericStack<String> tmplow = new GenericStack<String>(lowpriorities);
          while (!tmplow.isEmpty()) {
            String subquery = tmplow.pop();

            if (subquery.matches("pprice.*")) {
              Double value =
                  Double.parseDouble(
                      subquery
                          .replace("pprice", "")
                          .replace(">", "")
                          .replace("=", "")
                          .replace("<", ""));

              if (product.getPrice().equals("unknown")) break Bill;
              if (subquery.matches("pprice<.*")
                  && !(Double.parseDouble(product.getPrice()) > value)) continue;
              else if (subquery.matches("pprice=.*")
                  && !(Double.parseDouble(product.getPrice()) == value)) continue;
              else if (subquery.matches("pprice>.*")
                  && !(Double.parseDouble(product.getPrice()) < value)) continue;
              else break Bill;

            } else if (subquery.matches("rdate.*")) {
              String comparator = subquery.substring(5, 6);
              DateFormat df = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
              Date valuedate = df.parse(subquery.substring(6) + " 00:00:00");
              // Subtract 7 hours (25200 seconds) to adjust for the timezone offset.
              long valueEpochSeconds = (valuedate.getTime() / 1000) - 25200;
              switch (comparator) {
                case "<":
                  if (!(Long.parseLong(review.getTime()) < valueEpochSeconds)) {
                    break Bill;
                  } else {
                    break;
                  }
                case ">":
                  if (!(Long.parseLong(review.getTime()) > valueEpochSeconds)) {
                    break Bill;
                  } else {
                    break;
                  }
                case "=":
                  if (!(Long.parseLong(review.getTime()) == valueEpochSeconds)) {
                    break Bill;
                  } else {
                    break;
                  }
                default:
                  break Bill;
              }
            }
          }

          // System.out.print(" "+ index +" ");
          product.print();
          review.print();
          counter++;
        }
        std_cursor.close();
        std_db.close();
      }
    }
    System.out.println("+=-=-=-=-=-=-=-=-=-=-=-=-=+");
    System.out.println("Total records found: " + counter);
    System.out.println("+=-=-=-=-=-=-=-=-=-=-=-=-=+");
  }
Example #9
  public int doloop() throws DatabaseException {
    Database db = null;

    for (; ; ) {
      if (db == null) {
        DatabaseConfig dbconf = new DatabaseConfig();
        dbconf.setType(DatabaseType.BTREE);
        if (dbenv.getIsMaster()) {
          /*
           * Open database allowing create only if this is a master
           * database.  A client database uses polling to attempt
           * to open the database without allowing create until
           * it is successful.
           *
           * This polling logic for allowing create can be
           * simplified under some circumstances.  For example, if
           * the application can be sure a database is already
           * there, it would never need to open it allowing create.
           */
          dbconf.setAllowCreate(true);
        }
        dbconf.setTransactional(true);

        try {
          db = dbenv.openDatabase(null, RepConfig.progname, null, dbconf);
        } catch (java.io.FileNotFoundException e) {
          System.err.println("no stock database available yet.");
          if (db != null) {
            db.close(true);
            db = null;
          }
          try {
            Thread.sleep(RepConfig.SLEEPTIME);
          } catch (InterruptedException ie) {
          }
          continue;
        }
      }

      BufferedReader stdin = new BufferedReader(new InputStreamReader(System.in));

      /* Listen for input, and add it to the database. */
      System.out.print("QUOTESERVER");
      if (!dbenv.getIsMaster()) System.out.print("(read-only)");
      System.out.print("> ");
      System.out.flush();
      String nextline = null;
      try {
        nextline = stdin.readLine();
      } catch (IOException ioe) {
        System.err.println("Unable to get data from stdin");
        break;
      }
      if (nextline == null) break; // EOF on stdin
      String[] words = nextline.split("\\s");

      /* A blank line causes the DB to be dumped to stdout. */
      if (words.length == 0 || (words.length == 1 && words[0].length() == 0)) {
        try {
          if (dbenv.getInClientSync())
            System.err.println("Cannot read data during client initialization - please try again.");
          else printStocks(db);
        } catch (DeadlockException de) {
          continue;
        } catch (DatabaseException e) {
          /*
           * This could be DB_REP_HANDLE_DEAD, which
           * should close the database and continue.
           */
          System.err.println("Got db exception reading replication" + "DB: " + e);
          System.err.println(
              "Expected if it was due to a dead "
                  + "replication handle, otherwise an unexpected error.");
          db.close(true); /* Close no sync. */
          db = null;
          continue;
        }
        continue;
      }

      if (words.length == 1
          && (words[0].compareToIgnoreCase("quit") == 0
              || words[0].compareToIgnoreCase("exit") == 0)) {
        dbenv.setAppFinished(true);
        break;
      } else if (words.length != 2) {
        System.err.println("Format: TICKER VALUE");
        continue;
      }

      if (!dbenv.getIsMaster()) {
        System.err.println("Can't update client.");
        continue;
      }

      DatabaseEntry key = new DatabaseEntry(words[0].getBytes());
      DatabaseEntry data = new DatabaseEntry(words[1].getBytes());

      db.put(null, key, data);
    }
    if (db != null) db.close(true);
    return 0;
  }
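printStocks is referenced above but not part of this excerpt; a minimal sketch of what such a dump might look like, assuming it simply walks the database with a cursor and prints each ticker/value pair:

  /* Hypothetical sketch of printStocks: dump every ticker/value pair to stdout. */
  private void printStocks(Database db) throws DatabaseException {
    Cursor cursor = db.openCursor(null, null);
    try {
      DatabaseEntry key = new DatabaseEntry();
      DatabaseEntry data = new DatabaseEntry();
      System.out.println("\tSymbol\tPrice");
      System.out.println("\t======\t=====");
      while (cursor.getNext(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
        System.out.println("\t" + new String(key.getData()) + "\t" + new String(data.getData()));
      }
    } finally {
      cursor.close();
    }
  }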
Example #10
  //
  // Initialize the database to the number of accounts, branches,
  // history records, and tellers given to the constructor.
  //
  public void populate() {
    Database dbp = null;

    int err;
    int balance, idnum;
    int end_anum, end_bnum, end_tnum;
    int start_anum, start_bnum, start_tnum;
    int h_nelem;

    idnum = BEGID;
    balance = 500000;

    h_nelem = accounts;

    try {
      DatabaseConfig config = new DatabaseConfig();
      config.setType(DatabaseType.HASH);
      config.setHashNumElements(h_nelem);
      config.setAllowCreate(true);
      dbp = dbenv.openDatabase(null, "account", null, config);
    } catch (Exception e1) {
      // can be DatabaseException or FileNotFoundException
      errExit(e1, "Open of account file failed");
    }

    start_anum = idnum;
    populateTable(dbp, idnum, balance, h_nelem, "account");
    idnum += h_nelem;
    end_anum = idnum - 1;
    try {
      dbp.close();
    } catch (DatabaseException e2) {
      errExit(e2, "Account file close failed");
    }

    if (verbose)
      System.out.println(
          "Populated accounts: " + String.valueOf(start_anum) + " - " + String.valueOf(end_anum));

    //
    // Since the number of branches is very small, we want to use very
    // small pages and only 1 key per page.  This is the poor-man's way
    // of getting key locking instead of page locking.
    //
    h_nelem = (int) branches;

    try {
      DatabaseConfig config = new DatabaseConfig();
      config.setType(DatabaseType.HASH);
      config.setHashNumElements(h_nelem);
      config.setHashFillFactor(1);
      config.setPageSize(512);
      config.setAllowCreate(true);
      dbp = dbenv.openDatabase(null, "branch", null, config);
    } catch (Exception e3) {
      // can be DatabaseException or FileNotFoundException
      errExit(e3, "Branch file create failed");
    }

    start_bnum = idnum;
    populateTable(dbp, idnum, balance, h_nelem, "branch");
    idnum += h_nelem;
    end_bnum = idnum - 1;

    try {
      dbp.close();
    } catch (DatabaseException dbe4) {
      errExit(dbe4, "Close of branch file failed");
    }

    if (verbose)
      System.out.println(
          "Populated branches: " + String.valueOf(start_bnum) + " - " + String.valueOf(end_bnum));

    //
    // In the case of tellers, we also want small pages, but we'll let
    // the fill factor dynamically adjust itself.
    //
    h_nelem = (int) tellers;

    try {
      DatabaseConfig config = new DatabaseConfig();
      config.setType(DatabaseType.HASH);
      config.setHashNumElements(h_nelem);
      config.setHashFillFactor(0);
      config.setPageSize(512);
      config.setAllowCreate(true);
      dbp = dbenv.openDatabase(null, "teller", null, config);
    } catch (Exception e5) {
      // can be DatabaseException or FileNotFoundException
      errExit(e5, "Teller file create failed");
    }

    start_tnum = idnum;
    populateTable(dbp, idnum, balance, h_nelem, "teller");
    idnum += h_nelem;
    end_tnum = idnum - 1;

    try {
      dbp.close();
    } catch (DatabaseException e6) {
      errExit(e6, "Close of teller file failed");
    }

    if (verbose)
      System.out.println(
          "Populated tellers: " + String.valueOf(start_tnum) + " - " + String.valueOf(end_tnum));

    try {
      DatabaseConfig config = new DatabaseConfig();
      config.setType(DatabaseType.RECNO);
      config.setRecordLength(HISTORY_LEN);
      config.setAllowCreate(true);
      dbp = dbenv.openDatabase(null, "history", null, config);
    } catch (Exception e7) {
      // can be DatabaseException or FileNotFoundException
      errExit(e7, "Create of history file failed");
    }

    populateHistory(dbp);

    try {
      dbp.close();
    } catch (DatabaseException e8) {
      errExit(e8, "Close of history file failed");
    }
  }
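populateTable and populateHistory are defined elsewhere in the example. A minimal sketch of what table population might look like, assuming each record simply stores the id as key and the starting balance as data (the real example packs fixed-length binary records, so the method name here is hypothetical):

  /* Hypothetical sketch of populateTable: insert nrecs records starting at startId. */
  private void populateTableSketch(Database db, int startId, int balance, int nrecs, String msg) {
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    try {
      for (int i = 0; i < nrecs; i++) {
        key.setData(String.valueOf(startId + i).getBytes());
        data.setData(String.valueOf(balance).getBytes());
        db.put(null, key, data);
      }
    } catch (DatabaseException e) {
      errExit(e, "Failure initializing " + msg + " file");
    }
  }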
Example #11
  @Test
  public void testDraining() throws Exception {
    EnvironmentConfig masterConfig = makeBasicConfig();
    masterConfig.setReplicationLimit(100000000);
    ReplicationManagerSiteConfig site = new ReplicationManagerSiteConfig("localhost", masterPort);
    site.setLocalSite(true);
    site.setLegacy(true);
    masterConfig.addReplicationManagerSite(site);

    site = new ReplicationManagerSiteConfig("localhost", clientPort);
    site.setLegacy(true);
    masterConfig.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", client2Port);
    site.setLegacy(true);
    masterConfig.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", client3Port);
    site.setLegacy(true);
    masterConfig.addReplicationManagerSite(site);

    Environment master = new Environment(mkdir("master"), masterConfig);
    setTimeouts(master);
    // Prevent connection retries, so that all connections
    // originate from clients
    master.setReplicationTimeout(ReplicationTimeoutType.CONNECTION_RETRY, Integer.MAX_VALUE);
    master.replicationManagerStart(2, ReplicationManagerStartPolicy.REP_MASTER);

    DatabaseConfig dc = new DatabaseConfig();
    dc.setTransactional(true);
    dc.setAllowCreate(true);
    dc.setType(DatabaseType.BTREE);
    dc.setPageSize(4096);
    Database db = master.openDatabase(null, "test.db", null, dc);

    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry value = new DatabaseEntry();
    value.setData(data);

    for (int i = 0; ((BtreeStats) db.getStats(null, null)).getPageCount() < 500; i++) {
      String k = "The record number is: " + i;
      key.setData(k.getBytes());
      db.put(null, key, value);
    }

    // tell fiddler to stop reading once it sees a PAGE message
    Socket s = new Socket("localhost", mgrPort);
    OutputStreamWriter w = new OutputStreamWriter(s.getOutputStream());

    String path1 = "{" + masterPort + "," + clientPort + "}"; // looks like {6000,6001}
    w.write("{init," + path1 + ",page_clog}\r\n");
    w.flush();
    BufferedReader br = new BufferedReader(new InputStreamReader(s.getInputStream()));
    br.readLine();
    assertEquals("ok", br.readLine());
    // create client
    //
    EnvironmentConfig ec = makeBasicConfig();
    site = new ReplicationManagerSiteConfig("localhost", clientPort);
    site.setLocalSite(true);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", masterPort);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", client2Port);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", client3Port);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    Environment client = new Environment(mkdir("client"), ec);
    setTimeouts(client);
    client.replicationManagerStart(1, ReplicationManagerStartPolicy.REP_CLIENT);

    // wait til it gets stuck
    Thread.sleep(5000); // FIXME

    // Do the same for another client, because the master has 2
    // msg processing threads.  (It's no longer possible to
    // configure just 1.)
    String path2 = "{" + masterPort + "," + client2Port + "}";
    w.write("{init," + path2 + ",page_clog}\r\n");
    w.flush();
    br = new BufferedReader(new InputStreamReader(s.getInputStream()));
    br.readLine();
    assertEquals("ok", br.readLine());

    ec = makeBasicConfig();
    site = new ReplicationManagerSiteConfig("localhost", client2Port);
    site.setLocalSite(true);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", masterPort);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", clientPort);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", client3Port);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    Environment client2 = new Environment(mkdir("client2"), ec);
    setTimeouts(client2);
    client2.replicationManagerStart(1, ReplicationManagerStartPolicy.REP_CLIENT);

    // wait til it gets stuck
    Thread.sleep(5000);

    // With the connection stuck, the master cannot write out log
    // records for new "live" transactions.  Knowing we didn't
    // write the record, we should not bother waiting for an ack
    // that cannot possibly arrive; so we should simply return
    // quickly.  The duration should be very quick, but anything
    // less than the ack timeout indicates correct behavior (in
    // case this test runs on a slow, overloaded system).
    //
    long startTime = System.currentTimeMillis();
    key.setData("one extra record".getBytes());
    db.put(null, key, value);
    long duration = System.currentTimeMillis() - startTime;
    assertTrue("txn duration: " + duration, duration < 29000);
    System.out.println("txn duration: " + duration);
    db.close();

    // Tell fiddler to close the connections.  That should trigger
    // us to abandon the timeout.  Then create another client and
    // see that it can complete its internal init quickly.  Since
    // we have limited threads at the master, this demonstrates
    // that they were abandoned.
    //
    path1 = "{" + clientPort + "," + masterPort + "}"; // looks like {6001,6000}
    w.write("{" + path1 + ",shutdown}\r\n");
    w.flush();
    assertEquals("ok", br.readLine());
    path2 = "{" + client2Port + "," + masterPort + "}"; // looks like {6001,6000}
    w.write("{" + path2 + ",shutdown}\r\n");
    w.flush();
    assertEquals("ok", br.readLine());

    ec = makeBasicConfig();
    site = new ReplicationManagerSiteConfig("localhost", client3Port);
    site.setLocalSite(true);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", masterPort);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", clientPort);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);
    site = new ReplicationManagerSiteConfig("localhost", client2Port);
    site.setLegacy(true);
    ec.addReplicationManagerSite(site);

    EventHandler clientMonitor = new EventHandler();
    ec.setEventHandler(clientMonitor);
    Environment client3 = new Environment(mkdir("client3"), ec);
    setTimeouts(client3);
    startTime = System.currentTimeMillis();
    client3.replicationManagerStart(2, ReplicationManagerStartPolicy.REP_CLIENT);
    clientMonitor.await();
    duration = System.currentTimeMillis() - startTime;
    assertTrue("sync duration: " + duration, duration < 20000); // 20 seconds should be plenty

    client3.close();
    master.close();

    w.write("shutdown\r\n");
    w.flush();
    assertEquals("ok", br.readLine());
    s.close();
  }
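makeBasicConfig and setTimeouts are helpers defined elsewhere in this test class. A minimal sketch of what makeBasicConfig might set up, assuming a transactional, replication-enabled environment (the exact flags in the real helper may differ):

  /* Hypothetical sketch of makeBasicConfig: flags a replicated test environment needs. */
  private static EnvironmentConfig makeBasicConfigSketch() {
    EnvironmentConfig ec = new EnvironmentConfig();
    ec.setAllowCreate(true);
    ec.setInitializeCache(true);
    ec.setInitializeLocking(true);
    ec.setInitializeLogging(true);
    ec.setInitializeReplication(true);
    ec.setTransactional(true);
    ec.setThreaded(true);
    return ec;
  }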