Code Example #1
  /** Save the profile information into the database */
  protected final void saveProfile(BenchmarkComponent baseClient) {

    // CONFIG_PROFILE
    Table catalog_tbl =
        catalogContext.database.getTables().get(SEATSConstants.TABLENAME_CONFIG_PROFILE);
    VoltTable vt = CatalogUtil.getVoltTable(catalog_tbl);
    assert (vt != null);
    vt.addRow(
        this.scale_factor, // CFP_SCALE_FACTOR
        this.airport_max_customer_id.toJSONString(), // CFP_AIRPORT_MAX_CUSTOMER
        this.flight_start_date, // CFP_FLIGHT_START
        this.flight_upcoming_date, // CFP_FLIGHT_UPCOMING
        this.flight_past_days, // CFP_FLIGHT_PAST_DAYS
        this.flight_future_days, // CFP_FLIGHT_FUTURE_DAYS
        this.num_flights, // CFP_NUM_FLIGHTS
        this.num_customers, // CFP_NUM_CUSTOMERS
        this.num_reservations, // CFP_NUM_RESERVATIONS
        JSONUtil.toJSONString(this.code_id_xref) // CFP_CODE_ID_XREF
        );
    if (debug.val)
      LOG.debug(String.format("Saving profile information into %s\n%s", catalog_tbl, this));
    baseClient.loadVoltTable(catalog_tbl.getName(), vt);

    // CONFIG_HISTOGRAMS
    catalog_tbl =
        catalogContext.database.getTables().get(SEATSConstants.TABLENAME_CONFIG_HISTOGRAMS);
    vt = CatalogUtil.getVoltTable(catalog_tbl);
    assert (vt != null);

    for (Entry<String, Histogram<String>> e : this.airport_histograms.entrySet()) {
      vt.addRow(
          e.getKey(), // CFH_NAME
          e.getValue().toJSONString(), // CFH_DATA
          1 // CFH_IS_AIRPORT
          );
    } // FOR
    if (debug.val) LOG.debug("Saving airport histogram information into " + catalog_tbl);
    baseClient.loadVoltTable(catalog_tbl.getName(), vt);

    for (Entry<String, Histogram<String>> e : this.histograms.entrySet()) {
      vt.addRow(
          e.getKey(), // CFH_NAME
          e.getValue().toJSONString(), // CFH_DATA
          0 // CFH_IS_AIRPORT
          );
    } // FOR
    if (debug.val) LOG.debug("Saving benchmark histogram information into " + catalog_tbl);
    baseClient.loadVoltTable(catalog_tbl.getName(), vt);

    return;
  }
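The inverse path (reading this row back into a client-side profile) is not shown in this listing; a minimal sketch of what it could look like, assuming the columns come back in the same positional order they were added and that the scale factor is a double (both are assumptions, not confirmed by this snippet):

  // Hypothetical helper: restore a few profile fields from the row that
  // saveProfile() wrote above. Column indexes mirror the addRow() call.
  private void loadProfileRow(VoltTable vt) {
    boolean adv = vt.advanceRow();
    assert (adv);
    double scaleFactor = vt.getDouble(0); // CFP_SCALE_FACTOR
    String maxCustomerJson = vt.getString(1); // CFP_AIRPORT_MAX_CUSTOMER (JSON)
    long numFlights = vt.getLong(6); // CFP_NUM_FLIGHTS
    // ... decode the JSON fields and copy the remaining columns positionally
  }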
Code Example #2
  /**
   * Add two transactions and check that only one comes out. Mark the first as done, and then the
   * second comes out.
   *
   * @throws InterruptedException
   */
  @Test
  public void testTwoTransactions() throws InterruptedException {
    final long txn_id0 = 1000;
    final long txn_id1 = 2000;
    Collection<Integer> partitions0 = CatalogUtil.getAllPartitionIds(catalog_db);
    Collection<Integer> partitions1 = CatalogUtil.getAllPartitionIds(catalog_db);

    final MockCallback inner_callback0 = new MockCallback();
    TransactionInitWrapperCallback outer_callback0 =
        new TransactionInitWrapperCallback(hstore_site);
    outer_callback0.init(txn_id0, partitions0, inner_callback0);

    final MockCallback inner_callback1 = new MockCallback();
    TransactionInitWrapperCallback outer_callback1 =
        new TransactionInitWrapperCallback(hstore_site);
    outer_callback1.init(txn_id1, partitions1, inner_callback1);

    // insert the higher ID first but make sure it comes out second
    this.queue.insert(txn_id1, partitions1, outer_callback1);
    this.queue.insert(txn_id0, partitions0, outer_callback0);

    // create another thread to get the locks in order
    Thread t =
        new Thread() {
          public void run() {
            try {
              inner_callback0.lock.acquire();
              for (int partition = 0; partition < NUM_PARTITONS; ++partition) {
                queue.finished(txn_id0, Status.OK, partition);
              }
            } catch (InterruptedException e) {
              // an interrupt here just skips the release loop
            }
            try {
              inner_callback1.lock.acquire();
              for (int partition = 0; partition < NUM_PARTITONS; ++partition) {
                queue.finished(txn_id1, Status.OK, partition);
              }
            } catch (InterruptedException e) {
              // an interrupt here just skips the release loop
            }
          }
        };
    t.start();

    while (queue.isEmpty() == false) {
      queue.checkQueues();
      ThreadUtil.sleep(10);
    }

    // wait for all the locks to be acquired
    t.join();
  }
Code Example #3
File: LocalCluster.java, Project: repos-db/h-store
  @Override
  public boolean compile(VoltProjectBuilder builder) {
    if (m_compiled) {
      return true;
    }
    m_compiled =
        builder.compile(m_jarFileName, m_partitionPerSite, m_siteCount, m_replication, "localhost");

    // (1) Load catalog from Jar
    Catalog tmpCatalog = CatalogUtil.loadCatalogFromJar(m_jarFileName);

    // (2) Update catalog to include target cluster configuration
    ClusterConfiguration cc = new ClusterConfiguration();
    // Update cc with a bunch of hosts/sites/partitions
    for (int site = 0, currentPartition = 0; site < m_siteCount; ++site) {
      for (int partition = 0; partition < m_partitionPerSite; ++partition, ++currentPartition) {
        cc.addPartition("localhost", site, currentPartition);
      }
    }
    this.catalog = FixCatalog.addHostInfo(tmpCatalog, cc);

    // (3) Write updated catalog back out to jar file
    try {
      CatalogUtil.updateCatalogInJar(m_jarFileName, catalog);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }

    tmpCatalog = CatalogUtil.loadCatalogFromJar(m_jarFileName);
    // System.err.println(CatalogInfo.getInfo(this.catalog, new File(m_jarFileName)));

    // Construct the base command that we will want to use to start
    // all of the "remote" HStoreSites
    List<String> siteCommand = new ArrayList<String>();
    CollectionUtil.addAll(siteCommand, "ant", "hstore-site", "-Djar=" + m_jarFileName);
    // Be sure to include our HStoreConf parameters
    for (Entry<String, String> e : this.confParams.entrySet()) {
      siteCommand.add(String.format("-D%s=%s", e.getKey(), e.getValue()));
    }
    // Lastly, we will include the site.id as the last parameter
    // so that we can easily change it
    siteCommand.add("-Dsite.id=-1");

    m_procBuilder = new ProcessBuilder(siteCommand.toArray(new String[0]));
    m_procBuilder.redirectErrorStream(true);
    // set the working directory to obj/release/prod
    // m_procBuilder.directory(new File(m_buildDir + File.separator + "prod"));

    return m_compiled;
  }
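Note how the command built in compile() ends with the placeholder -Dsite.id=-1: startUp() later overwrites just that final argument for each forked site. ProcessBuilder.command() returns the live argument list rather than a copy, which is what makes the trick work. A self-contained sketch of the same pattern (plain Java, hypothetical command values):

import java.util.ArrayList;
import java.util.List;

public class SiteCommandDemo {
  public static void main(String[] args) {
    List<String> cmd = new ArrayList<String>();
    cmd.add("ant");
    cmd.add("hstore-site");
    cmd.add("-Djar=demo.jar");
    cmd.add("-Dsite.id=-1"); // placeholder, always kept as the last argument
    ProcessBuilder pb = new ProcessBuilder(cmd);
    pb.redirectErrorStream(true);
    // Overwrite only the final argument before launching each site
    int offset = pb.command().size() - 1;
    pb.command().set(offset, "-Dsite.id=" + 3);
    System.out.println(pb.command()); // [ant, hstore-site, -Djar=demo.jar, -Dsite.id=3]
  }
}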
Code Example #4
File: BaseTestCase.java, Project: kanghong/h-store
 /**
  * Main setUp method for test cases. Given the ProjectType, we populate the static catalog
  * field members. The full_catalog flag is a hack to work around OutOfMemory issues with TPC-E.
  *
  * @param type
  * @param fkeys
  * @param full_catalog
  * @throws Exception
  */
 protected void setUp(ProjectType type, boolean fkeys, boolean full_catalog) throws Exception {
   super.setUp();
   is_first = (is_first == null ? true : false);
   this.last_type = type;
   catalog = project_catalogs.get(type);
   catalog_db = project_databases.get(type);
   p_estimator = project_p_estimators.get(type);
   if (catalog == null) {
     AbstractProjectBuilder projectBuilder = AbstractProjectBuilder.getProjectBuilder(type);
     if (ENABLE_JAR_REUSE) {
       File jar_path = projectBuilder.getJarPath(true);
       if (jar_path.exists()) {
         LOG.debug("LOAD CACHE JAR: " + jar_path.getAbsolutePath());
         catalog = CatalogUtil.loadCatalogFromJar(jar_path.getAbsolutePath());
       } else {
         LOG.debug("MISSING JAR: " + jar_path.getAbsolutePath());
       }
     }
     if (catalog == null) {
       switch (type) {
         case TPCC:
           catalog = TPCCProjectBuilder.getTPCCSchemaCatalog(true);
           // Update the ProcParameter mapping used in the catalogs
           //
           // ParametersUtil.populateCatalog(CatalogUtil.getDatabase(catalog),
           // ParametersUtil.getParameterMapping(type));
           break;
         case TPCE:
           catalog = projectBuilder.createCatalog(fkeys, full_catalog);
           break;
         case TM1:
         case SEATS:
         case AUCTIONMARK:
         case MARKOV:
         case LOCALITY:
         case MAPREDUCE:
           catalog = projectBuilder.getFullCatalog(fkeys);
           break;
         default:
           assert (false) : "Invalid project type - " + type;
       } // SWITCH
     }
     if (type == ProjectType.TPCC)
       ParametersUtil.populateCatalog(
           CatalogUtil.getDatabase(catalog), ParametersUtil.getParameterMapping(type));
     this.init(type, catalog);
   }
 }
Code Example #5
File: BaseTestCase.java, Project: kanghong/h-store
 /**
  * Add fake partitions to the loaded catalog, assuming that there is one partition per site.
  *
  * @param num_partitions
  */
 protected void addPartitions(int num_partitions) throws Exception {
   // HACK! If we already have this many partitions in the catalog, then we won't recreate it
   // This fixes problems where we need to reference the same catalog objects in multiple test
   // cases
   if (CatalogUtil.getNumberOfPartitions(catalog_db) != num_partitions) {
     ClusterConfiguration cc = new ClusterConfiguration();
      for (int i = 0; i < num_partitions; i++) {
       cc.addPartition("localhost", 0, i);
       // System.err.println("[" + i + "] " + Arrays.toString(triplets.lastElement()));
     } // FOR
     catalog = FixCatalog.addHostInfo(catalog, cc);
     this.init(this.last_type, catalog);
   }
   Cluster cluster = CatalogUtil.getCluster(catalog_db);
   assertEquals(num_partitions, cluster.getNum_partitions());
   assertEquals(num_partitions, CatalogUtil.getNumberOfPartitions(cluster));
 }
Code Example #6
File: BaseTestCase.java, Project: kanghong/h-store
  /**
   * Store the catalog for this ProjectType and generate the supporting classes
   *
   * @param type
   * @param catalog
   */
  private void init(ProjectType type, Catalog catalog) {
    assertNotNull(catalog);
    project_catalogs.put(type, catalog);

    catalog_db = CatalogUtil.getDatabase(catalog);
    assertNotNull(catalog_db);
    project_databases.put(type, catalog_db);

    p_estimator = new PartitionEstimator(catalog_db);
    assertNotNull(p_estimator);
    project_p_estimators.put(type, p_estimator);
  }
Code Example #7
  /** Populate Subscriber table per benchmark spec. */
  void genSubscriber(Table catalog_tbl) {
    final VoltTable table = CatalogUtil.getVoltTable(catalog_tbl);
    Object row[] = new Object[table.getColumnCount()];
    long total = 0;
    for (long s_id = 0; s_id < this.subscriberSize; s_id++) {
      int col = 0;
      row[col++] = s_id;
      row[col++] = TM1Util.padWithZero(s_id);

      // BIT_##
      for (int j = 0; j < 10; j++) {
        row[col++] = TM1Util.number(0, 1);
      } // FOR
      // HEX_##
      for (int j = 0; j < 10; j++) {
        row[col++] = TM1Util.number(0, 15);
      }
      // BYTE2_##
      for (int j = 0; j < 10; j++) {
        row[col++] = TM1Util.number(0, 255);
      }
      // MSC_LOCATION + VLR_LOCATION
      for (int j = 0; j < 2; j++) {
        row[col++] = TM1Util.number(0, Integer.MAX_VALUE);
      }
      assert col == table.getColumnCount();
      table.addRow(row);
      total++;

      if (table.getRowCount() >= TM1Constants.BATCH_SIZE) {
        if (d)
          LOG.debug(
              String.format(
                  "%s: %6d / %d", TM1Constants.TABLENAME_SUBSCRIBER, total, this.subscriberSize));
        loadVoltTable(TM1Constants.TABLENAME_SUBSCRIBER, table);
        table.clearRowData();
        assert (table.getRowCount() == 0);
      }
    } // FOR
    if (table.getRowCount() > 0) {
      if (d)
        LOG.debug(
            String.format(
                "%s: %6d / %d", TM1Constants.TABLENAME_SUBSCRIBER, total, this.subscriberSize));
      loadVoltTable(TM1Constants.TABLENAME_SUBSCRIBER, table);
      table.clearRowData();
      assert (table.getRowCount() == 0);
    }
  }
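The flush at the BATCH_SIZE boundary and the tail flush after the loop are nearly identical, and the same shape recurs in the Access_Info and Special_Facility/CallForwarding loaders further down. A hedged refactoring sketch that consolidates it (loadVoltTable and the d debug flag are taken from this snippet; the helper itself is hypothetical):

  // Hypothetical helper consolidating the duplicated flush logic.
  private void flushTable(String tableName, VoltTable table, long total, long expected) {
    if (d) LOG.debug(String.format("%s: %6d / %d", tableName, total, expected));
    loadVoltTable(tableName, table);
    table.clearRowData();
    assert (table.getRowCount() == 0);
  }

genSubscriber() would then call this once inside the loop when the batch fills and once after the loop for the remainder.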
Code Example #8
  @Override
  protected void setUp() throws Exception {
    super.setUp(ProjectType.TPCC);
    addPartitions(NUM_PARTITONS);

    Site catalog_site = CollectionUtil.first(CatalogUtil.getCluster(catalog).getSites());
    assertNotNull(catalog_site);
    hstore_site = HStore.initialize(catalog_site, HStoreConf.singleton());
    for (int p = 0; p < NUM_PARTITONS; p++) {
      PartitionExecutor site = new MockPartitionExecutor(p, catalog, p_estimator);
      hstore_site.addPartitionExecutor(p, site);
    } // FOR

    this.queue = new TransactionQueueManager(hstore_site);
  }
Code Example #9
File: VoltCompiler.java, Project: repos-db/h-store
  /**
   * Return the name to use for the next vertical partition of the given table
   *
   * @param catalog_tbl
   * @param catalog_cols
   * @return
   */
  private static String getNextVerticalPartitionName(
      Table catalog_tbl, Collection<Column> catalog_cols) {
    Database catalog_db = ((Database) catalog_tbl.getParent());

    Collection<String> colNames = new HashSet<String>();
    for (Column catalog_col : catalog_cols) {
      colNames.add(catalog_col.getName());
    }

    // Figure out how many vertical partition tables already exist for this table
    int next = 0;
    String prefix = "SYS_VP_" + catalog_tbl.getName() + "_";
    Pattern p = Pattern.compile(Pattern.quote(prefix) + "[\\d]+");
    for (Table otherTable : CatalogUtil.getSysTables(catalog_db)) {
      if (debug.get())
        LOG.debug(String.format("Checking whether '%s' matches prefix '%s'", otherTable, prefix));
      Matcher m = p.matcher(otherTable.getName());
      if (m.matches() == false) continue;

      // Check to make sure it's not the same vertical partition
      Collection<Column> otherColumns = otherTable.getColumns();
      if (debug.get())
        LOG.debug(
            String.format(
                "%s.%s <-> %s.%s",
                catalog_tbl.getName(), catalog_cols, otherTable.getName(), otherColumns));
      if (otherColumns.size() != colNames.size()) continue;
      boolean fail = false;
      for (Column otherCol : otherColumns) {
        if (colNames.contains(otherCol.getName()) == false) {
          fail = true;
          break;
        }
      }
      if (fail) continue;

      next++;
    } // FOR
    String viewName = String.format("%s%02d", prefix, next);
    assert (catalog_tbl.getViews().contains(viewName) == false);

    if (debug.get())
      LOG.debug(String.format("Next VerticalPartition name '%s' for %s", viewName, catalog_tbl));
    return (viewName);
  }
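Concretely, for a table CUSTOMER this yields SYS_VP_CUSTOMER_00 for the first vertical partition, SYS_VP_CUSTOMER_01 for the next, and so on (subject to the column-set check above, which skips exact duplicates). The name-counting core in isolation, as a runnable sketch with hypothetical table names:

import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class VpNameDemo {
  public static void main(String[] args) {
    String prefix = "SYS_VP_CUSTOMER_";
    Pattern p = Pattern.compile(Pattern.quote(prefix) + "[\\d]+");
    List<String> existing =
        Arrays.asList("SYS_VP_CUSTOMER_00", "SYS_VP_CUSTOMER_01", "SYS_VP_OTHER_00");
    int next = 0;
    for (String name : existing) {
      Matcher m = p.matcher(name);
      if (m.matches()) next++; // only names under this table's prefix count
    }
    System.out.println(String.format("%s%02d", prefix, next)); // SYS_VP_CUSTOMER_02
  }
}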
Code Example #10
  public SEATSProfile(CatalogContext catalogContext, AbstractRandomGenerator rng) {
    this.catalogContext = catalogContext;
    this.rng = rng;

    // Tuple Code to Tuple Id Mapping
    for (String xref[] : SEATSConstants.CODE_TO_ID_COLUMNS) {
      assert (xref.length == 3);
      String tableName = xref[0];
      String codeCol = xref[1];
      String idCol = xref[2];

      if (this.code_columns.containsKey(codeCol) == false) {
        this.code_columns.put(codeCol, idCol);
        this.code_id_xref.put(idCol, new HashMap<String, Long>());
        if (debug.val)
          LOG.debug(
              String.format(
                  "Added %s mapping from Code Column '%s' to Id Column '%s'",
                  tableName, codeCol, idCol));
      }
    } // FOR

    // Foreign Key Code to Ids Mapping
    // In this data structure, the key will be the name of the dependent column
    // and the value will be the name of the foreign key parent column
    // We then use this in conjunction with the Key->Id mapping to turn a code into
    // a foreign key column id. For example, if the child table AIRPORT has a column with a foreign
    // key reference to COUNTRY.CO_ID, then the data file for AIRPORT will have a value
    // 'USA' in the AP_CO_ID column. We can use mapping to get the id number for 'USA'.
    // Long winded and kind of screwy, but hey what else are you going to do?
    for (Table catalog_tbl : catalogContext.database.getTables()) {
      for (Column catalog_col : catalog_tbl.getColumns()) {
        Column catalog_fkey_col = CatalogUtil.getForeignKeyParent(catalog_col);
        if (catalog_fkey_col != null && this.code_id_xref.containsKey(catalog_fkey_col.getName())) {
          this.fkey_value_xref.put(catalog_col.getName(), catalog_fkey_col.getName());
          if (debug.val)
            LOG.debug(
                String.format(
                    "Added ForeignKey mapping from %s to %s",
                    catalog_col.fullName(), catalog_fkey_col.fullName()));
        }
      } // FOR
    } // FOR
  }
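Putting the two maps together, translating a code found in a dependent column back into the parent's numeric id would look roughly like the following (the field names come from the constructor above; the helper itself is hypothetical):

  // Hypothetical lookup: translate a code value in a dependent column
  // (e.g. 'USA' in AP_CO_ID) into the id of the foreign key parent.
  private Long translateCode(String columnName, String code) {
    String idColumn = this.fkey_value_xref.get(columnName); // e.g. AP_CO_ID -> CO_ID
    if (idColumn == null) return (null);
    Map<String, Long> codeToId = this.code_id_xref.get(idColumn);
    return (codeToId != null ? codeToId.get(code) : null);
  }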
Code Example #11
File: BaseTestCase.java, Project: kanghong/h-store
 protected void initializeCluster(int num_hosts, int num_sites, int num_partitions)
     throws Exception {
   // HACK! If we already have this many partitions in the catalog, then we won't recreate it
   // This fixes problems where we need to reference the same catalog objects in multiple test
   // cases
   if (CatalogUtil.getNumberOfHosts(catalog_db) != num_hosts
       || CatalogUtil.getNumberOfSites(catalog_db) != (num_hosts * num_sites)
       || CatalogUtil.getNumberOfPartitions(catalog_db)
           != (num_hosts * num_sites * num_partitions)) {
     catalog = FixCatalog.addHostInfo(catalog, "localhost", num_hosts, num_sites, num_partitions);
     this.init(this.last_type, catalog);
   }
   Cluster cluster = CatalogUtil.getCluster(catalog_db);
   assertEquals(num_hosts, CatalogUtil.getNumberOfHosts(catalog_db));
   assertEquals((num_hosts * num_sites), CatalogUtil.getNumberOfSites(catalog_db));
   assertEquals(
       (num_hosts * num_sites * num_partitions), CatalogUtil.getNumberOfPartitions(cluster));
   assertEquals((num_hosts * num_sites * num_partitions), cluster.getNum_partitions());
 }
Code Example #12
  private void loadData(Table catalog_tbl) throws Exception {
    // Load in a bunch of dummy data for this table
    VoltTable vt = CatalogUtil.getVoltTable(catalog_tbl);
    assertNotNull(vt);
    for (int i = 0; i < NUM_TUPLES; i++) {
      Object row[] = VoltTableUtil.getRandomRow(catalog_tbl);

      if (catalog_tbl.getName().equalsIgnoreCase(TPCCConstants.TABLENAME_ORDER_LINE)) {
        row[0] = i; // OL_O_ID
        row[1] = (byte) i; // OL_D_ID
        row[2] = (short) i; // OL_W_ID
      }
      vt.addRow(row);
    } // FOR
    this.executor.loadTable(1000l, catalog_tbl, vt, false);

    int statsLocators[] = {catalog_tbl.getRelativeIndex()};
    VoltTable stats[] = this.ee.getStats(SysProcSelector.TABLE, statsLocators, false, 0L);
    assertEquals(1, stats.length);
    // System.err.println(VoltTableUtil.format(stats));
  }
Code Example #13
  /** Populate Access_Info table per benchmark spec. */
  void genAccessInfo(Table catalog_tbl) {
    final VoltTable table = CatalogUtil.getVoltTable(catalog_tbl);
    int[] arr = {1, 2, 3, 4};

    int[] ai_types = TM1Util.subArr(arr, 1, 4);
    long total = 0;
    for (long s_id = 0; s_id < this.subscriberSize; s_id++) {
      for (int ai_type : ai_types) {
        Object row[] = new Object[table.getColumnCount()];
        row[0] = s_id;
        row[1] = ai_type;
        row[2] = TM1Util.number(0, 255);
        row[3] = TM1Util.number(0, 255);
        row[4] = TM1Util.astring(3, 3);
        row[5] = TM1Util.astring(5, 5);
        table.addRow(row);
        total++;
      } // FOR
      if (table.getRowCount() >= TM1Constants.BATCH_SIZE) {
        if (d)
          LOG.debug(
              String.format(
                  "%s: %6d / %d",
                  TM1Constants.TABLENAME_ACCESS_INFO, total, ai_types.length * subscriberSize));
        loadVoltTable(TM1Constants.TABLENAME_ACCESS_INFO, table);
        table.clearRowData();
      }
    } // FOR
    if (table.getRowCount() > 0) {
      if (d)
        LOG.debug(
            String.format(
                "%s: %6d / %d",
                TM1Constants.TABLENAME_ACCESS_INFO, total, ai_types.length * subscriberSize));
      loadVoltTable(TM1Constants.TABLENAME_ACCESS_INFO, table);
      table.clearRowData();
    }
  }
Code Example #14
File: CatalogContext.java, Project: sdhost/h-store
 private void initPlanFragments() {
   Set<PlanFragment> allFrags = new HashSet<PlanFragment>();
   for (Procedure proc : database.getProcedures()) {
     for (Statement stmt : proc.getStatements()) {
       allFrags.clear();
       allFrags.addAll(stmt.getFragments());
       allFrags.addAll(stmt.getMs_fragments());
       for (PlanFragment frag : allFrags) {
         Collection<Table> tables = CatalogUtil.getReferencedTables(frag);
         int tableIds[] = new int[tables.size()];
         int i = 0;
         for (Table tbl : tables) {
           tableIds[i++] = tbl.getRelativeIndex();
         } // FOR
         if (frag.getReadonly()) {
           this.fragmentReadTables.put(Long.valueOf(frag.getId()), tableIds);
         } else {
           this.fragmentWriteTables.put(Long.valueOf(frag.getId()), tableIds);
         }
       } // FOR (frag)
     } // FOR (stmt)
   } // FOR (proc)
 }
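Once populated, these two maps let callers answer "which tables does this fragment touch" without walking the catalog again. A hedged accessor sketch over the read-side map (the method itself is hypothetical):

  // Hypothetical accessor over the fragmentReadTables map built above.
  public int[] getReadTableIds(long fragmentId) {
    int[] ids = this.fragmentReadTables.get(Long.valueOf(fragmentId));
    return (ids != null ? ids : new int[0]);
  }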
Code Example #15
  /**
   * Insert the txn into our queue and then call check This should immediately release our
   * transaction and invoke the inner_callback
   *
   * @throws InterruptedException
   */
  @Test
  public void testSingleTransaction() throws InterruptedException {
    long txn_id = 1000;
    Collection<Integer> partitions = CatalogUtil.getAllPartitionIds(catalog_db);

    MockCallback inner_callback = new MockCallback();
    TransactionInitWrapperCallback outer_callback = new TransactionInitWrapperCallback(hstore_site);
    outer_callback.init(txn_id, partitions, inner_callback);

    // Insert the txn into our queue and then call check
    // This should immediately release our transaction and invoke the inner_callback
    boolean ret = this.queue.insert(txn_id, partitions, outer_callback);
    assert (ret);

    int tries = 10;
    while (queue.isEmpty() == false && tries-- > 0) {
      queue.checkQueues();
      ThreadUtil.sleep(100);
    }
    assert (inner_callback.lock.availablePermits() > 0);
    // Block on the MockCallback's lock until our thread above is able to release everybody.
    // inner_callback.lock.acquire();
  }
Code Example #16
    @Override
    public void process(Pair<TransactionTrace, Integer> p) {
      assert (p != null);
      final TransactionTrace txn_trace = p.getFirst();
      final int i = p.getSecond(); // Interval
      final int txn_weight = (use_txn_weights ? txn_trace.getWeight() : 1);
      final String proc_key =
          CatalogKey.createKey(CatalogUtil.DEFAULT_DATABASE_NAME, txn_trace.getCatalogItemName());

      // Terrible Hack: Assume that we are using the SingleSitedCostModel and that it will
      // return fixed values based on whether the txn is single-partitioned or not
      SingleSitedCostModel singlesited_cost_model = (SingleSitedCostModel) cost_models[i];

      total_interval_txns[i] += txn_weight;
      total_interval_queries[i] += (txn_trace.getQueryCount() * txn_weight);
      histogram_procs.put(proc_key, txn_weight);

      try {
        singlesited_cost_model.estimateTransactionCost(catalogContext, workload, filter, txn_trace);
        TransactionCacheEntry txn_entry =
            singlesited_cost_model.getTransactionCacheEntry(txn_trace);
        assert (txn_entry != null) : "No txn entry for " + txn_trace;
        Collection<Integer> partitions = txn_entry.getTouchedPartitions();

        // If the txn runs on only one partition, then the cost is nothing
        if (txn_entry.isSinglePartitioned()) {
          singlepartition_ctrs[i] += txn_weight;
          if (!partitions.isEmpty()) {
            assert (txn_entry.getAllTouchedPartitionsHistogram().getValueCount() == 1)
                : txn_entry
                    + " says it was single-partitioned but the partition count says otherwise:\n"
                    + txn_entry.debug();
            singlepartition_with_partitions_ctrs[i] += txn_weight;
          }
          histogram_sp_procs.put(proc_key, txn_weight);

        } else {
          // If the txn runs on multiple partitions, then the cost is...
          // XXX 2010-06-28: The number of partitions that the txn touches divided by the
          // total number of partitions
          // XXX 2010-07-02: The histogram for the total number of partitions touched by
          // all of the queries in the transaction. This ensures that a txn with just one
          // multi-partition query isn't weighted the same as a txn with many
          // multi-partition queries
          assert (!partitions.isEmpty()) : "No touched partitions for " + txn_trace;
          if (partitions.size() == 1
              && txn_entry.getExecutionPartition() != HStoreConstants.NULL_PARTITION_ID) {
            assert (CollectionUtil.first(partitions) != txn_entry.getExecutionPartition())
                : txn_entry.debug();
            exec_mismatch_ctrs[i] += txn_weight;
            partitions_touched[i] += txn_weight;
          } else {
            assert (partitions.size() > 1)
                : String.format(
                    "%s is not marked as single-partition but it only touches one partition\n%s",
                    txn_trace, txn_entry.debug());
          }
          partitions_touched[i] += (partitions.size() * txn_weight); // Txns
          multipartition_ctrs[i] += txn_weight;
          histogram_mp_procs.put(proc_key, txn_weight);
        }
        Integer base_partition = txn_entry.getExecutionPartition();
        if (base_partition != null) {
          exec_histogram[i].put(base_partition, txn_weight);
        } else {
          exec_histogram[i].put(all_partitions, txn_weight);
        }
        if (debug.val) { // &&
          // txn_trace.getCatalogItemName().equalsIgnoreCase("DeleteCallForwarding"))
          // {
          Procedure catalog_proc = txn_trace.getCatalogItem(catalogContext.database);
          Map<String, Object> inner = new LinkedHashMap<String, Object>();
          for (Statement catalog_stmt : catalog_proc.getStatements()) {
            inner.put(catalog_stmt.fullName(), CatalogUtil.getReferencedTables(catalog_stmt));
          }

          Map<String, Object> m = new LinkedHashMap<String, Object>();
          m.put(txn_trace.toString(), null);
          m.put("Interval", i);
          m.put("Single-Partition", txn_entry.isSinglePartitioned());
          m.put("Base Partition", base_partition);
          m.put("Touched Partitions", partitions);
          m.put(catalog_proc.fullName(), inner);
          LOG.debug(StringUtil.formatMaps(m));
        }

        // We need to keep a count of the number of txns that didn't have all of their
        // queries estimated completely, so that we can update the access histograms down
        // below for the entropy calculations. Note that this is at the txn level, not the
        // query level.
        if (!txn_entry.isComplete()) {
          incomplete_txn_ctrs[i] += txn_weight;
          tmp_missingPartitions.clear();
          tmp_missingPartitions.addAll(all_partitions);
          tmp_missingPartitions.removeAll(txn_entry.getTouchedPartitions());
          // Update the histogram for this interval to keep track of how many times we
          // need to increase the partition access histogram
          incomplete_txn_histogram[i].put(tmp_missingPartitions, txn_weight);
          if (trace.val) {
            Map<String, Object> m = new LinkedHashMap<String, Object>();
            m.put(String.format("Marking %s as incomplete in interval #%d", txn_trace, i), null);
            m.put("Examined Queries", txn_entry.getExaminedQueryCount());
            m.put("Total Queries", txn_entry.getTotalQueryCount());
            m.put("Touched Partitions", txn_entry.getTouchedPartitions());
            m.put("Missing Partitions", tmp_missingPartitions);
            LOG.trace(StringUtil.formatMaps(m));
          }
        }
      } catch (Exception ex) {
        CatalogUtil.saveCatalog(catalogContext.catalog, CatalogUtil.CATALOG_FILENAME);
        throw new RuntimeException(
            "Failed to estimate cost for " + txn_trace.getCatalogItemName() + " at interval " + i,
            ex);
      }
    }
Code Example #17
  /** Populate Special_Facility table and CallForwarding table per benchmark spec. */
  void genSpeAndCal(Table catalog_spe, Table catalog_cal) {
    VoltTable speTbl = CatalogUtil.getVoltTable(catalog_spe);
    VoltTable calTbl = CatalogUtil.getVoltTable(catalog_cal);

    long speTotal = 0;
    long calTotal = 0;
    int[] arrSpe = {1, 2, 3, 4};
    int[] arrCal = {0, 8, 6};

    for (long s_id = 0; s_id < this.subscriberSize; s_id++) {
      int[] sf_types = TM1Util.subArr(arrSpe, 1, 4);
      for (int sf_type : sf_types) {
        Object row_spe[] = new Object[speTbl.getColumnCount()];
        row_spe[0] = s_id;
        row_spe[1] = sf_type;
        row_spe[2] = TM1Util.isActive();
        row_spe[3] = TM1Util.number(0, 255);
        row_spe[4] = TM1Util.number(0, 255);
        row_spe[5] = TM1Util.astring(5, 5);
        speTbl.addRow(row_spe);
        speTotal++;

        // now call_forwarding
        int[] start_times = TM1Util.subArr(arrCal, 0, 3);
        for (int start_time : start_times) {
          Object row_cal[] = new Object[calTbl.getColumnCount()];
          row_cal[0] = s_id;
          row_cal[1] = sf_type;
          row_cal[2] = start_time;
          row_cal[3] = start_time + TM1Util.number(1, 8);
          row_cal[4] = TM1Util.nstring(15, 15);
          calTbl.addRow(row_cal);
          calTotal++;
        } // FOR
      } // FOR

      if (calTbl.getRowCount() >= TM1Constants.BATCH_SIZE) {
        if (d) LOG.debug(String.format("%s: %d", TM1Constants.TABLENAME_CALL_FORWARDING, calTotal));
        loadVoltTable(TM1Constants.TABLENAME_CALL_FORWARDING, calTbl);
        calTbl.clearRowData();
        assert (calTbl.getRowCount() == 0);
      }
      if (speTbl.getRowCount() >= TM1Constants.BATCH_SIZE) {
        if (d)
          LOG.debug(String.format("%s: %d", TM1Constants.TABLENAME_SPECIAL_FACILITY, speTotal));
        loadVoltTable(TM1Constants.TABLENAME_SPECIAL_FACILITY, speTbl);
        speTbl.clearRowData();
        assert (speTbl.getRowCount() == 0);
      }
    } // FOR
    if (calTbl.getRowCount() > 0) {
      if (d) LOG.debug(String.format("%s: %d", TM1Constants.TABLENAME_CALL_FORWARDING, calTotal));
      loadVoltTable(TM1Constants.TABLENAME_CALL_FORWARDING, calTbl);
      calTbl.clearRowData();
      assert (calTbl.getRowCount() == 0);
    }
    if (speTbl.getRowCount() > 0) {
      if (d) LOG.debug(String.format("%s: %d", TM1Constants.TABLENAME_SPECIAL_FACILITY, speTotal));
      loadVoltTable(TM1Constants.TABLENAME_SPECIAL_FACILITY, speTbl);
      speTbl.clearRowData();
      assert (speTbl.getRowCount() == 0);
    }
  }
Code Example #18
File: VoltCompiler.java, Project: repos-db/h-store
  @SuppressWarnings("unchecked")
  public Catalog compileCatalog(final String projectFileURL, final ClusterConfig clusterConfig) {
    if (!clusterConfig.validate()) {
      addErr(clusterConfig.getErrorMsg());
      return null;
    }

    // Compiler instance is reusable. Clear the cache.
    cachedAddedClasses.clear();
    m_currentFilename = new File(projectFileURL).getName();
    m_jarBuilder = new JarBuilder(this);

    if (m_outputStream != null) {
      m_outputStream.println("\n** BEGIN PROJECT COMPILE: " + m_currentFilename + " **");
    }

    ProjectType project = null;

    try {
      JAXBContext jc = JAXBContext.newInstance("org.voltdb.compiler.projectfile");
      // This schema shot the sheriff.
      SchemaFactory sf = SchemaFactory.newInstance(javax.xml.XMLConstants.W3C_XML_SCHEMA_NS_URI);
      Schema schema = sf.newSchema(this.getClass().getResource("ProjectFileSchema.xsd"));
      Unmarshaller unmarshaller = jc.createUnmarshaller();
      // But did not shoot unmarshaller!
      unmarshaller.setSchema(schema);
      JAXBElement<ProjectType> result =
          (JAXBElement<ProjectType>) unmarshaller.unmarshal(new File(projectFileURL));
      project = result.getValue();
    } catch (JAXBException e) {
      // Convert some linked exceptions to more friendly errors.
      if (e.getLinkedException() instanceof java.io.FileNotFoundException) {
        addErr(e.getLinkedException().getMessage());
        return null;
      }
      if (e.getLinkedException() instanceof org.xml.sax.SAXParseException) {
        addErr("Error schema validating project.xml file. " + e.getLinkedException().getMessage());
        return null;
      }
      throw new RuntimeException(e);
    } catch (SAXException e) {
      addErr("Error schema validating project.xml file. " + e.getMessage());
      return null;
    }

    try {
      compileXMLRootNode(project);
    } catch (final VoltCompilerException e) {
      //            compilerLog.l7dlog( Level.ERROR,
      // LogKeys.compiler_VoltCompiler_FailedToCompileXML.name(), null);
      LOG.error(e.getMessage(), e);
      // e.printStackTrace();
      return null;
    }
    assert (m_catalog != null);

    try {
      ClusterCompiler.compile(m_catalog, clusterConfig);
    } catch (RuntimeException e) {
      addErr(e.getMessage());
      return null;
    }

    // Optimization: Vertical Partitioning
    if (m_enableVerticalPartitionOptimizations) {
      if (m_verticalPartitionPlanner == null) {
        m_verticalPartitionPlanner =
            new VerticalPartitionPlanner(CatalogUtil.getDatabase(m_catalog), true);
      }
      try {
        m_verticalPartitionPlanner.optimizeDatabase();
      } catch (Exception ex) {
        LOG.warn("Unexpected error", ex);
        addErr("Failed to apply vertical partition optimizations");
      }
    }

    // add epoch info to catalog
    final int epoch = (int) (TransactionIdManager.getEpoch() / 1000);
    m_catalog.getClusters().get("cluster").setLocalepoch(epoch);

    // done handling files
    m_currentFilename = null;
    return m_catalog;
  }
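Note the error contract: on validation, parse, or cluster-compile failures the method records a message via addErr() and returns null instead of throwing. Call sites therefore have to check the result explicitly; a minimal sketch (compiler, projectFileURL, and clusterConfig are assumed to be in scope):

  // Hypothetical call site honoring the null-on-failure contract above.
  Catalog catalog = compiler.compileCatalog(projectFileURL, clusterConfig);
  if (catalog == null) {
    throw new RuntimeException("Catalog compilation failed for " + projectFileURL);
  }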
Code Example #19
File: VoltCompiler.java, Project: repos-db/h-store
  public static MaterializedViewInfo addVerticalPartition(
      final Table catalog_tbl, final Collection<Column> catalog_cols, final boolean createIndex)
      throws Exception {
    assert (catalog_cols.isEmpty() == false);
    Database catalog_db = ((Database) catalog_tbl.getParent());

    String viewName = getNextVerticalPartitionName(catalog_tbl, catalog_cols);
    if (debug.get())
      LOG.debug(
          String.format(
              "Adding Vertical Partition %s for %s: %s", viewName, catalog_tbl, catalog_cols));

    // Create a new virtual table
    Table virtual_tbl = catalog_db.getTables().get(viewName);
    if (virtual_tbl == null) {
      virtual_tbl = catalog_db.getTables().add(viewName);
    }
    virtual_tbl.setIsreplicated(true);
    virtual_tbl.setMaterializer(catalog_tbl);
    virtual_tbl.setSystable(true);
    virtual_tbl.getColumns().clear();

    // Create MaterializedView and link it to the virtual table
    MaterializedViewInfo catalog_view = catalog_tbl.getViews().add(viewName);
    catalog_view.setVerticalpartition(true);
    catalog_view.setDest(virtual_tbl);
    List<Column> indexColumns = new ArrayList<Column>();

    Column partition_col = catalog_tbl.getPartitioncolumn();
    if (partition_col instanceof VerticalPartitionColumn) {
      partition_col = ((VerticalPartitionColumn) partition_col).getHorizontalColumn();
    }
    if (debug.get()) LOG.debug(catalog_tbl.getName() + " Partition Column: " + partition_col);

    int i = 0;
    assert (catalog_cols != null);
    assert (catalog_cols.isEmpty() == false)
        : "No vertical partitioning columns for " + catalog_view.fullName();
    for (Column catalog_col : catalog_cols) {
      // MaterializedView ColumnRef
      ColumnRef catalog_ref = catalog_view.getGroupbycols().add(catalog_col.getName());
      catalog_ref.setColumn(catalog_col);
      catalog_ref.setIndex(i++);

      // VirtualTable Column
      Column virtual_col = virtual_tbl.getColumns().add(catalog_col.getName());
      virtual_col.setDefaulttype(catalog_col.getDefaulttype());
      virtual_col.setDefaultvalue(catalog_col.getDefaultvalue());
      virtual_col.setIndex(catalog_col.getIndex());
      virtual_col.setNullable(catalog_col.getNullable());
      virtual_col.setSize(catalog_col.getSize());
      virtual_col.setType(catalog_col.getType());
      if (debug.get())
        LOG.debug(String.format("Added VerticalPartition column %s", virtual_col.fullName()));

      // If they want an index, then we'll make one based on every column except for the column
      // that the table is partitioned on
      if (createIndex) {
        boolean include = true;
        if (partition_col instanceof MultiColumn) {
          include = (((MultiColumn) partition_col).contains(catalog_col) == false);
        } else if (catalog_col.equals(partition_col)) {
          include = false;
        }
        if (include) indexColumns.add(virtual_col);
      }
    } // FOR

    if (createIndex) {
      if (indexColumns.isEmpty()) {
        Map<String, Object> m = new ListOrderedMap<String, Object>();
        m.put("Partition Column", partition_col);
        m.put("VP Table Columns", virtual_tbl.getColumns());
        m.put("Passed-in Columns", CatalogUtil.debug(catalog_cols));
        LOG.error("Failed to find index columns\n" + StringUtil.formatMaps(m));
        throw new Exception(String.format("No columns selected for index on %s", viewName));
      }
      String idxName = "SYS_IDX_" + viewName;
      Index virtual_idx = virtual_tbl.getIndexes().get(idxName);
      if (virtual_idx == null) {
        virtual_idx = virtual_tbl.getIndexes().add(idxName);
      }
      virtual_idx.getColumns().clear();

      IndexType idxType =
          (indexColumns.size() == 1 ? IndexType.HASH_TABLE : IndexType.BALANCED_TREE);
      virtual_idx.setType(idxType.getValue());
      i = 0;
      for (Column catalog_col : indexColumns) {
        ColumnRef cref = virtual_idx.getColumns().add(catalog_col.getTypeName());
        cref.setColumn(catalog_col);
        cref.setIndex(i++);
      } // FOR

      if (debug.get())
        LOG.debug(
            String.format(
                "Created %s index '%s' for vertical partition '%s'", idxType, idxName, viewName));
    }
    return (catalog_view);
  }
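A hedged usage sketch, written as if inside a method declared throws Exception (the table and column names are hypothetical; the catalog accessors mirror those used in the method above):

  // Hypothetical: vertically partition CUSTOMER on (C_ID, C_NAME), with an index.
  Table customer = catalog_db.getTables().get("CUSTOMER");
  Collection<Column> cols = new ArrayList<Column>();
  cols.add(customer.getColumns().get("C_ID"));
  cols.add(customer.getColumns().get("C_NAME"));
  MaterializedViewInfo vp = VoltCompiler.addVerticalPartition(customer, cols, true);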
Code Example #20
File: ArgumentsParser.java, Project: kanghong/h-store
  /**
   * Load the workload trace and workload statistics specified by the input parameters
   *
   * @throws Exception
   */
  private void loadWorkload() throws Exception {
    final boolean debug = LOG.isDebugEnabled();
    // Workload Trace
    if (this.params.containsKey(PARAM_WORKLOAD)) {
      assert (this.catalog_db != null) : "Missing catalog!";
      String path = new File(this.params.get(PARAM_WORKLOAD)).getAbsolutePath();

      boolean weightedTxns = this.getBooleanParam(PARAM_WORKLOAD_XACT_WEIGHTS, false);
      if (debug) LOG.debug("Use Transaction Weights in Limits: " + weightedTxns);

      // This will prune out duplicate trace records...
      if (params.containsKey(PARAM_WORKLOAD_REMOVE_DUPES)) {
        DuplicateTraceFilter filter = new DuplicateTraceFilter();
        this.workload_filter =
            (this.workload_filter != null ? filter.attach(this.workload_filter) : filter);
        if (debug) LOG.debug("Attached " + filter.debugImpl());
      }

      // TRANSACTION OFFSET
      if (params.containsKey(PARAM_WORKLOAD_XACT_OFFSET)) {
        this.workload_xact_offset = Long.parseLong(params.get(PARAM_WORKLOAD_XACT_OFFSET));
        ProcedureLimitFilter filter =
            new ProcedureLimitFilter(-1l, this.workload_xact_offset, weightedTxns);
        // Important! The offset should go in the front!
        this.workload_filter =
            (this.workload_filter != null ? filter.attach(this.workload_filter) : filter);
        if (debug) LOG.debug("Attached " + filter.debugImpl());
      }

      // BASE PARTITIONS
      if (params.containsKey(PARAM_WORKLOAD_RANDOM_PARTITIONS)
          || params.containsKey(PARAM_WORKLOAD_BASE_PARTITIONS)) {
        BasePartitionTxnFilter filter =
            new BasePartitionTxnFilter(new PartitionEstimator(catalog_db));

        // FIXED LIST
        if (params.containsKey(PARAM_WORKLOAD_BASE_PARTITIONS)) {
          for (String p_str : this.getParam(PARAM_WORKLOAD_BASE_PARTITIONS).split(",")) {
            workload_base_partitions.add(Integer.valueOf(p_str));
          } // FOR
        }
        // RANDOM
        else {
          double factor = this.getDoubleParam(PARAM_WORKLOAD_RANDOM_PARTITIONS);
          List<Integer> all_partitions =
              new ArrayList<Integer>(CatalogUtil.getAllPartitionIds(catalog_db));
          Collections.shuffle(all_partitions, new Random());
          workload_base_partitions.addAll(
              all_partitions.subList(0, (int) (all_partitions.size() * factor)));
        }
        filter.addPartitions(workload_base_partitions);
        this.workload_filter =
            (this.workload_filter != null ? this.workload_filter.attach(filter) : filter);
        if (debug) LOG.debug("Attached " + filter.debugImpl());
      }

      // Txn Limit
      this.workload_xact_limit = this.getLongParam(PARAM_WORKLOAD_XACT_LIMIT);
      Histogram<String> proc_histogram = null;

      // Include/exclude procedures from the traces
      if (params.containsKey(PARAM_WORKLOAD_PROC_INCLUDE)
          || params.containsKey(PARAM_WORKLOAD_PROC_EXCLUDE)) {
        Filter filter = new ProcedureNameFilter(weightedTxns);

        // INCLUDE
        String temp = params.get(PARAM_WORKLOAD_PROC_INCLUDE);
        if (temp != null && !temp.equals(ProcedureNameFilter.INCLUDE_ALL)) {

          // We can take the counts for PROC_INCLUDE and scale them
          // with the multiplier
          double multiplier = 1.0d;
          if (this.hasDoubleParam(PARAM_WORKLOAD_PROC_INCLUDE_MULTIPLIER)) {
            multiplier = this.getDoubleParam(PARAM_WORKLOAD_PROC_INCLUDE_MULTIPLIER);
            if (debug) LOG.debug("Workload Procedure Multiplier: " + multiplier);
          }

          // Default Txn Frequencies
          String procinclude = params.get(PARAM_WORKLOAD_PROC_INCLUDE);
          if (procinclude.equalsIgnoreCase("default")) {
            procinclude =
                AbstractProjectBuilder.getProjectBuilder(catalog_type)
                    .getTransactionFrequencyString();
          }

          Map<String, Integer> limits = new HashMap<String, Integer>();
          int total_unlimited = 0;
          int total = 0;
          for (String proc_name : procinclude.split(",")) {
            int limit = -1;
            // Check if there is a limit for this procedure
            if (proc_name.contains(":")) {
              String pieces[] = proc_name.split(":");
              proc_name = pieces[0];
              limit = (int) Math.round(Integer.parseInt(pieces[1]) * multiplier);
            }

            if (limit < 0) {
              if (proc_histogram == null) {
                if (debug) LOG.debug("Generating procedure histogram from workload file");
                proc_histogram = WorkloadUtil.getProcedureHistogram(new File(path));
              }
              limit = (int) proc_histogram.get(proc_name, 0);
              total_unlimited += limit;
            } else {
              total += limit;
            }
            limits.put(proc_name, limit);
          } // FOR
          // If we have a workload limit and some txns that we want to get unlimited
          // records from, then we want to modify the other txns so that we fill in the
          // "gap"
          if (this.workload_xact_limit != null && total_unlimited > 0) {
            int remaining = this.workload_xact_limit.intValue() - total - total_unlimited;
            if (remaining > 0) {
              for (Entry<String, Integer> e : limits.entrySet()) {
                double ratio = e.getValue() / (double) total;
                e.setValue((int) Math.ceil(e.getValue() + (ratio * remaining)));
              } // FOR
            }
          }

          Histogram<String> proc_multiplier_histogram = null;
          if (debug) {
            if (proc_histogram != null) LOG.debug("Full Workload Histogram:\n" + proc_histogram);
            proc_multiplier_histogram = new Histogram<String>();
          }
          total = 0;
          for (Entry<String, Integer> e : limits.entrySet()) {
            if (debug) proc_multiplier_histogram.put(e.getKey(), e.getValue());
            ((ProcedureNameFilter) filter).include(e.getKey(), e.getValue());
            total += e.getValue();
          } // FOR
          if (debug)
            LOG.debug("Multiplier Histogram [total=" + total + "]:\n" + proc_multiplier_histogram);
        }

        // EXCLUDE
        temp = params.get(PARAM_WORKLOAD_PROC_EXCLUDE);
        if (temp != null) {
          for (String proc_name : params.get(PARAM_WORKLOAD_PROC_EXCLUDE).split(",")) {
            ((ProcedureNameFilter) filter).exclude(proc_name);
          } // FOR
        }

        // Sampling!!
        if (this.getBooleanParam(PARAM_WORKLOAD_PROC_SAMPLE, false)) {
          if (debug) LOG.debug("Attaching sampling filter");
          if (proc_histogram == null)
            proc_histogram = WorkloadUtil.getProcedureHistogram(new File(path));
          Map<String, Integer> proc_includes = ((ProcedureNameFilter) filter).getProcIncludes();
          SamplingFilter sampling_filter = new SamplingFilter(proc_includes, proc_histogram);
          filter = sampling_filter;
          if (debug) LOG.debug("Workload Procedure Histogram:\n" + proc_histogram);
        }

        // Attach our new filter to the chain (or make it the head if
        // it's the first one)
        this.workload_filter =
            (this.workload_filter != null ? this.workload_filter.attach(filter) : filter);
        if (debug) LOG.debug("Attached " + filter.debugImpl());
      }

      // TRANSACTION LIMIT
      if (this.workload_xact_limit != null) {
        ProcedureLimitFilter filter =
            new ProcedureLimitFilter(this.workload_xact_limit, weightedTxns);
        this.workload_filter =
            (this.workload_filter != null ? this.workload_filter.attach(filter) : filter);
        if (debug) LOG.debug("Attached " + filter.debugImpl());
      }

      // QUERY LIMIT
      if (params.containsKey(PARAM_WORKLOAD_QUERY_LIMIT)) {
        this.workload_query_limit = Long.parseLong(params.get(PARAM_WORKLOAD_QUERY_LIMIT));
        QueryLimitFilter filter = new QueryLimitFilter(this.workload_query_limit);
        this.workload_filter =
            (this.workload_filter != null ? this.workload_filter.attach(filter) : filter);
      }

      if (this.workload_filter != null && debug)
        LOG.debug("Workload Filters: " + this.workload_filter.toString());
      this.workload = new Workload(this.catalog);
      this.workload.load(path, this.catalog_db, this.workload_filter);
      this.workload_path = new File(path).getAbsolutePath();
      if (this.workload_filter != null) this.workload_filter.reset();
    }

    // Workload Statistics
    if (this.catalog_db != null) {
      this.stats = new WorkloadStatistics(this.catalog_db);
      if (this.params.containsKey(PARAM_STATS)) {
        String path = this.params.get(PARAM_STATS);
        if (debug) LOG.debug("Loading in workload statistics from '" + path + "'");
        this.stats_path = new File(path).getAbsolutePath();
        try {
          this.stats.load(path, this.catalog_db);
        } catch (Throwable ex) {
          throw new RuntimeException("Failed to load stats file '" + this.stats_path + "'", ex);
        }
      }

      // Scaling
      if (this.params.containsKey(PARAM_STATS_SCALE_FACTOR)) {
        double scale_factor = this.getDoubleParam(PARAM_STATS_SCALE_FACTOR);
        LOG.info("Scaling TableStatistics: " + scale_factor);
        AbstractTableStatisticsGenerator generator =
            AbstractTableStatisticsGenerator.factory(
                this.catalog_db, this.catalog_type, scale_factor);
        generator.apply(this.stats);
      }
    }
  }
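Every branch above appends to the chain with the same conditional, except the duplicate-removal and offset filters, which attach the existing chain behind themselves so that they run first. A hedged helper capturing the repeated tail-append (Filter.attach() is taken from the calls above; the helper itself is hypothetical):

  // Hypothetical helper for the repeated chain-append pattern.
  private Filter attachToChain(Filter chain, Filter next) {
    return (chain != null ? chain.attach(next) : next);
  }

The offset case would still be special-cased as filter.attach(this.workload_filter), since the offset must be applied before any other filtering.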
Code Example #21
File: ArgumentsParser.java, Project: kanghong/h-store
 public void updateCatalog(Catalog catalog, File catalog_path) {
   this.catalog = catalog;
   this.catalog_db = CatalogUtil.getDatabase(catalog);
   if (catalog_path != null) this.catalog_path = catalog_path;
 }
Code Example #22
File: LocalCluster.java, Project: repos-db/h-store
  @Override
  public void startUp() {
    assert (!m_running);
    if (m_running) {
      return;
    }

    // set to true to spew startup timing data
    boolean logtime = true;
    long startTime = 0;
    if (logtime) {
      startTime = System.currentTimeMillis();
      System.out.println("********** Starting cluster at: " + startTime);
    }

    // create the in-process server
    //        Configuration config = new Configuration();
    //        config.m_backend = m_target;
    //        config.m_noLoadLibVOLTDB = (m_target == BackendTarget.HSQLDB_BACKEND);
    //        config.m_pathToCatalog = m_jarFileName;
    //        config.m_profilingLevel = ProcedureProfiler.Level.DISABLED;
    //        config.m_port = HStoreConstants.DEFAULT_PORT;

    HStoreConf hstore_conf = HStoreConf.singleton(HStoreConf.isInitialized() == false);
    hstore_conf.loadFromArgs(this.confParams);

    // create all the out-of-process servers
    // Loop through all of the sites in the catalog and start them
    int offset = m_procBuilder.command().size() - 1;
    for (Site catalog_site : CatalogUtil.getAllSites(this.catalog)) {
      final int site_id = catalog_site.getId();

      // If this is the first site, then start the HStoreSite in this JVM
      if (site_id == 0) {
        m_localServer = new ServerThread(hstore_conf, catalog_site);
        m_localServer.start();
      }
      // Otherwise, fork a new JVM that will run our other HStoreSites.
      // Remember that it is one JVM per HStoreSite
      else {
        try {
          m_procBuilder.command().set(offset, "-Dsite.id=" + site_id);
          Process proc = m_procBuilder.start();
          m_cluster.add(proc);
          // write output to obj/release/testoutput/<test name>-n.txt
          // this may need to be more unique? Also very useful to just
          // set this to a hardcoded path and use "tail -f" to debug.
          String testoutputdir = m_buildDir + File.separator + "testoutput";
          // make sure the directory exists
          File dir = new File(testoutputdir);
          if (dir.exists()) {
            assert (dir.isDirectory());
          } else {
            boolean status = dir.mkdirs();
            assert (status);
          }

          PipeToFile ptf =
              new PipeToFile(
                  testoutputdir + File.separator + getName() + "-" + site_id + ".txt",
                  proc.getInputStream());
          ptf.m_writer.write(m_procBuilder.command().toString() + "\n");
          m_pipes.add(ptf);
          Thread t = new Thread(ptf);
          t.setName("ClusterPipe:" + String.valueOf(site_id));
          t.start();
        } catch (IOException ex) {
          System.out.println("Failed to start cluster process:" + ex.getMessage());
          Logger.getLogger(LocalCluster.class.getName()).log(Level.SEVERE, null, ex);
          assert (false);
        }
      }
    }

    // spin until all the pipes see the magic "Server completed.." string.
    boolean allReady;
    do {
      if (logtime)
        System.out.println(
            "********** pre witness: " + (System.currentTimeMillis() - startTime) + " ms");
      allReady = true;
      for (PipeToFile pipeToFile : m_pipes) {
        if (pipeToFile.m_witnessedReady.get() != true) {
          try {
            // wait for explicit notification
            synchronized (pipeToFile) {
              pipeToFile.wait();
            }
          } catch (InterruptedException ex) {
            Logger.getLogger(LocalCluster.class.getName()).log(Level.SEVERE, null, ex);
          }
          allReady = false;
          break;
        }
      }
    } while (allReady == false);
    if (logtime)
      System.out.println(
          "********** post witness: " + (System.currentTimeMillis() - startTime) + " ms");

    // Finally, make sure the local server thread is running and wait if it is not.
    m_localServer.waitForInitialization();
    if (logtime)
      System.out.println("********** DONE: " + (System.currentTimeMillis() - startTime) + " ms");
    m_running = true;
  }
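The ready-spin loop blocks in pipeToFile.wait() and relies on PipeToFile notifying on itself once the magic string appears. The producing half of that handshake, sketched (hypothetical, inferred from the wait() and m_witnessedReady.get() calls above):

  // Hypothetical: what PipeToFile would do upon seeing "Server completed..".
  synchronized (this) {
    this.m_witnessedReady.set(true);
    this.notifyAll();
  }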
Code Example #23
File: ArgumentsParser.java, Project: kanghong/h-store
  /**
   * @param args
   * @throws Exception
   */
  @SuppressWarnings("unchecked")
  public void process(String[] args, String... required) throws Exception {
    final boolean debug = LOG.isDebugEnabled();

    if (debug) LOG.debug("Processing " + args.length + " parameters...");
    final Pattern p = Pattern.compile("=");
    for (int i = 0, cnt = args.length; i < cnt; i++) {
      final String arg = args[i];
      final String[] parts = p.split(arg, 2);
      if (parts[0].startsWith("-")) parts[0] = parts[0].substring(1);

      if (parts.length == 1) {
        if (parts[0].startsWith("${") == false) this.opt_params.add(parts[0]);
        continue;
      } else if (parts[0].equalsIgnoreCase("tag")) {
        continue;
      } else if (parts[1].startsWith("${") || parts[0].startsWith("#")) {
        continue;
      }
      if (debug) LOG.debug(String.format("%-35s = %s", parts[0], parts[1]));

      // DesignerHints Override
      if (parts[0].startsWith(PARAM_DESIGNER_HINTS_PREFIX)) {
        String param = parts[0].replace(PARAM_DESIGNER_HINTS_PREFIX, "").toLowerCase();
        try {
          Field f = DesignerHints.class.getField(param);
          this.hints_params.put(f.getName(), parts[1]);
          if (debug) LOG.debug(String.format("DesignerHints.%s = %s", param, parts[1]));
        } catch (NoSuchFieldException ex) {
          throw new Exception("Unknown DesignerHints parameter: " + param, ex);
        }

      }
      // HStoreConf Parameter
      else if (HStoreConf.isConfParameter(parts[0])) {
        this.conf_params.put(parts[0].toLowerCase(), parts[1]);
      }
      // ArgumentsParser Parameter
      else if (PARAMS.contains(parts[0].toLowerCase())) {
        this.params.put(parts[0].toLowerCase(), parts[1]);
      }
      // Invalid!
      else {
        String suggestions = "";
        int matched = 0;
        String end = CollectionUtil.last(parts[0].split("\\."));
        for (String param : PARAMS) {
          String param_end = CollectionUtil.last(param.split("\\."));
          if (param.startsWith(parts[0])
              || (end != null && param.endsWith(end))
              || (end != null && param_end != null && param_end.startsWith(end))) {
            if (suggestions.isEmpty()) suggestions = ". Possible Matches:";
            suggestions += String.format("\n [%02d] %s", ++matched, param);
          }
        } // FOR
        throw new Exception("Unknown parameter '" + parts[0] + "'" + suggestions);
      }
    } // FOR

    // -------------------------------------------------------
    // CATALOGS
    // -------------------------------------------------------

    // Text File
    if (this.params.containsKey(PARAM_CATALOG)) {
      String path = this.params.get(PARAM_CATALOG);
      if (debug) LOG.debug("Loading catalog from file '" + path + "'");
      Catalog catalog = CatalogUtil.loadCatalog(path);
      if (catalog == null)
        throw new Exception("Failed to load catalog object from file '" + path + "'");
      this.updateCatalog(catalog, new File(path));
    }
    // Jar File
    else if (this.params.containsKey(PARAM_CATALOG_JAR)) {
      String path = this.params.get(PARAM_CATALOG_JAR);
      this.params.put(PARAM_CATALOG, path);
      File jar_file = new File(path);
      Catalog catalog = CatalogUtil.loadCatalogFromJar(path);
      if (catalog == null)
        throw new Exception("Failed to load catalog object from jar file '" + path + "'");
      if (debug) LOG.debug("Loaded catalog from jar file '" + path + "'");
      this.updateCatalog(catalog, jar_file);

      if (!this.params.containsKey(PARAM_CATALOG_TYPE)) {
        String jar_name = jar_file.getName();
        int jar_idx = jar_name.lastIndexOf(".jar");
        if (jar_idx != -1) {
          ProjectType type = ProjectType.get(jar_name.substring(0, jar_idx));
          if (type != null) {
            if (debug) LOG.debug("Set catalog type '" + type + "' from catalog jar file name");
            this.catalog_type = type;
            this.params.put(PARAM_CATALOG_TYPE, this.catalog_type.toString());
          }
        }
      }
    }
    // Schema File
    else if (this.params.containsKey(PARAM_CATALOG_SCHEMA)) {
      String path = this.params.get(PARAM_CATALOG_SCHEMA);
      Catalog catalog = CompilerUtil.compileCatalog(path);
      if (catalog == null) throw new Exception("Failed to load schema from '" + path + "'");
      if (debug) LOG.debug("Loaded catalog from schema file '" + path + "'");
      this.updateCatalog(catalog, new File(path));
    }

    // Catalog Type
    if (this.params.containsKey(PARAM_CATALOG_TYPE)) {
      String catalog_type = this.params.get(PARAM_CATALOG_TYPE);
      ProjectType type = ProjectType.get(catalog_type);
      if (type == null) {
        throw new Exception("Unknown catalog type '" + catalog_type + "'");
      }
      this.catalog_type = type;
    }

    // Update Cluster Configuration
    if (this.params.containsKey(ArgumentsParser.PARAM_CATALOG_HOSTS)) {
      ClusterConfiguration cc =
          new ClusterConfiguration(this.getParam(ArgumentsParser.PARAM_CATALOG_HOSTS));
      this.updateCatalog(FixCatalog.addHostInfo(this.catalog, cc), null);
    }

    // Check the requirements after loading the catalog, because some of the
    // parameters above may have already set the catalog
    if (required != null && required.length > 0) this.require(required);

    // -------------------------------------------------------
    // PHYSICAL DESIGN COMPONENTS
    // -------------------------------------------------------
    if (this.params.containsKey(PARAM_PARTITION_PLAN)) {
      assert (this.catalog_db != null);
      File path = new File(this.params.get(PARAM_PARTITION_PLAN));
      boolean ignoreMissing =
          this.getBooleanParam(ArgumentsParser.PARAM_PARTITION_PLAN_IGNORE_MISSING, false);
      // Attempt the load whenever the file exists, and also when it is missing but the
      // caller did not ask us to ignore that (so load() can fail loudly)
      if (path.exists() || ignoreMissing == false) {
        if (debug) LOG.debug("Loading in partition plan from '" + path + "'");
        this.pplan = new PartitionPlan();
        this.pplan.load(path.getAbsolutePath(), this.catalog_db);

        // Apply!
        if (this.params.containsKey(PARAM_PARTITION_PLAN_APPLY)
            && this.getBooleanParam(PARAM_PARTITION_PLAN_APPLY)) {
          boolean secondaryIndexes =
              this.getBooleanParam(PARAM_PARTITION_PLAN_NO_SECONDARY, false) == false;
          LOG.info(
              String.format(
                  "Applying PartitionPlan '%s' to catalog [enableSecondary=%s]",
                  path.getName(), secondaryIndexes));
          this.pplan.apply(this.catalog_db, secondaryIndexes);
        }
      }
    }

    // -------------------------------------------------------
    // DESIGNER COMPONENTS
    // -------------------------------------------------------

    if (this.params.containsKey(PARAM_DESIGNER_THREADS)) {
      this.max_concurrent = Integer.valueOf(this.params.get(PARAM_DESIGNER_THREADS));
    }
    if (this.params.containsKey(PARAM_DESIGNER_INTERVALS)) {
      this.num_intervals = Integer.valueOf(this.params.get(PARAM_DESIGNER_INTERVALS));
    }
    if (this.params.containsKey(PARAM_DESIGNER_HINTS)) {
      String path = this.params.get(PARAM_DESIGNER_HINTS);
      if (debug)
        LOG.debug(
            "Loading in designer hints from '"
                + path
                + "'.\nForced Values:\n"
                + StringUtil.formatMaps(this.hints_params));
      this.designer_hints.load(path, catalog_db, this.hints_params);
    }
    if (this.params.containsKey(PARAM_DESIGNER_CHECKPOINT)) {
      this.designer_checkpoint = new File(this.params.get(PARAM_DESIGNER_CHECKPOINT));
    }

    String designer_attributes[] = {
      PARAM_DESIGNER_PARTITIONER,
      PARAM_DESIGNER_MAPPER,
      PARAM_DESIGNER_INDEXER,
      PARAM_DESIGNER_COSTMODEL
    };
    ClassLoader loader = ClassLoader.getSystemClassLoader();
    for (String key : designer_attributes) {
      if (this.params.containsKey(key)) {
        String target_name = this.params.get(key);
        Class<?> target_class = loader.loadClass(target_name);
        assert (target_class != null);
        if (debug) LOG.debug("Set " + key + " class to " + target_class.getName());

        if (key.equals(PARAM_DESIGNER_PARTITIONER)) {
          this.partitioner_class = (Class<? extends AbstractPartitioner>) target_class;
        } else if (key.equals(PARAM_DESIGNER_MAPPER)) {
          this.mapper_class = (Class<? extends AbstractMapper>) target_class;
        } else if (key.equals(PARAM_DESIGNER_INDEXER)) {
          this.indexer_class = (Class<? extends AbstractIndexSelector>) target_class;
        } else if (key.equals(PARAM_DESIGNER_COSTMODEL)) {
          this.costmodel_class = (Class<? extends AbstractCostModel>) target_class;

          // Special Case: TimeIntervalCostModel
          if (target_name.endsWith(TimeIntervalCostModel.class.getSimpleName())) {
            this.costmodel =
                new TimeIntervalCostModel<SingleSitedCostModel>(
                    this.catalog_db, SingleSitedCostModel.class, this.num_intervals);
          } else {
            this.costmodel =
                ClassUtil.newInstance(
                    this.costmodel_class,
                    new Object[] {this.catalog_db},
                    new Class[] {Database.class});
          }
        } else {
          assert (false) : "Invalid key '" + key + "'";
        }
      }
    } // FOR

    // -------------------------------------------------------
    // TRANSACTION ESTIMATION COMPONENTS
    // -------------------------------------------------------
    if (this.params.containsKey(PARAM_MAPPINGS)) {
      assert (this.catalog_db != null);
      File path = new File(this.params.get(PARAM_MAPPINGS));
      if (path.exists()) {
        this.param_mappings.load(path.getAbsolutePath(), this.catalog_db);
      } else {
        LOG.warn("The ParameterMappings file '" + path + "' does not exist");
      }
    }
    if (this.params.containsKey(PARAM_MARKOV_THRESHOLDS_VALUE)) {
      assert (this.catalog_db != null);
      float defaultValue = this.getDoubleParam(PARAM_MARKOV_THRESHOLDS_VALUE).floatValue();
      this.thresholds = new EstimationThresholds(defaultValue);
      this.params.put(PARAM_MARKOV_THRESHOLDS, this.thresholds.toString());
      LOG.debug("CREATED THRESHOLDS: " + this.thresholds);

    } else if (this.params.containsKey(PARAM_MARKOV_THRESHOLDS)) {
      assert (this.catalog_db != null);
      this.thresholds = new EstimationThresholds();
      File path = new File(this.params.get(PARAM_MARKOV_THRESHOLDS));
      if (path.exists()) {
        this.thresholds.load(path.getAbsolutePath(), this.catalog_db);
      } else {
        LOG.warn("The estimation thresholds file '" + path + "' does not exist");
      }
      LOG.debug("LOADED THRESHOLDS: " + this.thresholds);
    }

    // -------------------------------------------------------
    // HASHER
    // -------------------------------------------------------
    if (this.catalog != null) {
      if (this.params.containsKey(PARAM_HASHER_CLASS)) {
        String hasherClassName = this.params.get(PARAM_HASHER_CLASS);
        this.hasher_class = (Class<? extends AbstractHasher>) loader.loadClass(hasherClassName);
      }
      Constructor<? extends AbstractHasher> constructor =
          this.hasher_class.getConstructor(new Class[] {Database.class, Integer.class});
      int num_partitions = CatalogUtil.getNumberOfPartitions(this.catalog_db);
      this.hasher = constructor.newInstance(new Object[] {this.catalog_db, num_partitions});
      if (!(this.hasher instanceof DefaultHasher))
        LOG.debug("Loaded hasher " + this.hasher.getClass());

      if (this.params.containsKey(PARAM_HASHER_PROFILE)) {
        this.hasher.load(this.params.get(PARAM_HASHER_PROFILE), null);
      }
    }

    // -------------------------------------------------------
    // SAMPLE WORKLOAD TRACE
    // -------------------------------------------------------
    this.loadWorkload();
  }
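Seen from the caller's side, the long loading pipeline above reduces to: parse the key/value pairs, let the catalog, designer, estimation, and hasher sections populate their typed fields, then read the results. A minimal usage sketch, assuming only the ArgumentsParser entry points that appear in these examples (load, require, hasParam, getParam); the import path is an assumption based on the h-store source layout:

import edu.brown.utils.ArgumentsParser;

public class ArgumentsParserUsage {
  public static void main(String[] vargs) throws Exception {
    // Parse "key=value" arguments; this runs the whole pipeline shown above
    ArgumentsParser args = ArgumentsParser.load(vargs);

    // Fail fast if a required parameter is missing (note that loading a
    // catalog jar also back-fills PARAM_CATALOG, as the code above shows)
    args.require(ArgumentsParser.PARAM_CATALOG);

    // Optional parameters are probed before use
    if (args.hasParam(ArgumentsParser.PARAM_PARTITION_PLAN)) {
      System.out.println("Plan: " + args.getParam(ArgumentsParser.PARAM_PARTITION_PLAN));
    }
  }
}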
Code example #24
File: TPCCSimulation.java Project: jsedgwick/h-store
  public TPCCSimulation(
      TPCCSimulation.ProcCaller client,
      RandomGenerator generator,
      Clock clock,
      ScaleParameters parameters,
      TPCCConfig config,
      double skewFactor,
      Catalog catalog) {
    assert parameters != null;
    this.client = client;
    this.generator = generator;
    this.clock = clock;
    this.parameters = parameters;
    this.affineWarehouse = lastAssignedWarehouseId;
    this.skewFactor = skewFactor;
    this.config = config;

    if (config.neworder_skew_warehouse) {
      if (debug.val) LOG.debug("Enabling W_ID Zipfian Skew: " + skewFactor);
      this.zipf =
          new RandomDistribution.Zipf(
              new Random(),
              parameters.starting_warehouse,
              parameters.last_warehouse + 1,
              Math.max(1.001d, this.skewFactor));

      this.custom_skew =
          new RandomDistribution.HotWarmCold(
              new Random(),
              parameters.starting_warehouse + 1,
              parameters.last_warehouse,
              TPCCConstants.HOT_DATA_WORKLOAD_SKEW,
              TPCCConstants.HOT_DATA_SIZE,
              TPCCConstants.WARM_DATA_WORKLOAD_SKEW,
              TPCCConstants.WARM_DATA_SIZE);
    }
    if (config.warehouse_debug) {
      LOG.info("Enabling WAREHOUSE debug mode");
    }

    lastAssignedWarehouseId += 1;
    if (lastAssignedWarehouseId > parameters.last_warehouse) lastAssignedWarehouseId = 1;

    if (debug.val) {
      LOG.debug(this.toString());
    }
    if (config.neworder_multip_remote) {
      synchronized (TPCCSimulation.class) {
        if (remoteWarehouseIds == null) {
          remoteWarehouseIds = new HashMap<Integer, List<Integer>>();
          HashMap<Integer, Integer> partitionToSite = new HashMap<Integer, Integer>();

          Database catalog_db = CatalogUtil.getDatabase(catalog);
          DefaultHasher hasher = new DefaultHasher(catalog_db);
          for (Site s : CatalogUtil.getCluster(catalog_db).getSites()) {
            for (Partition p : s.getPartitions()) partitionToSite.put(p.getId(), s.getId());
          } // FOR

          for (int w_id0 = parameters.starting_warehouse;
              w_id0 <= parameters.last_warehouse;
              w_id0++) {
            final int partition0 = hasher.hash(w_id0);
            final int site0 = partitionToSite.get(partition0);
            final List<Integer> rList = new ArrayList<Integer>();

            for (int w_id1 = parameters.starting_warehouse;
                w_id1 <= parameters.last_warehouse;
                w_id1++) {
              // Figure out what partition this W_ID maps to
              int partition1 = hasher.hash(w_id1);

              // Check whether this partition is on our same local site
              int site1 = partitionToSite.get(partition1);
              if (site0 != site1) rList.add(w_id1);
            } // FOR
            remoteWarehouseIds.put(w_id0, rList);
          } // FOR

          LOG.debug("NewOrder Remote W_ID Mapping\n" + StringUtil.formatMaps(remoteWarehouseIds));
        }
      } // SYNCH
    }
  }
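The neworder_multip_remote block above computes, once per JVM, the list of warehouses whose partitions live on a different site from each warehouse's own. The two-phase idea (map every partition to its site, then compare sites pairwise) works without any catalog machinery; in this self-contained sketch the modulo hash and the partition-to-site assignment are hypothetical stand-ins for DefaultHasher and the catalog's Site/Partition tree:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RemoteWarehouseMap {
  public static void main(String[] args) {
    final int warehouses = 8, partitions = 4, sites = 2;

    // Phase 1: partition -> site (stand-in for walking the catalog's sites)
    Map<Integer, Integer> partitionToSite = new HashMap<Integer, Integer>();
    for (int p = 0; p < partitions; p++) partitionToSite.put(p, p % sites);

    // Phase 2: for each warehouse, collect the warehouses hosted on *other* sites
    Map<Integer, List<Integer>> remote = new HashMap<Integer, List<Integer>>();
    for (int w0 = 1; w0 <= warehouses; w0++) {
      int site0 = partitionToSite.get(w0 % partitions); // hash(w0) stand-in
      List<Integer> rList = new ArrayList<Integer>();
      for (int w1 = 1; w1 <= warehouses; w1++) {
        if (site0 != partitionToSite.get(w1 % partitions).intValue()) rList.add(w1);
      }
      remote.put(w0, rList);
    }
    System.out.println(remote);
  }
}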
Code example #25
  public boolean compile(
      final VoltCompiler compiler,
      final String jarPath,
      final int sitesPerHost,
      final int hostCount,
      final int replication,
      final String leaderAddress) {
    assert (jarPath != null);
    assert (sitesPerHost >= 1);
    assert (hostCount >= 1);
    assert (leaderAddress != null);

    // this stuff could all be converted to org.voltdb.compiler.projectfile.*
    // jaxb objects and (WE ARE!) marshaled to XML. Just needs some elbow grease.

    DocumentBuilderFactory docFactory;
    DocumentBuilder docBuilder;
    Document doc;
    try {
      docFactory = DocumentBuilderFactory.newInstance();
      docBuilder = docFactory.newDocumentBuilder();
      doc = docBuilder.newDocument();
    } catch (final ParserConfigurationException e) {
      e.printStackTrace();
      return false;
    }

    // <project>
    final Element project = doc.createElement("project");
    doc.appendChild(project);

    // <security>
    final Element security = doc.createElement("security");
    security.setAttribute("enabled", Boolean.valueOf(m_securityEnabled).toString());
    project.appendChild(security);

    // <database>
    final Element database = doc.createElement("database");
    database.setAttribute("name", "database");
    database.setAttribute("project", this.project_name);
    project.appendChild(database);
    buildDatabaseElement(doc, database);

    // boilerplate to write this DOM object to file.
    StreamResult result;
    try {
      final Transformer transformer = TransformerFactory.newInstance().newTransformer();
      transformer.setOutputProperty(OutputKeys.INDENT, "yes");
      result = new StreamResult(new StringWriter());
      final DOMSource domSource = new DOMSource(doc);
      transformer.transform(domSource, result);
    } catch (final TransformerConfigurationException e) {
      e.printStackTrace();
      return false;
    } catch (final TransformerFactoryConfigurationError e) {
      e.printStackTrace();
      return false;
    } catch (final TransformerException e) {
      e.printStackTrace();
      return false;
    }


    final File projectFile = writeStringToTempFile(result.getWriter().toString());
    final String projectPath = projectFile.getPath();
    LOG.debug("PROJECT XML: " + projectPath);

    ClusterConfig cc =
        (this.cluster_config.isEmpty()
            ? new ClusterConfig(hostCount, sitesPerHost, replication, leaderAddress)
            : this.cluster_config);
    final boolean success =
        compiler.compile(projectPath, cc, jarPath, m_compilerDebugPrintStream, m_procInfoOverrides);

    // HACK: If we have a ParameterMappingsSet that we need to apply
    // either from a file or a fixed mappings, then we have
    // to load the catalog into this JVM, apply the mappings, and then
    // update the jar file with the new catalog
    if (m_paramMappingsFile != null || m_paramMappings.isEmpty() == false) {
      File jarFile = new File(jarPath);
      Catalog catalog = CatalogUtil.loadCatalogFromJar(jarFile);
      assert (catalog != null);
      Database catalog_db = CatalogUtil.getDatabase(catalog);

      this.applyParameterMappings(catalog_db);

      // Construct a List of prefetchable Statements
      this.applyPrefetchableFlags(catalog_db);

      // Write it out!
      try {
        CatalogUtil.updateCatalogInJar(jarFile, catalog, m_paramMappingsFile);
      } catch (Exception ex) {
        String msg = "Failed to updated Catalog in jar file '" + jarPath + "'";
        throw new RuntimeException(msg, ex);
      }
    }

    return success;
  }
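The DOM construction and Transformer "boilerplate" in the middle of compile() is plain JDK javax.xml with nothing VoltDB-specific. Stripped down, it is the following runnable sketch; the element and attribute names here are illustrative only:

import java.io.StringWriter;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

public class DomToXml {
  public static void main(String[] args) throws Exception {
    Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();

    Element project = doc.createElement("project"); // root, as in compile()
    doc.appendChild(project);
    Element database = doc.createElement("database");
    database.setAttribute("name", "database");
    project.appendChild(database);

    // Serialize the DOM to an indented XML string
    Transformer t = TransformerFactory.newInstance().newTransformer();
    t.setOutputProperty(OutputKeys.INDENT, "yes");
    StringWriter out = new StringWriter();
    t.transform(new DOMSource(doc), new StreamResult(out));
    System.out.println(out);
  }
}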
Code example #26
File: BaseTestCase.java Project: kanghong/h-store
  protected Cluster getCluster() {
    assertNotNull(catalog);
    Cluster catalog_clus = CatalogUtil.getCluster(catalog);
    assert (catalog_clus != null) : "Failed to retrieve cluster object from catalog";
    return (catalog_clus);
  }
Code example #27
File: CatalogContext.java Project: sdhost/h-store
  public CatalogContext(Catalog catalog, File pathToCatalogJar) {
    // check the heck out of the given params in this immutable class
    assert (catalog != null);
    if (catalog == null) {
      throw new RuntimeException("Can't create CatalogContext with null catalog.");
    }

    this.jarPath = pathToCatalogJar;
    this.catalog = catalog;
    this.cluster = CatalogUtil.getCluster(this.catalog);
    this.database = CatalogUtil.getDatabase(this.catalog);
    this.hosts = this.cluster.getHosts();
    this.sites = this.cluster.getSites();

    if (this.jarPath != null) {
      this.catalogClassLoader = new JarClassLoader(this.jarPath.getAbsolutePath());
      this.paramMappings =
          ParametersUtil.getParameterMappingsSetFromJar(this.database, this.jarPath);
    } else {
      this.catalogClassLoader = null;
      this.paramMappings = null;
    }

    // ------------------------------------------------------------
    // PROCEDURES
    // ------------------------------------------------------------
    this.procedures = database.getProcedures();
    this.proceduresArray = new Procedure[this.procedures.size() + 1];
    for (Procedure proc : this.procedures) {
      this.proceduresArray[proc.getId()] = proc;
      if (proc.getSystemproc()) {
        this.sysProcedures.add(proc);
      } else if (proc.getMapreduce()) {
        this.mrProcedures.add(proc);
      } else {
        this.regularProcedures.add(proc);
      }
    } // FOR

    authSystem = new AuthSystem(database, cluster.getSecurityenabled());

    siteTracker = null; // new SiteTracker(cluster.getSites());

    // count nodes
    this.numberOfHosts = cluster.getHosts().size();

    // count exec sites
    this.numberOfSites = cluster.getSites().size();

    // ------------------------------------------------------------
    // PARTITIONS
    // ------------------------------------------------------------
    this.numberOfPartitions = cluster.getNum_partitions();
    this.partitions = new Partition[this.numberOfPartitions];
    this.partitionIdArray = new Integer[this.numberOfPartitions];
    this.partitionSingletons = new PartitionSet[this.numberOfPartitions];
    this.partitionSiteXref = new int[this.numberOfPartitions];
    for (Partition part : CatalogUtil.getAllPartitions(catalog)) {
      int p = part.getId();
      this.partitions[p] = part;
      this.partitionIdArray[p] = Integer.valueOf(p);
      this.partitionSingletons[p] = new PartitionSet(p);
      this.partitionIdCollection.add(this.partitionIdArray[p]);
      this.partitionSiteXref[part.getId()] = ((Site) part.getParent()).getId();
    } // FOR

    // ------------------------------------------------------------
    // TABLES
    // ------------------------------------------------------------
    for (Table tbl : database.getTables()) {
      if (tbl.getSystable()) {
        sysTables.add(tbl);
      } else if (tbl.getMapreduce()) {
        mapReduceTables.add(tbl);
      } else if (tbl.getMaterializer() != null) {
        viewTables.add(tbl);
      } else {
        dataTables.add(tbl);
        if (tbl.getIsreplicated()) {
          replicatedTables.add(tbl);
        }
        if (tbl.getEvictable()) {
          evictableTables.add(tbl);
        }
      }
    } // FOR

    // PLANFRAGMENTS
    this.initPlanFragments();
  }
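One design choice worth calling out in the PARTITIONS block: every lookup structure (partitions, partitionIdArray, partitionSiteXref) is a dense array indexed directly by partition id, so hot-path lookups cost one array access instead of a hash probe. A minimal sketch of the pattern, with a hypothetical Partition stand-in; it relies on ids being contiguous from zero, which the loop above assumes as well:

import java.util.Arrays;
import java.util.List;

public class IdIndexedLookup {
  // Hypothetical stand-in for the catalog's Partition type
  static final class Partition {
    final int id;
    final String name;
    Partition(int id, String name) { this.id = id; this.name = name; }
  }

  public static void main(String[] args) {
    List<Partition> fromCatalog =
        Arrays.asList(new Partition(0, "p0"), new Partition(1, "p1"), new Partition(2, "p2"));

    // Dense id-indexed array: valid because partition ids run 0..N-1
    Partition[] byId = new Partition[fromCatalog.size()];
    for (Partition p : fromCatalog) byId[p.id] = p;

    // O(1) hot-path lookup: one array access, no hashing, no autoboxing
    System.out.println(byId[2].name);
  }
}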
Code example #28
  /**
   * Add two transactions with disjoint partition sets and a third that touches all partitions.
   * The first two come out right away and get marked as done; the third doesn't come out until
   * everyone else is done.
   *
   * @throws InterruptedException
   */
  @Test
  public void testDisjointTransactions() throws InterruptedException {
    final long txn_id0 = 1000;
    final long txn_id1 = 2000;
    final long txn_id2 = 3000;
    Collection<Integer> partitions0 = new HashSet<Integer>();
    partitions0.add(0);
    partitions0.add(2);
    Collection<Integer> partitions1 = new HashSet<Integer>();
    partitions1.add(1);
    partitions1.add(3);
    Collection<Integer> partitions2 = CatalogUtil.getAllPartitionIds(catalog_db);

    final MockCallback inner_callback0 = new MockCallback();
    TransactionInitWrapperCallback outer_callback0 =
        new TransactionInitWrapperCallback(hstore_site);
    outer_callback0.init(txn_id0, partitions0, inner_callback0);

    final MockCallback inner_callback1 = new MockCallback();
    TransactionInitWrapperCallback outer_callback1 =
        new TransactionInitWrapperCallback(hstore_site);
    outer_callback1.init(txn_id1, partitions1, inner_callback1);

    final MockCallback inner_callback2 = new MockCallback();
    TransactionInitWrapperCallback outer_callback2 =
        new TransactionInitWrapperCallback(hstore_site);
    outer_callback2.init(txn_id2, partitions2, inner_callback2);

    this.queue.insert(txn_id0, partitions0, outer_callback0);
    this.queue.insert(txn_id1, partitions1, outer_callback1);

    // create another thread to get the locks in order
    Thread t =
        new Thread() {
          public void run() {
            try {
              inner_callback0.lock.acquire();
              for (int partition = 0; partition < NUM_PARTITONS; ++partition) {
                queue.finished(txn_id0, Status.OK, partition);
              }
            } catch (InterruptedException e) {
            }
            try {
              inner_callback1.lock.acquire();
              for (int partition = 0; partition < NUM_PARTITONS; ++partition) {
                queue.finished(txn_id1, Status.OK, partition);
              }
            } catch (InterruptedException e) {
            }
            try {
              inner_callback2.lock.acquire();
              for (int partition = 0; partition < NUM_PARTITONS; ++partition) {
                queue.finished(txn_id2, Status.OK, partition);
              }
            } catch (InterruptedException e) {
            }
          }
        };
    t.start();

    // both of the first two disjoint txns should be released on the same call to checkQueues()
    while (queue.checkQueues() == false) {
      ThreadUtil.sleep(10);
    }
    assertTrue(queue.isEmpty());

    // add the third txn and wait for it
    this.queue.insert(txn_id2, partitions2, outer_callback2);
    while (queue.isEmpty() == false) {
      queue.checkQueues();
      ThreadUtil.sleep(10);
    }

    // wait for all the locks to be acquired
    t.join();
  }
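What this test pins down is partition-granularity locking with smallest-txn-id-first ordering: transactions with disjoint partition sets are released on the same checkQueues() pass, while a transaction touching every partition waits until finished() has freed them all. A toy, single-threaded sketch of those semantics — a deliberate simplification, not the actual H-Store queue implementation:

import java.util.Arrays;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;

public class ToyPartitionQueue {
  private final long[] owner; // txn id holding each partition, -1 if free
  private final SortedMap<Long, Set<Integer>> waiting = new TreeMap<Long, Set<Integer>>();

  public ToyPartitionQueue(int numPartitions) {
    this.owner = new long[numPartitions];
    Arrays.fill(this.owner, -1L);
  }

  public void insert(long txnId, Set<Integer> partitions) {
    this.waiting.put(txnId, partitions);
  }

  /** Release every waiting txn whose partitions are all free; true if any came out. */
  public boolean checkQueues() {
    boolean released = false;
    Iterator<Map.Entry<Long, Set<Integer>>> it = this.waiting.entrySet().iterator();
    while (it.hasNext()) { // TreeMap iterates in ascending txn id order
      Map.Entry<Long, Set<Integer>> e = it.next();
      boolean allFree = true;
      for (int p : e.getValue()) allFree &= (this.owner[p] == -1L);
      if (allFree) {
        for (int p : e.getValue()) this.owner[p] = e.getKey();
        it.remove();
        released = true;
      }
    }
    return released;
  }

  public void finished(long txnId, int partition) {
    if (this.owner[partition] == txnId) this.owner[partition] = -1L;
  }

  public boolean isEmpty() {
    return this.waiting.isEmpty();
  }
}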
Code example #29
  /**
   * MAIN!
   *
   * @param vargs
   * @throws Exception
   */
  public static void main(String[] vargs) throws Exception {
    ArgumentsParser args = ArgumentsParser.load(vargs);
    args.require(
        ArgumentsParser.PARAM_CATALOG,
        ArgumentsParser.PARAM_WORKLOAD,
        ArgumentsParser.PARAM_PARTITION_PLAN,
        ArgumentsParser.PARAM_DESIGNER_INTERVALS
        // ArgumentsParser.PARAM_DESIGNER_HINTS
        );
    assert (args.workload.getTransactionCount() > 0)
        : "No transactions were loaded from " + args.workload;

    if (args.hasParam(ArgumentsParser.PARAM_CATALOG_HOSTS)) {
      ClusterConfiguration cc =
          new ClusterConfiguration(args.getParam(ArgumentsParser.PARAM_CATALOG_HOSTS));
      args.updateCatalog(FixCatalog.cloneCatalog(args.catalog, cc), null);
    }

    // If given a PartitionPlan, then update the catalog
    File pplan_path = new File(args.getParam(ArgumentsParser.PARAM_PARTITION_PLAN));
    if (pplan_path.exists()) {
      PartitionPlan pplan = new PartitionPlan();
      pplan.load(pplan_path, args.catalog_db);
      if (args.getBooleanParam(ArgumentsParser.PARAM_PARTITION_PLAN_REMOVE_PROCS, false)) {
        for (Procedure catalog_proc : pplan.proc_entries.keySet()) {
          pplan.setNullProcParameter(catalog_proc);
        } // FOR
      }
      if (args.getBooleanParam(ArgumentsParser.PARAM_PARTITION_PLAN_RANDOM_PROCS, false)) {
        for (Procedure catalog_proc : pplan.proc_entries.keySet()) {
          pplan.setRandomProcParameter(catalog_proc);
        } // FOR
      }
      pplan.apply(args.catalog_db);
      System.out.println("Applied PartitionPlan '" + pplan_path + "' to catalog\n" + pplan);
      System.out.print(StringUtil.DOUBLE_LINE);

      if (args.hasParam(ArgumentsParser.PARAM_PARTITION_PLAN_OUTPUT)) {
        String output = args.getParam(ArgumentsParser.PARAM_PARTITION_PLAN_OUTPUT);
        if (output.equals("-")) output = pplan_path.getAbsolutePath();
        pplan.save(new File(output));
        System.out.println("Saved PartitionPlan to '" + output + "'");
      }
    } else {
      System.err.println("PartitionPlan file '" + pplan_path + "' does not exist. Ignoring...");
    }
    System.out.flush();

    int num_intervals = args.num_intervals;
    TimeIntervalCostModel<SingleSitedCostModel> costmodel =
        new TimeIntervalCostModel<SingleSitedCostModel>(
            args.catalogContext, SingleSitedCostModel.class, num_intervals);
    if (args.hasParam(ArgumentsParser.PARAM_DESIGNER_HINTS))
      costmodel.applyDesignerHints(args.designer_hints);
    double cost = costmodel.estimateWorkloadCost(args.catalogContext, args.workload);

    Map<String, Object> m = new LinkedHashMap<String, Object>();
    m.put("PARTITIONS", CatalogUtil.getNumberOfPartitions(args.catalog_db));
    m.put("INTERVALS", args.num_intervals);
    m.put("EXEC COST", costmodel.last_execution_cost);
    m.put("SKEW COST", costmodel.last_skew_cost);
    m.put("TOTAL COST", cost);
    m.put("PARTITIONS TOUCHED", costmodel.getTxnPartitionAccessHistogram().getSampleCount());
    System.out.println(StringUtil.formatMaps(m));

    m.clear();
    // Optional: per-interval partition-access histograms are available via
    // costmodel.getCostModel(i) for each interval if a breakdown is needed.
  }