@Override
protected void setUp() throws Exception {
    super.setUp(ProjectType.TPCC);
    addPartitions(NUM_PARTITONS);
    Site catalog_site = CollectionUtil.first(CatalogUtil.getCluster(catalog).getSites());
    assertNotNull(catalog_site);
    hstore_site = HStore.initialize(catalog_site, HStoreConf.singleton());
    for (int p = 0; p < NUM_PARTITONS; p++) {
        PartitionExecutor site = new MockPartitionExecutor(p, catalog, p_estimator);
        hstore_site.addPartitionExecutor(p, site);
    } // FOR
    this.queue = new TransactionQueueManager(hstore_site);
}
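// For context, a sketch of the fixture state that setUp() above assumes. The
// declarations and the partition count are hypothetical; the actual test class
// may declare these differently.
private static final int NUM_PARTITONS = 4; // illustrative value only
private HStoreSite hstore_site;
private TransactionQueueManager queue;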
/**
 * Add fake partitions to the loaded catalog, assuming that there is
 * one partition per site.
 * @param num_partitions
 */
protected void addPartitions(int num_partitions) throws Exception {
    // HACK! If we already have this many partitions in the catalog, then we won't
    // recreate it. This fixes problems where we need to reference the same catalog
    // objects in multiple test cases.
    if (CatalogUtil.getNumberOfPartitions(catalog_db) != num_partitions) {
        ClusterConfiguration cc = new ClusterConfiguration();
        for (int i = 0; i < num_partitions; i++) {
            cc.addPartition("localhost", 0, i);
        } // FOR
        catalog = FixCatalog.addHostInfo(catalog, cc);
        this.init(this.last_type, catalog);
    }
    Cluster cluster = CatalogUtil.getCluster(catalog_db);
    assertEquals(num_partitions, cluster.getNum_partitions());
    assertEquals(num_partitions, CatalogUtil.getNumberOfPartitions(cluster));
}
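// A hedged usage sketch: a test case built on this helper might request a
// four-partition catalog and then verify the count through the same CatalogUtil
// calls used above. The test name and partition count are illustrative only.
public void testFourPartitionCatalog() throws Exception {
    addPartitions(4);
    Cluster cluster = CatalogUtil.getCluster(catalog_db);
    assertEquals(4, CatalogUtil.getNumberOfPartitions(cluster));
}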
protected void initializeCluster(int num_hosts, int num_sites, int num_partitions) throws Exception {
    // HACK! If the catalog already matches this cluster configuration, then we won't
    // recreate it. This fixes problems where we need to reference the same catalog
    // objects in multiple test cases.
    if (CatalogUtil.getNumberOfHosts(catalog_db) != num_hosts ||
            CatalogUtil.getNumberOfSites(catalog_db) != (num_hosts * num_sites) ||
            CatalogUtil.getNumberOfPartitions(catalog_db) != (num_hosts * num_sites * num_partitions)) {
        catalog = FixCatalog.addHostInfo(catalog, "localhost", num_hosts, num_sites, num_partitions);
        this.init(this.last_type, catalog);
    }
    Cluster cluster = CatalogUtil.getCluster(catalog_db);
    assertEquals(num_hosts, CatalogUtil.getNumberOfHosts(catalog_db));
    assertEquals((num_hosts * num_sites), CatalogUtil.getNumberOfSites(catalog_db));
    assertEquals((num_hosts * num_sites * num_partitions), CatalogUtil.getNumberOfPartitions(cluster));
    assertEquals((num_hosts * num_sites * num_partitions), cluster.getNum_partitions());
}
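// A hedged usage sketch for initializeCluster(): the num_sites argument is
// per-host and num_partitions is per-site, so 2 hosts x 2 sites x 2 partitions
// should yield 8 partitions total. The test name and values are illustrative.
public void testTwoByTwoByTwoCluster() throws Exception {
    initializeCluster(2, 2, 2);
    assertEquals(8, CatalogUtil.getNumberOfPartitions(catalog_db));
}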
public TPCCSimulation(TPCCSimulation.ProcCaller client, RandomGenerator generator,
                      Clock clock, ScaleParameters parameters, TPCCConfig config,
                      double skewFactor, Catalog catalog) {
    assert parameters != null;
    this.client = client;
    this.generator = generator;
    this.clock = clock;
    this.parameters = parameters;
    this.affineWarehouse = lastAssignedWarehouseId;
    this.skewFactor = skewFactor;
    this.config = config;

    if (config.neworder_skew_warehouse) {
        if (debug.val) LOG.debug("Enabling W_ID Zipfian Skew: " + skewFactor);
        this.zipf = new RandomDistribution.Zipf(new Random(),
                parameters.starting_warehouse,
                parameters.last_warehouse + 1,
                Math.max(1.001d, this.skewFactor));
        this.custom_skew = new RandomDistribution.HotWarmCold(new Random(),
                parameters.starting_warehouse + 1,
                parameters.last_warehouse,
                TPCCConstants.HOT_DATA_WORKLOAD_SKEW,
                TPCCConstants.HOT_DATA_SIZE,
                TPCCConstants.WARM_DATA_WORKLOAD_SKEW,
                TPCCConstants.WARM_DATA_SIZE);
    }
    if (config.warehouse_debug) {
        LOG.info("Enabling WAREHOUSE debug mode");
    }

    lastAssignedWarehouseId += 1;
    if (lastAssignedWarehouseId > parameters.last_warehouse)
        lastAssignedWarehouseId = 1;

    if (debug.val) {
        LOG.debug(this.toString());
    }

    if (config.neworder_multip_remote) {
        synchronized (TPCCSimulation.class) {
            if (remoteWarehouseIds == null) {
                remoteWarehouseIds = new HashMap<Integer, List<Integer>>();
                HashMap<Integer, Integer> partitionToSite = new HashMap<Integer, Integer>();

                Database catalog_db = CatalogUtil.getDatabase(catalog);
                DefaultHasher hasher = new DefaultHasher(catalog_db);
                for (Site s : CatalogUtil.getCluster(catalog_db).getSites()) {
                    for (Partition p : s.getPartitions())
                        partitionToSite.put(p.getId(), s.getId());
                } // FOR

                for (int w_id0 = parameters.starting_warehouse; w_id0 <= parameters.last_warehouse; w_id0++) {
                    final int partition0 = hasher.hash(w_id0);
                    final int site0 = partitionToSite.get(partition0);
                    final List<Integer> rList = new ArrayList<Integer>();
                    for (int w_id1 = parameters.starting_warehouse; w_id1 <= parameters.last_warehouse; w_id1++) {
                        // Figure out what partition this W_ID maps to
                        int partition1 = hasher.hash(w_id1);
                        // Check whether this partition is on our same local site
                        int site1 = partitionToSite.get(partition1);
                        if (site0 != site1) rList.add(w_id1);
                    } // FOR
                    remoteWarehouseIds.put(w_id0, rList);
                } // FOR

                LOG.debug("NewOrder Remote W_ID Mapping\n" + StringUtil.formatMaps(remoteWarehouseIds));
            }
        } // SYNCH
    }
}
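// A minimal, self-contained sketch of the grouping rule computed above: a
// warehouse w1 is "remote" with respect to w0 iff its partition lives on a
// different site. The siteOf map below is hypothetical stand-in data, not the
// catalog hasher; it assumes 4 warehouses spread over 2 sites.
import java.util.*;

public class RemoteWarehouseSketch {
    public static void main(String[] args) {
        Map<Integer, Integer> siteOf = new HashMap<>();
        siteOf.put(1, 0); siteOf.put(2, 0); // warehouses 1,2 -> site 0
        siteOf.put(3, 1); siteOf.put(4, 1); // warehouses 3,4 -> site 1

        Map<Integer, List<Integer>> remote = new HashMap<>();
        for (int w0 : siteOf.keySet()) {
            List<Integer> rList = new ArrayList<>();
            for (int w1 : siteOf.keySet()) {
                // Keep only warehouses whose site differs from w0's site
                if (!siteOf.get(w0).equals(siteOf.get(w1))) rList.add(w1);
            }
            remote.put(w0, rList);
        }
        System.out.println(remote); // e.g. {1=[3, 4], 2=[3, 4], 3=[1, 2], 4=[1, 2]}
    }
}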
public CatalogContext(Catalog catalog, File pathToCatalogJar) {
    // check the heck out of the given params in this immutable class
    assert (catalog != null);
    if (catalog == null) {
        throw new RuntimeException("Can't create CatalogContext with null catalog.");
    }

    this.jarPath = pathToCatalogJar;
    this.catalog = catalog;
    this.cluster = CatalogUtil.getCluster(this.catalog);
    this.database = CatalogUtil.getDatabase(this.catalog);
    this.hosts = this.cluster.getHosts();
    this.sites = this.cluster.getSites();

    if (this.jarPath != null) {
        this.catalogClassLoader = new JarClassLoader(this.jarPath.getAbsolutePath());
        this.paramMappings = ParametersUtil.getParameterMappingsSetFromJar(this.database, this.jarPath);
    } else {
        this.catalogClassLoader = null;
        this.paramMappings = null;
    }

    // ------------------------------------------------------------
    // PROCEDURES
    // ------------------------------------------------------------
    this.procedures = database.getProcedures();
    this.proceduresArray = new Procedure[this.procedures.size() + 1];
    for (Procedure proc : this.procedures) {
        this.proceduresArray[proc.getId()] = proc;
        if (proc.getSystemproc()) {
            this.sysProcedures.add(proc);
        } else if (proc.getMapreduce()) {
            this.mrProcedures.add(proc);
        } else {
            this.regularProcedures.add(proc);
        }
    } // FOR

    authSystem = new AuthSystem(database, cluster.getSecurityenabled());

    siteTracker = null; // new SiteTracker(cluster.getSites());

    // count nodes
    this.numberOfHosts = cluster.getHosts().size();
    // count exec sites
    this.numberOfSites = cluster.getSites().size();

    // ------------------------------------------------------------
    // PARTITIONS
    // ------------------------------------------------------------
    this.numberOfPartitions = cluster.getNum_partitions();
    this.partitions = new Partition[this.numberOfPartitions];
    this.partitionIdArray = new Integer[this.numberOfPartitions];
    this.partitionSingletons = new PartitionSet[this.numberOfPartitions];
    this.partitionSiteXref = new int[this.numberOfPartitions];
    for (Partition part : CatalogUtil.getAllPartitions(catalog)) {
        int p = part.getId();
        this.partitions[p] = part;
        this.partitionIdArray[p] = Integer.valueOf(p);
        this.partitionSingletons[p] = new PartitionSet(p);
        this.partitionIdCollection.add(this.partitionIdArray[p]);
        this.partitionSiteXref[part.getId()] = ((Site) part.getParent()).getId();
    } // FOR

    // ------------------------------------------------------------
    // TABLES
    // ------------------------------------------------------------
    for (Table tbl : database.getTables()) {
        if (tbl.getSystable()) {
            sysTables.add(tbl);
        } else if (tbl.getMapreduce()) {
            mapReduceTables.add(tbl);
        } else if (tbl.getMaterializer() != null) {
            viewTables.add(tbl);
        } else {
            dataTables.add(tbl);
            if (tbl.getIsreplicated()) {
                replicatedTables.add(tbl);
            }
            if (tbl.getEvictable()) {
                evictableTables.add(tbl);
            }
        }
    } // FOR

    // PLANFRAGMENTS
    this.initPlanFragments();
}
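// A self-contained sketch of the id-indexed lookup pattern used for
// proceduresArray above: sizing the array as (count + 1) leaves room for
// 1-based catalog ids so that lookup by id is a single array access. The
// String values and ids below are illustrative stand-ins, not the real
// catalog Procedure type.
import java.util.*;

public class IdIndexSketch {
    public static void main(String[] args) {
        Map<Integer, String> procs = new LinkedHashMap<>();
        procs.put(1, "neworder"); // assume catalog ids start at 1
        procs.put(2, "payment");

        String[] byId = new String[procs.size() + 1]; // index 0 left unused
        for (Map.Entry<Integer, String> e : procs.entrySet())
            byId[e.getKey()] = e.getValue();

        System.out.println(byId[2]); // payment
    }
}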
protected Cluster getCluster() {
    assertNotNull(catalog);
    Cluster catalog_clus = CatalogUtil.getCluster(catalog);
    assert (catalog_clus != null) : "Failed to retrieve cluster object from catalog";
    return (catalog_clus);
}