public String toString() {
    StringBuilder buffer = new StringBuilder();
    buffer.append("Hosts: ").append(this.getHosts().size()).append("\n");
    buffer.append("Sites: ").append(this.sites.size()).append("\n");
    buffer.append("Fragments: ").append(this.fragments.size()).append("\n");

    SortedSet<String> hosts = new TreeSet<String>();
    hosts.addAll(this.getHosts());
    for (String host_key : hosts) {
        buffer.append("\nHost ").append(CatalogKey.getNameFromKey(host_key)).append("\n");
        for (SiteEntry site : this.host_site_xref.get(host_key)) {
            buffer.append(SPACER).append("Site ").append(site.getId()).append("\n");
            for (FragmentEntry fragment : site.getFragments()) {
                buffer.append(SPACER).append(SPACER)
                      .append("Fragment ").append(fragment)
                      .append(" Size=").append(fragment.getEstimatedSize())
                      .append(" Heat=").append(fragment.getEstimatedHeat())
                      .append("\n");
            } // FOR
        } // FOR
        buffer.append("--------------------");
    } // FOR
    return (buffer.toString());
}
/**
 * Return the FragmentEntry for the given table and hash value,
 * or null if no matching fragment exists.
 */
public FragmentEntry getFragment(Table catalog_tbl, int hash) {
    String table_key = CatalogKey.createKey(catalog_tbl);
    for (FragmentEntry fragment : this.fragments) {
        if (fragment.getTableKey().equals(table_key) && fragment.getHashKey() == hash) {
            return (fragment);
        }
    } // FOR
    return (null);
}
/**
 * Resolve the stored host keys into their Host catalog objects
 * using the given Database.
 */
public Set<Host> getHosts(Database catalog_db) {
    Set<Host> hosts = new HashSet<Host>();
    for (String host_key : this.getHosts()) {
        Host catalog_host = CatalogKey.getFromKey(catalog_db, host_key, Host.class);
        assert (catalog_host != null);
        hosts.add(catalog_host);
    } // FOR
    return (hosts);
}
/**
 * Assign the SiteEntry to a particular Host
 *
 * @param catalog_host the Host that the site should be assigned to
 * @param site the SiteEntry being (re)assigned
 */
public void assign(Host catalog_host, SiteEntry site) {
    assert (catalog_host != null);
    assert (site != null);

    // If the site was already assigned to a host, remove it from that host's set first
    if (site.getHostKey() != null) {
        this.host_site_xref.get(site.getHostKey()).remove(site);
    }

    String host_key = CatalogKey.createKey(catalog_host);
    site.setHostKey(host_key);
    if (!this.host_site_xref.containsKey(host_key)) {
        this.host_site_xref.put(host_key, new TreeSet<SiteEntry>());
    }
    this.host_site_xref.get(host_key).add(site);
    this.site_id_xref.put(site.getId(), site);
    this.sites.add(site);
}
@Override
public void process(Pair<TransactionTrace, Integer> p) {
    assert (p != null);
    final TransactionTrace txn_trace = p.getFirst();
    final int i = p.getSecond(); // Interval
    final int txn_weight = (use_txn_weights ? txn_trace.getWeight() : 1);
    final String proc_key = CatalogKey.createKey(CatalogUtil.DEFAULT_DATABASE_NAME, txn_trace.getCatalogItemName());

    // Terrible Hack: Assume that we are using the SingleSitedCostModel and that
    // it will return fixed values based on whether the txn is single-partitioned or not
    SingleSitedCostModel singlesited_cost_model = (SingleSitedCostModel) cost_models[i];

    total_interval_txns[i] += txn_weight;
    total_interval_queries[i] += (txn_trace.getQueryCount() * txn_weight);
    histogram_procs.put(proc_key, txn_weight);

    try {
        singlesited_cost_model.estimateTransactionCost(catalogContext, workload, filter, txn_trace);
        TransactionCacheEntry txn_entry = singlesited_cost_model.getTransactionCacheEntry(txn_trace);
        assert (txn_entry != null) : "No txn entry for " + txn_trace;
        Collection<Integer> partitions = txn_entry.getTouchedPartitions();

        // If the txn runs on only one partition, then the cost is nothing
        if (txn_entry.isSinglePartitioned()) {
            singlepartition_ctrs[i] += txn_weight;
            if (!partitions.isEmpty()) {
                assert (txn_entry.getAllTouchedPartitionsHistogram().getValueCount() == 1) :
                    txn_entry + " says it was single-partitioned but the partition count says otherwise:\n" + txn_entry.debug();
                singlepartition_with_partitions_ctrs[i] += txn_weight;
            }
            histogram_sp_procs.put(proc_key, txn_weight);

        // If the txn runs on multiple partitions, then the cost is...
        // XXX 2010-06-28: The number of partitions that the txn touches divided by the total number of partitions
        // XXX 2010-07-02: The histogram for the total number of partitions touched by all of the queries
        //                 in the transaction. This ensures that txns with just one multi-partition query
        //                 aren't weighted the same as txns with many multi-partition queries
        } else {
            assert (!partitions.isEmpty()) : "No touched partitions for " + txn_trace;
            if (partitions.size() == 1 && txn_entry.getExecutionPartition() != HStoreConstants.NULL_PARTITION_ID) {
                assert (CollectionUtil.first(partitions) != txn_entry.getExecutionPartition()) : txn_entry.debug();
                exec_mismatch_ctrs[i] += txn_weight;
                partitions_touched[i] += txn_weight;
            } else {
                assert (partitions.size() > 1) :
                    String.format("%s is not marked as single-partition but it only touches one partition\n%s",
                                  txn_trace, txn_entry.debug());
            }
            partitions_touched[i] += (partitions.size() * txn_weight); // Txns
            multipartition_ctrs[i] += txn_weight;
            histogram_mp_procs.put(proc_key, txn_weight);
        }

        Integer base_partition = txn_entry.getExecutionPartition();
        if (base_partition != null) {
            exec_histogram[i].put(base_partition, txn_weight);
        } else {
            exec_histogram[i].put(all_partitions, txn_weight);
        }

        if (debug.val) { // && txn_trace.getCatalogItemName().equalsIgnoreCase("DeleteCallForwarding")
            Procedure catalog_proc = txn_trace.getCatalogItem(catalogContext.database);
            Map<String, Object> inner = new LinkedHashMap<String, Object>();
            for (Statement catalog_stmt : catalog_proc.getStatements()) {
                inner.put(catalog_stmt.fullName(), CatalogUtil.getReferencedTables(catalog_stmt));
            }
            Map<String, Object> m = new LinkedHashMap<String, Object>();
            m.put(txn_trace.toString(), null);
            m.put("Interval", i);
            m.put("Single-Partition", txn_entry.isSinglePartitioned());
            m.put("Base Partition", base_partition);
            m.put("Touched Partitions", partitions);
            m.put(catalog_proc.fullName(), inner);
            LOG.debug(StringUtil.formatMaps(m));
        }

        // We need to keep a count of the number of txns that didn't have all of their queries
        // estimated completely so that we can update the access histograms down below for
        // entropy calculations. Note that this is at the txn level, not the query level.
        if (!txn_entry.isComplete()) {
            incomplete_txn_ctrs[i] += txn_weight;
            tmp_missingPartitions.clear();
            tmp_missingPartitions.addAll(all_partitions);
            tmp_missingPartitions.removeAll(txn_entry.getTouchedPartitions());
            // Update the histogram for this interval to keep track of how many times
            // we need to increase the partition access histogram
            incomplete_txn_histogram[i].put(tmp_missingPartitions, txn_weight);

            if (trace.val) {
                Map<String, Object> m = new LinkedHashMap<String, Object>();
                m.put(String.format("Marking %s as incomplete in interval #%d", txn_trace, i), null);
                m.put("Examined Queries", txn_entry.getExaminedQueryCount());
                m.put("Total Queries", txn_entry.getTotalQueryCount());
                m.put("Touched Partitions", txn_entry.getTouchedPartitions());
                m.put("Missing Partitions", tmp_missingPartitions);
                LOG.trace(StringUtil.formatMaps(m));
            }
        }
    } catch (Exception ex) {
        CatalogUtil.saveCatalog(catalogContext.catalog, CatalogUtil.CATALOG_FILENAME);
        throw new RuntimeException("Failed to estimate cost for " + txn_trace.getCatalogItemName() + " at interval " + i, ex);
    }
}