Example #1
  @SuppressWarnings("unchecked")
  protected Map<String, Object>[] getDebugMaps() {
    List<Map<String, Object>> maps = new ArrayList<Map<String, Object>>();
    Map<String, Object> m;

    m = new LinkedHashMap<String, Object>();
    m.put("Transaction Id", this.txn_id);
    m.put("Procedure", this.catalog_proc);
    m.put("Base Partition", this.base_partition);
    m.put("Hash Code", this.hashCode());
    m.put("Pending Error", this.pending_error);
    maps.add(m);

    // Predictions
    m = new LinkedHashMap<String, Object>();
    m.put("Predict Single-Partitioned", this.predict_singlePartition);
    m.put("Predict Touched Partitions", this.getPredictTouchedPartitions());
    m.put("Predict Read Only", this.isPredictReadOnly());
    m.put("Predict Abortable", this.isPredictAbortable());
    maps.add(m);

    // Global State
    m = new LinkedHashMap<String, Object>();
    m.put("Marked Released", PartitionSet.toString(this.released));
    m.put("Marked Prepared", PartitionSet.toString(this.prepared));
    m.put("Marked Finished", PartitionSet.toString(this.finished));
    m.put("Marked Deletable", this.checkDeletableFlag());
    maps.add(m);

    // Prefetch State
    if (this.prefetch != null) {
      m = new LinkedHashMap<String, Object>();
      m.put("Prefetch Partitions", this.prefetch.partitions);
      m.put("Prefetch Fragments", StringUtil.join("\n", this.prefetch.fragments));
      m.put("Prefetch Parameters", StringUtil.join("\n", this.prefetch.params));
      m.put("Prefetch Raw Parameters", StringUtil.join("\n", this.prefetch.paramsRaw));
      maps.add(m);
    }

    // Partition Execution State
    m = new LinkedHashMap<String, Object>();
    m.put("Current Round State", Arrays.toString(this.round_state));
    m.put("Exec Read-Only", PartitionSet.toString(this.exec_readOnly));
    m.put("First UndoToken", Arrays.toString(this.exec_firstUndoToken));
    m.put("Last UndoToken", Arrays.toString(this.exec_lastUndoToken));
    m.put("No Undo Buffer", PartitionSet.toString(this.exec_noUndoBuffer));
    m.put("# of Rounds", Arrays.toString(this.round_ctr));
    m.put("Executed Work", PartitionSet.toString(this.exec_eeWork));
    maps.add(m);

    return ((Map<String, Object>[]) maps.toArray(new Map[0]));
  }
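  // Hedged usage sketch (hypothetical helper, not in the original source): the array
  // returned above is shaped for StringUtil.formatMaps(), which the surrounding code
  // uses to render each map as an aligned key/value section.
  protected String debugString() {
    return StringUtil.formatMaps(this.getDebugMaps());
  }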
Example #2
 /**
  * Call VoltDB.crashVoltDB on behalf of the EE.
  *
  * @param reason Reason the EE crashed
  * @param traces Stack traces reported by the EE
  * @param filename Name of the source file where the error occurred
  * @param lineno Line number where the error occurred
  */
 public static void crashVoltDB(String reason, String[] traces, String filename, int lineno) {
   if (reason != null) {
     LOG.fatal("ExecutionEngine requested that we crash: " + reason);
     LOG.fatal("Error was in " + filename + ":" + lineno + "\n" + StringUtil.join("\n", traces));
   }
   VoltDB.crashVoltDB();
 }
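 // Hedged usage sketch (hypothetical values, not from the original source): how a
 // native-error handler on the Java side might invoke the hook above before the
 // process halts.
 static void exampleNativeErrorHandler() {
   String[] traces = {"frame #0: ExecutorContext::executeQuery", "frame #1: ..."};
   crashVoltDB("index out of bounds in executor", traces, "executorcontext.cpp", 142);
 }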
  /** testLoadAirportFlights */
  public void testLoadAirportFlights() throws Exception {
    Map<String, Histogram<String>> histograms =
        SEATSHistogramUtil.loadAirportFlights(AIRLINE_DATA_DIR);
    assertNotNull(histograms);
    assertFalse(histograms.isEmpty());
    assert (histograms.size() >= 200);

    // Just some airports that we expect to be in there
    String[] airports = {"BWI", "LAX", "JFK", "MDW", "ATL", "SFO", "ORD"};
    for (String a : airports) {
      assertTrue(a, histograms.containsKey(a));
    } // FOR

    System.err.println(StringUtil.formatMaps(histograms));

    // We expect ATL to be the max
    //        assertEquals("ATL", histogram.getMaxCountValue());

    // Make sure the values are formatted correctly
    //        ListOrderedMap<String, Histogram<String>> m = new ListOrderedMap<String,
    // Histogram<String>>();
    Pattern p = Pattern.compile("\\w{3}"); // three-character airport codes
    for (String s_airport : histograms.keySet()) {
      assert (p.matcher(s_airport).matches()) : "Invalid source airport: " + s_airport;
      //            m.put(s_airport, histograms.get(s_airport));
      for (Object value : histograms.get(s_airport).values()) {
        assert (p.matcher(value.toString()).matches()) : "Invalid destination airport: " + value;
      } // FOR
    } // FOR
    //        System.err.println(StringUtil.formatMaps(m));
  }
Example #4
  public synchronized void tick(int counter) {
    this.tick_counter = counter;
    if (config.warehouse_debug) {
      Map<String, Histogram<Integer>> m = new ListOrderedMap<String, Histogram<Integer>>();
      m.put(
          String.format(
              "LAST ROUND\n - SampleCount=%d", this.lastWarehouseHistory.getSampleCount()),
          this.lastWarehouseHistory);
      m.put(
          String.format("TOTAL\n - SampleCount=%d", this.totalWarehouseHistory.getSampleCount()),
          this.totalWarehouseHistory);

      long total = this.totalWarehouseHistory.getSampleCount();
      LOG.info(
          String.format(
              "ROUND #%02d - Warehouse Temporal Skew - %d / %d [%.2f]\n%s",
              this.tick_counter,
              this.temporal_counter,
              total,
              (this.temporal_counter / (double) total),
              StringUtil.formatMaps(m)));
      LOG.info(StringUtil.SINGLE_LINE);
      this.lastWarehouseHistory.clearValues();
    }
  }
Example #5
 /**
  * Called by Java to store dependencies for the EE. Creates a private list of dependencies to be
  * manipulated by the tracker. Does not copy the table data - references WorkUnit's tables.
  *
  * @param dependencies
  */
 public void stashWorkUnitDependencies(final Map<Integer, List<VoltTable>> dependencies) {
   if (d)
     LOG.debug(
         String.format(
             "Stashing %d InputDependencies:\n%s",
             dependencies.size(), StringUtil.formatMaps(dependencies)));
   m_dependencyTracker.trackNewWorkUnit(dependencies);
 }
 private Map<String, Long> getCodeXref(String col_name) {
   Map<String, Long> m = this.code_id_xref.get(col_name);
   assert (m != null) : "Invalid code xref mapping column '" + col_name + "'";
   assert (m.isEmpty() == false)
       : "Empty code xref mapping for column '"
           + col_name
           + "'\n"
           + StringUtil.formatMaps(this.code_id_xref);
   return (m);
 }
 public String debug() {
   Map<String, Object> m = new LinkedHashMap<String, Object>();
   m.put("PartitionId", this.partitionId);
   m.put("# of Elements", this.size());
   m.put("Wait Time", this.waitTime);
   m.put("Next Time Remaining", Math.max(0, EstTime.currentTimeMillis() - this.blockTime));
   m.put("Next Txn", this.nextTxn);
   m.put("Last Popped Txn", this.lastTxnPopped);
   m.put("Last Seen Txn", this.lastSeenTxn);
   return (StringUtil.formatMaps(m));
 }
  protected QueueState checkQueueState() {
    QueueState newState = QueueState.UNBLOCKED;
    AbstractTransaction ts = super.peek();
    if (ts == null) {
      if (t) LOG.trace(String.format("Partition %d :: Queue is empty.", this.partitionId));
      newState = QueueState.BLOCKED_EMPTY;
    }
    // Check whether we can unblock now
    else if (ts == this.nextTxn && this.state != QueueState.UNBLOCKED) {
      if (EstTime.currentTimeMillis() < this.blockTime) {
        newState = QueueState.BLOCKED_SAFETY;
      } else if (d) {
        LOG.debug(
            String.format(
                "Partition %d :: Wait time for %s has passed. Unblocking...",
                this.partitionId, this.nextTxn));
      }
    }
    // This is a new txn and we should wait...
    else if (this.nextTxn != ts) {
      long txnTimestamp =
          TransactionIdManager.getTimestampFromTransactionId(ts.getTransactionId().longValue());
      long timestamp = EstTime.currentTimeMillis();
      long waitTime = Math.max(0, this.waitTime - (timestamp - txnTimestamp));
      newState = (waitTime > 0 ? QueueState.BLOCKED_SAFETY : QueueState.UNBLOCKED);
      this.blockTime = timestamp + waitTime;
      this.nextTxn = ts;

      if (d) {
        String debug = "";
        if (t) {
          Map<String, Object> m = new LinkedHashMap<String, Object>();
          m.put("Txn Init Timestamp", txnTimestamp);
          m.put("Current Timestamp", timestamp);
          m.put("Block Time Remaining", (this.blockTime - timestamp));
          debug = "\n" + StringUtil.formatMaps(m);
        }
        LOG.debug(
            String.format(
                "Partition %d :: Blocking %s for %d ms [maxWait=%d]%s",
                this.partitionId, ts, (this.blockTime - timestamp), this.waitTime, debug));
      }
    }

    if (newState != this.state) {
      this.state = newState;
      if (d)
        LOG.debug(
            String.format(
                "Partition %d :: State:%s / NextTxn:%s",
                this.partitionId, this.state, this.nextTxn));
    }
    return this.state;
  }
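  // Hedged helper sketch (hypothetical method, not in the original source): the
  // remaining block time computed inside checkQueueState(), factored out so the
  // arithmetic is easy to follow. Example: remainingWait(1000L, 1040L, 100L) == 60.
  static long remainingWait(long txnTimestamp, long now, long maxWait) {
    return Math.max(0, maxWait - (now - txnTimestamp));
  }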
 @Override
 public String toString() {
   Map<String, Object> m = new ListOrderedMap<String, Object>();
   m.put("Scale Factor", this.scale_factor);
   m.put("# of Reservations", this.num_reservations);
   m.put("Flight Start Date", this.flight_start_date);
   m.put("Flight Upcoming Date", this.flight_upcoming_date);
   m.put("Flight Past Days", this.flight_past_days);
   m.put("Flight Future Days", this.flight_future_days);
   m.put("Num Flights", this.num_flights);
   m.put("Num Customers", this.num_customers);
   m.put("Num Reservations", this.num_reservations);
   return (StringUtil.formatMaps(m));
 }
Example #10
 @Override
 public String toString() {
   Map<String, Object> m = new ListOrderedMap<String, Object>();
   m.put("Warehouses", parameters.warehouses);
   m.put(
       "W_ID Range",
       String.format("[%d, %d]", parameters.starting_warehouse, parameters.last_warehouse));
   m.put("Districts per Warehouse", parameters.districtsPerWarehouse);
   m.put("Custers per District", parameters.customersPerDistrict);
   m.put("Initial Orders per District", parameters.newOrdersPerDistrict);
   m.put("Items", parameters.items);
   m.put("Affine Warehouse", lastAssignedWarehouseId);
   m.put("Skew Factor", this.skewFactor);
   if (this.zipf != null && this.zipf.isHistoryEnabled()) {
     m.put("Skewed Warehouses", this.zipf.getHistory());
   }
   return ("TPCC Simulator Options\n" + StringUtil.formatMaps(m, this.config.debugMap()));
 }
Example #11
 /**
  * Store dependency tables for later retrieval by the EE.
  *
  * @param workunit
  */
 void trackNewWorkUnit(final Map<Integer, List<VoltTable>> dependencies) {
   for (final Entry<Integer, List<VoltTable>> e : dependencies.entrySet()) {
     // could do this optionally - debug only.
     if (d) verifyDependencySanity(e.getKey(), e.getValue());
     // create a new list of references to the workunit's table
     // to avoid any changes to the WorkUnit's list. But do not
     // copy the table data.
     ArrayDeque<VoltTable> deque = m_depsById.get(e.getKey());
     if (deque == null) {
       deque = new ArrayDeque<VoltTable>();
       // intentionally overwrite the previous dependency id.
       // would a lookup and a clear() be faster?
       m_depsById.put(e.getKey(), deque);
     } else {
       deque.clear();
     }
     deque.addAll(e.getValue());
   }
   if (d) LOG.debug("Current InputDepencies:\n" + StringUtil.formatMaps(m_depsById));
 }
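 // Hedged illustration (hypothetical method, not in the original source): because
 // trackNewWorkUnit() stores references into its deques rather than copying table
 // data, the tracker and the caller share the same VoltTable objects afterwards;
 // only the deque structure itself is private to the tracker.
 void exampleReferenceSemantics(Map<Integer, List<VoltTable>> deps) {
   VoltTable shared = deps.get(1).get(0); // a table owned by the caller's WorkUnit
   trackNewWorkUnit(deps);
   // m_depsById now holds a reference to 'shared', not a copy of its rows
   assert m_depsById.get(1).peekFirst() == shared;
 }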
Example #12
  public String debug() {
    long timestamp = System.currentTimeMillis();
    AbstractTransaction peek = super.peek();

    @SuppressWarnings("unchecked")
    Map<String, Object> m[] = new Map[3];
    int i = -1;

    m[++i] = new LinkedHashMap<String, Object>();
    m[i].put("PartitionId", this.partitionId);
    m[i].put("Current State", this.state);
    m[i].put("# of Elements", this.size());
    m[i].put("# of Popped", this.txnsPopped);
    m[i].put("Last Popped Txn", this.lastTxnPopped);
    m[i].put("Last Seen Txn", this.lastSeenTxnId);
    m[i].put("Last Safe Txn", this.lastSafeTxnId);

    m[++i] = new LinkedHashMap<String, Object>();
    m[i].put("Throttled", super.isThrottled());
    m[i].put("Threshold", super.getThrottleThreshold());
    m[i].put("Release", super.getThrottleRelease());
    m[i].put("Increase Delta", super.getThrottleThresholdIncreaseDelta());
    m[i].put("Max Size", super.getThrottleThresholdMaxSize());

    m[++i] = new LinkedHashMap<String, Object>();
    m[i].put("Peek Txn", (peek == null ? "null" : peek));
    m[i].put("Wait Time", this.waitTime + " ms");
    m[i].put("Current Time", timestamp);
    m[i].put(
        "Blocked Time",
        (this.blockTimestamp > 0
            ? this.blockTimestamp + (this.blockTimestamp < timestamp ? " **PASSED**" : "")
            : "--"));
    m[i].put(
        "Blocked Remaining",
        (this.blockTimestamp > 0 ? Math.max(0, this.blockTimestamp - timestamp) + " ms" : "--"));

    return (StringUtil.formatMaps(m));
  }
Example #13
  /**
   * This is the most important method of the queue. This will figure out the next state and how
   * long we must wait until we can release the next transaction. <B>Note:</B> I believe that this
   * is the only thing that needs to be synchronized.
   *
   * @param afterRemoval If this flag is set to true, then it means that whoever is calling this
   *     method just removed something from the queue. That means that we need to go and check
   *     whether the lastSafeTxnId should change.
   * @return the current state of the queue
   */
  private QueueState checkQueueState(boolean afterRemoval) {
    if (trace.val && super.isEmpty() == false)
      LOG.trace(
          String.format(
              "Partition %d :: checkQueueState(afterPoll=%s) [current=%s]",
              this.partitionId, afterRemoval, this.state));
    QueueState newState = (afterRemoval ? QueueState.BLOCKED_SAFETY : QueueState.UNBLOCKED);
    long currentTimestamp = -1l;
    AbstractTransaction ts = super.peek(); // BLOCKING
    Long txnId = null;
    if (ts == null) {
      //            if (trace.val)
      //                LOG.trace(String.format("Partition %d :: Queue is empty.",
      // this.partitionId));
      newState = QueueState.BLOCKED_EMPTY;
    }
    // Check whether we can unblock now
    else {
      assert (ts.isInitialized())
          : String.format(
              "Unexpected uninitialized transaction %s [partition=%d]", ts, this.partitionId);
      txnId = ts.getTransactionId();
      // HACK: Ignore null txnIds
      if (txnId == null) {
        LOG.warn(
            String.format(
                "Partition %d :: Uninitialized transaction handle %s", this.partitionId, ts));
        return (this.state);
      }
      assert (txnId != null) : "Null transaction id from " + ts;

      // If this txnId is greater than the last safe one that we've seen, then we know
      // that the lastSafeTxnId has been polled. That means that we need to
      // wait for an appropriate amount of time before we're allowed to be executed.
      if (txnId.compareTo(this.lastSafeTxnId) > 0 && afterRemoval == false) {
        newState = QueueState.BLOCKED_ORDERING;
        if (debug.val)
          LOG.debug(
              String.format(
                  "Partition %d :: txnId[%d] > lastSafeTxnId[%d]",
                  this.partitionId, txnId, this.lastSafeTxnId));
      }
      // If our current block time is negative, then we know that we're the first txnId
      // that's been in the system. We'll also want to wait a bit before we're
      // allowed to be executed.
      else if (this.blockTimestamp == NULL_BLOCK_TIMESTAMP) {
        newState = QueueState.BLOCKED_SAFETY;
        if (debug.val)
          LOG.debug(
              String.format(
                  "Partition %d :: txnId[%d] ==> %s (blockTime=%d)",
                  this.partitionId, txnId, newState, this.blockTimestamp));
      }
      // Check whether it's safe to unblock this mofo
      else if ((currentTimestamp = System.currentTimeMillis()) < this.blockTimestamp) {
        newState = QueueState.BLOCKED_SAFETY;
        if (debug.val)
          LOG.debug(
              String.format(
                  "Partition %d :: txnId[%d] ==> %s (blockTime[%d] - current[%d] = %d)",
                  this.partitionId,
                  txnId,
                  newState,
                  this.blockTimestamp,
                  currentTimestamp,
                  Math.max(0, this.blockTimestamp - currentTimestamp)));
      }
      // We didn't find any reason to block this txn, so it's good to go...
      else if (debug.val) {
        LOG.debug(
            String.format(
                "Partition %d :: Safe to Execute %d [currentTime=%d]",
                this.partitionId, txnId, System.currentTimeMillis()));
      }
    }

    if (newState != this.state) {
      // note if we get non-empty but blocked
      if ((newState == QueueState.BLOCKED_ORDERING) || (newState == QueueState.BLOCKED_SAFETY)) {
        if (trace.val)
          LOG.trace(
              String.format("Partition %d :: NewState=%s --> %s", this.partitionId, newState, ts));
        long txnTimestamp = TransactionIdManager.getTimestampFromTransactionId(txnId.longValue());
        if (currentTimestamp == -1) currentTimestamp = System.currentTimeMillis();

        // Calculate how long we need to wait before this txn is safe to run
        // If we're blocking on "safety", then we can use an offset based
        // on when the txnId was created. If we're blocking for "ordering",
        // then we'll want to wait for the full wait time.
        int waitTime = this.waitTime;
        if (newState == QueueState.BLOCKED_SAFETY) {
          waitTime = (int) Math.max(0, this.waitTime - (currentTimestamp - txnTimestamp));
        }

        this.blockTimestamp = currentTimestamp + waitTime;
        if (trace.val)
          LOG.trace(
              String.format(
                  "Partition %d :: SET blockTimestamp = %d --> %s",
                  this.partitionId, this.blockTimestamp, ts));

        if (this.blockTimestamp <= currentTimestamp) {
          newState = QueueState.UNBLOCKED;
        }
        if (this.profiler != null && this.lastSafeTxnId.equals(txnId) == false)
          this.profiler.waitTimes.put(newState == QueueState.UNBLOCKED ? 0 : waitTime);

        if (debug.val)
          LOG.debug(
              String.format(
                  "Partition %d :: SET lastSafeTxnId = %d --> %s",
                  this.partitionId, this.lastSafeTxnId, ts));

        if (trace.val) {
          Map<String, Object> m = new LinkedHashMap<String, Object>();
          m.put("Txn Init Timestamp", txnTimestamp);
          m.put("Current Timestamp", currentTimestamp);
          m.put("Block Time Remaining", (this.blockTimestamp - currentTimestamp));
          String debug = "\n" + StringUtil.formatMaps(m);
          LOG.trace(
              String.format(
                  "Partition %d :: Blocking %s for %d ms "
                      + "[maxWait=%d, origState=%s, newState=%s]\n%s%s",
                  this.partitionId,
                  ts,
                  (this.blockTimestamp - currentTimestamp),
                  this.waitTime,
                  this.state,
                  newState,
                  this.debug(),
                  debug));
        }
      } else if (newState == QueueState.UNBLOCKED) {
        if (currentTimestamp == -1) currentTimestamp = System.currentTimeMillis();
        if (this.blockTimestamp > currentTimestamp) {
          newState = QueueState.BLOCKED_SAFETY;
        }
      }
    } // IF

    // This txn should always become our next safeTxnId.
    // This is essentially the next txn
    // that should be executed, but somebody *could* come along and add in
    // a new txn with a lower id. But that's ok because we've synchronized setting
    // the id up above. This is actually probably the only part of this entire method
    // that needs to be protected...
    if (txnId != null) this.lastSafeTxnId = txnId;

    // Set the new state
    if (newState != this.state) {
      if (trace.val)
        LOG.trace(
            String.format(
                "Partition %d :: ORIG[%s]->NEW[%s] / LastSafeTxn:%d",
                this.partitionId, this.state, newState, this.lastSafeTxnId));
      if (this.profiler != null) {
        this.profiler.queueStates.get(this.state).stopIfStarted();
        this.profiler.queueStates.get(newState).start();
      }
      this.state = newState;

      // Always poke anybody that is blocking on this queue.
      // The txn may not be ready to run just yet, but at least they'll be
      // able to recompute a new sleep time.
      this.isReady.signal();
    } else if (this.profiler != null) {
      this.profiler.queueStates.get(this.state).restart();
    }

    // Sanity Check
    if ((this.state == QueueState.BLOCKED_ORDERING) || (this.state == QueueState.BLOCKED_SAFETY)) {
      assert (this.state != QueueState.BLOCKED_EMPTY);
    }

    // Make sure that we're always in a valid state to avoid livelock problems
    assert (this.state != QueueState.BLOCKED_SAFETY
            || (this.state == QueueState.BLOCKED_SAFETY
                && this.blockTimestamp != NULL_BLOCK_TIMESTAMP))
        : String.format("Invalid state %s with NULL blocked timestamp", this.state);
    assert (this.state != QueueState.BLOCKED_ORDERING
            || (this.state == QueueState.BLOCKED_ORDERING
                && this.blockTimestamp != NULL_BLOCK_TIMESTAMP))
        : String.format("Invalid state %s with NULL blocked timestamp", this.state);
    return this.state;
  }
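  // Hedged summary (condensed from the logic above, not a comment from the original
  // source): the states that checkQueueState() can settle on.
  //   BLOCKED_EMPTY    - the queue has no transactions (peek() returned null)
  //   BLOCKED_ORDERING - the head txnId is newer than lastSafeTxnId; wait the full waitTime
  //   BLOCKED_SAFETY   - the head txn is next in order, but its blockTimestamp has not passed
  //   UNBLOCKED        - the head txn is safe to poll and execute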
  @Override
  protected double estimateWorkloadCostImpl(
      final CatalogContext catalogContext,
      final Workload workload,
      final Filter filter,
      final Double upper_bound)
      throws Exception {

    if (debug.val)
      LOG.debug(
          "Calculating workload execution cost across "
              + num_intervals
              + " intervals for "
              + num_partitions
              + " partitions");

    // (1) Grab the costs at the different time intervals
    //     Also create the ratios that we will use to weight the interval costs
    final AtomicLong total_txns = new AtomicLong(0);

    // final HashSet<Long> trace_ids[] = new HashSet[num_intervals];
    for (int i = 0; i < num_intervals; i++) {
      total_interval_txns[i] = 0;
      total_interval_queries[i] = 0;
      singlepartition_ctrs[i] = 0;
      singlepartition_with_partitions_ctrs[i] = 0;
      multipartition_ctrs[i] = 0;
      partitions_touched[i] = 0;
      incomplete_txn_ctrs[i] = 0;
      exec_mismatch_ctrs[i] = 0;
      incomplete_txn_histogram[i].clear();
      missing_txn_histogram[i].clear();
      exec_histogram[i].clear();
    } // FOR

    // (2) Now go through the workload and estimate the partitions that each txn
    //     will touch for the given catalog setups
    if (trace.val) {
      LOG.trace("Total # of Txns in Workload: " + workload.getTransactionCount());
      if (filter != null)
        LOG.trace(
            "Workload Filter Chain:       " + StringUtil.join("   ", "\n", filter.getFilters()));
    }

    // QUEUING THREAD
    tmp_consumers.clear();
    Producer<TransactionTrace, Pair<TransactionTrace, Integer>> producer =
        new Producer<TransactionTrace, Pair<TransactionTrace, Integer>>(
            CollectionUtil.iterable(workload.iterator(filter))) {
          @Override
          public Pair<Consumer<Pair<TransactionTrace, Integer>>, Pair<TransactionTrace, Integer>>
              transform(TransactionTrace txn_trace) {
            int i = workload.getTimeInterval(txn_trace, num_intervals);
            assert (i >= 0)
                : "Invalid time interval '" + i + "'\n" + txn_trace.debug(catalogContext.database);
            assert (i < num_intervals)
                : "Invalid interval: " + i + "\n" + txn_trace.debug(catalogContext.database);
            total_txns.incrementAndGet();
            Pair<TransactionTrace, Integer> p = Pair.of(txn_trace, i);
            return (Pair.of(tmp_consumers.get(i), p));
          }
        };

    // PROCESSING THREADS
    final int num_threads = ThreadUtil.getMaxGlobalThreads();
    int interval_ctr = 0;
    for (int thread = 0; thread < num_threads; thread++) {
      // First create a new IntervalProcessor/Consumer
      IntervalProcessor ip = new IntervalProcessor(catalogContext, workload, filter);

      // Then assign it to some number of intervals
      for (int i = 0, cnt = (int) Math.ceil(num_intervals / (double) num_threads); i < cnt; i++) {
        if (interval_ctr > num_intervals) break;
        tmp_consumers.put(interval_ctr++, ip);
        if (trace.val)
          LOG.trace(
              String.format("Interval #%02d => IntervalProcessor #%02d", interval_ctr - 1, thread));
      } // FOR

      // And make sure that we queue it up too
      producer.addConsumer(ip);
    } // FOR (threads)

    ThreadUtil.runGlobalPool(producer.getRunnablesList()); // BLOCKING
    if (debug.val) {
      int processed = 0;
      for (Consumer<?> c : producer.getConsumers()) {
        processed += c.getProcessedCounter();
      } // FOR
      assert (total_txns.get() == processed)
          : String.format("Expected[%d] != Processed[%d]", total_txns.get(), processed);
    }

    // We have to convert all of the costs into the range of [0.0, 1.0]
    // For each interval, divide the number of partitions touched by the total number
    // of partitions that the interval could have touched (worst case scenario)
    final double[] execution_costs = new double[num_intervals];
    StringBuilder sb = (this.isDebugEnabled() || debug.get() ? new StringBuilder() : null);
    Map<String, Object> debug_m = null;
    if (sb != null) {
      debug_m = new LinkedHashMap<String, Object>();
    }

    if (debug.val)
      LOG.debug("Calculating execution cost for " + this.num_intervals + " intervals...");
    long total_multipartition_txns = 0;
    for (int i = 0; i < this.num_intervals; i++) {
      interval_weights[i] = total_interval_txns[i] / (double) total_txns.get();
      long total_txns_in_interval = (long) total_interval_txns[i];
      long total_queries_in_interval = (long) total_interval_queries[i];
      long num_txns = this.cost_models[i].txn_ctr.get();
      long potential_txn_touches = (total_txns_in_interval * num_partitions); // TXNS
      double penalty = 0.0d;
      total_multipartition_txns += multipartition_ctrs[i];

      // Divide the total number of partitions touched by...
      // This is the total number of partitions that we could have touched
      // in this interval
      // And this is the total number of partitions that we did actually touch
      if (multipartition_ctrs[i] > 0) {
        assert (partitions_touched[i] > 0) : "No touched partitions for interval " + i;
        double cost = (partitions_touched[i] / (double) potential_txn_touches);

        if (this.use_multitpartition_penalty) {
          penalty =
              this.multipartition_penalty
                  * (1.0d + (multipartition_ctrs[i] / (double) total_txns_in_interval));
          assert (penalty >= 1.0) : "The multipartition penalty is less than one: " + penalty;
          cost *= penalty;
        }
        execution_costs[i] = Math.min(cost, (double) potential_txn_touches);
      }
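      // Hedged numeric illustration (not from the original source): with 4 partitions
      // and 10 txns in this interval, potential_txn_touches = 40; if those txns touched
      // 16 partitions in total, the raw cost is 16 / 40 = 0.40, and a multipartition
      // penalty of 1.2 would scale it to 0.48 before the Math.min() clamp above.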

      // For each txn that wasn't even evaluated, add all of the
      // partitions to the incomplete histogram
      if (num_txns < total_txns_in_interval) {
        if (trace.val)
          LOG.trace(
              "Adding "
                  + (total_txns_in_interval - num_txns)
                  + " entries to the incomplete histogram for interval #"
                  + i);
        for (long ii = num_txns; ii < total_txns_in_interval; ii++) {
          missing_txn_histogram[i].put(all_partitions);
        } // FOR
      }

      if (sb != null) {
        tmp_penalties.add(penalty);
        tmp_total.add(total_txns_in_interval);
        tmp_touched.add(partitions_touched[i]);
        tmp_potential.add(potential_txn_touches);

        Map<String, Object> inner = new LinkedHashMap<String, Object>();
        inner.put("Partitions Touched", partitions_touched[i]);
        inner.put("Potential Touched", potential_txn_touches);
        inner.put("Multi-Partition Txns", multipartition_ctrs[i]);
        inner.put("Total Txns", total_txns_in_interval);
        inner.put("Total Queries", total_queries_in_interval);
        inner.put("Missing Txns", (total_txns_in_interval - num_txns));
        inner.put("Cost", String.format("%.05f", execution_costs[i]));
        inner.put("Exec Txns", exec_histogram[i].getSampleCount());
        debug_m.put("Interval #" + i, inner);
      }
    } // FOR

    if (sb != null) {
      Map<String, Object> m0 = new LinkedHashMap<String, Object>();
      m0.put("SinglePartition Txns", (total_txns.get() - total_multipartition_txns));
      m0.put("MultiPartition Txns", total_multipartition_txns);
      m0.put(
          "Total Txns",
          String.format(
              "%d [%.06f]",
              total_txns.get(), (1.0d - (total_multipartition_txns / (double) total_txns.get()))));

      Map<String, Object> m1 = new LinkedHashMap<String, Object>();
      m1.put("Touched Partitions", tmp_touched);
      m1.put("Potential Partitions", tmp_potential);
      m1.put("Total Partitions", tmp_total);
      m1.put("Penalties", tmp_penalties);

      sb.append(StringUtil.formatMaps(debug_m, m0, m1));
      if (debug.val) LOG.debug("**** Execution Cost ****\n" + sb);
      this.appendDebugMessage(sb);
    }

    // LOG.debug("Execution By Intervals:\n" + sb.toString());

    // (3) We then need to go through and grab the histograms of partitions were accessed
    if (sb != null) {
      if (debug.val)
        LOG.debug("Calculating skew factor for " + this.num_intervals + " intervals...");
      debug_histograms.clear();
      sb = new StringBuilder();
    }
    for (int i = 0; i < this.num_intervals; i++) {
      ObjectHistogram<Integer> histogram_txn = this.cost_models[i].getTxnPartitionAccessHistogram();
      ObjectHistogram<Integer> histogram_query =
          this.cost_models[i].getQueryPartitionAccessHistogram();
      this.histogram_query_partitions.put(histogram_query);
      long num_queries = this.cost_models[i].query_ctr.get();
      this.query_ctr.addAndGet(num_queries);

      // DEBUG
      SingleSitedCostModel inner_costModel = (SingleSitedCostModel) this.cost_models[i];
      boolean is_valid =
          (partitions_touched[i] + singlepartition_with_partitions_ctrs[i])
              == (this.cost_models[i].getTxnPartitionAccessHistogram().getSampleCount()
                  + exec_mismatch_ctrs[i]);
      if (!is_valid) {
        LOG.error("Transaction Entries: " + inner_costModel.getTransactionCacheEntries().size());
        ObjectHistogram<Integer> check = new ObjectHistogram<Integer>();
        for (TransactionCacheEntry tce : inner_costModel.getTransactionCacheEntries()) {
          check.put(tce.getTouchedPartitions());
          // LOG.error(tce.debug() + "\n");
        }
        LOG.error(
            "Check Touched Partitions: sample="
                + check.getSampleCount()
                + ", values="
                + check.getValueCount());
        LOG.error(
            "Cache Touched Partitions: sample="
                + this.cost_models[i].getTxnPartitionAccessHistogram().getSampleCount()
                + ", values="
                + this.cost_models[i].getTxnPartitionAccessHistogram().getValueCount());

        int qtotal = inner_costModel.getAllQueryCacheEntries().size();
        int ctr = 0;
        int multip = 0;
        for (QueryCacheEntry qce : inner_costModel.getAllQueryCacheEntries()) {
          ctr += (qce.getAllPartitions().isEmpty() ? 0 : 1);
          multip += (qce.getAllPartitions().size() > 1 ? 1 : 0);
        } // FOR
        LOG.error("# of QueryCacheEntries with Touched Partitions: " + ctr + " / " + qtotal);
        LOG.error("# of MultiP QueryCacheEntries: " + multip);
      }
      assert (is_valid)
          : String.format(
              "Partitions Touched by Txns Mismatch in Interval #%d\n"
                  + "(partitions_touched[%d] + singlepartition_with_partitions_ctrs[%d]) != "
                  + "(histogram_txn[%d] + exec_mismatch_ctrs[%d])",
              i,
              partitions_touched[i],
              singlepartition_with_partitions_ctrs[i],
              this.cost_models[i].getTxnPartitionAccessHistogram().getSampleCount(),
              exec_mismatch_ctrs[i]);

      this.histogram_java_partitions.put(this.cost_models[i].getJavaExecutionHistogram());
      this.histogram_txn_partitions.put(histogram_txn);
      long num_txns = this.cost_models[i].txn_ctr.get();
      assert (num_txns >= 0) : "The transaction counter at interval #" + i + " is " + num_txns;
      this.txn_ctr.addAndGet(num_txns);

      // Calculate the skew factor at this time interval
      // XXX: Should the number of txns be the total number of unique txns
      //      that were executed or the total number of times a txn touched the partitions?
      // XXX: What do we do when the number of elements that we are examining is zero?
      //      I guess the cost just needs to be zero?
      // XXX: What histogram do we want to use?
      target_histogram.clear();
      target_histogram.put(histogram_txn);

      // For each txn that we haven't gotten an estimate for at this interval,
      // we're going to mark it as being broadcast to all partitions. That way the access
      // histogram will look uniform. Then as more information is added, the histogram
      // will sharpen. This is an attempt to make sure that the skew cost never decreases
      // but only increases.
      long total_txns_in_interval = (long) total_interval_txns[i];
      if (sb != null) {
        debug_histograms.put("Incomplete Txns", incomplete_txn_histogram[i]);
        debug_histograms.put("Missing Txns", missing_txn_histogram[i]);
        debug_histograms.put(
            "Target Partitions (BEFORE)", new ObjectHistogram<Integer>(target_histogram));
        debug_histograms.put("Target Partitions (AFTER)", target_histogram);
      }

      // Merge the values from incomplete histogram into the target
      // histogram
      target_histogram.put(incomplete_txn_histogram[i]);
      target_histogram.put(missing_txn_histogram[i]);
      exec_histogram[i].put(missing_txn_histogram[i]);

      long num_elements = target_histogram.getSampleCount();

      // The number of partition touches should never be greater than our
      // potential touches
      assert (num_elements <= (total_txns_in_interval * num_partitions))
          : "New Partitions Touched Sample Count ["
              + num_elements
              + "] < "
              + "Maximum Potential Touched Count ["
              + (total_txns_in_interval * num_partitions)
              + "]";

      if (sb != null) {
        Map<String, Object> m = new LinkedHashMap<String, Object>();
        for (String key : debug_histograms.keySet()) {
          ObjectHistogram<?> h = debug_histograms.get(key);
          m.put(
              key,
              String.format("[Sample=%d, Value=%d]\n%s", h.getSampleCount(), h.getValueCount(), h));
        } // FOR
        sb.append(
            String.format(
                "INTERVAL #%d [total_txns_in_interval=%d, num_txns=%d, incomplete_txns=%d]\n%s",
                i,
                total_txns_in_interval,
                num_txns,
                incomplete_txn_ctrs[i],
                StringUtil.formatMaps(m)));
      }

      // Txn Skew
      if (num_elements == 0) {
        txn_skews[i] = 0.0d;
      } else {
        txn_skews[i] = SkewFactorUtil.calculateSkew(num_partitions, num_elements, target_histogram);
      }

      // Exec Skew
      if (exec_histogram[i].getSampleCount() == 0) {
        exec_skews[i] = 0.0d;
      } else {
        exec_skews[i] =
            SkewFactorUtil.calculateSkew(
                num_partitions, exec_histogram[i].getSampleCount(), exec_histogram[i]);
      }
      total_skews[i] = (0.5 * exec_skews[i]) + (0.5 * txn_skews[i]);

      if (sb != null) {
        sb.append("Txn Skew   = " + MathUtil.roundToDecimals(txn_skews[i], 6) + "\n");
        sb.append("Exec Skew  = " + MathUtil.roundToDecimals(exec_skews[i], 6) + "\n");
        sb.append("Total Skew = " + MathUtil.roundToDecimals(total_skews[i], 6) + "\n");
        sb.append(StringUtil.DOUBLE_LINE);
      }
    } // FOR
    if (sb != null && sb.length() > 0) {
      if (debug.val) LOG.debug("**** Skew Factor ****\n" + sb);
      this.appendDebugMessage(sb);
    }
    if (trace.val) {
      for (int i = 0; i < num_intervals; i++) {
        LOG.trace(
            "Time Interval #"
                + i
                + "\n"
                + "Total # of Txns: "
                + this.cost_models[i].txn_ctr.get()
                + "\n"
                + "Multi-Partition Txns: "
                + multipartition_ctrs[i]
                + "\n"
                + "Execution Cost: "
                + execution_costs[i]
                + "\n"
                + "ProcHistogram:\n"
                + this.cost_models[i].getProcedureHistogram().toString()
                + "\n"
                +
                // "TransactionsPerPartitionHistogram:\n" +
                // this.cost_models[i].getTxnPartitionAccessHistogram()
                // + "\n" +
                StringUtil.SINGLE_LINE);
      }
    }

    // (4) We can now calculate the final total estimated cost of this workload as the
    //     weighted mean of the per-interval execution costs
    this.last_execution_cost =
        MathUtil.weightedMean(
            execution_costs,
            total_interval_txns); // MathUtil.roundToDecimals(MathUtil.geometricMean(execution_costs,
    // MathUtil.GEOMETRIC_MEAN_ZERO),
    // 10);

    // The final skew cost needs to be weighted by the percentage of txns running in that
    // interval. This will cause intervals with few txns to contribute less to the total.
    this.last_skew_cost =
        MathUtil.weightedMean(
            total_skews, total_interval_txns); // roundToDecimals(MathUtil.geometricMean(entropies,
    // MathUtil.GEOMETRIC_MEAN_ZERO),
    // 10);
    double new_final_cost =
        (this.use_execution ? (this.execution_weight * this.last_execution_cost) : 0)
            + (this.use_skew ? (this.skew_weight * this.last_skew_cost) : 0);

    if (sb != null) {
      Map<String, Object> m = new LinkedHashMap<String, Object>();
      m.put("Total Txns", total_txns.get());
      m.put("Interval Txns", Arrays.toString(total_interval_txns));
      m.put("Execution Costs", Arrays.toString(execution_costs));
      m.put("Skew Factors", Arrays.toString(total_skews));
      m.put("Txn Skew", Arrays.toString(txn_skews));
      m.put("Exec Skew", Arrays.toString(exec_skews));
      m.put("Interval Weights", Arrays.toString(interval_weights));
      m.put(
          "Final Cost",
          String.format(
              "%f = %f + %f", new_final_cost, this.last_execution_cost, this.last_skew_cost));
      if (debug.val) LOG.debug(StringUtil.formatMaps(m));
      this.appendDebugMessage(StringUtil.formatMaps(m));
    }

    this.last_final_cost = new_final_cost;
    return (MathUtil.roundToDecimals(this.last_final_cost, 5));
  }
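  // Hedged worked example (hypothetical weights and costs, not from the original
  // source): the final cost computed above is a weighted sum of the two components.
  // For instance, finalCost(0.5, 0.30, 0.5, 0.10) == 0.20 before the rounding to
  // five decimal places.
  static double finalCost(double execWeight, double execCost, double skewWeight, double skewCost) {
    return (execWeight * execCost) + (skewWeight * skewCost);
  }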
    @Override
    public void process(Pair<TransactionTrace, Integer> p) {
      assert (p != null);
      final TransactionTrace txn_trace = p.getFirst();
      final int i = p.getSecond(); // Interval
      final int txn_weight = (use_txn_weights ? txn_trace.getWeight() : 1);
      final String proc_key =
          CatalogKey.createKey(CatalogUtil.DEFAULT_DATABASE_NAME, txn_trace.getCatalogItemName());

      // Terrible Hack: Assume that we are using the SingleSitedCostModel and that
      // it will return fixed values based on whether the txn is single-partitioned or not
      SingleSitedCostModel singlesited_cost_model = (SingleSitedCostModel) cost_models[i];

      total_interval_txns[i] += txn_weight;
      total_interval_queries[i] += (txn_trace.getQueryCount() * txn_weight);
      histogram_procs.put(proc_key, txn_weight);

      try {
        singlesited_cost_model.estimateTransactionCost(catalogContext, workload, filter, txn_trace);
        TransactionCacheEntry txn_entry =
            singlesited_cost_model.getTransactionCacheEntry(txn_trace);
        assert (txn_entry != null) : "No txn entry for " + txn_trace;
        Collection<Integer> partitions = txn_entry.getTouchedPartitions();

        // If the txn runs on only one partition, then the cost is
        // nothing
        if (txn_entry.isSinglePartitioned()) {
          singlepartition_ctrs[i] += txn_weight;
          if (!partitions.isEmpty()) {
            assert (txn_entry.getAllTouchedPartitionsHistogram().getValueCount() == 1)
                : txn_entry
                    + " says it was single-partitioned but the partition count says otherwise:\n"
                    + txn_entry.debug();
            singlepartition_with_partitions_ctrs[i] += txn_weight;
          }
          histogram_sp_procs.put(proc_key, txn_weight);

          // If the txn runs on multiple partitions, then the cost is...
          // XXX 2010-06-28: The number of partitions that the txn touches
          // divided by the total number of partitions.
          // XXX 2010-07-02: The histogram for the total number of partitions
          // touched by all of the queries in the transaction. This ensures
          // that a txn with just one multi-partition query isn't weighted the
          // same as a txn with many multi-partition queries.
        } else {
          assert (!partitions.isEmpty()) : "No touched partitions for " + txn_trace;
          if (partitions.size() == 1
              && txn_entry.getExecutionPartition() != HStoreConstants.NULL_PARTITION_ID) {
            assert (CollectionUtil.first(partitions) != txn_entry.getExecutionPartition())
                : txn_entry.debug();
            exec_mismatch_ctrs[i] += txn_weight;
            partitions_touched[i] += txn_weight;
          } else {
            assert (partitions.size() > 1)
                : String.format(
                    "%s is not marked as single-partition but it only touches one partition\n%s",
                    txn_trace, txn_entry.debug());
          }
          partitions_touched[i] += (partitions.size() * txn_weight); // Txns
          multipartition_ctrs[i] += txn_weight;
          histogram_mp_procs.put(proc_key, txn_weight);
        }
        Integer base_partition = txn_entry.getExecutionPartition();
        if (base_partition != null) {
          exec_histogram[i].put(base_partition, txn_weight);
        } else {
          exec_histogram[i].put(all_partitions, txn_weight);
        }
        if (debug.val) { // &&
          // txn_trace.getCatalogItemName().equalsIgnoreCase("DeleteCallForwarding"))
          // {
          Procedure catalog_proc = txn_trace.getCatalogItem(catalogContext.database);
          Map<String, Object> inner = new LinkedHashMap<String, Object>();
          for (Statement catalog_stmt : catalog_proc.getStatements()) {
            inner.put(catalog_stmt.fullName(), CatalogUtil.getReferencedTables(catalog_stmt));
          }

          Map<String, Object> m = new LinkedHashMap<String, Object>();
          m.put(txn_trace.toString(), null);
          m.put("Interval", i);
          m.put("Single-Partition", txn_entry.isSinglePartitioned());
          m.put("Base Partition", base_partition);
          m.put("Touched Partitions", partitions);
          m.put(catalog_proc.fullName(), inner);
          LOG.debug(StringUtil.formatMaps(m));
        }

        // We need to keep a count of the number of txns that didn't have all of
        // their queries estimated completely so that we can update the access
        // histograms down below for entropy calculations.
        // Note that this is at the txn level, not the query level.
        if (!txn_entry.isComplete()) {
          incomplete_txn_ctrs[i] += txn_weight;
          tmp_missingPartitions.clear();
          tmp_missingPartitions.addAll(all_partitions);
          tmp_missingPartitions.removeAll(txn_entry.getTouchedPartitions());
          // Update the histogram for this interval to keep track of how many
          // times we need to increase the partition access histogram.
          incomplete_txn_histogram[i].put(tmp_missingPartitions, txn_weight);
          if (trace.val) {
            Map<String, Object> m = new LinkedHashMap<String, Object>();
            m.put(String.format("Marking %s as incomplete in interval #%d", txn_trace, i), null);
            m.put("Examined Queries", txn_entry.getExaminedQueryCount());
            m.put("Total Queries", txn_entry.getTotalQueryCount());
            m.put("Touched Partitions", txn_entry.getTouchedPartitions());
            m.put("Missing Partitions", tmp_missingPartitions);
            LOG.trace(StringUtil.formatMaps(m));
          }
        }
      } catch (Exception ex) {
        CatalogUtil.saveCatalog(catalogContext.catalog, CatalogUtil.CATALOG_FILENAME);
        throw new RuntimeException(
            "Failed to estimate cost for " + txn_trace.getCatalogItemName() + " at interval " + i,
            ex);
      }
    }
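    // Hedged numeric illustration (not from the original source): a txn trace with
    // weight 3 that touches partitions {0, 2, 5} in interval i adds 3 to
    // multipartition_ctrs[i], 9 (3 partitions * weight 3) to partitions_touched[i],
    // and 3 * getQueryCount() to total_interval_queries[i].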
Example #16
  private void buildDatabaseElement(Document doc, final Element database) {

    // /project/database/users
    final Element users = doc.createElement("users");
    database.appendChild(users);

    // users/user
    if (m_users.isEmpty()) {
      final Element user = doc.createElement("user");
      user.setAttribute("name", "default");
      user.setAttribute("groups", "default");
      user.setAttribute("password", "");
      user.setAttribute("sysproc", "true");
      user.setAttribute("adhoc", "true");
      users.appendChild(user);
    } else {
      for (final UserInfo info : m_users) {
        final Element user = doc.createElement("user");
        user.setAttribute("name", info.name);
        user.setAttribute("password", info.password);
        user.setAttribute("sysproc", info.sysproc ? "true" : "false");
        user.setAttribute("adhoc", info.adhoc ? "true" : "false");
        // build up user/@groups. This attribute must be redesigned
        if (info.groups.length > 0) {
          final StringBuilder groups = new StringBuilder();
          for (final String group : info.groups) {
            if (groups.length() > 0) groups.append(",");
            groups.append(group);
          }
          user.setAttribute("groups", groups.toString());
        }
        users.appendChild(user);
      }
    }

    // /project/database/groups
    final Element groups = doc.createElement("groups");
    database.appendChild(groups);

    // groups/group
    if (m_groups.isEmpty()) {
      final Element group = doc.createElement("group");
      group.setAttribute("name", "default");
      group.setAttribute("sysproc", "true");
      group.setAttribute("adhoc", "true");
      groups.appendChild(group);
    } else {
      for (final GroupInfo info : m_groups) {
        final Element group = doc.createElement("group");
        group.setAttribute("name", info.name);
        group.setAttribute("sysproc", info.sysproc ? "true" : "false");
        group.setAttribute("adhoc", info.adhoc ? "true" : "false");
        groups.appendChild(group);
      }
    }

    // /project/database/schemas
    final Element schemas = doc.createElement("schemas");
    database.appendChild(schemas);

    // schemas/schema
    for (final String schemaPath : m_schemas) {
      final Element schema = doc.createElement("schema");
      schema.setAttribute("path", schemaPath);
      schemas.appendChild(schema);
    }

    // /project/database/procedures
    final Element procedures = doc.createElement("procedures");
    database.appendChild(procedures);

    // procedures/procedure
    for (final ProcedureInfo procedure : m_procedures) {
      if (procedure.cls == null) continue;
      assert (procedure.sql == null);

      final Element proc = doc.createElement("procedure");
      proc.setAttribute("class", procedure.cls.getName());
      // build up @users. This attribute should be redesigned
      if (procedure.users.length > 0) {
        final StringBuilder userattr = new StringBuilder();
        for (final String user : procedure.users) {
          if (userattr.length() > 0) userattr.append(",");
          userattr.append(user);
        }
        proc.setAttribute("users", userattr.toString());
      }
      // build up @groups. This attribute should be redesigned
      if (procedure.groups.length > 0) {
        final StringBuilder groupattr = new StringBuilder();
        for (final String group : procedure.groups) {
          if (groupattr.length() > 0) groupattr.append(",");
          groupattr.append(group);
        }
        proc.setAttribute("groups", groupattr.toString());
      }

      // HACK: Prefetchable Statements
      if (m_prefetchQueries.containsKey(procedure.cls.getSimpleName())) {
        Collection<String> stmtNames = m_prefetchQueries.get(procedure.cls.getSimpleName());
        proc.setAttribute("prefetchable", StringUtil.join(",", stmtNames));
      }
      // HACK: Deferrable Statements
      if (m_deferQueries.containsKey(procedure.cls.getSimpleName())) {
        Collection<String> stmtNames = m_deferQueries.get(procedure.cls.getSimpleName());
        proc.setAttribute("deferrable", StringUtil.join(",", stmtNames));
      }

      procedures.appendChild(proc);
    }

    // procedures/procedures (that are stmtprocedures)
    for (final ProcedureInfo procedure : m_procedures) {
      if (procedure.sql == null) continue;
      assert (procedure.cls == null);

      final Element proc = doc.createElement("procedure");
      proc.setAttribute("class", procedure.name);
      if (procedure.partitionInfo != null) {
        proc.setAttribute("partitioninfo", procedure.partitionInfo);
      }
      // build up @users. This attribute should be redesigned
      if (procedure.users.length > 0) {
        final StringBuilder userattr = new StringBuilder();
        for (final String user : procedure.users) {
          if (userattr.length() > 0) userattr.append(",");
          userattr.append(user);
        }
        proc.setAttribute("users", userattr.toString());
      }
      // build up @groups. This attribute should be redesigned
      if (procedure.groups.length > 0) {
        final StringBuilder groupattr = new StringBuilder();
        for (final String group : procedure.groups) {
          if (groupattr.length() > 0) groupattr.append(",");
          groupattr.append(group);
        }
        proc.setAttribute("groups", groupattr.toString());
      }

      final Element sql = doc.createElement("sql");
      proc.appendChild(sql);

      final Text sqltext = doc.createTextNode(procedure.sql);
      sql.appendChild(sqltext);

      procedures.appendChild(proc);
    }

    if (m_partitionInfos.size() > 0) {
      // /project/database/partitions
      final Element partitions = doc.createElement("partitions");
      database.appendChild(partitions);

      // partitions/table
      for (final Entry<String, String> partitionInfo : m_partitionInfos.entrySet()) {
        final Element table = doc.createElement("partition");
        table.setAttribute("table", partitionInfo.getKey());
        table.setAttribute("column", partitionInfo.getValue());
        partitions.appendChild(table);
      }
    }

    // Evictable Tables
    if (m_evictableTables.isEmpty() == false) {
      final Element evictables = doc.createElement("evictables");
      database.appendChild(evictables);

      // Table entries
      for (String tableName : m_evictableTables) {
        final Element table = doc.createElement("evictable");
        table.setAttribute("table", tableName);
        evictables.appendChild(table);
      }
    }

    // Vertical Partitions
    if (m_replicatedSecondaryIndexes.size() > 0) {
      // /project/database/partitions
      final Element verticalpartitions = doc.createElement("verticalpartitions");
      database.appendChild(verticalpartitions);

      // partitions/table
      for (String tableName : m_replicatedSecondaryIndexes.keySet()) {
        Pair<Boolean, Collection<String>> p = m_replicatedSecondaryIndexes.get(tableName);
        Boolean createIndex = p.getFirst();
        Collection<String> columnNames = p.getSecond();

        final Element vp = doc.createElement("verticalpartition");
        vp.setAttribute("table", tableName);
        vp.setAttribute("indexed", createIndex.toString());
        for (final String columnName : columnNames) {
          final Element column = doc.createElement("column");
          column.setTextContent(columnName);
          vp.appendChild(column);
        } // FOR (cols)
        verticalpartitions.appendChild(vp);
      } // FOR (tables)
    }

    // /project/database/classdependencies
    final Element classdeps = doc.createElement("classdependencies");
    database.appendChild(classdeps);

    // classdependency
    for (final Class<?> supplemental : m_supplementals) {
      final Element supp = doc.createElement("classdependency");
      supp.setAttribute("class", supplemental.getName());
      classdeps.appendChild(supp);
    }

    // project/database/exports
    if (m_elloader != null) {
      final Element exports = doc.createElement("exports");
      database.appendChild(exports);

      final Element conn = doc.createElement("connector");
      conn.setAttribute("class", m_elloader);
      conn.setAttribute("enabled", m_elenabled ? "true" : "false");

      // turn list into stupid comma separated attribute list
      String usersattr = "";
      if (m_elAuthUsers != null) {
        for (String s : m_elAuthUsers) {
          if (usersattr.isEmpty()) {
            usersattr += s;
          } else {
            usersattr += "," + s;
          }
        }
        conn.setAttribute("users", usersattr);
      }

      // turn list into stupid comma separated attribute list
      String groupsattr = "";
      if (m_elAuthGroups != null) {
        for (String s : m_elAuthGroups) {
          if (groupsattr.isEmpty()) {
            groupsattr += s;
          } else {
            groupsattr += "," + s;
          }
        }
        conn.setAttribute("groups", groupsattr);
      }

      exports.appendChild(conn);

      if (m_eltTables.size() > 0) {
        final Element tables = doc.createElement("tables");
        conn.appendChild(tables);

        for (ELTTableInfo info : m_eltTables) {
          final Element table = doc.createElement("table");
          table.setAttribute("name", info.m_tablename);
          table.setAttribute("exportonly", info.m_export_only ? "true" : "false");
          tables.appendChild(table);
        }
      }
    }

    if (m_snapshotPath != null) {
      final Element snapshot = doc.createElement("snapshot");
      snapshot.setAttribute("frequency", m_snapshotFrequency);
      snapshot.setAttribute("path", m_snapshotPath);
      snapshot.setAttribute("prefix", m_snapshotPrefix);
      snapshot.setAttribute("retain", Integer.toString(m_snapshotRetain));
      database.appendChild(snapshot);
    }
  }
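  // Hedged illustration (shape inferred from the element names above, not captured
  // from a real run): buildDatabaseElement() emits a subtree roughly like
  //   <database>
  //     <users><user name="default" groups="default" password="" .../></users>
  //     <groups><group name="default" sysproc="true" adhoc="true"/></groups>
  //     <schemas><schema path="..."/></schemas>
  //     <procedures><procedure class="..." .../></procedures>
  //     <partitions><partition table="..." column="..."/></partitions>
  //     <classdependencies><classdependency class="..."/></classdependencies>
  //   </database>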
Example #17
  public static MaterializedViewInfo addVerticalPartition(
      final Table catalog_tbl, final Collection<Column> catalog_cols, final boolean createIndex)
      throws Exception {
    assert (catalog_cols.isEmpty() == false);
    Database catalog_db = ((Database) catalog_tbl.getParent());

    String viewName = getNextVerticalPartitionName(catalog_tbl, catalog_cols);
    if (debug.get())
      LOG.debug(
          String.format(
              "Adding Vertical Partition %s for %s: %s", viewName, catalog_tbl, catalog_cols));

    // Create a new virtual table
    Table virtual_tbl = catalog_db.getTables().get(viewName);
    if (virtual_tbl == null) {
      virtual_tbl = catalog_db.getTables().add(viewName);
    }
    virtual_tbl.setIsreplicated(true);
    virtual_tbl.setMaterializer(catalog_tbl);
    virtual_tbl.setSystable(true);
    virtual_tbl.getColumns().clear();

    // Create MaterializedView and link it to the virtual table
    MaterializedViewInfo catalog_view = catalog_tbl.getViews().add(viewName);
    catalog_view.setVerticalpartition(true);
    catalog_view.setDest(virtual_tbl);
    List<Column> indexColumns = new ArrayList<Column>();

    Column partition_col = catalog_tbl.getPartitioncolumn();
    if (partition_col instanceof VerticalPartitionColumn) {
      partition_col = ((VerticalPartitionColumn) partition_col).getHorizontalColumn();
    }
    if (debug.get()) LOG.debug(catalog_tbl.getName() + " Partition Column: " + partition_col);

    int i = 0;
    assert (catalog_cols != null);
    assert (catalog_cols.isEmpty() == false)
        : "No vertical partitioning columns for " + catalog_view.fullName();
    for (Column catalog_col : catalog_cols) {
      // MaterializedView ColumnRef
      ColumnRef catalog_ref = catalog_view.getGroupbycols().add(catalog_col.getName());
      catalog_ref.setColumn(catalog_col);
      catalog_ref.setIndex(i++);

      // VirtualTable Column
      Column virtual_col = virtual_tbl.getColumns().add(catalog_col.getName());
      virtual_col.setDefaulttype(catalog_col.getDefaulttype());
      virtual_col.setDefaultvalue(catalog_col.getDefaultvalue());
      virtual_col.setIndex(catalog_col.getIndex());
      virtual_col.setNullable(catalog_col.getNullable());
      virtual_col.setSize(catalog_col.getSize());
      virtual_col.setType(catalog_col.getType());
      if (debug.get())
        LOG.debug(String.format("Added VerticalPartition column %s", virtual_col.fullName()));

      // If an index was requested, build it on every column except the
      // column(s) that the table is partitioned on
      if (createIndex) {
        boolean include = true;
        if (partition_col instanceof MultiColumn) {
          include = (((MultiColumn) partition_col).contains(catalog_col) == false);
        } else if (catalog_col.equals(partition_col)) {
          include = false;
        }
        if (include) indexColumns.add(virtual_col);
      }
    } // FOR

    if (createIndex) {
      if (indexColumns.isEmpty()) {
        Map<String, Object> m = new ListOrderedMap<String, Object>();
        m.put("Partition Column", partition_col);
        m.put("VP Table Columns", virtual_tbl.getColumns());
        m.put("Passed-in Columns", CatalogUtil.debug(catalog_cols));
        LOG.error("Failed to find index columns\n" + StringUtil.formatMaps(m));
        throw new Exception(String.format("No columns selected for index on %s", viewName));
      }
      String idxName = "SYS_IDX_" + viewName;
      Index virtual_idx = virtual_tbl.getIndexes().get(idxName);
      if (virtual_idx == null) {
        virtual_idx = virtual_tbl.getIndexes().add(idxName);
      }
      virtual_idx.getColumns().clear();

      // Choose the index type from the column count: a single-column index
      // can be a hash table, while multi-column indexes use a balanced tree
      IndexType idxType =
          (indexColumns.size() == 1 ? IndexType.HASH_TABLE : IndexType.BALANCED_TREE);
      virtual_idx.setType(idxType.getValue());
      i = 0;
      for (Column catalog_col : indexColumns) {
        ColumnRef cref = virtual_idx.getColumns().add(catalog_col.getTypeName());
        cref.setColumn(catalog_col);
        cref.setIndex(i++);
      } // FOR

      if (debug.get())
        LOG.debug(
            String.format(
                "Created %s index '%s' for vertical partition '%s'", idxType, idxName, viewName));
    }
    return (catalog_view);
  }
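
A minimal usage sketch for addVerticalPartition(); the table and column names are hypothetical, and the CatalogMap-style get() accessors are assumed to behave like the ones used in the method above:

  // Vertically partition CUSTOMER on (C_ID, C_LAST) with a covering index
  Table catalog_tbl = catalog_db.getTables().get("CUSTOMER");
  Collection<Column> vp_cols = new ArrayList<Column>();
  vp_cols.add(catalog_tbl.getColumns().get("C_ID"));
  vp_cols.add(catalog_tbl.getColumns().get("C_LAST"));
  MaterializedViewInfo catalog_view = addVerticalPartition(catalog_tbl, vp_cols, true);
  assert (catalog_view != null);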
Example #18
0
  /**
   * @param projectFileURL URL of the project file.
   * @param clusterConfig Object containing desired physical cluster parameters
   * @param jarOutputPath The location to put the finished JAR to.
   * @param output Where to print status/errors to, usually stdout.
   * @param procInfoOverrides Optional overridden values for procedure annotations.
   */
  public boolean compile(
      final String projectFileURL,
      final ClusterConfig clusterConfig,
      final String jarOutputPath,
      final PrintStream output,
      final Map<String, ProcInfoData> procInfoOverrides) {
    m_hsql = null;
    m_projectFileURL = projectFileURL;
    m_jarOutputPath = jarOutputPath;
    m_outputStream = output;
    // use this map as default annotation values
    m_procInfoOverrides = procInfoOverrides;

    LOG.l7dlog(
        Level.DEBUG,
        LogKeys.compiler_VoltCompiler_LeaderAndHostCountAndSitesPerHost.name(),
        new Object[] {
          clusterConfig.getLeaderAddress(),
          clusterConfig.getHostCount(),
          clusterConfig.getSitesPerHost()
        },
        null);

    // compile the project file and cluster config into a catalog
    final Catalog catalog = compileCatalog(projectFileURL, clusterConfig);
    if (catalog == null) {
      LOG.error(
          "VoltCompiler had " + m_errors.size() + " errors\n" + StringUtil.join("\n", m_errors));
      return (false);
    }

    // WRITE CATALOG TO JAR HERE
    final String catalogCommands = catalog.serialize();

    byte[] catalogBytes = null;
    try {
      catalogBytes = catalogCommands.getBytes("UTF-8");
    } catch (final UnsupportedEncodingException e1) {
      addErr("Can't encode the compiled catalog file correctly");
      return false;
    }

    // Create Dtxn.Coordinator configuration for cluster
    //        byte[] dtxnConfBytes = null;
    //        try {
    //            dtxnConfBytes = HStoreDtxnConf.toHStoreDtxnConf(catalog).getBytes("UTF-8");
    //        } catch (final Exception e1) {
    //            addErr("Can't encode the Dtxn.Coordinator configuration file correctly");
    //            return false;
    //        }

    try {
      //            m_jarBuilder.addEntry("dtxn.conf", dtxnConfBytes);
      m_jarBuilder.addEntry(CatalogUtil.CATALOG_FILENAME, catalogBytes);
      m_jarBuilder.addEntry("project.xml", new File(projectFileURL));
      for (final Entry<String, String> e : m_ddlFilePaths.entrySet())
        m_jarBuilder.addEntry(e.getKey(), new File(e.getValue()));
      m_jarBuilder.writeJarToDisk(jarOutputPath);
    } catch (final VoltCompilerException e) {
      return false;
    }

    // Compilation should be error-free at this point: the assert catches
    // regressions in debug builds, and the explicit check below keeps a
    // release build from reporting success with errors recorded.
    assert (!hasErrors());

    if (hasErrors()) {
      return false;
    }

    return true;
  }
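
A hedged sketch of driving compile(); the no-argument VoltCompiler constructor and the ClusterConfig constructor shown here are assumptions for illustration, chosen to match the accessors the method logs, not a verified API:

  VoltCompiler compiler = new VoltCompiler(); // assumed no-arg constructor
  // Assumed ctor args: host count, sites per host, leader address
  ClusterConfig cluster = new ClusterConfig(2, 4, "localhost");
  boolean success =
      compiler.compile("myproject.xml", cluster, "catalog.jar", System.out, null);
  if (!success) {
    System.err.println("catalog compilation failed");
  }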
Example #19
0
  public TPCCSimulation(
      TPCCSimulation.ProcCaller client,
      RandomGenerator generator,
      Clock clock,
      ScaleParameters parameters,
      TPCCConfig config,
      double skewFactor,
      Catalog catalog) {
    assert parameters != null;
    this.client = client;
    this.generator = generator;
    this.clock = clock;
    this.parameters = parameters;
    this.affineWarehouse = lastAssignedWarehouseId;
    this.skewFactor = skewFactor;
    this.config = config;

    if (config.neworder_skew_warehouse) {
      if (debug.val) LOG.debug("Enabling W_ID Zipfian Skew: " + skewFactor);
      this.zipf =
          new RandomDistribution.Zipf(
              new Random(),
              parameters.starting_warehouse,
              parameters.last_warehouse + 1,
              Math.max(1.001d, this.skewFactor));

      this.custom_skew =
          new RandomDistribution.HotWarmCold(
              new Random(),
              parameters.starting_warehouse + 1,
              parameters.last_warehouse,
              TPCCConstants.HOT_DATA_WORKLOAD_SKEW,
              TPCCConstants.HOT_DATA_SIZE,
              TPCCConstants.WARM_DATA_WORKLOAD_SKEW,
              TPCCConstants.WARM_DATA_SIZE);
    }
    if (config.warehouse_debug) {
      LOG.info("Enabling WAREHOUSE debug mode");
    }

    // Round-robin warehouse affinity: each simulation instance claims the
    // next warehouse id, wrapping back to the first after the last warehouse.
    lastAssignedWarehouseId += 1;
    if (lastAssignedWarehouseId > parameters.last_warehouse) lastAssignedWarehouseId = 1;

    if (debug.val) {
      LOG.debug(this.toString());
    }
    // For remote NewOrder transactions, precompute for every warehouse the
    // list of warehouse ids whose partitions are mapped to a different site.
    if (config.neworder_multip_remote) {
      synchronized (TPCCSimulation.class) {
        if (remoteWarehouseIds == null) {
          remoteWarehouseIds = new HashMap<Integer, List<Integer>>();
          HashMap<Integer, Integer> partitionToSite = new HashMap<Integer, Integer>();

          Database catalog_db = CatalogUtil.getDatabase(catalog);
          DefaultHasher hasher = new DefaultHasher(catalog_db);
          for (Site s : CatalogUtil.getCluster(catalog_db).getSites()) {
            for (Partition p : s.getPartitions()) partitionToSite.put(p.getId(), s.getId());
          } // FOR

          for (int w_id0 = parameters.starting_warehouse;
              w_id0 <= parameters.last_warehouse;
              w_id0++) {
            final int partition0 = hasher.hash(w_id0);
            final int site0 = partitionToSite.get(partition0);
            final List<Integer> rList = new ArrayList<Integer>();

            for (int w_id1 = parameters.starting_warehouse;
                w_id1 <= parameters.last_warehouse;
                w_id1++) {
              // Figure out what partition this W_ID maps to
              int partition1 = hasher.hash(w_id1);

              // Check whether this partition is on our same local site
              int site1 = partitionToSite.get(partition1);
              if (site0 != site1) rList.add(w_id1);
            } // FOR
            remoteWarehouseIds.put(w_id0, rList);
          } // FOR

          LOG.debug("NewOrder Remote W_ID Mapping\n" + StringUtil.formatMaps(remoteWarehouseIds));
        }
      } // SYNCH
    }
  }
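
  // Usage sketch for the skewed warehouse selection configured above.
  // Assumption (not verified here): RandomDistribution.Zipf exposes a
  // nextInt()-style sampler like other RandomDistribution generators.
  //
  //   int w_id = this.zipf.nextInt(); // Zipf-skewed id drawn from
  //                                   // [starting_warehouse, last_warehouse]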
  /**
   * Entry point: loads a catalog and workload, optionally applies a
   * PartitionPlan, and prints the estimated workload cost.
   *
   * @param vargs command-line arguments understood by ArgumentsParser
   * @throws Exception
   */
  public static void main(String[] vargs) throws Exception {
    ArgumentsParser args = ArgumentsParser.load(vargs);
    args.require(
        ArgumentsParser.PARAM_CATALOG,
        ArgumentsParser.PARAM_WORKLOAD,
        ArgumentsParser.PARAM_PARTITION_PLAN,
        ArgumentsParser.PARAM_DESIGNER_INTERVALS
        // ArgumentsParser.PARAM_DESIGNER_HINTS
        );
    assert (args.workload.getTransactionCount() > 0)
        : "No transactions were loaded from " + args.workload;

    if (args.hasParam(ArgumentsParser.PARAM_CATALOG_HOSTS)) {
      ClusterConfiguration cc =
          new ClusterConfiguration(args.getParam(ArgumentsParser.PARAM_CATALOG_HOSTS));
      args.updateCatalog(FixCatalog.cloneCatalog(args.catalog, cc), null);
    }

    // If given a PartitionPlan, then update the catalog
    File pplan_path = new File(args.getParam(ArgumentsParser.PARAM_PARTITION_PLAN));
    if (pplan_path.exists()) {
      PartitionPlan pplan = new PartitionPlan();
      pplan.load(pplan_path, args.catalog_db);
      if (args.getBooleanParam(ArgumentsParser.PARAM_PARTITION_PLAN_REMOVE_PROCS, false)) {
        for (Procedure catalog_proc : pplan.proc_entries.keySet()) {
          pplan.setNullProcParameter(catalog_proc);
        } // FOR
      }
      if (args.getBooleanParam(ArgumentsParser.PARAM_PARTITION_PLAN_RANDOM_PROCS, false)) {
        for (Procedure catalog_proc : pplan.proc_entries.keySet()) {
          pplan.setRandomProcParameter(catalog_proc);
        } // FOR
      }
      pplan.apply(args.catalog_db);
      System.out.println("Applied PartitionPlan '" + pplan_path + "' to catalog\n" + pplan);
      System.out.print(StringUtil.DOUBLE_LINE);

      if (args.hasParam(ArgumentsParser.PARAM_PARTITION_PLAN_OUTPUT)) {
        String output = args.getParam(ArgumentsParser.PARAM_PARTITION_PLAN_OUTPUT);
        if (output.equals("-")) output = pplan_path.getAbsolutePath();
        pplan.save(new File(output));
        System.out.println("Saved PartitionPlan to '" + output + "'");
      }
    } else {
      System.err.println("PartitionPlan file '" + pplan_path + "' does not exist. Ignoring...");
    }
    System.out.flush();

    int num_intervals = args.num_intervals;
    TimeIntervalCostModel<SingleSitedCostModel> costmodel =
        new TimeIntervalCostModel<SingleSitedCostModel>(
            args.catalogContext, SingleSitedCostModel.class, num_intervals);
    if (args.hasParam(ArgumentsParser.PARAM_DESIGNER_HINTS))
      costmodel.applyDesignerHints(args.designer_hints);
    double cost = costmodel.estimateWorkloadCost(args.catalogContext, args.workload);

    Map<String, Object> m = new LinkedHashMap<String, Object>();
    m.put("PARTITIONS", CatalogUtil.getNumberOfPartitions(args.catalog_db));
    m.put("INTERVALS", args.num_intervals);
    m.put("EXEC COST", costmodel.last_execution_cost);
    m.put("SKEW COST", costmodel.last_skew_cost);
    m.put("TOTAL COST", cost);
    m.put("PARTITIONS TOUCHED", costmodel.getTxnPartitionAccessHistogram().getSampleCount());
    System.out.println(StringUtil.formatMaps(m));

    // long total = 0;
    m.clear();
    // for (int i = 0; i < num_intervals; i++) {
    // SingleSitedCostModel cm = costmodel.getCostModel(i);
    // Histogram<Integer> h = cm.getTxnPartitionAccessHistogram();
    // m.put(String.format("Interval %02d", i),
    // cm.getTxnPartitionAccessHistogram());
    // total += h.getSampleCount();
    // h.setKeepZeroEntries(true);
    // for (Integer partition :
    // CatalogUtil.getAllPartitionIds(args.catalog_db)) {
    // if (h.contains(partition) == false) h.put(partition, 0);
    // }
    // System.out.println(StringUtil.box("Interval #" + i, "+", 100) + "\n"
    // + h);
    // System.out.println();
    // } // FOR
    // System.out.println(StringUtil.formatMaps(m));
    // System.err.println("TOTAL: " + total);

  }
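
A hypothetical invocation of this entry point; the driver class name and the exact parameter key spellings are assumptions, loosely following the ArgumentsParser.PARAM_* names required above:

  java -cp hstore.jar edu.brown.costmodel.CostModelCalculator \
      catalog=tpcc.jar \
      workload=tpcc.trace \
      partitionplan=tpcc.pplan \
      designer.intervals=12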