Example #1
  ProcedureRunner(
      VoltProcedure procedure,
      SiteProcedureConnection site,
      SystemProcedureExecutionContext sysprocContext,
      Procedure catProc,
      CatalogSpecificPlanner csp) {
    assert (m_inputCRC.getValue() == 0L);

    // Single-statement (catalog) procedures take their name from the catalog
    // entry; Java procedures are named after their class.
    if (procedure instanceof StmtProcedure) {
      m_procedureName = catProc.getTypeName().intern();
    } else {
      m_procedureName = procedure.getClass().getSimpleName();
    }
    m_procedure = procedure;
    m_isSysProc = procedure instanceof VoltSystemProcedure;
    m_catProc = catProc;
    m_site = site;
    m_systemProcedureContext = sysprocContext;
    m_csp = csp;

    m_procedure.init(this);

    // Create a per-procedure statistics source and register it with the stats agent.
    m_statsCollector =
        new ProcedureStatsCollector(
            m_site.getCorrespondingSiteId(), m_site.getCorrespondingPartitionId(), m_catProc);
    VoltDB.instance()
        .getStatsAgent()
        .registerStatsSource(
            SysProcSelector.PROCEDURE, site.getCorrespondingSiteId(), m_statsCollector);

    // Use reflection to discover the procedure's run() method and initialize its SQL statements.
    reflect();
  }
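
A minimal sketch of the kind of Java procedure this constructor wraps; the class name and SQL are hypothetical, not from the VoltDB source. reflect() is what discovers the SQLStmt field and the run() method below, and because this is not a StmtProcedure, m_procedureName would be the class's simple name.

  import org.voltdb.SQLStmt;
  import org.voltdb.VoltProcedure;
  import org.voltdb.VoltTable;

  public class CountWarehouses extends VoltProcedure {
    // Discovered via reflection; the runner manages its plan and parameters.
    public final SQLStmt countStmt = new SQLStmt("SELECT COUNT(*) FROM WAREHOUSE;");

    public VoltTable[] run() {
      voltQueueSQL(countStmt);
      return voltExecuteSQL(true); // final batch of this procedure
    }
  }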
Example #2
 public void voltLoadTable(
     String clusterName, String databaseName, String tableName, VoltTable data)
     throws VoltAbortException {
   // Treat a null or empty table as a no-op.
   if (data == null || data.getRowCount() == 0) {
     return;
   }
   try {
     m_site.loadTable(m_txnState.txnId, clusterName, databaseName, tableName, data);
   } catch (EEException e) {
     // Surface the engine failure as a transaction abort, keeping the EE's message.
     throw new VoltAbortException("Failed to load table: " + tableName + ": " + e.getMessage());
   }
 }
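
A hedged usage sketch for voltLoadTable; the cluster/database names and table layout are hypothetical, and the call assumes a context (such as a system procedure) with access to this method. A null or empty table is a silent no-op per the guard above, while an EE failure aborts the transaction.

   VoltTable rows = new VoltTable(
       new VoltTable.ColumnInfo("ID", VoltType.BIGINT),
       new VoltTable.ColumnInfo("NAME", VoltType.STRING));
   rows.addRow(1L, "first");
   rows.addRow(2L, "second");
   voltLoadTable("cluster", "database", "MY_TABLE", rows);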
Example #3
 /** Returns the HSQL backend if one is in use; otherwise returns null. */
 public HsqlBackend getHsqlBackendIfExists() {
   return m_site.getHsqlBackendIfExists();
 }
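
A short sketch of the documented contract, assuming a caller with access to this method; the branch bodies are placeholders.

  HsqlBackend hsql = getHsqlBackendIfExists();
  if (hsql != null) {
    // Running against the HSQL backend (e.g. in test configurations).
  } else {
    // Running against the native execution engine.
  }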
Example #4
  /*
   * Execute a batch of homogeneous queries, i.e. all reads or all writes.
   */
  VoltTable[] executeSlowHomogeneousBatch(final List<QueuedSQL> batch, final boolean finalTask) {

    BatchState state =
        new BatchState(batch.size(), m_txnState, m_site.getCorrespondingSiteId(), finalTask);

    // iterate over all SQL in the batch, filling out the batch state created above
    for (int i = 0; i < batch.size(); ++i) {
      QueuedSQL queuedSQL = batch.get(i);

      assert (queuedSQL.stmt != null);

      // Figure out what is needed to resume the proc
      int collectorOutputDepId = m_txnState.getNextDependencyId();
      state.m_depsToResume[i] = collectorOutputDepId;

      // Build the set of params for the frags
      FastSerializer fs = new FastSerializer();
      try {
        fs.writeObject(queuedSQL.params);
      } catch (IOException e) {
        throw new RuntimeException(
            "Error serializing parameters for SQL statement: "
                + queuedSQL.stmt.getText()
                + " with params: "
                + queuedSQL.params.toJSONString(),
            e);
      }
      ByteBuffer params = fs.getBuffer();
      assert (params != null);

      // populate the actual lists of fragments and params
      if (queuedSQL.stmt.catStmt != null) {
        // Pre-planned query.

        int numFrags = queuedSQL.stmt.catStmt.getFragments().size();
        assert (numFrags > 0);
        assert (numFrags <= 2);

        /*
         * This numFrags == 1 code is for routing multi-partition reads of a
         * replicated table to the local site. This was a broken performance
         * optimization; see https://issues.voltdb.com/browse/ENG-1232.
         *
         * The problem is that the fragments for the replicated read are not correctly
         * interleaved with the distributed writes to the replicated table that might
         * be in the same batch of SQL statements. We do end up doing the replicated
         * read locally, but we break up the batches in the face of mixed reads and
         * writes.
         */
        Iterator<PlanFragment> fragmentIter = queuedSQL.stmt.catStmt.getFragments().iterator();
        if (numFrags == 1) {
          PlanFragment frag = fragmentIter.next();
          state.addFragment(i, frag, params);
        } else {
          // collector/aggregator pair (guaranteed above that numFrags==2 here)
          PlanFragment frag1 = fragmentIter.next();
          assert (frag1 != null);
          PlanFragment frag2 = fragmentIter.next();
          assert (frag2 != null);
          // frags with no deps are usually collector frags that go to all partitions
          // figure out which frag is which type
          if (!frag1.getHasdependencies()) {
            state.addFragmentPair(i, frag1, frag2, params);
          } else {
            state.addFragmentPair(i, frag2, frag1, params);
          }
        }
      } else {
        /*
         * Unplanned custom query. Requires an attached plan.
         * Set up collector and dependent aggregator fragments.
         */
        SQLStmtPlan plan = queuedSQL.stmt.getPlan();
        assert (plan != null);
        byte[] collectorFragment = plan.getCollectorFragment();
        byte[] aggregatorFragment = plan.getAggregatorFragment();
        assert (aggregatorFragment != null);

        if (collectorFragment == null) {
          // Multi-partition/replicated with just an aggregator fragment.
          state.addCustomFragment(i, aggregatorFragment, params);
        } else {
          // Multi-partition/non-replicated with collector and aggregator.
          state.addCustomFragmentPair(i, collectorFragment, aggregatorFragment, params);
        }
      }
    }

    // instruct the dtxn what's needed to resume the proc
    m_txnState.setupProcedureResume(finalTask, state.m_depsToResume);

    // create all the local work for the transaction
    for (int i = 0; i < state.m_depsForLocalTask.length; i++) {
      if (state.m_depsForLocalTask[i] < 0) continue;
      state.m_localTask.addInputDepId(i, state.m_depsForLocalTask[i]);
    }

    // note: non-transactional work only helps us if it's final work
    m_txnState.createLocalFragmentWork(
        state.m_localTask, state.m_localFragsAreNonTransactional && finalTask);

    if (!state.m_distributedTask.isEmpty()) {
      m_txnState.createAllParticipatingFragmentWork(state.m_distributedTask);
    }

    // recursively call recursableRun and don't allow it to shut down
    Map<Integer, List<VoltTable>> mapResults = m_site.recursableRun(m_txnState);

    assert (mapResults != null);
    assert (state.m_depsToResume != null);
    assert (state.m_depsToResume.length == batch.size());

    // build an array of answers, assuming one result per expected id
    for (int i = 0; i < batch.size(); i++) {
      List<VoltTable> matchingTablesForId = mapResults.get(state.m_depsToResume[i]);
      assert (matchingTablesForId != null);
      assert (matchingTablesForId.size() == 1);
      state.m_results[i] = matchingTablesForId.get(0);

      // get isReplicated flag from either the catalog statement or the plan,
      // depending on whether it's pre-planned or unplanned
      final SQLStmt stmt = batch.get(i).stmt;
      boolean isReplicated;
      if (stmt.catStmt != null) {
        isReplicated = stmt.catStmt.getReplicatedtabledml();
      } else {
        final SQLStmtPlan plan = stmt.getPlan();
        assert (plan != null);
        isReplicated = plan.isReplicatedTableDML();
      }

      // if this was replicated-table DML, divide the modified-tuple count by the replication factor
      if (isReplicated) {
        long newVal = state.m_results[i].asScalarLong() / m_site.getReplicatedDMLDivisor();
        state.m_results[i] =
            new VoltTable(new VoltTable.ColumnInfo("modified_tuples", VoltType.BIGINT));
        state.m_results[i].addRow(newVal);
      }
    }

    return state.m_results;
  }
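
From the procedure author's side, the batch this method executes is accumulated with voltQueueSQL and flushed with voltExecuteSQL. A minimal sketch with hypothetical statements, kept homogeneous (all reads) to match this method's contract:

  public final SQLStmt nameById = new SQLStmt("SELECT NAME FROM ITEMS WHERE ID = ?;");
  public final SQLStmt countAll = new SQLStmt("SELECT COUNT(*) FROM ITEMS;");

  public VoltTable[] run(long id) {
    voltQueueSQL(nameById, id); // each call becomes one QueuedSQL in the batch
    voltQueueSQL(countAll);
    // true == finalTask: this is the procedure's last batch, enabling the
    // non-transactional local-work path noted above.
    return voltExecuteSQL(true); // one VoltTable per queued statement
  }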