private void processRejoiningFragmentWork(
      FragmentTaskMessage ftask, HashMap<Integer, List<VoltTable>> dependencies) {
    assert (ftask.getFragmentCount() > 0);
    assert (m_rejoinState == RejoinState.REJOINING);

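    // This site is still rejoining, so it does not execute the fragment; instead it
    // acknowledges the work with a canned success response marked as recovering.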
    FragmentResponseMessage response = new FragmentResponseMessage(ftask, m_hsId);
    response.setRecovering(true);
    response.setStatus(FragmentResponseMessage.SUCCESS, null);

    // log the work done for replay
    if (!ftask.isReadOnly() && !ftask.isSysProcTask()) {
      assert (!m_notice.isReadOnly());
      assert (m_loggedFragments != null);
      m_loggedFragments.appendFragmentTask(ftask);
    }

    // add a dummy table for all of the expected dependency ids
    for (int i = 0; i < ftask.getFragmentCount(); i++) {
      response.addDependency(
          ftask.getOutputDepId(i),
          new VoltTable(new VoltTable.ColumnInfo("DUMMY", VoltType.BIGINT)));
    }

    m_mbox.send(response.getDestinationSiteId(), response);

    // If we're not the coordinator, the transaction is read-only,
    // and this was the final task, then we can try to move on after
    // we've finished this work.
    if (!isCoordinator() && isReadOnly() && ftask.isFinalTask()) {
      m_done = true;
    }
  }

  void processFragmentWork(
      FragmentTaskMessage ftask, HashMap<Integer, List<VoltTable>> dependencies) {
    assert (ftask.getFragmentCount() > 0);

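    // Execute the fragment against this site. Anything other than SUCCESS means the
    // fragment failed and the transaction has to be unwound.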
    FragmentResponseMessage response = m_site.processFragmentTask(this, dependencies, ftask);
    if (response.getStatusCode() != FragmentResponseMessage.SUCCESS) {
      if (m_missingDependencies != null) m_missingDependencies.clear();
      m_readyWorkUnits.clear();

      if (m_isCoordinator) {
        // Throw an exception that unwinds the runtime all the way back
        // to the stored procedure invocation call, triggering undo
        // at that point.
        if (response.getException() != null) {
          throw response.getException();
        } else {
          throw new FragmentFailureException();
        }
      } else {
        m_needsRollback = true;
        m_done = true;
      }
    }

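    // A response the coordinator produced for itself can have its dependencies consumed
    // directly; otherwise the response is routed back through the mailbox.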
    if (m_isCoordinator && (response.getDestinationSiteId() == response.getExecutorSiteId())) {
      processFragmentResponseDependencies(response);
    } else {
      m_mbox.send(response.getDestinationSiteId(), response);
      // If we're not the coordinator, the transaction is read-only,
      // and this was the final task, then we can try to move on after
      // we've finished this work.
      if (!isCoordinator() && isReadOnly() && ftask.isFinalTask()) {
        m_done = true;
      }
    }
  }
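
  // Note: for the bare `throw new FragmentFailureException()` above to compile without a
  // throws clause, the exception must be unchecked. Its definition is not part of this
  // snippet; a minimal sketch under that assumption would be:
  //
  //   public class FragmentFailureException extends RuntimeException {
  //       private static final long serialVersionUID = 1L;
  //   }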
Example #3
  // Eventually, the master for a partition set will need to be able to dedupe
  // FragmentResponses from its replicas.
  public void handleFragmentResponseMessage(FragmentResponseMessage message) {
    // Send the message to the duplicate counter, if any
    DuplicateCounter counter =
        m_duplicateCounters.get(new DuplicateCounterKey(message.getTxnId(), message.getSpHandle()));
    if (counter != null) {
      int result = counter.offer(message);
      if (result == DuplicateCounter.DONE) {
        m_duplicateCounters.remove(
            new DuplicateCounterKey(message.getTxnId(), message.getSpHandle()));
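        // All expected responses for this (txnId, spHandle) have arrived; advance the
        // repair log truncation point to this spHandle.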
        m_repairLogTruncationHandle = message.getSpHandle();
        FragmentResponseMessage resp = (FragmentResponseMessage) counter.getLastResponse();
        // The MPI tracks dependencies per partition HSID, so make sure
        // ours is written into the message that gets sent to the MPI.
        resp.setExecutorSiteId(m_mailbox.getHSId());
        m_mailbox.send(counter.m_destinationId, resp);
      } else if (result == DuplicateCounter.MISMATCH) {
        VoltDB.crashLocalVoltDB("HASH MISMATCH running multi-part procedure.", true, null);
      }
      // Doing duplicate suppression: all done.
      return;
    }

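    // No duplicate counter is registered for this response; just forward it on to its
    // destination.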
    m_mailbox.send(message.getDestinationSiteId(), message);
  }
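
  // The HashMap lookup and removal above rely on DuplicateCounterKey having value-based
  // equals() and hashCode(). The real VoltDB class is not included in this snippet; the
  // following is only a minimal sketch under that assumption, keyed on txnId and spHandle.
  private static class DuplicateCounterKey {
    private final long m_txnId;
    private final long m_spHandle;

    DuplicateCounterKey(long txnId, long spHandle) {
      m_txnId = txnId;
      m_spHandle = spHandle;
    }

    @Override
    public boolean equals(Object o) {
      if (!(o instanceof DuplicateCounterKey)) {
        return false;
      }
      DuplicateCounterKey other = (DuplicateCounterKey) o;
      return m_txnId == other.m_txnId && m_spHandle == other.m_spHandle;
    }

    @Override
    public int hashCode() {
      return Long.hashCode(m_txnId) ^ Long.hashCode(m_spHandle);
    }
  }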