Example #1
  @Override
  protected void onStateChanged() {
    boolean isLeader = isLeader();
    boolean hasLeader = hasLeader();
    changeSupport.onLeadershipChange(isLeader, hasLeader);
    treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

    // If this actor is no longer the leader, close all the transaction chains
    if (!isLeader) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(
            "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
            persistenceId(),
            getId());
      }

      store.closeAllTransactionChains();

      commitCoordinator.abortPendingTransactions(
          "The transacton was aborted due to inflight leadership change.", this);
    }

    if (hasLeader && !isIsolatedLeader()) {
      messageRetrySupport.retryMessages();
    }
  }
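
The hook above runs on every Raft state change: a demoted leader closes its transaction chains and aborts in-flight transactions, and once a usable (non-isolated) leader is known, queued messages are retried. A minimal sketch of that pattern, assuming hypothetical Store, Coordinator, and RetrySupport interfaces in place of the Shard's real collaborators:

// Hypothetical stand-ins for the Shard's collaborators; these are not
// the real OpenDaylight types.
abstract class LeadershipAwareActor {
  interface Store { void closeAllTransactionChains(); }
  interface Coordinator { void abortPendingTransactions(String reason); }
  interface RetrySupport { void retryMessages(); }

  private final Store store;
  private final Coordinator coordinator;
  private final RetrySupport retrySupport;

  LeadershipAwareActor(Store store, Coordinator coordinator, RetrySupport retrySupport) {
    this.store = store;
    this.coordinator = coordinator;
    this.retrySupport = retrySupport;
  }

  abstract boolean isLeader();
  abstract boolean hasLeader();
  abstract boolean isIsolatedLeader();

  protected void onStateChanged() {
    // Demote-side cleanup first: a follower must not keep open chains or
    // pending transactions from its time as leader.
    if (!isLeader()) {
      store.closeAllTransactionChains();
      coordinator.abortPendingTransactions("Transaction aborted due to leadership change.");
    }
    // Once a usable (non-isolated) leader exists, drain queued messages.
    if (hasLeader() && !isIsolatedLeader()) {
      retrySupport.retryMessages();
    }
  }
}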
Example #2
  private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
    LOG.debug(
        "{}: handleForwardedReadyTransaction for {}",
        persistenceId(),
        forwardedReady.getTransactionID());

    boolean isLeaderActive = isLeaderActive();
    if (isLeader() && isLeaderActive) {
      commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
    } else {
      ActorSelection leader = getLeader();
      if (!isLeaderActive || leader == null) {
        messageRetrySupport.addMessageToRetry(
            forwardedReady,
            getSender(),
            "Could not commit transaction " + forwardedReady.getTransactionID());
      } else {
        LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

        ReadyLocalTransaction readyLocal =
            new ReadyLocalTransaction(
                forwardedReady.getTransactionID(),
                forwardedReady.getTransaction().getSnapshot(),
                forwardedReady.isDoImmediateCommit());
        readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
        leader.forward(readyLocal, getContext());
      }
    }
  }
Example #3
  private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
    LOG.debug(
        "{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());

    boolean isLeaderActive = isLeaderActive();
    if (isLeader() && isLeaderActive) {
      try {
        commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
      } catch (Exception e) {
        LOG.error(
            "{}: Error handling ReadyLocalTransaction for Tx {}",
            persistenceId(),
            message.getTransactionID(),
            e);
        getSender().tell(new akka.actor.Status.Failure(e), getSelf());
      }
    } else {
      ActorSelection leader = getLeader();
      if (!isLeaderActive || leader == null) {
        messageRetrySupport.addMessageToRetry(
            message, getSender(), "Could not commit transaction " + message.getTransactionID());
      } else {
        LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
        message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
        leader.forward(message, getContext());
      }
    }
  }
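
Examples #2 and #3 share the same routing decision: handle the message locally when this shard is the active leader, queue it for retry when no usable leader exists, otherwise forward it to the remote leader. A condensed sketch of that decision, with hypothetical Message, LeaderRef, and RetrySupport types standing in for the Akka and OpenDaylight ones:

final class LeaderRouting {
  interface Message { String transactionId(); }
  interface LeaderRef { void forward(Message message); }
  interface RetrySupport { void addMessageToRetry(Message message, String failureReason); }

  private final RetrySupport retrySupport;

  LeaderRouting(RetrySupport retrySupport) {
    this.retrySupport = retrySupport;
  }

  void route(Message message, boolean isLeader, boolean isLeaderActive, LeaderRef leader) {
    if (isLeader && isLeaderActive) {
      handleLocally(message);  // we are the active leader: commit here
    } else if (!isLeaderActive || leader == null) {
      // No usable leader yet (e.g. mid-election): queue for retry
      // instead of failing the caller immediately.
      retrySupport.addMessageToRetry(
          message, "Could not commit transaction " + message.transactionId());
    } else {
      leader.forward(message);  // known remote leader: hand the message over
    }
  }

  private void handleLocally(Message message) {
    // Placeholder for the commitCoordinator.handle...(message, sender, this) calls above.
  }
}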
Example #4
  private void handleBatchedModifications(BatchedModifications batched) {
    // This message is sent to prepare the modifications transaction directly on the Shard as an
    // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
    // BatchedModifications message, the caller sets the ready flag in the message indicating
    // modifications are complete. The reply contains the cohort actor path (this actor) for the
    // caller to initiate the 3-phase commit. This also avoids the overhead of sending an
    // additional ReadyTransaction message.

    // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
    // normally get here if we're not the leader as the front-end (TransactionProxy) should
    // determine the primary/leader shard. However, with timing and caching on the front-end,
    // there's a small window where it could have a stale leader during leadership transitions.
    boolean isLeaderActive = isLeaderActive();
    if (isLeader() && isLeaderActive) {
      handleBatchedModificationsLocal(batched, getSender());
    } else {
      ActorSelection leader = getLeader();
      if (!isLeaderActive || leader == null) {
        messageRetrySupport.addMessageToRetry(
            batched, getSender(), "Could not commit transaction " + batched.getTransactionID());
      } else {
        // TODO: what if this is not the first batch and leadership changed in between batched
        // messages? We could check if the commitCoordinator already has a cached entry and
        // forward all the previous batched modifications.
        LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
        leader.forward(batched, getContext());
      }
    }
  }
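
The comment at the top of Example #4 describes a caller-side protocol: modifications are streamed in batches and only the last batch carries the ready flag. A sketch of what that sender loop might look like; the Batch and ShardRef types and their setReady()/send() methods are illustrative assumptions, not the actual BatchedModifications API:

final class BatchingClient {
  interface Batch { void setReady(boolean ready); }
  interface ShardRef { void send(Batch batch); }

  void sendBatches(java.util.List<Batch> batches, ShardRef shard) {
    for (int i = 0; i < batches.size(); i++) {
      Batch batch = batches.get(i);
      // The ready flag on the final batch tells the shard that the
      // modifications are complete and 3-phase commit may begin.
      batch.setReady(i == batches.size() - 1);
      shard.send(batch);
    }
  }
}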
Example #5
  @Override
  protected void onLeaderChanged(String oldLeader, String newLeader) {
    shardMBean.incrementLeadershipChangeCount();

    if (hasLeader() && !isIsolatedLeader()) {
      messageRetrySupport.retryMessages();
    }
  }
Example #6
  @Override
  public void postStop() {
    LOG.info("Stopping Shard {}", persistenceId());

    super.postStop();

    messageRetrySupport.close();

    if (txCommitTimeoutCheckSchedule != null) {
      txCommitTimeoutCheckSchedule.cancel();
    }

    commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

    shardMBean.unregisterMBean();
  }
Example #7
  @Override
  public void onReceiveCommand(final Object message) throws Exception {

    MessageTracker.Context context = appendEntriesReplyTracker.received(message);

    if (context.error().isPresent()) {
      LOG.trace(
          "{} : AppendEntriesReply failed to arrive at the expected interval {}",
          persistenceId(),
          context.error());
    }

    try {
      if (CreateTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
        handleCreateTransaction(message);
      } else if (message instanceof BatchedModifications) {
        handleBatchedModifications((BatchedModifications) message);
      } else if (message instanceof ForwardedReadyTransaction) {
        handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
      } else if (message instanceof ReadyLocalTransaction) {
        handleReadyLocalTransaction((ReadyLocalTransaction) message);
      } else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
        handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
      } else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
        handleCommitTransaction(CommitTransaction.fromSerializable(message));
      } else if (AbortTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
        handleAbortTransaction(AbortTransaction.fromSerializable(message));
      } else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
        closeTransactionChain(CloseTransactionChain.fromSerializable(message));
      } else if (message instanceof RegisterChangeListener) {
        changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
      } else if (message instanceof RegisterDataTreeChangeListener) {
        treeChangeSupport.onMessage(
            (RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
      } else if (message instanceof UpdateSchemaContext) {
        updateSchemaContext((UpdateSchemaContext) message);
      } else if (message instanceof PeerAddressResolved) {
        PeerAddressResolved resolved = (PeerAddressResolved) message;
        setPeerAddress(resolved.getPeerId().toString(), resolved.getPeerAddress());
      } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
        commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
      } else if (message instanceof DatastoreContext) {
        onDatastoreContext((DatastoreContext) message);
      } else if (message instanceof RegisterRoleChangeListener) {
        roleChangeNotifier.get().forward(message, context());
      } else if (message instanceof FollowerInitialSyncUpStatus) {
        shardMBean.setFollowerInitialSyncStatus(
            ((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
        context().parent().tell(message, self());
      } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
        sender().tell(getShardMBean(), self());
      } else if (message instanceof GetShardDataTree) {
        sender().tell(store.getDataTree(), self());
      } else if (message instanceof ServerRemoved) {
        context().parent().forward(message, context());
      } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
        messageRetrySupport.onTimerMessage(message);
      } else {
        super.onReceiveCommand(message);
      }
    } finally {
      context.done();
    }
  }
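
The receive method wraps the whole dispatch in a MessageTracker context, opening it on arrival and closing it in a finally block so even failing handlers are accounted for. A minimal sketch of that discipline, with hypothetical Tracker and Context interfaces rather than the real MessageTracker API:

final class TrackedReceive {
  interface Context extends AutoCloseable { @Override void close(); }
  interface Tracker { Context received(Object message); }

  private final Tracker tracker;

  TrackedReceive(Tracker tracker) {
    this.tracker = tracker;
  }

  void onReceive(Object message) {
    Context context = tracker.received(message);
    try {
      dispatch(message);  // the long if/else chain in onReceiveCommand
    } finally {
      context.close();    // mirrors context.done() above
    }
  }

  private void dispatch(Object message) {
    // instanceof-based routing elided.
  }
}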