private void sendCommitCommand(
    TxInvocationContext ctx, CommitCommand command, Collection<Address> preparedOn)
    throws TimeoutException, InterruptedException {
  // We only send the commit command to the nodes that own the affected keys.
  Collection<Address> recipients = dm.getAffectedNodes(ctx.getAffectedKeys());

  // By default, use the configured commit sync settings.
  boolean syncCommitPhase = configuration.isSyncCommitPhase();
  for (Address a : preparedOn) {
    if (!recipients.contains(a)) {
      // However, if we prepared on some nodes and are now committing on different nodes,
      // force a sync commit so we can respond to prepare resend requests.
      syncCommitPhase = true;
    }
  }

  Map<Address, Response> responses =
      rpcManager.invokeRemotely(recipients, command, syncCommitPhase, true);
  if (!responses.isEmpty()) {
    List<Address> resendTo = new LinkedList<Address>();
    for (Map.Entry<Address, Response> r : responses.entrySet()) {
      if (needToResendPrepare(r.getValue())) resendTo.add(r.getKey());
    }
    if (!resendTo.isEmpty()) {
      log.debugf(
          "Need to resend prepares for %s to %s", command.getGlobalTransaction(), resendTo);
      PrepareCommand pc = buildPrepareCommandForResend(ctx, command);
      rpcManager.invokeRemotely(resendTo, pc, true, true);
    }
  }
}
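// needToResendPrepare(Response) and buildPrepareCommandForResend(ctx, command) are
// defined elsewhere in this class. Below is a hedged sketch of the former; the
// RESEND_PREPARE_MARKER constant is a hypothetical stand-in for however the real
// protocol flags the condition, not an actual Infinispan constant:
private boolean needToResendPrepare(Response r) {
  // Assumption: a node that is asked to commit a transaction it never prepared
  // (e.g. it became an owner after the prepare was sent) replies with a
  // successful response carrying a marker value requesting a prepare resend.
  return r instanceof SuccessfulResponse
      && RESEND_PREPARE_MARKER.equals(((SuccessfulResponse) r).getResponseValue());
}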
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command)
    throws Throwable {
  Object retVal = invokeNextInterceptor(ctx, command);
  boolean sync = isSynchronous(ctx);
  if (shouldInvokeRemoteTxCommand(ctx)) {
    int newCacheViewId = -1;
    stateTransferLock.waitForStateTransferToEnd(ctx, command, newCacheViewId);

    // If we are one-phase, don't block on this future.
    if (command.isOnePhaseCommit()) flushL1Caches(ctx);

    Collection<Address> recipients = dm.getAffectedNodes(ctx.getAffectedKeys());
    prepareOnAffectedNodes(ctx, command, recipients, sync);
    ((LocalTxInvocationContext) ctx).remoteLocksAcquired(recipients);
  } else if (isL1CacheEnabled
      && command.isOnePhaseCommit()
      && !ctx.isOriginLocal()
      && !ctx.getLockedKeys().isEmpty()) {
    // We fall into this block if we are a remote node that happens to be the primary
    // data owner and has locked keys. It is still our responsibility to invalidate
    // L1 caches in the cluster.
    flushL1Caches(ctx);
  }
  return retVal;
}
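// prepareOnAffectedNodes(...) is the hook used above to push the prepare out to the
// owners. A plausible default, sketched here under the assumption that the prepare
// needs no handling beyond a sync/async remote invocation, is a straight delegation
// to the RpcManager:
protected void prepareOnAffectedNodes(
    TxInvocationContext ctx, PrepareCommand command, Collection<Address> recipients, boolean sync) {
  // Broadcast the prepare to the nodes that own the affected keys; block on the
  // replies only when the cache is configured for synchronous replication.
  rpcManager.invokeRemotely(recipients, command, sync);
}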
@Override
public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command)
    throws Throwable {
  if (ctx.isOriginLocal()) {
    int newCacheViewId = -1;
    stateTransferLock.waitForStateTransferToEnd(ctx, command, newCacheViewId);

    final Collection<Address> affectedNodes = dm.getAffectedNodes(command.getKeys());
    ((LocalTxInvocationContext) ctx).remoteLocksAcquired(affectedNodes);
    rpcManager.invokeRemotely(affectedNodes, command, true, true);
  }
  return invokeNextInterceptor(ctx, command);
}
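// For reference, a LockControlCommand typically originates from an explicit lock
// call on the AdvancedCache API. A usage sketch, assuming a pessimistic
// transactional cache named "cache" and an injected "transactionManager":
//
//   transactionManager.begin();
//   cache.getAdvancedCache().lock("k1", "k2"); // broadcast via the visitor above
//   cache.put("k1", "v1");
//   transactionManager.commit();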
@Override
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command)
    throws Throwable {
  if (shouldInvokeRemoteTxCommand(ctx)) {
    rpcManager.invokeRemotely(
        dm.getAffectedNodes(ctx.getAffectedKeys()),
        command,
        configuration.isSyncRollbackPhase(),
        true);
  }
  return invokeNextInterceptor(ctx, command);
}
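// The sync/async behaviour of the commit and rollback phases used above comes from
// the cache configuration. A hedged configuration sketch, with builder method names
// as in the Infinispan 5.x programmatic API (verify against your version):
//
//   Configuration cfg = new ConfigurationBuilder()
//       .clustering().cacheMode(CacheMode.DIST_SYNC)
//       .transaction()
//           .syncCommitPhase(true)    // block until commits are acknowledged
//           .syncRollbackPhase(false) // fire-and-forget rollbacks
//       .build();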
/**
 * If we are within a transaction we won't do any replication here, as replication is
 * only performed at commit time. If the operation didn't originate locally we won't do
 * any replication either.
 */
private Object handleWriteCommand(
    InvocationContext ctx,
    WriteCommand command,
    RecipientGenerator recipientGenerator,
    boolean skipRemoteGet,
    boolean skipL1Invalidation)
    throws Throwable {
  // See if we need to load values from remote sources first.
  if (ctx.isOriginLocal() && !skipRemoteGet)
    remoteGetBeforeWrite(ctx, command.isConditional(), recipientGenerator);
  boolean sync = isSynchronous(ctx);

  // If this is local mode then skip distributing.
  if (isLocalModeForced(ctx)) {
    return invokeNextInterceptor(ctx, command);
  }

  // FIRST pass this call up the chain. Only if it succeeds locally (no exceptions) do
  // we attempt to distribute.
  Object returnValue = invokeNextInterceptor(ctx, command);

  if (command.isSuccessful()) {
    if (!ctx.isInTxScope()) {
      NotifyingNotifiableFuture<Object> futureToReturn = null;
      Future<?> invalidationFuture = null;
      if (ctx.isOriginLocal()) {
        int newCacheViewId = -1;
        stateTransferLock.waitForStateTransferToEnd(ctx, command, newCacheViewId);
        List<Address> rec = recipientGenerator.generateRecipients();
        int numCallRecipients = rec == null ? 0 : rec.size();
        if (trace) log.tracef("Invoking command %s on hosts %s", command, rec);

        boolean useFuture = ctx.isUseFutureReturnType();
        if (isL1CacheEnabled && !skipL1Invalidation) {
          // Handle the case where the put is local. If in unicast mode and this node
          // is not a data owner, nothing happens. If in multicast mode, this node
          // will send the multicast.
          if (rpcManager.getTransport().getMembers().size() > numCallRecipients) {
            // The command was successful, we have recipients and L1 should be
            // flushed, so request any L1 invalidations from this node.
            if (trace)
              log.tracef(
                  "Put occurring on node, requesting L1 cache invalidation for keys %s. Other data owners are %s",
                  command.getAffectedKeys(), dm.getAffectedNodes(command.getAffectedKeys()));
            if (useFuture) {
              futureToReturn =
                  l1Manager.flushCache(
                      recipientGenerator.getKeys(),
                      returnValue,
                      ctx.getOrigin(),
                      !(command instanceof RemoveCommand));
            } else {
              invalidationFuture =
                  l1Manager.flushCacheWithSimpleFuture(
                      recipientGenerator.getKeys(),
                      returnValue,
                      ctx.getOrigin(),
                      !(command instanceof RemoveCommand));
            }
          } else {
            if (trace)
              log.tracef("Not performing invalidation! numCallRecipients=%s", numCallRecipients);
          }
        }
        if (!isSingleOwnerAndLocal(recipientGenerator)) {
          if (useFuture) {
            if (futureToReturn == null) futureToReturn = new NotifyingFutureImpl(returnValue);
            rpcManager.invokeRemotelyInFuture(rec, command, futureToReturn);
            return futureToReturn;
          } else {
            rpcManager.invokeRemotely(rec, command, sync);
          }
        } else if (useFuture && futureToReturn != null) {
          return futureToReturn;
        }
        if (invalidationFuture != null && sync) {
          invalidationFuture.get(); // wait for the invalidation command to complete
          if (trace) log.tracef("Finished invalidating keys %s ", recipientGenerator.getKeys());
        }
      } else {
        // Piggyback on remote puts and cause L1 invalidations.
        if (isL1CacheEnabled && !skipL1Invalidation) {
          // The command was successful and L1 should be flushed, so request any L1
          // invalidations from this node.
          if (trace)
            log.tracef(
                "Put occurring on node, requesting cache invalidation for keys %s. Origin of command is remote",
                command.getAffectedKeys());
          // If this is a remove command, signal that the entry should not be kept in
          // the origin's L1 cache, since it is being removed.
          invalidationFuture =
              l1Manager.flushCacheWithSimpleFuture(
                  recipientGenerator.getKeys(),
                  returnValue,
                  ctx.getOrigin(),
                  !(command instanceof RemoveCommand));
          if (sync) {
            invalidationFuture.get(); // wait for the invalidation command to complete
            if (trace)
              log.tracef("Finished invalidating keys %s ", recipientGenerator.getKeys());
          }
        }
      }
    }
  }
  return returnValue;
}
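// isSingleOwnerAndLocal(...) short-circuits the remote invocation above when this
// node is the sole owner of the affected keys. A hedged sketch of that check; the
// exact implementation may differ per version:
private boolean isSingleOwnerAndLocal(RecipientGenerator recipientGenerator) {
  List<Address> recipients = recipientGenerator.generateRecipients();
  // With a single owner, and ourselves as that owner, there is nobody to replicate to.
  return recipients != null
      && recipients.size() == 1
      && recipients.get(0).equals(rpcManager.getTransport().getAddress());
}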