  /** flush current ops to the given members for the given region */
  public static void flushTo(Set<InternalDistributedMember> targets, DistributedRegion region) {
    DM dm = region.getDistributionManager();
    DistributedRegion r = region;
    boolean initialized = r.isInitialized();
    if (initialized) {
      // force a new "view" so we can track current ops
      r.getDistributionAdvisor().forceNewMembershipVersion();
      try {
        r.getDistributionAdvisor().waitForCurrentOperations();
      } catch (RegionDestroyedException e) {
        return;
      }
    }
    // send all state-flush messages and then wait for replies
    Set<ReplyProcessor21> processors = new HashSet<ReplyProcessor21>();
    for (InternalDistributedMember target : targets) {
      StateStabilizationMessage gr = new StateStabilizationMessage();
      gr.isSingleFlushTo = true; // new for flushTo operation
      gr.requestingMember = dm.getDistributionManagerId();
      gr.setRecipient(target);
      ReplyProcessor21 processor = new ReplyProcessor21(dm, target);
      gr.processorId = processor.getProcessorId();
      gr.channelState = dm.getMembershipManager().getMessageState(target, false);
      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)
          && (gr.channelState != null) && (gr.channelState.size() > 0)) {
        logger.trace(LogMarker.STATE_FLUSH_OP, "channel states: {}",
            gr.channelStateDescription(gr.channelState));
      }
      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
        logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", gr);
      }
      dm.putOutgoing(gr);
      processors.add(processor);
    }
    for (ReplyProcessor21 processor : processors) {
      try {
        processor.waitForReplies();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
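  /*
   * Hypothetical usage sketch (not part of the original class): a caller that needs the given
   * members to have applied all in-flight operations on a region before proceeding can invoke
   * flushTo() and rely on its blocking behavior. The method name and the "newHosts"/"region"
   * parameters below are illustrative assumptions, not existing APIs.
   */
  private static void exampleFlushBeforeHandOff(Set<InternalDistributedMember> newHosts,
      DistributedRegion region) {
    // Blocks until every target acknowledges with a StateStabilizedMessage, or returns early
    // if the calling thread is interrupted (flushTo preserves the interrupt flag).
    flushTo(newHosts, region);
  }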
  @Override
  protected void process(DistributionManager dm) {
    logger.trace(LogMarker.STATE_FLUSH_OP, "Processing {}", this);
    if (dm.getDistributionManagerId().equals(relayRecipient)) {
      // no need to send a relay request to this process - just send the
      // ack back to the sender
      StateStabilizedMessage ga = new StateStabilizedMessage();
      ga.sendingMember = relayRecipient;
      ga.setRecipient(this.getSender());
      ga.setProcessorId(processorId);
      dm.putOutgoing(ga);
    } else {
      // 1) wait for all messages based on the membership version (or older)
      //    at which the sender "joined" this region to be put on the pipe
      // 2) record the state of all communication channels from this process
      //    to the relay point
      // 3) send a stabilization message to the relay point that holds the
      //    communication channel state information
      StateStabilizationMessage gr = new StateStabilizationMessage();
      gr.setRecipient((InternalDistributedMember) relayRecipient);
      gr.requestingMember = this.getSender();
      gr.processorId = processorId;
      try {
        Set<DistributedRegion> regions;
        if (this.allRegions) {
          regions = getAllRegions(dm);
        } else {
          regions = Collections.singleton(this.getRegion(dm));
        }
        for (DistributedRegion r : regions) {
          if (r == null) {
            if (logger.isTraceEnabled(LogMarker.DM)) {
              logger.trace(LogMarker.DM, "Region not found - skipping channel state assessment");
            }
            continue;
          }
          if (this.allRegions && r.doesNotDistribute()) {
            // no need to flush a region that does no distribution
            continue;
          }
          boolean initialized = r.isInitialized();
          if (initialized) {
            if (this.flushNewOps) {
              // force a new "view" so we can track current ops
              r.getDistributionAdvisor().forceNewMembershipVersion();
            }
            try {
              r.getDistributionAdvisor().waitForCurrentOperations();
            } catch (RegionDestroyedException e) {
              // continue with the next region
            }
          }
          boolean useMulticast =
              r.getMulticastEnabled() && r.getSystem().getConfig().getMcastPort() != 0;
          if (initialized) {
            Map channelStates =
                dm.getMembershipManager().getMessageState(relayRecipient, useMulticast);
            if (gr.channelState != null) {
              gr.channelState.putAll(channelStates);
            } else {
              gr.channelState = channelStates;
            }
            if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)
                && (gr.channelState != null) && (gr.channelState.size() > 0)) {
              logger.trace(LogMarker.STATE_FLUSH_OP, "channel states: {}",
                  gr.channelStateDescription(gr.channelState));
            }
          }
        }
      } catch (CancelException cce) {
        // cache is closed - no distribution advisor available for the region,
        // so nothing to do but send the stabilization message
      } catch (Exception e) {
        logger.fatal(
            LocalizedMessage.create(
                LocalizedStrings.StateFlushOperation_0__EXCEPTION_CAUGHT_WHILE_DETERMINING_CHANNEL_STATE,
                this),
            e);
      } catch (ThreadDeath td) {
        throw td;
      } catch (VirtualMachineError err) {
        SystemFailure.initiateFailure(err);
        // If this ever returns, rethrow the error. We're poisoned
        // now, so don't let this thread continue.
        throw err;
      } catch (Throwable t) {
        // Whenever you catch Error or Throwable, you must also
        // catch VirtualMachineError (see above). However, there is
        // _still_ a possibility that you are dealing with a cascading
        // error condition, so you also need to check to see if the JVM
        // is still usable:
        SystemFailure.checkFailure();
        logger.fatal(
            LocalizedMessage.create(
                LocalizedStrings.StateFlushOperation_0__THROWABLE_CAUGHT_WHILE_DETERMINING_CHANNEL_STATE,
                this),
            t);
      } finally {
        if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
          logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", gr);
        }
        dm.putOutgoing(gr);
      }
    }
  }
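  /*
   * Hypothetical sketch (an assumption, not taken from the excerpt above): roughly what the
   * relay point is expected to do when the StateStabilizationMessage built above arrives -
   * drain the recorded channel state, then acknowledge back to the member that requested the
   * flush, which releases its matching ReplyProcessor21. The waitForMessageState() call and
   * the "request" parameter are illustrative assumptions; only setRecipient(), setProcessorId(),
   * putOutgoing() and the fields referenced above appear in the original excerpt.
   */
  void exampleAcknowledgeStability(DistributionManager dm, StateStabilizationMessage request)
      throws InterruptedException {
    if (request.channelState != null) {
      // Wait until everything the sender had already put on the wire toward this member,
      // as captured in channelState, has been received and processed locally (assumed API).
      dm.getMembershipManager().waitForMessageState(request.getSender(), request.channelState);
    }
    StateStabilizedMessage ack = new StateStabilizedMessage();
    ack.setRecipient((InternalDistributedMember) request.requestingMember);
    ack.sendingMember = dm.getDistributionManagerId(); // identifies who has become stable
    ack.setProcessorId(request.processorId);
    dm.putOutgoing(ack); // matched by ReplyProcessor21.waitForReplies() on the requesting member
  }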