/** flush current ops to the given members for the given region */ public static void flushTo(Set<InternalDistributedMember> targets, DistributedRegion region) { DM dm = region.getDistributionManager(); DistributedRegion r = region; boolean initialized = r.isInitialized(); if (initialized) { r.getDistributionAdvisor() .forceNewMembershipVersion(); // force a new "view" so we can track current ops try { r.getDistributionAdvisor().waitForCurrentOperations(); } catch (RegionDestroyedException e) { return; } } // send all state-flush messages and then wait for replies Set<ReplyProcessor21> processors = new HashSet<ReplyProcessor21>(); for (InternalDistributedMember target : targets) { StateStabilizationMessage gr = new StateStabilizationMessage(); gr.isSingleFlushTo = true; // new for flushTo operation gr.requestingMember = dm.getDistributionManagerId(); gr.setRecipient(target); ReplyProcessor21 processor = new ReplyProcessor21(dm, target); gr.processorId = processor.getProcessorId(); gr.channelState = dm.getMembershipManager().getMessageState(target, false); if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP) && ((gr.channelState != null) && (gr.channelState.size() > 0))) { logger.trace( LogMarker.STATE_FLUSH_OP, "channel states: {}", gr.channelStateDescription(gr.channelState)); } if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) { logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", gr); } dm.putOutgoing(gr); processors.add(processor); } for (ReplyProcessor21 processor : processors) { try { processor.waitForReplies(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); return; } } }
protected DistributedRegion prepare(boolean isConcurrencyChecksEnabled) { GemFireCacheImpl cache = Fakes.cache(); // create region attributes and internal region arguments RegionAttributes ra = createRegionAttributes(isConcurrencyChecksEnabled); InternalRegionArguments ira = new InternalRegionArguments(); setInternalRegionArguments(ira); // create a region object DistributedRegion region = createAndDefineRegion(isConcurrencyChecksEnabled, ra, ira, cache); if (isConcurrencyChecksEnabled) { region.enableConcurrencyChecks(); } doNothing().when(region).notifyGatewaySender(any(), any()); doReturn(true).when(region).hasSeenEvent(any(EntryEventImpl.class)); return region; }
/**
 * Handles a received state-marker message. When this member is itself the relay recipient, it
 * simply acknowledges back to the sender. Otherwise it waits for current operations on the
 * affected region(s), records communication-channel state toward the relay recipient, and
 * forwards a {@code StateStabilizationMessage} carrying that state to the relay recipient.
 * The stabilization message is sent in a {@code finally} block so the relay recipient is never
 * left waiting, even if channel-state assessment fails.
 */
@Override
protected void process(DistributionManager dm) {
  logger.trace(LogMarker.STATE_FLUSH_OP, "Processing {}", this);
  if (dm.getDistributionManagerId().equals(relayRecipient)) {
    // no need to send a relay request to this process - just send the
    // ack back to the sender
    StateStabilizedMessage ga = new StateStabilizedMessage();
    ga.sendingMember = relayRecipient;
    ga.setRecipient(this.getSender());
    ga.setProcessorId(processorId);
    dm.putOutgoing(ga);
  } else {
    // 1) wait for all messages based on the membership version (or older)
    //    at which the sender "joined" this region to be put on the pipe
    // 2) record the state of all communication channels from this process
    //    to the relay point
    // 3) send a stabilization message to the relay point that holds the
    //    communication channel state information
    StateStabilizationMessage gr = new StateStabilizationMessage();
    gr.setRecipient((InternalDistributedMember) relayRecipient);
    gr.requestingMember = this.getSender();
    gr.processorId = processorId;
    try {
      // Either flush every distributed region or just the one named in this message.
      Set<DistributedRegion> regions;
      if (this.allRegions) {
        regions = getAllRegions(dm);
      } else {
        regions = Collections.singleton(this.getRegion(dm));
      }
      for (DistributedRegion r : regions) {
        if (r == null) {
          if (logger.isTraceEnabled(LogMarker.DM)) {
            logger.trace(LogMarker.DM, "Region not found - skipping channel state assessment");
          }
        }
        if (r != null) {
          if (this.allRegions && r.doesNotDistribute()) {
            // no need to flush a region that does no distribution
            continue;
          }
          boolean initialized = r.isInitialized();
          if (initialized) {
            if (this.flushNewOps) {
              r.getDistributionAdvisor()
                  .forceNewMembershipVersion(); // force a new "view" so we can track current ops
            }
            try {
              r.getDistributionAdvisor().waitForCurrentOperations();
            } catch (RegionDestroyedException e) {
              // continue with the next region
            }
          }
          // Only consult multicast state when the region actually uses multicast.
          boolean useMulticast =
              r.getMulticastEnabled() && r.getSystem().getConfig().getMcastPort() != 0;
          if (initialized) {
            Map channelStates =
                dm.getMembershipManager().getMessageState(relayRecipient, useMulticast);
            // Merge this region's channel state into the accumulated message state.
            if (gr.channelState != null) {
              gr.channelState.putAll(channelStates);
            } else {
              gr.channelState = channelStates;
            }
            if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)
                && ((gr.channelState != null) && (gr.channelState.size() > 0))) {
              logger.trace(
                  LogMarker.STATE_FLUSH_OP,
                  "channel states: {}",
                  gr.channelStateDescription(gr.channelState));
            }
          }
        }
      }
    } catch (CancelException cce) {
      // cache is closed - no distribution advisor available for the region so nothing to do but
      // send the stabilization message
    } catch (Exception e) {
      logger.fatal(
          LocalizedMessage.create(
              LocalizedStrings
                  .StateFlushOperation_0__EXCEPTION_CAUGHT_WHILE_DETERMINING_CHANNEL_STATE,
              this),
          e);
    } catch (ThreadDeath td) {
      // Never swallow ThreadDeath - rethrow immediately.
      throw td;
    } catch (VirtualMachineError err) {
      SystemFailure.initiateFailure(err);
      // If this ever returns, rethrow the error. We're poisoned
      // now, so don't let this thread continue.
      throw err;
    } catch (Throwable t) {
      // Whenever you catch Error or Throwable, you must also
      // catch VirtualMachineError (see above). However, there is
      // _still_ a possibility that you are dealing with a cascading
      // error condition, so you also need to check to see if the JVM
      // is still usable:
      SystemFailure.checkFailure();
      logger.fatal(
          LocalizedMessage.create(
              LocalizedStrings
                  .StateFlushOperation_0__THROWABLE_CAUGHT_WHILE_DETERMINING_CHANNEL_STATE,
              this),
          t);
    } finally {
      // Always send the stabilization message so the relay recipient isn't left waiting,
      // regardless of whether channel-state collection succeeded.
      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
        logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", gr);
      }
      dm.putOutgoing(gr);
    }
  }
}
/**
 * flush state to the given target
 *
 * @param recipients The members who may be making state changes to the region. This is typically
 *     taken from a CacheDistributionAdvisor membership set
 * @param target The member who should have all state flushed to it
 * @param processorType The execution processor type for the marker message that is sent to all
 *     members using the given region
 * @param flushNewOps normally only ops that were started before region profile exchange are
 *     flushed. Setting this to true causes the flush to wait for any started after the profile
 *     exchange as well.
 * @throws InterruptedException If the operation is interrupted, usually for shutdown, an
 *     InterruptedException will be thrown
 * @return true if the state was flushed, false if not
 */
public boolean flush(
    Set recipients, DistributedMember target, int processorType, boolean flushNewOps)
    throws InterruptedException {

  Set recips = recipients; // do not use recipients parameter past this point

  if (Thread.interrupted()) {
    throw new InterruptedException();
  }

  InternalDistributedMember myId = this.dm.getDistributionManagerId();

  // Make sure the flush target itself receives the marker message (unless it is this member).
  if (!recips.contains(target) && !myId.equals(target)) {
    recips = new HashSet(recipients);
    recips.add(target);
  }
  // partial fix for bug 38773 - ensures that this cache will get both
  // a cache op and an adjunct message when creating a bucket region
  // if (recips.size() < 2 && !myId.equals(target)) {
  //   return true; // no state to flush to a single holder of the region
  // }
  StateMarkerMessage smm = new StateMarkerMessage();
  smm.relayRecipient = target;
  smm.processorType = processorType;
  smm.flushNewOps = flushNewOps;
  // A null region means every region should be flushed; otherwise name the one region.
  if (region == null) {
    smm.allRegions = true;
  } else {
    smm.regionPath = region.getFullPath();
  }
  smm.setRecipients(recips);

  StateFlushReplyProcessor gfprocessor = new StateFlushReplyProcessor(dm, recips, target);
  smm.processorId = gfprocessor.getProcessorId();
  // Enable severe-alert processing for bucket regions when an alert threshold is configured.
  if (region != null
      && region.isUsedForPartitionedRegionBucket()
      && region.getDistributionConfig().getAckSevereAlertThreshold() > 0) {
    smm.severeAlertEnabled = true;
    gfprocessor.enableSevereAlertProcessing();
  }
  if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
    logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {} with processor {}", smm, gfprocessor);
  }
  Set failures = this.dm.putOutgoing(smm);
  if (failures != null) {
    // If the marker never reached the flush target there is nothing to wait for - give up.
    if (failures.contains(target)) {
      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
        logger.trace(
            LogMarker.STATE_FLUSH_OP,
            "failed to send StateMarkerMessage to target {}; returning from flush without waiting for replies",
            target);
      }
      return false;
    }
    // Other members that couldn't be reached won't reply; tell the processor not to wait on them.
    gfprocessor.messageNotSentTo(failures);
  }
  try {
    // try { Thread.sleep(100); } catch (InterruptedException e) {
    // Thread.currentThread().interrupt(); } // DEBUGGING - stall before getting membership to
    // increase odds that target has left
    gfprocessor.waitForReplies();
    if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
      logger.trace(LogMarker.STATE_FLUSH_OP, "Finished processing {}", smm);
    }
  } catch (ReplyException re) {
    logger.warn(
        LocalizedMessage.create(
            LocalizedStrings.StateFlushOperation_STATE_FLUSH_TERMINATED_WITH_EXCEPTION),
        re);
    return false;
  }
  return true;
}
/**
 * Creates a StateFlushOperation bound to the given region and its distribution manager.
 *
 * @param r The region whose state is to be flushed
 */
public StateFlushOperation(DistributedRegion r) {
  this.dm = r.getDistributionManager();
  this.region = r;
}