  /** For Distributed Tx */
  private void setIfTransactionDistributed() {
    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
    if (cache != null) {
      if (cache.getTxManager() != null) {
        this.isTransactionDistributed = cache.getTxManager().isDistributed();
      }
    }
  }
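  /*
   * A minimal usage sketch (hypothetical, not in the original source): a message constructor
   * would call setIfTransactionDistributed() once, at send time, so the flag reflects the
   * sending cache's transaction mode even if the receiving cache is configured differently.
   * The null checks above make the call safe during cache startup or shutdown.
   */
  // public MyRemoteMessage(/* ... */) { // hypothetical message subclass
  //   setIfTransactionDistributed(); // snapshot the sender's tx mode; null-safe during startup
  // }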
  private DistributedRegion getRegion(DistributionManager dm) {
    if (region != null) {
      return region;
    }
    // set the init level requirement so that we don't hang in CacheFactory.getInstance()
    // (bug 36175)
    int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
    try {
      GemFireCacheImpl gfc = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
      Region r = gfc.getRegionByPathForProcessing(this.regionPath);
      if (r instanceof DistributedRegion) {
        region = (DistributedRegion) r;
      }
    } finally {
      LocalRegion.setThreadInitLevelRequirement(oldLevel);
    }
    return region;
  }
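  /*
   * A minimal sketch (hypothetical helper, not in the original source) of the init-level
   * pattern used by getRegion() and getAllRegions(): lower the thread's init level before
   * touching CacheFactory.getInstance() so we never block behind a region still loading its
   * initial image, and always restore the caller's level in a finally block.
   */
  private GemFireCacheImpl getCacheWithoutWaitingForInitialImage(DistributionManager dm) {
    int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
    try {
      return (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
    } finally {
      // restore the previous init level even if getInstance() throws
      LocalRegion.setThreadInitLevelRequirement(oldLevel);
    }
  }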
  /** returns a set of all DistributedRegions for allRegions processing */
  private Set<DistributedRegion> getAllRegions(DistributionManager dm) {
    // set the init level requirement so that we don't hang in CacheFactory.getInstance()
    // (bug 36175)
    int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
    try {
      GemFireCacheImpl gfc = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
      Set<DistributedRegion> result = new HashSet<>();
      for (LocalRegion r : gfc.getAllRegions()) {
        // it's important not to check if the cache is closing, so access
        // the isDestroyed boolean directly
        if (r instanceof DistributedRegion && !r.isDestroyed) {
          result.add((DistributedRegion) r);
        }
      }
      return result;
    } finally {
      LocalRegion.setThreadInitLevelRequirement(oldLevel);
    }
  }
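  /*
   * A minimal sketch (hypothetical, not in the original source) of how an "allRegions" message
   * might consume getAllRegions(): the returned set is a snapshot, so callers should tolerate
   * regions destroyed concurrently rather than fail the whole message.
   */
  private void operateOnAllRegions(DistributionManager dm) {
    for (DistributedRegion r : getAllRegions(dm)) {
      if (r.isDestroyed) {
        continue; // raced with a destroy after the snapshot; skip this region
      }
      // ... per-region operation would go here ...
    }
  }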
  public void validateCacheConfig(GemFireCacheImpl cacheInstance) {
    // To fix bug 44961 only validate our attributes against the existing cache
    // if they have been explicitly set by the user via a setter.
    // So all the following "ifs" check that "*UserSet" is true.
    // If they have not been set then we might use a cache.xml that will specify them.
    // Since we don't have the cache.xml info here we need to only complain
    // if we are sure that we will be incompatible with the existing cache.
    if (this.pdxReadSerializedUserSet
        && this.pdxReadSerialized != cacheInstance.getPdxReadSerialized()) {
      throw new IllegalStateException(
          LocalizedStrings.CacheFactory_0_EXISTING_CACHE_WITH_DIFFERENT_CACHE_CONFIG
              .toLocalizedString("pdxReadSerialized: " + cacheInstance.getPdxReadSerialized()));
    }
    if (this.pdxDiskStoreUserSet && !equals(this.pdxDiskStore, cacheInstance.getPdxDiskStore())) {
      throw new IllegalStateException(
          LocalizedStrings.CacheFactory_0_EXISTING_CACHE_WITH_DIFFERENT_CACHE_CONFIG
              .toLocalizedString("pdxDiskStore: " + cacheInstance.getPdxDiskStore()));
    }
    if (this.pdxPersistentUserSet && this.pdxPersistent != cacheInstance.getPdxPersistent()) {
      throw new IllegalStateException(
          LocalizedStrings.CacheFactory_0_EXISTING_CACHE_WITH_DIFFERENT_CACHE_CONFIG
              .toLocalizedString("pdxPersistent: " + cacheInstance.getPdxPersistent()));
    }
    if (this.pdxIgnoreUnreadFieldsUserSet
        && this.pdxIgnoreUnreadFields != cacheInstance.getPdxIgnoreUnreadFields()) {
      throw new IllegalStateException(
          LocalizedStrings.CacheFactory_0_EXISTING_CACHE_WITH_DIFFERENT_CACHE_CONFIG
              .toLocalizedString(
                  "pdxIgnoreUnreadFields: " + cacheInstance.getPdxIgnoreUnreadFields()));
    }
    if (this.pdxSerializerUserSet
        && !samePdxSerializer(this.pdxSerializer, cacheInstance.getPdxSerializer())) {
      throw new IllegalStateException(
          LocalizedStrings.CacheFactory_0_EXISTING_CACHE_WITH_DIFFERENT_CACHE_CONFIG
              .toLocalizedString("pdxSerializer: " + cacheInstance.getPdxSerializer()));
    }
  }
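  /*
   * A usage sketch (hypothetical, not in the original source): validateCacheConfig() is what
   * makes a second, conflicting CacheFactory.create() in the same JVM fail fast. Only
   * attributes the user set explicitly are compared, so a factory that never called
   * setPdxReadSerialized() remains compatible with any existing cache.
   */
  // Cache first = new CacheFactory().setPdxReadSerialized(true).create();
  // // throws IllegalStateException: existing cache has pdxReadSerialized: true
  // Cache second = new CacheFactory().setPdxReadSerialized(false).create();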
  /**
   * Validates colocation of a PartitionedRegion.<br>
   * This method used to be called when the RegionAttributes were created. But this was too early
   * since the region we are colocated with might not exist (yet). So it is now called when the PR
   * using these attributes is created. See bug 47197.
   *
   * <p>1. The region passed in setColocatedWith should exist.<br>
   * 2. The region passed should be a PartitionedRegion.<br>
   * 3. Custom partitioning should be enabled for colocated regions.<br>
   * 4. totalNumBuckets should be the same for colocated regions.<br>
   * 5. The redundancy of colocated regions should be the same.<br>
   *
   * @since 5.8Beta
   */
  void validateColocation() {
    if (this.colocatedRegionName == null) {
      return;
    }
    Cache cache = GemFireCacheImpl.getInstance();
    if (cache != null) {
      Region<?, ?> region = cache.getRegion(this.colocatedRegionName);
      if (region == null) {
        throw new IllegalStateException(
            LocalizedStrings
                .PartitionAttributesImpl_REGION_SPECIFIED_IN_COLOCATEDWITH_IS_NOT_PRESENT_IT_SHOULD_BE_CREATED_BEFORE_SETTING_COLOCATED_WITH_THIS_REGION
                .toLocalizedString());
      }
      if (!(region instanceof PartitionedRegion)) {
        throw new IllegalStateException(
            LocalizedStrings
                .PartitionAttributesImpl_SETTING_THE_ATTRIBUTE_COLOCATEDWITH_IS_SUPPORTED_ONLY_FOR_PARTITIONEDREGIONS
                .toLocalizedString());
      }
      PartitionedRegion colocatedRegion = (PartitionedRegion) region;
      if (this.getTotalNumBuckets() != colocatedRegion.getPartitionAttributes()
          .getTotalNumBuckets()) {
        throw new IllegalStateException(
            LocalizedStrings
                .PartitionAttributesImpl_CURRENT_PARTITIONEDREGIONS_TOTALNUMBUCKETS_SHOULD_BE_SAME_AS_TOTALNUMBUCKETS_OF_COLOCATED_PARTITIONEDREGION
                .toLocalizedString());
      }
      if (this.getRedundancy() != colocatedRegion.getPartitionAttributes().getRedundantCopies()) {
        throw new IllegalStateException(
            LocalizedStrings
                .PartitionAttributesImpl_CURRENT_PARTITIONEDREGIONS_REDUNDANCY_SHOULD_BE_SAME_AS_THE_REDUNDANCY_OF_COLOCATED_PARTITIONEDREGION
                .toLocalizedString());
      }
    }
  }
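  /*
   * A usage sketch (hypothetical, not in the original source): validateColocation() enforces
   * that a colocated PR matches its parent's bucket count and redundancy. Roughly:
   */
  // PartitionAttributes<?, ?> parentAttrs = new PartitionAttributesFactory()
  //     .setTotalNumBuckets(113).setRedundantCopies(1).create();
  // PartitionAttributes<?, ?> childAttrs = new PartitionAttributesFactory()
  //     .setColocatedWith("/parent") // "/parent" must already exist and be a PartitionedRegion
  //     .setTotalNumBuckets(113) // must match the parent or validateColocation() throws
  //     .setRedundantCopies(1) // must match the parent as well
  //     .create();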
  /**
   * Upon receipt of the message, both process the message and send an acknowledgement, not
   * necessarily in that order. Note: Any hang in this message may cause a distributed deadlock
   * for those threads waiting for an acknowledgement.
   *
   * @throws PartitionedRegionException if the region does not exist (typically, if it has been
   *         destroyed)
   */
  @Override
  public void process(final DistributionManager dm) {
    Throwable thr = null;
    boolean sendReply = true;
    LocalRegion r = null;
    long startTime = 0;
    try {
      if (checkCacheClosing(dm) || checkDSClosing(dm)) {
        thr = new CacheClosedException(
            LocalizedStrings.PartitionMessage_REMOTE_CACHE_IS_CLOSED_0.toLocalizedString(
                dm.getId()));
        return;
      }
      GemFireCacheImpl gfc = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
      r = gfc.getRegionByPathForProcessing(this.regionPath);
      if (r == null && failIfRegionMissing()) {
        // if the distributed system is disconnecting, don't send a reply saying
        // the partitioned region can't be found (bug 36585)
        thr = new RegionDestroyedException(
            LocalizedStrings.RemoteOperationMessage_0_COULD_NOT_FIND_REGION_1.toLocalizedString(
                new Object[] {dm.getDistributionManagerId(), regionPath}),
            regionPath);
        return; // reply sent in finally block below
      }

      thr = UNHANDLED_EXCEPTION;

      // [bruce] r might be null here, so we have to go to the cache instance to get the txmgr
      TXManagerImpl txMgr = GemFireCacheImpl.getInstance().getTxManager();
      TXStateProxy tx = null;
      try {
        tx = txMgr.masqueradeAs(this);
        sendReply = operateOnRegion(dm, r, startTime);
      } finally {
        txMgr.unmasquerade(tx);
      }
      thr = null;
    } catch (RemoteOperationException fre) {
      thr = fre;
    } catch (DistributedSystemDisconnectedException se) {
      // bug 37026: this is too noisy...
      // throw new CacheClosedException("remote system shutting down");
      // thr = se; cache is closed, no point trying to send a reply
      thr = null;
      sendReply = false;
      if (logger.isDebugEnabled()) {
        logger.debug("shutdown caught, abandoning message: {}", se.getMessage(), se);
      }
    } catch (RegionDestroyedException rde) {
      // [bruce] RDE does not always mean that the sender's region is also
      // destroyed, so we must send back an exception. If the sender's
      // region is also destroyed, who cares if we send it an exception
      // if (pr != null && pr.isClosed) {
      thr = new ForceReattemptException(
          LocalizedStrings.PartitionMessage_REGION_IS_DESTROYED_IN_0.toLocalizedString(
              dm.getDistributionManagerId()),
          rde);
      // }
    } catch (VirtualMachineError err) {
      SystemFailure.initiateFailure(err);
      // If this ever returns, rethrow the error. We're poisoned
      // now, so don't let this thread continue.
      throw err;
    } catch (Throwable t) {
      // Whenever you catch Error or Throwable, you must also
      // catch VirtualMachineError (see above). However, there is
      // _still_ a possibility that you are dealing with a cascading
      // error condition, so you also need to check to see if the JVM
      // is still usable:
      SystemFailure.checkFailure();
      // log the exception at fine level if there is no reply to the message
      thr = null;
      if (sendReply) {
        if (!checkDSClosing(dm)) {
          thr = t;
        } else {
          // don't pass arbitrary runtime exceptions and errors back if this
          // cache/vm is closing
          thr = new ForceReattemptException(
              LocalizedStrings.PartitionMessage_DISTRIBUTED_SYSTEM_IS_DISCONNECTING
                  .toLocalizedString());
        }
      }
      if (logger.isTraceEnabled(LogMarker.DM) && (t instanceof RuntimeException)) {
        logger.trace(LogMarker.DM, "Exception caught while processing message", t);
      }
    } finally {
      if (sendReply) {
        ReplyException rex = null;
        if (thr != null) {
          // don't transmit the exception if this message was to a listener
          // and this listener is shutting down
          rex = new ReplyException(thr);
        }
        // Send the reply if operateOnRegion returned true
        sendReply(getSender(), this.processorId, dm, rex, r, startTime);
      }
    }
  }
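  /*
   * A minimal sketch (hypothetical subclass, not in the original source) of the contract
   * process() relies on: operateOnRegion() returns true when process() should send the
   * acknowledgement itself, and false when the operation will send its own reply later.
   */
  // @Override
  // protected boolean operateOnRegion(DistributionManager dm, LocalRegion r, long startTime)
  //     throws RemoteOperationException {
  //   // perform the remote operation against r ...
  //   return true; // let process() send the reply in its finally block
  // }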
  /**
   * check to see if the cache is closing
   *
   * @return true if there is no cache instance or the cache is closed
   */
  public final boolean checkCacheClosing(DistributionManager dm) {
    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
    // return (cache != null && cache.isClosed());
    return cache == null || cache.isClosed();
  }