Code Example #1: getRegion(DistributionManager)
 private DistributedRegion getRegion(DistributionManager dm) {
   if (region != null) {
     return region;
   }
   // set the init level requirement so that we don't hang in CacheFactory.getInstance() (bug 36175)
   int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
   try {
     GemFireCacheImpl gfc = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
     Region<?, ?> r = gfc.getRegionByPathForProcessing(this.regionPath);
     if (r instanceof DistributedRegion) {
       region = (DistributedRegion) r;
     }
   } finally {
     LocalRegion.setThreadInitLevelRequirement(oldLevel);
   }
   return region;
 }
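
The guard in getRegion is the point of this snippet: setThreadInitLevelRequirement returns the thread's previous requirement, so the method lowers it to BEFORE_INITIAL_IMAGE (keeping CacheFactory.getInstance() from blocking behind a region's initial image, bug 36175) and restores it in a finally block no matter how the lookup exits. A minimal sketch of that pattern as a reusable helper, assuming only the GemFire calls used above (the name withInitLevel and the Supplier shape are illustrative, not GemFire API):

  // Illustrative helper only; setThreadInitLevelRequirement(...) is the real
  // GemFire call, everything else here is a sketch.
  private static <T> T withInitLevel(int level, java.util.function.Supplier<T> body) {
    int oldLevel = LocalRegion.setThreadInitLevelRequirement(level);
    try {
      return body.get(); // cache access that must not block on region initialization
    } finally {
      LocalRegion.setThreadInitLevelRequirement(oldLevel); // always restore
    }
  }

With such a helper, the body of getRegion would reduce to a single withInitLevel(LocalRegion.BEFORE_INITIAL_IMAGE, ...) call around the cache lookup.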
Code Example #2: getAllRegions(DistributionManager)
 /** Returns a set of all DistributedRegions for allRegions processing. */
 private Set<DistributedRegion> getAllRegions(DistributionManager dm) {
   // set the init level requirement so that we don't hang in CacheFactory.getInstance() (bug 36175)
   int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
   try {
     GemFireCacheImpl gfc = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
      Set<DistributedRegion> result = new HashSet<>();
     for (LocalRegion r : gfc.getAllRegions()) {
       // it's important not to check if the cache is closing, so access
       // the isDestroyed boolean directly
       if (r instanceof DistributedRegion && !r.isDestroyed) {
         result.add((DistributedRegion) r);
       }
     }
     return result;
   } finally {
     LocalRegion.setThreadInitLevelRequirement(oldLevel);
   }
 }
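
getAllRegions snapshots every live DistributedRegion; as the inline comment notes, it reads the isDestroyed field directly rather than going through an accessor that would also check whether the cache is closing, so the collection keeps working during shutdown. A hedged sketch of a consumer, where operateOnRegion is the same per-region hook that process() in the next example delegates to, and the method name operateOnAllRegions is hypothetical:

  // Hypothetical consumer of getAllRegions(...); only the helper above is real.
  private void operateOnAllRegions(DistributionManager dm) throws RemoteOperationException {
    for (DistributedRegion r : getAllRegions(dm)) {
      // regions were live when collected but can still close concurrently,
      // so per-region work should tolerate RegionDestroyedException
      operateOnRegion(dm, r, 0L); // hypothetical reuse of the process() hook
    }
  }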
Code Example #3: process(DistributionManager)
  /**
   * Upon receipt of the message, both process the message and send an acknowledgement, not
   * necessarily in that order. Note: Any hang in this message may cause a distributed deadlock for
   * those threads waiting for an acknowledgement.
   *
   * @throws PartitionedRegionException if the region does not exist (typically, if it has been
   *     destroyed)
   */
  @Override
  public void process(final DistributionManager dm) {
    Throwable thr = null;
    boolean sendReply = true;
    LocalRegion r = null;
    long startTime = 0;
    try {
      if (checkCacheClosing(dm) || checkDSClosing(dm)) {
        thr =
            new CacheClosedException(
                LocalizedStrings.PartitionMessage_REMOTE_CACHE_IS_CLOSED_0.toLocalizedString(
                    dm.getId()));
        return;
      }
      GemFireCacheImpl gfc = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
      r = gfc.getRegionByPathForProcessing(this.regionPath);
      if (r == null && failIfRegionMissing()) {
        // if the distributed system is disconnecting, don't send a reply saying
        // the partitioned region can't be found (bug 36585)
        thr =
            new RegionDestroyedException(
                LocalizedStrings.RemoteOperationMessage_0_COULD_NOT_FIND_REGION_1.toLocalizedString(
                    new Object[] {dm.getDistributionManagerId(), regionPath}),
                regionPath);
        return; // reply sent in finally block below
      }

      thr = UNHANDLED_EXCEPTION;

      // [bruce] r might be null here, so we have to go to the cache instance to get the txmgr
      TXManagerImpl txMgr = GemFireCacheImpl.getInstance().getTxManager();
      TXStateProxy tx = null;
      try {
        tx = txMgr.masqueradeAs(this);
        sendReply = operateOnRegion(dm, r, startTime);
      } finally {
        txMgr.unmasquerade(tx);
      }
      thr = null;

    } catch (RemoteOperationException fre) {
      thr = fre;
    } catch (DistributedSystemDisconnectedException se) {
      // bug 37026: this is too noisy...
      //      throw new CacheClosedException("remote system shutting down");
      //      thr = se; cache is closed, no point trying to send a reply
      thr = null;
      sendReply = false;
      if (logger.isDebugEnabled()) {
        logger.debug("shutdown caught, abandoning message: {}", se.getMessage(), se);
      }
    } catch (RegionDestroyedException rde) {
      // [bruce] RDE does not always mean that the sender's region is also
      //         destroyed, so we must send back an exception.  If the sender's
      //         region is also destroyed, who cares if we send it an exception
      // if (pr != null && pr.isClosed) {
      thr =
          new ForceReattemptException(
              LocalizedStrings.PartitionMessage_REGION_IS_DESTROYED_IN_0.toLocalizedString(
                  dm.getDistributionManagerId()),
              rde);
      // }
    } catch (VirtualMachineError err) {
      SystemFailure.initiateFailure(err);
      // If this ever returns, rethrow the error.  We're poisoned
      // now, so don't let this thread continue.
      throw err;
    } catch (Throwable t) {
      // Whenever you catch Error or Throwable, you must also
      // catch VirtualMachineError (see above).  However, there is
      // _still_ a possibility that you are dealing with a cascading
      // error condition, so you also need to check to see if the JVM
      // is still usable:
      SystemFailure.checkFailure();
      // log the exception at fine level if there is no reply to the message
      thr = null;
      if (sendReply) {
        if (!checkDSClosing(dm)) {
          thr = t;
        } else {
          // don't pass arbitrary runtime exceptions and errors back if this
          // cache/vm is closing
          thr =
              new ForceReattemptException(
                  LocalizedStrings.PartitionMessage_DISTRIBUTED_SYSTEM_IS_DISCONNECTING
                      .toLocalizedString());
        }
      }
      if (logger.isTraceEnabled(LogMarker.DM) && (t instanceof RuntimeException)) {
        logger.trace(LogMarker.DM, "Exception caught while processing message", t);
      }
    } finally {
      if (sendReply) {
        ReplyException rex = null;

        if (thr != null) {
          // don't transmit the exception if this message was to a listener
          // and this listener is shutting down
          rex = new ReplyException(thr);
        }

        // Send the reply if the operateOnPartitionedRegion returned true
        sendReply(getSender(), this.processorId, dm, rex, r, startTime);
      }
    }
  }
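
This process() is a template method: it resolves the region, masquerades the thread as the message's transaction via TXManagerImpl.masqueradeAs, delegates the real work to operateOnRegion, and lets the finally block send the acknowledgement (wrapping any captured Throwable in a ReplyException) whenever operateOnRegion returned true. A minimal sketch of a concrete subclass under those assumptions; the class name SizeProbeMessage is hypothetical, and the enclosing class is taken to be RemoteOperationMessage as the log strings above suggest:

  // Hypothetical subclass sketch; only the operateOnRegion contract is taken
  // from the process() method above.
  public class SizeProbeMessage extends RemoteOperationMessage {
    private transient int result; // illustrative field; a real message would serialize this into its reply

    @Override
    protected boolean operateOnRegion(DistributionManager dm, LocalRegion r, long startTime)
        throws RemoteOperationException {
      this.result = r.size(); // the actual remote operation
      // returning true asks process() to send the acknowledgement from its
      // finally block; a subclass that sends its own reply would return false
      return true;
    }
  }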