Code Example #1
 private boolean createDefinedIndexesForPR(
     HashSet<Index> indexes,
     PartitionedRegion region,
     HashSet<IndexCreationData> icds,
     HashMap<String, Exception> exceptionsMap) {
   try {
     indexes.addAll(region.createIndexes(false, icds));
   } catch (IndexCreationException e1) {
     logger.info(
         LocalizedMessage.create(
             LocalizedStrings
                 .DefaultQueryService_EXCEPTION_WHILE_CREATING_INDEX_ON_PR_DEFAULT_QUERY_PROCESSOR),
         e1);
   } catch (CacheException e1) {
     logger.info(
         LocalizedMessage.create(
             LocalizedStrings
                 .DefaultQueryService_EXCEPTION_WHILE_CREATING_INDEX_ON_PR_DEFAULT_QUERY_PROCESSOR),
         e1);
     return true;
   } catch (ForceReattemptException e1) {
     logger.info(
         LocalizedMessage.create(
             LocalizedStrings
                 .DefaultQueryService_EXCEPTION_WHILE_CREATING_INDEX_ON_PR_DEFAULT_QUERY_PROCESSOR),
         e1);
     return true;
   } catch (MultiIndexCreationException e) {
     exceptionsMap.putAll(e.getExceptionsMap());
     return true;
   }
   return false;
 }
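
The method above maps each checked exception type to a decision: log-and-continue for IndexCreationException, log-and-flag for cache and reattempt failures, and per-index collection for MultiIndexCreationException. A minimal sketch of the same collect-failures-and-flag pattern, using hypothetical BatchRunner/runAll names rather than Geode APIs:

import java.util.List;
import java.util.Map;

class BatchRunner {
  /** Runs every task, collecting failures; returns true if any should be reported. */
  static boolean runAll(List<Runnable> tasks, Map<String, Exception> exceptionsMap) {
    boolean throwException = false;
    for (Runnable task : tasks) {
      try {
        task.run();
      } catch (RuntimeException e) {
        // record the failure but keep going, mirroring the MultiIndexCreationException case
        exceptionsMap.put(task.toString(), e);
        throwException = true;
      }
    }
    return throwException;
  }
}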
Code Example #2
    @Override
    public void run2() {
      if (pool.getCancelCriterion().cancelInProgress() != null) {
        return;
      }
      synchronized (recoveryScheduledLock) {
        recoveryScheduled = false;
      }
      Object[] objects = InternalInstantiator.getInstantiatorsForSerialization();
      if (objects.length == 0) {
        return;
      }
      EventID eventId = InternalInstantiator.generateEventId();
      // Fix for bug:40930
      if (eventId == null) {
        background.schedule(new RecoveryTask(), pingInterval, TimeUnit.MILLISECONDS);
        recoveryScheduled = true;
      } else {
        try {
          RegisterInstantiatorsOp.execute(pool, objects, eventId);
        } catch (CancelException e) {
          throw e;
        } catch (RejectedExecutionException e) {
          // This is probably because we've started to shut down.
          pool.getCancelCriterion().checkCancelInProgress(e);
          throw e; // weird
        } catch (Exception e) {
          pool.getCancelCriterion().checkCancelInProgress(e);

          // If an exception occurred on the server, don't retry
          Throwable cause = e.getCause();
          if (cause instanceof ClassNotFoundException) {
            logger.warn(
                LocalizedMessage.create(
                    LocalizedStrings
                        .InstantiatorRecoveryListener_INSTANTIATORRECOVERYTASK_ERROR_CLASSNOTFOUNDEXCEPTION,
                    cause.getMessage()));
          } else {
            logger.warn(
                LocalizedMessage.create(
                    LocalizedStrings
                        .InstantiatorRecoveryListener_INSTANTIATORRECOVERYTASK_ERROR_RECOVERING_INSTANTIATORS),
                e);
          }
        } finally {
          pool.releaseThreadLocalConnection();
        }
      }
    }
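
If recovery cannot proceed yet (here, no event ID is available), the task reschedules itself instead of failing. A minimal self-rescheduling sketch under that assumption, with RecoveryLoop/tryRecover as hypothetical names and a ScheduledExecutorService standing in for the pool's background executor:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class RecoveryLoop {
  private final ScheduledExecutorService background = Executors.newSingleThreadScheduledExecutor();
  private final AtomicBoolean cancelled = new AtomicBoolean();
  private final long retryIntervalMillis = 10_000L; // stand-in for pingInterval

  void scheduleRecovery() {
    background.schedule(this::attemptRecovery, retryIntervalMillis, TimeUnit.MILLISECONDS);
  }

  private void attemptRecovery() {
    if (cancelled.get()) {
      return; // mirrors the cancelCriterion check: stop quietly during shutdown
    }
    if (!tryRecover()) {
      scheduleRecovery(); // not ready yet; try again after the interval
    }
  }

  private boolean tryRecover() {
    // placeholder for the real work (e.g. re-registering instantiators)
    return true;
  }
}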
Code Example #3
  /** By default the partition can use up to 100% of the allocated off-heap memory. */
  private int computeOffHeapLocalMaxMemory() {

    long availableOffHeapMemoryInMB = 0;
    if (testAvailableOffHeapMemory != null) {
      availableOffHeapMemoryInMB =
          OffHeapStorage.parseOffHeapMemorySize(testAvailableOffHeapMemory) / (1024 * 1024);
    } else if (InternalDistributedSystem.getAnyInstance() == null) {
      this.localMaxMemoryExists = false;
      // fix 52033: return a non-negative, non-zero temporary placeholder for offHeapLocalMaxMemory
      return OFF_HEAP_LOCAL_MAX_MEMORY_PLACEHOLDER;
    } else {
      String offHeapSizeConfigValue =
          InternalDistributedSystem.getAnyInstance().getOriginalConfig().getOffHeapMemorySize();
      availableOffHeapMemoryInMB =
          OffHeapStorage.parseOffHeapMemorySize(offHeapSizeConfigValue) / (1024 * 1024);
    }

    if (availableOffHeapMemoryInMB > Integer.MAX_VALUE) {
      logger.warn(
          LocalizedMessage.create(
              LocalizedStrings
                  .PartitionAttributesImpl_REDUCED_LOCAL_MAX_MEMORY_FOR_PARTITION_ATTRIBUTES_WHEN_SETTING_FROM_AVAILABLE_OFF_HEAP_MEMORY_SIZE));
      return Integer.MAX_VALUE;
    }

    this.localMaxMemoryExists = true;
    return (int) availableOffHeapMemoryInMB;
  }
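
The conversion and clamp above can be isolated: divide the byte count down to megabytes and cap at Integer.MAX_VALUE so the cast to int cannot overflow. A small sketch of just that arithmetic (OffHeapSizing is a hypothetical name):

class OffHeapSizing {
  /** Converts a byte count to whole megabytes, clamped to the int range. */
  static int toMegabytesClamped(long bytes) {
    long mb = bytes / (1024 * 1024);
    return mb > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) mb;
  }
}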
Code Example #4
  public void removeIndexes(Region region) {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index Operation is not supported on the Server Region.");
    }

    // Removing indexes on a partitioned region will require sending a message and
    // removing all the local indexes on the local bucket regions.
    if (region instanceof PartitionedRegion) {
      try {
        // not remotely originated
        ((PartitionedRegion) region).removeIndexes(false);
      } catch (ForceReattemptException ex) {
        // will have to throw a proper exception relating to remove index.
        logger.info(
            LocalizedMessage.create(
                LocalizedStrings.DefaultQueryService_EXCEPTION_REMOVING_INDEX___0),
            ex);
      }
    }
    IndexManager indexManager = IndexUtils.getIndexManager(region, false);
    if (indexManager == null) return;

    indexManager.removeIndexes();
  }
Code Example #5
  public void removeIndex(Index index) {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index Operation is not supported on the Server Region.");
    }

    Region region = index.getRegion();
    if (region instanceof PartitionedRegion) {
      try {
        ((PartitionedRegion) region).removeIndex(index, false);
      } catch (ForceReattemptException ex) {
        logger.info(
            LocalizedMessage.create(
                LocalizedStrings.DefaultQueryService_EXCEPTION_REMOVING_INDEX___0),
            ex);
      }
      return;
    }
    // get write lock for indexes in replicated region
    // for PR lock will be taken in PartitionRegion.removeIndex
    ((AbstractIndex) index).acquireIndexWriteLockForRemove();
    try {
      IndexManager indexManager = ((LocalRegion) index.getRegion()).getIndexManager();
      indexManager.removeIndex(index);
    } finally {
      ((AbstractIndex) index).releaseIndexWriteLockForRemove();
    }
  }
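
The write lock here follows the standard acquire/try/finally shape so the lock is released even if removal throws. A sketch of the same discipline with a plain ReentrantReadWriteLock (the AbstractIndex lock methods above are Geode-specific; GuardedRemoval is a hypothetical name):

import java.util.concurrent.locks.ReentrantReadWriteLock;

class GuardedRemoval {
  private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();

  void remove(Runnable removal) {
    rwLock.writeLock().lock(); // block readers while the structure is mutated
    try {
      removal.run();
    } finally {
      rwLock.writeLock().unlock(); // always released, even if removal throws
    }
  }
}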
Code Example #6
 /**
  * deserializes the membership id, if necessary, and returns it. All access to membershipId should
  * be through this method
  */
 public DistributedMember getDistributedMember() {
   if (memberId == null) {
     ByteArrayInputStream bais = new ByteArrayInputStream(identity);
     DataInputStream dis = new VersionedDataInputStream(bais, Version.CURRENT);
     try {
       memberId = (DistributedMember) DataSerializer.readObject(dis);
     } catch (Exception e) {
       logger.error(
           LocalizedMessage.create(
               LocalizedStrings.ClientProxyMembershipID_UNABLE_TO_DESERIALIZE_MEMBERSHIP_ID),
           e);
     }
   }
   return memberId;
 }
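
This is a deserialize-once cache: the raw bytes are kept and only turned into an object on first use, then the result is reused. A minimal sketch of the pattern with plain JDK serialization (LazyIdentity is a hypothetical name; Geode uses DataSerializer and versioned streams instead):

import java.io.ByteArrayInputStream;
import java.io.ObjectInputStream;

class LazyIdentity {
  private final byte[] identity;
  private volatile Object memberId; // deserialized at most once, then cached

  LazyIdentity(byte[] identity) {
    this.identity = identity;
  }

  Object getMember() {
    if (memberId == null) {
      try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(identity))) {
        memberId = in.readObject();
      } catch (Exception e) {
        // as above: swallow after logging and return null rather than propagate
      }
    }
    return memberId;
  }
}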
Code Example #7
  @Override
  public void cmdExecute(Message msg, ServerConnection servConn, long start) throws IOException {
    // requiresResponse = true; NOT NEEDED... ALWAYS SEND ERROR RESPONSE

    logger.fatal(
        LocalizedMessage.create(
            LocalizedStrings.Default_0_UNKNOWN_MESSAGE_TYPE_1_WITH_TX_2_FROM_3,
            new Object[] {
              servConn.getName(),
              MessageType.getString(msg.getMessageType()),
              Integer.valueOf(msg.getTransactionId()),
              servConn.getSocketString()
            }));
    writeErrorResponse(msg, MessageType.UNKNOWN_MESSAGE_TYPE_ERROR, servConn);
    // responded = true; NOT NEEDED... ALWAYS SEND ERROR RESPONSE
  }
Code Example #8
 /* (non-Javadoc)
  * @see com.gemstone.gemfire.distributed.internal.DistributionMessage#process(com.gemstone.gemfire.distributed.internal.DistributionManager)
  */
 @Override
 protected void process(DistributionManager dm) {
   Throwable thr = null;
   JmxManagerProfile p = null;
   try {
     final GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
     if (cache != null && !cache.isClosed()) {
       final JmxManagerAdvisor adv = cache.getJmxManagerAdvisor();
       p = this.profile;
       if (p != null) {
         adv.putProfile(p);
       }
     } else {
       if (logger.isDebugEnabled()) {
         logger.debug("No cache {}", this);
       }
     }
   } catch (CancelException e) {
     if (logger.isDebugEnabled()) {
       logger.debug("Cache closed, ", this);
     }
   } catch (VirtualMachineError err) {
     SystemFailure.initiateFailure(err);
     // If this ever returns, rethrow the error.  We're poisoned
     // now, so don't let this thread continue.
     throw err;
   } catch (Throwable t) {
     // Whenever you catch Error or Throwable, you must also
     // catch VirtualMachineError (see above).  However, there is
     // _still_ a possibility that you are dealing with a cascading
     // error condition, so you also need to check to see if the JVM
     // is still usable:
     SystemFailure.checkFailure();
     thr = t;
   } finally {
     if (thr != null) {
       dm.getCancelCriterion().checkCancelInProgress(null);
       logger.info(
           LocalizedMessage.create(
               LocalizedStrings.ResourceAdvisor_MEMBER_CAUGHT_EXCEPTION_PROCESSING_PROFILE,
               new Object[] {p, toString()},
               thr));
     }
   }
 }
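
The catch ordering above is deliberate: VirtualMachineError is rethrown immediately, and only then does a blanket Throwable catch record the failure for reporting in finally. A stripped-down sketch of that ordering (SystemFailure.initiateFailure/checkFailure are Geode-specific and omitted here; DefensiveProcessor is a hypothetical name):

class DefensiveProcessor {
  void process(Runnable work) {
    Throwable thr = null;
    try {
      work.run();
    } catch (VirtualMachineError err) {
      // never swallow this: the JVM may already be poisoned
      throw err;
    } catch (Throwable t) {
      // record anything else and report it after cleanup
      thr = t;
    } finally {
      if (thr != null) {
        System.err.println("caught while processing: " + thr);
      }
    }
  }
}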
Code Example #9
 /** schedules the given expiration task */
 public ExpiryTask addExpiryTask(ExpiryTask task) {
   try {
     if (logger.isTraceEnabled()) {
       logger.trace(
           LocalizedMessage.create(
               LocalizedStrings.ExpirationScheduler_SCHEDULING__0__TO_FIRE_IN__1__MS,
               new Object[] {task, Long.valueOf(task.getExpiryMillis())}));
     }
     // To fix bug 52267 do not create a Date here; instead calculate the relative duration.
     timer.schedule(task, task.getExpiryMillis());
   } catch (EntryNotFoundException e) {
     // ignore - there are unsynchronized paths that allow an entry to
     // be destroyed out from under us.
     return null;
   } catch (IllegalStateException e) {
     // task must have been cancelled by another thread so don't schedule it
     return null;
   }
   return task;
 }
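
The bug-52267 comment is the key detail: scheduling with a relative delay avoids constructing an absolute Date from "now + delay", which misfires if the wall clock moves between computation and scheduling. A tiny sketch with java.util.Timer (RelativeScheduler is an illustrative name):

import java.util.Timer;
import java.util.TimerTask;

class RelativeScheduler {
  private final Timer timer = new Timer(true); // daemon timer thread

  /** Schedules with a relative delay rather than an absolute Date. */
  void schedule(TimerTask task, long delayMillis) {
    timer.schedule(task, delayMillis);
  }
}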
Code Example #10
  /**
   * Adds a new lru node for the entry between the current tail and head of the list.
   *
   * @param aNode the node to append; ignored if it is already linked into the list
   */
  public final void appendEntry(final LRUClockNode aNode) {
    synchronized (this.lock) {
      if (aNode.nextLRUNode() != null || aNode.prevLRUNode() != null) {
        return;
      }

      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
        logger.trace(
            LogMarker.LRU_CLOCK,
            LocalizedMessage.create(
                LocalizedStrings.NewLRUClockHand_ADDING_ANODE_TO_LRU_LIST, aNode));
      }
      aNode.setNextLRUNode(this.tail);
      this.tail.prevLRUNode().setNextLRUNode(aNode);
      aNode.setPrevLRUNode(this.tail.prevLRUNode());
      this.tail.setPrevLRUNode(aNode);

      this.size++;
    }
  }
Code Example #11
 /** remove an entry from the pipe... (marks it evicted to be skipped later) */
 public boolean unlinkEntry(LRUClockNode entry) {
   if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
     logger.trace(
         LogMarker.LRU_CLOCK,
         LocalizedMessage.create(LocalizedStrings.NewLRUClockHand_UNLINKENTRY_CALLED, entry));
   }
   entry.setEvicted();
   stats().incDestroys();
   synchronized (lock) {
     LRUClockNode next = entry.nextLRUNode();
     LRUClockNode prev = entry.prevLRUNode();
     if (next == null || prev == null) {
       // not in the list anymore.
       return false;
     }
     next.setPrevLRUNode(prev);
     prev.setNextLRUNode(next);
     entry.setNextLRUNode(null);
     entry.setPrevLRUNode(null);
   }
   return true;
 }
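
Examples #10 and #11 together implement an intrusive doubly-linked list with sentinel ends: append links a node just before the tail sentinel, and unlink uses null prev/next pointers as the "already removed" signal. A self-contained sketch of both operations under those assumptions (Node/SentinelList are hypothetical names):

class Node {
  Node prev, next;
}

class SentinelList {
  private final Node head = new Node();
  private final Node tail = new Node();

  SentinelList() {
    head.next = tail; // sentinels mean append/unlink never touch null ends
    tail.prev = head;
  }

  synchronized void append(Node n) {
    if (n.next != null || n.prev != null) {
      return; // already linked, as in appendEntry above
    }
    n.prev = tail.prev;
    n.next = tail;
    tail.prev.next = n;
    tail.prev = n;
  }

  synchronized boolean unlink(Node n) {
    if (n.next == null || n.prev == null) {
      return false; // already removed
    }
    n.prev.next = n.next;
    n.next.prev = n.prev;
    n.next = null;
    n.prev = null;
    return true;
  }
}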
Code Example #12
 @Override
 public void memberDeparted(final InternalDistributedMember id, final boolean crashed) {
   if (id != null) {
     if (removeMember(id, true)) {
       this.prce =
           new ForceReattemptException(
               LocalizedStrings
                   .PartitionMessage_PARTITIONRESPONSE_GOT_MEMBERDEPARTED_EVENT_FOR_0_CRASHED_1
                   .toLocalizedString(new Object[] {id, Boolean.valueOf(crashed)}));
     }
     checkIfDone();
   } else {
     Exception e =
         new Exception(
             LocalizedStrings.PartitionMessage_MEMBERDEPARTED_GOT_NULL_MEMBERID
                 .toLocalizedString());
     logger.info(
         LocalizedMessage.create(
             LocalizedStrings.PartitionMessage_MEMBERDEPARTED_GOT_NULL_MEMBERID_CRASHED_0,
             Boolean.valueOf(crashed)),
         e);
   }
 }
Code Example #13
 @Override
 protected void process(DistributionManager dm) {
   logger.trace(LogMarker.STATE_FLUSH_OP, "Processing {}", this);
   if (dm.getDistributionManagerId().equals(relayRecipient)) {
     // no need to send a relay request to this process - just send the
     // ack back to the sender
     StateStabilizedMessage ga = new StateStabilizedMessage();
     ga.sendingMember = relayRecipient;
     ga.setRecipient(this.getSender());
     ga.setProcessorId(processorId);
     dm.putOutgoing(ga);
   } else {
     // 1) wait for all messages based on the membership version (or older)
     //    at which the sender "joined" this region to be put on the pipe
     // 2) record the state of all communication channels from this process
     //    to the relay point
     // 3) send a stabilization message to the relay point that holds the
     //    communication channel state information
     StateStabilizationMessage gr = new StateStabilizationMessage();
     gr.setRecipient((InternalDistributedMember) relayRecipient);
     gr.requestingMember = this.getSender();
     gr.processorId = processorId;
     try {
       Set<DistributedRegion> regions;
       if (this.allRegions) {
         regions = getAllRegions(dm);
       } else {
         regions = Collections.singleton(this.getRegion(dm));
       }
       for (DistributedRegion r : regions) {
         if (r == null) {
           if (logger.isTraceEnabled(LogMarker.DM)) {
             logger.trace(LogMarker.DM, "Region not found - skipping channel state assessment");
           }
          } else {
           if (this.allRegions && r.doesNotDistribute()) {
             // no need to flush a region that does no distribution
             continue;
           }
           boolean initialized = r.isInitialized();
           if (initialized) {
            if (this.flushNewOps) {
              // force a new "view" so we can track current ops
              r.getDistributionAdvisor().forceNewMembershipVersion();
            }
             try {
               r.getDistributionAdvisor().waitForCurrentOperations();
             } catch (RegionDestroyedException e) {
               // continue with the next region
             }
           }
           boolean useMulticast =
               r.getMulticastEnabled() && r.getSystem().getConfig().getMcastPort() != 0;
           if (initialized) {
             Map channelStates =
                 dm.getMembershipManager().getMessageState(relayRecipient, useMulticast);
             if (gr.channelState != null) {
               gr.channelState.putAll(channelStates);
             } else {
               gr.channelState = channelStates;
             }
             if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)
                 && ((gr.channelState != null) && (gr.channelState.size() > 0))) {
               logger.trace(
                   LogMarker.STATE_FLUSH_OP,
                   "channel states: {}",
                   gr.channelStateDescription(gr.channelState));
             }
           }
         }
       }
     } catch (CancelException cce) {
       // cache is closed - no distribution advisor available for the region so nothing to do but
       // send the stabilization message
     } catch (Exception e) {
       logger.fatal(
           LocalizedMessage.create(
               LocalizedStrings
                   .StateFlushOperation_0__EXCEPTION_CAUGHT_WHILE_DETERMINING_CHANNEL_STATE,
               this),
           e);
     } catch (ThreadDeath td) {
       throw td;
     } catch (VirtualMachineError err) {
       SystemFailure.initiateFailure(err);
       // If this ever returns, rethrow the error.  We're poisoned
       // now, so don't let this thread continue.
       throw err;
     } catch (Throwable t) {
       // Whenever you catch Error or Throwable, you must also
       // catch VirtualMachineError (see above).  However, there is
       // _still_ a possibility that you are dealing with a cascading
       // error condition, so you also need to check to see if the JVM
       // is still usable:
       SystemFailure.checkFailure();
       logger.fatal(
           LocalizedMessage.create(
               LocalizedStrings
                   .StateFlushOperation_0__THROWABLE_CAUGHT_WHILE_DETERMINING_CHANNEL_STATE,
               this),
           t);
     } finally {
       if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
         logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", gr);
       }
       dm.putOutgoing(gr);
     }
   }
 }
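
Inside the loop, each initialized region's channel state is merged into the outgoing message: the first snapshot is adopted as-is, and later ones are layered on with putAll. A minimal sketch of that adopt-or-accumulate merge (the names are illustrative, not the real message fields):

import java.util.Map;

class ChannelStateAccumulator {
  private Map<Object, Object> channelState; // stays null until a region contributes

  void merge(Map<Object, Object> states) {
    if (channelState == null) {
      channelState = states; // adopt the first snapshot directly
    } else {
      channelState.putAll(states); // layer later snapshots on top
    }
  }
}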
Code Example #14
  /**
   * flush state to the given target
   *
   * @param recipients The members who may be making state changes to the region. This is typically
   *     taken from a CacheDistributionAdvisor membership set
   * @param target The member who should have all state flushed to it
   * @param processorType The execution processor type for the marker message that is sent to all
   *     members using the given region
   * @param flushNewOps normally only ops that were started before region profile exchange are
   *     flushed. Setting this to true causes the flush to wait for any started after the profile
   *     exchange as well.
   * @throws InterruptedException if the operation is interrupted, usually for shutdown
   * @return true if the state was flushed, false if not
   */
  public boolean flush(
      Set recipients, DistributedMember target, int processorType, boolean flushNewOps)
      throws InterruptedException {

    Set recips = recipients; // do not use recipients parameter past this point
    if (Thread.interrupted()) {
      throw new InterruptedException();
    }

    InternalDistributedMember myId = this.dm.getDistributionManagerId();

    if (!recips.contains(target) && !myId.equals(target)) {
      recips = new HashSet(recipients);
      recips.add(target);
    }
    // partial fix for bug 38773 - ensures that this cache will get both
    // a cache op and an adjunct message when creating a bucket region
    //    if (recips.size() < 2 && !myId.equals(target)) {
    //      return true; // no state to flush to a single holder of the region
    //    }
    StateMarkerMessage smm = new StateMarkerMessage();
    smm.relayRecipient = target;
    smm.processorType = processorType;
    smm.flushNewOps = flushNewOps;
    if (region == null) {
      smm.allRegions = true;
    } else {
      smm.regionPath = region.getFullPath();
    }
    smm.setRecipients(recips);

    StateFlushReplyProcessor gfprocessor = new StateFlushReplyProcessor(dm, recips, target);
    smm.processorId = gfprocessor.getProcessorId();
    if (region != null
        && region.isUsedForPartitionedRegionBucket()
        && region.getDistributionConfig().getAckSevereAlertThreshold() > 0) {
      smm.severeAlertEnabled = true;
      gfprocessor.enableSevereAlertProcessing();
    }
    if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
      logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {} with processor {}", smm, gfprocessor);
    }
    Set failures = this.dm.putOutgoing(smm);
    if (failures != null) {
      if (failures.contains(target)) {
        if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
          logger.trace(
              LogMarker.STATE_FLUSH_OP,
              "failed to send StateMarkerMessage to target {}; returning from flush without waiting for replies",
              target);
        }
        return false;
      }
      gfprocessor.messageNotSentTo(failures);
    }

    try {
      //      try { Thread.sleep(100); } catch (InterruptedException e) {
      // Thread.currentThread().interrupt(); } // DEBUGGING - stall before getting membership to
      // increase odds that target has left
      gfprocessor.waitForReplies();
      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
        logger.trace(LogMarker.STATE_FLUSH_OP, "Finished processing {}", smm);
      }
    } catch (ReplyException re) {
      logger.warn(
          LocalizedMessage.create(
              LocalizedStrings.StateFlushOperation_STATE_FLUSH_TERMINATED_WITH_EXCEPTION),
          re);
      return false;
    }
    return true;
  }
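
One detail worth isolating from flush(): the recipients set is copied only when the target actually needs to be added, so the caller's set is never mutated. A small generic sketch of that copy-on-write guard (withTarget is a hypothetical helper):

import java.util.HashSet;
import java.util.Set;

class RecipientSets {
  /** Returns a set guaranteed to contain target, copying only when necessary. */
  static <T> Set<T> withTarget(Set<T> recipients, T target, T self) {
    if (recipients.contains(target) || self.equals(target)) {
      return recipients; // already covered; reuse the caller's set untouched
    }
    Set<T> copy = new HashSet<>(recipients);
    copy.add(target);
    return copy;
  }
}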
Code Example #15
  @SuppressWarnings("unchecked")
  @Override
  public void cmdExecute(Message msg, ServerConnection servConn, long start)
      throws IOException, ClassNotFoundException, InterruptedException {
    CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
    String regionFullPath = msg.getPart(0).getString();
    String errMessage = "";
    if (regionFullPath == null) {
      logger.warn(
          LocalizedMessage.create(
              LocalizedStrings.GetClientPartitionAttributes_THE_INPUT_REGION_PATH_IS_NULL));
      errMessage =
          LocalizedStrings.GetClientPartitionAttributes_THE_INPUT_REGION_PATH_IS_NULL
              .toLocalizedString();
      writeErrorResponse(
          msg, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR, errMessage, servConn);
      servConn.setAsTrue(RESPONDED);
    } else {
      Region region = crHelper.getRegion(regionFullPath);
      if (region == null) {
        logger.warn(
            LocalizedMessage.create(
                LocalizedStrings
                    .GetClientPartitionAttributes_REGION_NOT_FOUND_FOR_SPECIFIED_REGION_PATH,
                regionFullPath));
        errMessage =
            LocalizedStrings.GetClientPartitionAttributes_REGION_NOT_FOUND.toLocalizedString()
                + regionFullPath;
        writeErrorResponse(
            msg, MessageType.GET_CLIENT_PARTITION_ATTRIBUTES_ERROR, errMessage, servConn);
        servConn.setAsTrue(RESPONDED);
      } else {
        try {
          Message responseMsg = servConn.getResponseMessage();
          responseMsg.setTransactionId(msg.getTransactionId());
          responseMsg.setMessageType(MessageType.RESPONSE_CLIENT_PARTITION_ATTRIBUTES);

          PartitionedRegion prRgion = (PartitionedRegion) region;

          PartitionResolver partitionResolver = prRgion.getPartitionResolver();
          int numParts = 2; // MINIMUM PARTS
          if (partitionResolver != null) {
            numParts++;
          }
          responseMsg.setNumberOfParts(numParts);
          // PART 1
          responseMsg.addObjPart(prRgion.getTotalNumberOfBuckets());

          // PART 2
          if (partitionResolver != null) {
            // strip the leading "class " from Class.toString() to send just the class name
            responseMsg.addObjPart(partitionResolver.getClass().toString().substring(6));
          }

          // PART 3
          String leaderRegionPath = null;
          PartitionedRegion leaderRegion = null;
          String leaderRegionName = prRgion.getColocatedWith();
          if (leaderRegionName != null) {
            Cache cache = prRgion.getCache();
            while (leaderRegionName != null) {
              leaderRegion = (PartitionedRegion) cache.getRegion(leaderRegionName);
              if (leaderRegion.getColocatedWith() == null) {
                leaderRegionPath = leaderRegion.getFullPath();
                break;
              } else {
                leaderRegionName = leaderRegion.getColocatedWith();
              }
            }
          }
          responseMsg.addObjPart(leaderRegionPath);
          responseMsg.send();
          msg.flush();
        } catch (Exception e) {
          writeException(msg, e, false, servConn);
        } finally {
          servConn.setAsTrue(Command.RESPONDED);
        }
      }
    }
  }
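
The while loop above walks the colocation chain until it finds the leader region, the one colocated with nothing. Reduced to its essence (Colocated/parent are hypothetical stand-ins for getColocatedWith plus the cache lookup):

class ColocationChains {
  interface Colocated {
    Colocated parent(); // null when this is the leader
  }

  /** Follows parent links until the node with no parent, the chain's leader. */
  static Colocated findLeader(Colocated start) {
    Colocated current = start;
    while (current.parent() != null) {
      current = current.parent();
    }
    return current;
  }
}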
Code Example #16
  /**
   * return the Entry that is considered least recently used. The entry will no longer be in the
   * pipe (unless it is the last empty marker).
   */
  public LRUClockNode getLRUEntry() {
    long numEvals = 0;

    for (;;) {
      LRUClockNode aNode = getHeadEntry();

      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
        logger.trace(LogMarker.LRU_CLOCK, "lru considering {}", aNode);
      }

      if (aNode == null) { // hit the end of the list
        this.stats.incEvaluations(numEvals);
        return null;
      }

      numEvals++;

      // If this Entry is part of a transaction, skip it since
      // eviction should not cause commit conflicts
      synchronized (aNode) {
        if (aNode instanceof AbstractRegionEntry) {
          if (((AbstractRegionEntry) aNode).isInUseByTransaction()) {
            if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
              logger.trace(
                  LogMarker.LRU_CLOCK,
                  LocalizedMessage.create(
                      LocalizedStrings
                          .NewLRUClockHand_REMOVING_TRANSACTIONAL_ENTRY_FROM_CONSIDERATION));
            }
            continue;
          }
        }
        if (aNode.testEvicted()) {
          if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
            logger.trace(
                LogMarker.LRU_CLOCK,
                LocalizedMessage.create(LocalizedStrings.NewLRUClockHand_DISCARDING_EVICTED_ENTRY));
          }
          continue;
        }

        // At this point we have any acceptable entry.  Now
        // use various criteria to determine if it's good enough
        // to return, or if we need to add it back to the list.
        if (maxEntries > 0 && numEvals > maxEntries) {
          if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
            logger.trace(
                LogMarker.LRU_CLOCK,
                LocalizedMessage.create(
                    LocalizedStrings.NewLRUClockHand_GREEDILY_PICKING_AN_AVAILABLE_ENTRY));
          }
          this.stats.incGreedyReturns(1);
          // fall through, return this node
        } else if (aNode.testRecentlyUsed()) {
          // Throw it back, it's in the working set
          aNode.unsetRecentlyUsed();
          // aNode.setInList();
          if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
            logger.trace(
                LogMarker.LRU_CLOCK,
                LocalizedMessage.create(
                    LocalizedStrings.NewLRUClockHand_SKIPPING_RECENTLY_USED_ENTRY, aNode));
          }
          appendEntry(aNode);
          continue; // keep looking
        } else {
          if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
            logger.trace(
                LogMarker.LRU_CLOCK,
                LocalizedMessage.create(
                    LocalizedStrings.NewLRUClockHand_RETURNING_UNUSED_ENTRY, aNode));
          }
          // fall through, return this node
        }

        // Return the current node.
        this.stats.incEvaluations(numEvals);
        return aNode;
      } // synchronized
    } // for
  }
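
getLRUEntry is a second-chance (clock) scan: recently used entries have their bit cleared and are appended back, transactional and already-evicted entries are skipped, and the first cold entry wins. A compact sketch of the core second-chance loop, without the transaction/eviction checks and the greedy maxEntries cutoff (ClockEviction is a hypothetical name):

import java.util.Deque;

class ClockEviction {
  static final class Entry {
    boolean recentlyUsed;
  }

  /** Second-chance scan: hot entries lose their bit and go to the back;
      the first cold entry found is the eviction victim. */
  static Entry pickVictim(Deque<Entry> queue) {
    Entry e;
    while ((e = queue.poll()) != null) {
      if (e.recentlyUsed) {
        e.recentlyUsed = false; // second chance: demote, requeue, keep scanning
        queue.offer(e);
      } else {
        return e;
      }
    }
    return null; // list exhausted, nothing to evict
  }
}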