/**
 * Callback for backup update response.
 *
 * @param nodeId Backup node ID.
 * @param updateRes Update response.
 */
public void onResult(UUID nodeId, GridDhtAtomicUpdateResponse updateRes) {
    if (log.isDebugEnabled())
        log.debug("Received DHT atomic update future result [nodeId=" + nodeId +
            ", updateRes=" + updateRes + ']');

    if (updateRes.error() != null)
        this.updateRes.addFailedKeys(updateRes.failedKeys(), updateRes.error());

    if (!F.isEmpty(updateRes.nearEvicted())) {
        for (KeyCacheObject key : updateRes.nearEvicted()) {
            GridDhtCacheEntry entry = nearReadersEntries.get(key);

            try {
                entry.removeReader(nodeId, updateRes.messageId());
            }
            catch (GridCacheEntryRemovedException e) {
                if (log.isDebugEnabled())
                    log.debug("Entry with evicted reader was removed [entry=" + entry + ", err=" + e + ']');
            }
        }
    }

    registerResponse(nodeId);
}
/** {@inheritDoc} */
@SuppressWarnings({"CatchGenericClass"})
@Override public final void run() {
    try {
        body();
    }
    catch (InterruptedException e) {
        if (log.isDebugEnabled())
            log.debug("Caught interrupted exception: " + e);

        Thread.currentThread().interrupt();
    }
    // Catch everything to make sure that it gets logged properly and
    // not to kill any threads from the underlying thread pool.
    catch (Throwable e) {
        U.error(log, "Runtime error caught during grid runnable execution: " + this, e);

        if (e instanceof Error)
            throw e;
    }
    finally {
        cleanup();

        if (log.isDebugEnabled()) {
            if (isInterrupted())
                log.debug("Grid runnable finished due to interruption without cancellation: " + getName());
            else
                log.debug("Grid runnable finished normally: " + getName());
        }
    }
}
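// A minimal, hypothetical sketch of the same "catch everything, restore the
// interrupt, rethrow Errors" pattern used by run() above. The names
// (SafeRunnable, work()) are illustrative, not part of the Ignite API.
public abstract class SafeRunnable implements Runnable {
    /** Actual work; may block and throw. */
    protected abstract void work() throws InterruptedException;

    @Override public final void run() {
        try {
            work();
        }
        catch (InterruptedException e) {
            // Restore the interrupt flag so the owning pool can observe it.
            Thread.currentThread().interrupt();
        }
        catch (Throwable e) {
            // Log instead of silently killing the pool thread...
            e.printStackTrace();

            // ...but never swallow JVM errors such as OutOfMemoryError.
            if (e instanceof Error)
                throw (Error)e;
        }
    }
}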
/**
 * Waits for renting partitions.
 *
 * @return {@code True} if mapping was changed.
 * @throws IgniteCheckedException If failed.
 */
private boolean waitForRent() throws IgniteCheckedException {
    boolean changed = false;

    // Synchronously wait for all renting partitions to complete.
    for (Iterator<GridDhtLocalPartition> it = locParts.values().iterator(); it.hasNext(); ) {
        GridDhtLocalPartition p = it.next();

        GridDhtPartitionState state = p.state();

        if (state == RENTING || state == EVICTED) {
            if (log.isDebugEnabled())
                log.debug("Waiting for renting partition: " + p);

            // Wait for partition to empty out.
            p.rent(true).get();

            if (log.isDebugEnabled())
                log.debug("Finished waiting for renting partition: " + p);

            // Remove evicted partition.
            it.remove();

            changed = true;
        }
    }

    return changed;
}
/**
 * Creates new HTTP requests handler.
 *
 * @param hnd Handler.
 * @param authChecker Authentication checking closure.
 * @param log Logger.
 */
GridJettyRestHandler(GridRestProtocolHandler hnd, IgniteClosure<String, Boolean> authChecker, IgniteLogger log) {
    assert hnd != null;
    assert log != null;

    this.hnd = hnd;
    this.log = log;
    this.authChecker = authChecker;
    this.jsonMapper = new GridJettyObjectMapper();

    // Init default page and favicon.
    try {
        initDefaultPage();

        if (log.isDebugEnabled())
            log.debug("Initialized default page.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize default page: " + e.getMessage());
    }

    try {
        initFavicon();

        if (log.isDebugEnabled())
            log.debug(favicon != null ? "Initialized favicon, size: " + favicon.length : "Favicon is null.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize favicon: " + e.getMessage());
    }
}
/**
 * @param cacheCtx Cache context.
 * @throws IgniteCheckedException If failed.
 */
private void initTopology(GridCacheContext cacheCtx) throws IgniteCheckedException {
    if (stopping(cacheCtx.cacheId()))
        return;

    if (canCalculateAffinity(cacheCtx)) {
        if (log.isDebugEnabled())
            log.debug("Will recalculate affinity [locNodeId=" + cctx.localNodeId() +
                ", exchId=" + exchId + ']');

        cacheCtx.affinity().calculateAffinity(exchId.topologyVersion(), discoEvt);
    }
    else {
        if (log.isDebugEnabled())
            log.debug("Will request affinity from remote node [locNodeId=" + cctx.localNodeId() +
                ", exchId=" + exchId + ']');

        // Fetch affinity assignment from remote node.
        GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(cacheCtx,
            exchId.topologyVersion(),
            CU.affinityNodes(cacheCtx, exchId.topologyVersion()));

        fetchFut.init();

        List<List<ClusterNode>> affAssignment = fetchFut.get();

        if (log.isDebugEnabled())
            log.debug("Fetched affinity from remote node, initializing affinity assignment [locNodeId=" +
                cctx.localNodeId() + ", topVer=" + exchId.topologyVersion() + ']');

        if (affAssignment == null) {
            affAssignment = new ArrayList<>(cacheCtx.affinity().partitions());

            List<ClusterNode> empty = Collections.emptyList();

            for (int i = 0; i < cacheCtx.affinity().partitions(); i++)
                affAssignment.add(empty);
        }

        cacheCtx.affinity().initializeAffinity(exchId.topologyVersion(), affAssignment);
    }
}
/**
 * @param nodeId Sender.
 * @param res Result.
 */
public void onResult(UUID nodeId, GridDhtTxFinishResponse res) {
    if (!isDone()) {
        boolean found = false;

        for (IgniteInternalFuture<IgniteInternalTx> fut : futures()) {
            if (isMini(fut)) {
                MiniFuture f = (MiniFuture)fut;

                if (f.futureId().equals(res.miniId())) {
                    found = true;

                    assert f.node().id().equals(nodeId);

                    f.onResult(res);
                }
            }
        }

        if (!found) {
            if (msgLog.isDebugEnabled()) {
                msgLog.debug("DHT finish fut, failed to find mini future [txId=" + tx.nearXidVersion() +
                    ", dhtTxId=" + tx.xidVersion() +
                    ", node=" + nodeId +
                    ", res=" + res +
                    ", fut=" + this + ']');
            }
        }
    }
    else {
        if (msgLog.isDebugEnabled()) {
            msgLog.debug("DHT finish fut, response for finished future [txId=" + tx.nearXidVersion() +
                ", dhtTxId=" + tx.xidVersion() +
                ", node=" + nodeId +
                ", res=" + res +
                ", fut=" + this + ']');
        }
    }
}
/**
 * @param updateSeq Update sequence.
 * @return {@code True} if entry has been transitioned to state EVICTED.
 */
boolean tryEvict(boolean updateSeq) {
    if (state.getReference() != RENTING || state.getStamp() != 0 || groupReserved())
        return false;

    // Attempt to evict partition entries from cache.
    clearAll();

    if (map.isEmpty() && state.compareAndSet(RENTING, EVICTED, 0, 0)) {
        if (log.isDebugEnabled())
            log.debug("Evicted partition: " + this);

        if (!GridQueryProcessor.isEnabled(cctx.config()))
            clearSwap();

        if (cctx.isDrEnabled())
            cctx.dr().partitionEvicted(id);

        cctx.dataStructures().onPartitionEvicted(id);

        rent.onDone();

        ((GridDhtPreloader)cctx.preloader()).onPartitionEvicted(this, updateSeq);

        clearDeferredDeletes();

        return true;
    }

    return false;
}
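// Hypothetical sketch of the state/stamp convention behind tryEvict() above:
// the partition lifecycle lives in an AtomicStampedReference where the
// reference is the state and the stamp counts reservations. Eviction may only
// proceed when the state is RENTING with zero reservations, and the transition
// to EVICTED must be a single CAS so a concurrent reserve() cannot slip in
// between the check and the change. All names here are illustrative.
import java.util.concurrent.atomic.AtomicStampedReference;

class PartitionStateSketch {
    enum State { OWNING, RENTING, EVICTED }

    private final AtomicStampedReference<State> state =
        new AtomicStampedReference<>(State.RENTING, 0);

    /** @return {@code true} if the partition was atomically evicted. */
    boolean tryEvict() {
        // Reference and stamp are checked and swapped atomically:
        // fails if a reservation (stamp > 0) appeared concurrently.
        return state.compareAndSet(State.RENTING, State.EVICTED, 0, 0);
    }

    /** @return {@code true} if a reservation was taken (blocks eviction). */
    boolean reserve() {
        while (true) {
            int[] stamp = new int[1];

            State s = state.get(stamp);

            if (s == State.EVICTED)
                return false;

            if (state.compareAndSet(s, s, stamp[0], stamp[0] + 1))
                return true;
        }
    }
}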
/** Sends requests to remote nodes. */
public void map() {
    if (!mappings.isEmpty()) {
        for (GridDhtAtomicUpdateRequest req : mappings.values()) {
            try {
                if (log.isDebugEnabled())
                    log.debug("Sending DHT atomic update request [nodeId=" + req.nodeId() +
                        ", req=" + req + ']');

                cctx.io().send(req.nodeId(), req, cctx.ioPolicy());
            }
            catch (ClusterTopologyCheckedException ignored) {
                U.warn(log, "Failed to send update request to backup node because it left grid: " +
                    req.nodeId());

                registerResponse(req.nodeId());
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Failed to send update request to backup node (did node leave the grid?): " +
                    req.nodeId(), e);

                registerResponse(req.nodeId());
            }
        }
    }
    else
        onDone();

    // Send response right away if no ACKs from backups are required.
    // Backups will send ACKs anyway; the future will be completed after all backups have replied.
    if (updateReq.writeSynchronizationMode() != FULL_SYNC)
        completionCb.apply(updateReq, updateRes);
}
/**
 * @param updateSeq Update sequence.
 * @return Future for evict attempt.
 */
IgniteInternalFuture<Boolean> tryEvictAsync(boolean updateSeq) {
    if (map.isEmpty() && !GridQueryProcessor.isEnabled(cctx.config()) &&
        state.compareAndSet(RENTING, EVICTED, 0, 0)) {
        if (log.isDebugEnabled())
            log.debug("Evicted partition: " + this);

        clearSwap();

        if (cctx.isDrEnabled())
            cctx.dr().partitionEvicted(id);

        cctx.dataStructures().onPartitionEvicted(id);

        rent.onDone();

        ((GridDhtPreloader)cctx.preloader()).onPartitionEvicted(this, updateSeq);

        clearDeferredDeletes();

        return new GridFinishedFuture<>(true);
    }

    return cctx.closures().callLocalSafe(
        new GPC<Boolean>() {
            @Override public Boolean call() {
                return tryEvict(true);
            }
        },
        /*system pool*/true);
}
/**
 * @param nodes Nodes.
 * @param id ID.
 * @throws IgniteCheckedException If failed.
 */
private void sendAllPartitions(Collection<? extends ClusterNode> nodes, GridDhtPartitionExchangeId id)
    throws IgniteCheckedException {
    GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(id, lastVer.get(), id.topologyVersion());

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (!cacheCtx.isLocal()) {
            AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

            boolean ready = startTopVer == null || startTopVer.compareTo(id.topologyVersion()) <= 0;

            if (ready)
                m.addFullPartitionsMap(cacheCtx.cacheId(), cacheCtx.topology().partitionMap(true));
        }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
        m.addFullPartitionsMap(top.cacheId(), top.partitionMap(true));

    if (log.isDebugEnabled())
        log.debug("Sending full partition map [nodeIds=" + F.viewReadOnly(nodes, F.node2id()) +
            ", exchId=" + exchId + ", msg=" + m + ']');

    cctx.io().safeSend(nodes, m, SYSTEM_POOL, null);
}
/** {@inheritDoc} */
@Override public boolean onNodeLeft(UUID nodeId) {
    if (log.isDebugEnabled())
        log.debug("Processing node leave event [fut=" + this + ", nodeId=" + nodeId + ']');

    return registerResponse(nodeId);
}
/** Sends local partition map to the oldest node in topology. */
private void sendPartitions() {
    ClusterNode oldestNode = this.oldestNode.get();

    try {
        sendLocalPartitions(oldestNode, exchId);
    }
    catch (ClusterTopologyCheckedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Oldest node left during partition exchange [nodeId=" + oldestNode.id() +
                ", exchId=" + exchId + ']');
    }
    catch (IgniteCheckedException e) {
        scheduleRecheck();

        U.error(log, "Failed to send local partitions to oldest node (will retry after timeout) [oldestNodeId=" +
            oldestNode.id() + ", exchId=" + exchId + ']', e);
    }
}
/**
 * @param cctx Cache context.
 * @param busyLock Busy lock.
 * @param exchId Exchange ID.
 * @param reqs Cache change requests.
 */
public GridDhtPartitionsExchangeFuture(
    GridCacheSharedContext cctx,
    ReadWriteLock busyLock,
    GridDhtPartitionExchangeId exchId,
    Collection<DynamicCacheChangeRequest> reqs
) {
    assert busyLock != null;
    assert exchId != null;

    dummy = false;
    forcePreload = false;
    reassign = false;

    this.cctx = cctx;
    this.busyLock = busyLock;
    this.exchId = exchId;
    this.reqs = reqs;

    log = cctx.logger(getClass());

    initFut = new GridFutureAdapter<>();

    if (log.isDebugEnabled())
        log.debug("Creating exchange future [localNode=" + cctx.localNodeId() + ", fut=" + this + ']');
}
/** @return {@code true} if entered busy state. */
private boolean enterBusy() {
    if (busyLock.readLock().tryLock())
        return true;

    if (log.isDebugEnabled())
        log.debug("Failed to enter busy state (exchanger is stopping): " + this);

    return false;
}
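// Minimal sketch (hypothetical names) of the busy-lock idiom behind enterBusy():
// regular operations take the read lock non-exclusively, while shutdown takes
// the write lock once, after which every tryLock() fails and callers back off.
import java.util.concurrent.locks.ReentrantReadWriteLock;

class BusyLockSketch {
    private final ReentrantReadWriteLock busyLock = new ReentrantReadWriteLock();

    boolean enterBusy() {
        // Non-blocking: returns false once stop() holds the write lock.
        return busyLock.readLock().tryLock();
    }

    void leaveBusy() {
        busyLock.readLock().unlock();
    }

    void stop() {
        // Blocks until all in-flight operations release their read locks,
        // then permanently prevents new ones from entering.
        busyLock.writeLock().lock();
    }
}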
/**
 * @param p Partition number.
 * @param topVer Topology version.
 * @param create Create flag.
 * @param updateSeq Update sequence.
 * @return Local partition.
 */
private GridDhtLocalPartition localPartition(int p,
    AffinityTopologyVersion topVer,
    boolean create,
    boolean updateSeq) {
    while (true) {
        boolean belongs = cctx.affinity().localNode(p, topVer);

        GridDhtLocalPartition loc = locParts.get(p);

        if (loc != null && loc.state() == EVICTED) {
            locParts.remove(p, loc);

            if (!create)
                return null;

            if (!belongs)
                throw new GridDhtInvalidPartitionException(p, "Adding entry to evicted partition [part=" + p +
                    ", topVer=" + topVer + ", this.topVer=" + this.topVer + ']');

            continue;
        }

        if (loc == null && create) {
            if (!belongs)
                throw new GridDhtInvalidPartitionException(p, "Creating partition which does not belong [part=" +
                    p + ", topVer=" + topVer + ", this.topVer=" + this.topVer + ']');

            lock.writeLock().lock();

            try {
                GridDhtLocalPartition old = locParts.putIfAbsent(p, loc = new GridDhtLocalPartition(cctx, p));

                if (old != null)
                    loc = old;
                else {
                    if (updateSeq)
                        this.updateSeq.incrementAndGet();

                    if (log.isDebugEnabled())
                        log.debug("Created local partition: " + loc);
                }
            }
            finally {
                lock.writeLock().unlock();
            }
        }

        return loc;
    }
}
/** {@inheritDoc} */
@Override public final void onKernalStop(boolean cancel) {
    if (!starting.get())
        // Ignoring attempt to stop manager that has never been started.
        return;

    onKernalStop0(cancel);

    if (log != null && log.isDebugEnabled())
        log.debug(kernalStopInfo());
}
/** {@inheritDoc} */
@Override public final void stop(boolean cancel) {
    if (!starting.get() || !stop.compareAndSet(false, true))
        // Ignoring attempt to stop manager that has never been started or is already stopped.
        return;

    stop0(cancel);

    if (log != null && log.isDebugEnabled())
        log.debug(stopInfo());
}
/**
 * @param log Logger.
 * @param time Time.
 * @param msg Message.
 */
private static void log0(@Nullable IgniteLogger log, long time, String msg) {
    if (log != null) {
        if (log.isDebugEnabled())
            log.debug(msg);
        else
            log.warning(msg);
    }
    else
        X.println(String.format("[%s][%s]%s",
            DEBUG_DATE_FMT.get().format(time),
            Thread.currentThread().getName(),
            msg));
}
/** Performs cleanup of the trash directory. */
private void delete() {
    IgfsFileInfo info = null;

    try {
        info = meta.info(TRASH_ID);
    }
    catch (ClusterTopologyServerNotFoundException e) {
        LT.warn(log, e, "Server nodes not found.");
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Cannot obtain trash directory info.", e);
    }

    if (info != null) {
        for (Map.Entry<String, IgfsListingEntry> entry : info.listing().entrySet()) {
            IgniteUuid fileId = entry.getValue().fileId();

            if (log.isDebugEnabled())
                log.debug("Deleting IGFS trash entry [name=" + entry.getKey() + ", fileId=" + fileId + ']');

            try {
                if (!cancelled) {
                    if (delete(entry.getKey(), fileId)) {
                        if (log.isDebugEnabled())
                            log.debug("Sending delete confirmation message [name=" + entry.getKey() +
                                ", fileId=" + fileId + ']');

                        sendDeleteMessage(new IgfsDeleteMessage(fileId));
                    }
                }
                else
                    break;
            }
            catch (IgniteInterruptedCheckedException ignored) {
                // Ignore this exception while stopping.
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Failed to delete entry from the trash directory: " + entry.getKey(), e);

                sendDeleteMessage(new IgfsDeleteMessage(fileId, e));
            }
        }
    }
}
/** @param cand Remote candidate. */
private void addRemote(GridCacheMvccCandidate cand) {
    assert !cand.local();

    if (log.isDebugEnabled())
        log.debug("Adding remote candidate [mvcc=" + this + ", cand=" + cand + ']');

    cctx.versions().onReceived(cand.nodeId(), cand.version());

    add0(cand);
}
/**
 * @param rmtReducer Optional reducer.
 * @param rmtTransform Optional transformer.
 * @param args Arguments.
 * @return Future.
 */
@SuppressWarnings("IfMayBeConditional")
private <R> CacheQueryFuture<R> execute(@Nullable IgniteReducer<T, R> rmtReducer,
    @Nullable IgniteClosure<T, R> rmtTransform,
    @Nullable Object... args) {
    Collection<ClusterNode> nodes = nodes();

    cctx.checkSecurity(SecurityPermission.CACHE_READ);

    if (nodes.isEmpty())
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), new ClusterGroupEmptyCheckedException());

    if (log.isDebugEnabled())
        log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
        try {
            cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
            cctx.deploy().registerClasses(args);
        }
        catch (IgniteCheckedException e) {
            return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
        }
    }

    if (subjId == null)
        subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    final GridCacheQueryBean bean = new GridCacheQueryBean(this,
        (IgniteReducer<Object, Object>)rmtReducer,
        (IgniteClosure<Object, Object>)rmtTransform,
        args);

    final GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS || type == SPI)
        return (CacheQueryFuture<R>)(loc ? qryMgr.queryFieldsLocal(bean) :
            qryMgr.queryFieldsDistributed(bean, nodes));
    else if (type == SCAN && part != null && nodes.size() > 1)
        return new CacheQueryFallbackFuture<>(nodes, bean, qryMgr);
    else
        return (CacheQueryFuture<R>)(loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes));
}
/**
 * @param desc Sender process descriptor.
 * @param msg Communication message.
 */
protected void notifyListener(HadoopProcessDescriptor desc, HadoopMessage msg) {
    HadoopMessageListener lsnr = this.lsnr;

    if (lsnr != null)
        // Notify listener of a new message.
        lsnr.onMessageReceived(desc, msg);
    else if (log.isDebugEnabled())
        log.debug("Received communication message without any registered listeners (will ignore) " +
            "[senderProcDesc=" + desc + ", msg=" + msg + ']');
}
/** {@inheritDoc} */
@Override public final void start(GridCacheSharedContext<K, V> cctx) throws IgniteCheckedException {
    if (!starting.compareAndSet(false, true))
        assert false : "Method start is called more than once for manager: " + this;

    assert cctx != null;

    this.cctx = cctx;

    log = cctx.logger(getClass());

    start0();

    if (log.isDebugEnabled())
        log.debug(startInfo());
}
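// Illustrative sketch of the one-shot lifecycle guards used by start() above
// and the stop()/onKernalStop() methods earlier: AtomicBoolean CAS ensures each
// transition runs exactly once even under concurrent calls. Names are
// hypothetical, not the Ignite adapter API.
import java.util.concurrent.atomic.AtomicBoolean;

class StartOnceSketch {
    private final AtomicBoolean starting = new AtomicBoolean();
    private final AtomicBoolean stop = new AtomicBoolean();

    void start() {
        // Only the first caller wins the CAS; a second call is a usage error.
        if (!starting.compareAndSet(false, true))
            throw new IllegalStateException("start() called more than once.");

        // ... actual startup ...
    }

    void stop() {
        // Ignore stop of a component that never started or already stopped.
        if (!starting.get() || !this.stop.compareAndSet(false, true))
            return;

        // ... actual shutdown ...
    }
}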
/**
 * Sets remote candidate to done.
 *
 * @param ver Version.
 * @param pending Pending versions.
 * @param committed Committed versions.
 * @param rolledback Rolled back versions.
 * @return Lock owner.
 */
@Nullable public CacheLockCandidates doneRemote(GridCacheVersion ver,
    Collection<GridCacheVersion> pending,
    Collection<GridCacheVersion> committed,
    Collection<GridCacheVersion> rolledback) {
    assert ver != null;

    if (log.isDebugEnabled())
        log.debug("Setting remote candidate to done [mvcc=" + this + ", ver=" + ver + ']');

    // Check remote candidate.
    GridCacheMvccCandidate cand = candidate(rmts, ver);

    if (cand != null) {
        assert rmts != null;
        assert !rmts.isEmpty();
        assert !cand.local() : "Remote candidate is marked as local: " + cand;
        assert !cand.nearLocal() : "Remote candidate is marked as near local: " + cand;

        cand.setOwner();
        cand.setUsed();

        List<GridCacheMvccCandidate> mvAfter = null;

        for (ListIterator<GridCacheMvccCandidate> it = rmts.listIterator(); it.hasNext(); ) {
            GridCacheMvccCandidate c = it.next();

            assert !c.nearLocal() : "Remote candidate marked as near local: " + c;

            if (c == cand) {
                if (mvAfter != null)
                    for (GridCacheMvccCandidate mv : mvAfter)
                        it.add(mv);

                break;
            }
            else if (!committed.contains(c.version()) && !rolledback.contains(c.version()) &&
                pending.contains(c.version())) {
                it.remove();

                if (mvAfter == null)
                    mvAfter = new LinkedList<>();

                mvAfter.add(c);
            }
        }
    }

    return allOwners();
}
/**
 * @param nodeId Node ID.
 * @param retryCnt Number of retries.
 */
private void sendAllPartitions(final UUID nodeId, final int retryCnt) {
    ClusterNode n = cctx.node(nodeId);

    try {
        if (n != null)
            sendAllPartitions(F.asList(n), exchId);
    }
    catch (IgniteCheckedException e) {
        if (e instanceof ClusterTopologyCheckedException || !cctx.discovery().alive(n)) {
            if (log.isDebugEnabled())
                log.debug("Failed to send full partition map to node, node left grid " +
                    "[rmtNode=" + nodeId + ", exchangeId=" + exchId + ']');

            return;
        }

        if (retryCnt > 0) {
            long timeout = cctx.gridConfig().getNetworkSendRetryDelay();

            LT.error(log, e, "Failed to send full partition map to node (will retry after timeout) " +
                "[node=" + nodeId + ", exchangeId=" + exchId + ", timeout=" + timeout + ']');

            cctx.time().addTimeoutObject(new GridTimeoutObjectAdapter(timeout) {
                @Override public void onTimeout() {
                    sendAllPartitions(nodeId, retryCnt - 1);
                }
            });
        }
        else
            U.error(log, "Failed to send full partition map [node=" + n + ", exchangeId=" + exchId + ']', e);
    }
}
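// Illustrative sketch of the timer-based retry in sendAllPartitions() above:
// on failure the send is re-scheduled after a delay with a decremented retry
// budget instead of blocking the calling thread. Names (RetrySketch,
// sendWithRetry) are hypothetical; a scheduled executor stands in for the
// grid timeout processor.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class RetrySketch {
    private final ScheduledExecutorService timer =
        Executors.newSingleThreadScheduledExecutor();

    void sendWithRetry(Runnable send, int retriesLeft, long delayMs) {
        try {
            send.run();
        }
        catch (RuntimeException e) {
            if (retriesLeft > 0)
                // Re-schedule asynchronously rather than sleeping in place.
                timer.schedule(
                    () -> sendWithRetry(send, retriesLeft - 1, delayMs),
                    delayMs, TimeUnit.MILLISECONDS);
            else
                e.printStackTrace(); // Out of retries: report and give up.
        }
    }
}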
/** {@inheritDoc} */
@Override public void handle(String target, Request req, HttpServletRequest srvReq, HttpServletResponse res)
    throws IOException, ServletException {
    if (log.isDebugEnabled())
        log.debug("Handling request [target=" + target + ", req=" + req + ", srvReq=" + srvReq + ']');

    if (target.startsWith("/ignite")) {
        processRequest(target, srvReq, res);

        req.setHandled(true);
    }
    else if (target.startsWith("/favicon.ico")) {
        if (favicon == null) {
            res.setStatus(HttpServletResponse.SC_NOT_FOUND);

            req.setHandled(true);

            return;
        }

        res.setStatus(HttpServletResponse.SC_OK);
        res.setContentType("image/x-icon");

        res.getOutputStream().write(favicon);
        res.getOutputStream().flush();

        req.setHandled(true);
    }
    else {
        if (dfltPage == null) {
            res.setStatus(HttpServletResponse.SC_NOT_FOUND);

            req.setHandled(true);

            return;
        }

        res.setStatus(HttpServletResponse.SC_OK);
        res.setContentType("text/html");

        res.getWriter().write(dfltPage);
        res.getWriter().flush();

        req.setHandled(true);
    }
}
/** {@inheritDoc} */
@Override public boolean onDone(AffinityTopologyVersion res, Throwable err) {
    Map<Integer, Boolean> m = null;

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (cacheCtx.config().getTopologyValidator() != null && !CU.isSystemCache(cacheCtx.name())) {
            if (m == null)
                m = new HashMap<>();

            m.put(cacheCtx.cacheId(),
                cacheCtx.config().getTopologyValidator().validate(discoEvt.topologyNodes()));
        }
    }

    cacheValidRes = m != null ? m : Collections.<Integer, Boolean>emptyMap();

    cctx.cache().onExchangeDone(exchId.topologyVersion(), reqs, err);

    cctx.exchange().onExchangeDone(this, err);

    if (super.onDone(res, err) && !dummy && !forcePreload) {
        if (log.isDebugEnabled())
            log.debug("Completed partition exchange [localNode=" + cctx.localNodeId() +
                ", exchange=" + this + ']');

        initFut.onDone(err == null);

        GridTimeoutObject timeoutObj = this.timeoutObj;

        // Deschedule timeout object.
        if (timeoutObj != null)
            cctx.kernalContext().timeout().removeTimeoutObject(timeoutObj);

        if (exchId.isLeft()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts())
                cacheCtx.config().getAffinity().removeNode(exchId.nodeId());
        }

        return true;
    }

    return dummy;
}
/** {@inheritDoc} */
@Override protected void body() throws InterruptedException {
    if (log.isDebugEnabled())
        log.debug("Delete worker started.");

    while (!cancelled) {
        lock.lock();

        try {
            if (!cancelled && !force)
                cond.await(FREQUENCY, TimeUnit.MILLISECONDS);

            force = false; // Reset force flag.
        }
        finally {
            lock.unlock();
        }

        if (!cancelled)
            delete();
    }
}
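// Hypothetical sketch of the wake-up pattern in body() above: the worker
// sleeps on a condition with a timeout, and a "force" flag lets another thread
// trigger an immediate pass without waiting out the full period. Names and the
// 10-second period are illustrative.
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

class PeriodicWorkerSketch {
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition cond = lock.newCondition();

    private boolean force;
    private volatile boolean cancelled;

    /** Worker loop: one pass per period, or immediately when signalled. */
    void body() throws InterruptedException {
        while (!cancelled) {
            lock.lock();

            try {
                if (!cancelled && !force)
                    cond.await(10_000, TimeUnit.MILLISECONDS);

                force = false; // Reset so the next pass waits again.
            }
            finally {
                lock.unlock();
            }

            if (!cancelled)
                doPass();
        }
    }

    /** Requests an immediate pass from another thread. */
    void signalNow() {
        lock.lock();

        try {
            force = true;

            cond.signalAll();
        }
        finally {
            lock.unlock();
        }
    }

    private void doPass() { /* ... actual work ... */ }
}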
/**
 * Returns existing or just created client to node.
 *
 * @param desc Node to which client should be opened.
 * @return The existing or just created client.
 * @throws IgniteCheckedException Thrown if any exception occurs.
 */
private HadoopCommunicationClient reserveClient(HadoopProcessDescriptor desc) throws IgniteCheckedException {
    assert desc != null;

    UUID procId = desc.processId();

    while (true) {
        HadoopCommunicationClient client = clients.get(procId);

        if (client == null) {
            if (log.isDebugEnabled())
                log.debug("Did not find client for remote process [locProcDesc=" + locProcDesc +
                    ", desc=" + desc + ']');

            // Do not allow concurrent connects.
            Object sync = locks.lock(procId);

            try {
                client = clients.get(procId);

                if (client == null) {
                    HadoopCommunicationClient old = clients.put(procId, client = createNioClient(desc));

                    assert old == null;
                }
            }
            finally {
                locks.unlock(procId, sync);
            }

            assert client != null;
        }

        if (client.reserve())
            return client;
        else
            // Client has just been closed by idle worker. Help it and try again.
            clients.remove(procId, client);
    }
}
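// Hypothetical sketch of the per-key double-checked creation in
// reserveClient() above: the map is checked once without locking, then
// re-checked under a lock scoped to the single key, so connects to different
// processes never serialize against each other. A plain per-key monitor map
// stands in for Ignite's GridKeyLock; lock objects are kept for simplicity.
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class ClientCacheSketch<C> {
    private final ConcurrentMap<UUID, C> clients = new ConcurrentHashMap<>();
    private final ConcurrentMap<UUID, Object> locks = new ConcurrentHashMap<>();

    interface Factory<C> { C create(UUID id); }

    C getOrCreate(UUID id, Factory<C> factory) {
        C client = clients.get(id);

        if (client == null) {
            Object lock = locks.computeIfAbsent(id, k -> new Object());

            // Serialize creation per key only.
            synchronized (lock) {
                client = clients.get(id);

                if (client == null) {
                    client = factory.create(id);

                    clients.put(id, client);
                }
            }
        }

        return client;
    }
}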
/**
 * @param updateSeq Update sequence.
 * @return Future to signal that this node is no longer an owner or backup.
 */
IgniteInternalFuture<?> rent(boolean updateSeq) {
    while (true) {
        int reservations = state.getStamp();

        GridDhtPartitionState s = state.getReference();

        if (s == RENTING || s == EVICTED)
            return rent;

        if (state.compareAndSet(s, RENTING, reservations, reservations)) {
            if (log.isDebugEnabled())
                log.debug("Moved partition to RENTING state: " + this);

            // Evict asynchronously, as the 'rent' method may be called
            // from within write locks on local partition.
            tryEvictAsync(updateSeq);

            break;
        }
    }

    return rent;
}