/**
 * Waits for renting partitions.
 *
 * @return {@code True} if mapping was changed.
 * @throws IgniteCheckedException If failed.
 */
private boolean waitForRent() throws IgniteCheckedException {
    boolean changed = false;

    // Synchronously wait for all renting partitions to complete.
    for (Iterator<GridDhtLocalPartition> it = locParts.values().iterator(); it.hasNext(); ) {
        GridDhtLocalPartition p = it.next();

        GridDhtPartitionState state = p.state();

        if (state == RENTING || state == EVICTED) {
            if (log.isDebugEnabled())
                log.debug("Waiting for renting partition: " + p);

            // Wait for partition to empty out.
            p.rent(true).get();

            if (log.isDebugEnabled())
                log.debug("Finished waiting for renting partition: " + p);

            // Remove evicted partition.
            it.remove();

            changed = true;
        }
    }

    return changed;
}
/**
 * @param updateSeq Update sequence.
 * @return {@code True} if entry has been transitioned to state EVICTED.
 */
boolean tryEvict(boolean updateSeq) {
    if (state.getReference() != RENTING || state.getStamp() != 0 || groupReserved())
        return false;

    // Attempt to evict partition entries from cache.
    clearAll();

    if (map.isEmpty() && state.compareAndSet(RENTING, EVICTED, 0, 0)) {
        if (log.isDebugEnabled())
            log.debug("Evicted partition: " + this);

        if (!GridQueryProcessor.isEnabled(cctx.config()))
            clearSwap();

        if (cctx.isDrEnabled())
            cctx.dr().partitionEvicted(id);

        cctx.dataStructures().onPartitionEvicted(id);

        rent.onDone();

        ((GridDhtPreloader)cctx.preloader()).onPartitionEvicted(this, updateSeq);

        clearDeferredDeletes();

        return true;
    }

    return false;
}
/**
 * Creates a new HTTP request handler.
 *
 * @param hnd Handler.
 * @param authChecker Authentication checking closure.
 * @param log Logger.
 */
GridJettyRestHandler(
    GridRestProtocolHandler hnd,
    IgniteClosure<String, Boolean> authChecker,
    IgniteLogger log
) {
    assert hnd != null;
    assert log != null;

    this.hnd = hnd;
    this.log = log;
    this.authChecker = authChecker;
    this.jsonMapper = new GridJettyObjectMapper();

    // Init default page and favicon.
    try {
        initDefaultPage();

        if (log.isDebugEnabled())
            log.debug("Initialized default page.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize default page: " + e.getMessage());
    }

    try {
        initFavicon();

        if (log.isDebugEnabled())
            log.debug(favicon != null ? "Initialized favicon, size: " + favicon.length : "Favicon is null.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize favicon: " + e.getMessage());
    }
}
/**
 * @param updateSeq Update sequence.
 * @return Future for evict attempt.
 */
IgniteInternalFuture<Boolean> tryEvictAsync(boolean updateSeq) {
    if (map.isEmpty() && !GridQueryProcessor.isEnabled(cctx.config()) &&
        state.compareAndSet(RENTING, EVICTED, 0, 0)) {
        if (log.isDebugEnabled())
            log.debug("Evicted partition: " + this);

        clearSwap();

        if (cctx.isDrEnabled())
            cctx.dr().partitionEvicted(id);

        cctx.dataStructures().onPartitionEvicted(id);

        rent.onDone();

        ((GridDhtPreloader)cctx.preloader()).onPartitionEvicted(this, updateSeq);

        clearDeferredDeletes();

        return new GridFinishedFuture<>(true);
    }

    return cctx.closures().callLocalSafe(
        new GPC<Boolean>() {
            @Override public Boolean call() {
                return tryEvict(true);
            }
        },
        /*system pool*/true);
}
/**
 * @param cctx Cache context.
 * @param busyLock Busy lock.
 * @param exchId Exchange ID.
 * @param reqs Cache change requests.
 */
public GridDhtPartitionsExchangeFuture(
    GridCacheSharedContext cctx,
    ReadWriteLock busyLock,
    GridDhtPartitionExchangeId exchId,
    Collection<DynamicCacheChangeRequest> reqs
) {
    assert busyLock != null;
    assert exchId != null;

    dummy = false;
    forcePreload = false;
    reassign = false;

    this.cctx = cctx;
    this.busyLock = busyLock;
    this.exchId = exchId;
    this.reqs = reqs;

    log = cctx.logger(getClass());

    initFut = new GridFutureAdapter<>();

    if (log.isDebugEnabled())
        log.debug("Creating exchange future [localNode=" + cctx.localNodeId() + ", fut=" + this + ']');
}
/** {@inheritDoc} */
@Override public Serializable execute() {
    synchronized (mux) {
        execCnt++;
    }

    if (log.isInfoEnabled())
        log.info("Executing job: " + jobCtx.getJobId());

    long now = System.currentTimeMillis();

    while (!isCancelled && now < thresholdTime) {
        synchronized (mux) {
            try {
                mux.wait(thresholdTime - now);
            }
            catch (InterruptedException ignored) {
                // No-op.
            }
        }

        now = System.currentTimeMillis();
    }

    synchronized (mux) {
        return isCancelled ? 1 : 0;
    }
}
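// The wait loop in execute() only exits early if another thread flips isCancelled and notifies mux.
// Below is a minimal sketch of the cancelling side, assuming the job exposes the same mux and
// isCancelled fields used above; it is an illustration, not the original class's code.
/** {@inheritDoc} */
@Override public void cancel() {
    synchronized (mux) {
        // Signal the execute() loop to stop waiting.
        isCancelled = true;

        // Wake up the thread parked in mux.wait(...).
        mux.notifyAll();
    }
}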
/**
 * Callback for backup update response.
 *
 * @param nodeId Backup node ID.
 * @param updateRes Update response.
 */
public void onResult(UUID nodeId, GridDhtAtomicUpdateResponse updateRes) {
    if (log.isDebugEnabled())
        log.debug("Received DHT atomic update future result [nodeId=" + nodeId + ", updateRes=" + updateRes + ']');

    if (updateRes.error() != null)
        this.updateRes.addFailedKeys(updateRes.failedKeys(), updateRes.error());

    if (!F.isEmpty(updateRes.nearEvicted())) {
        for (KeyCacheObject key : updateRes.nearEvicted()) {
            GridDhtCacheEntry entry = nearReadersEntries.get(key);

            try {
                entry.removeReader(nodeId, updateRes.messageId());
            }
            catch (GridCacheEntryRemovedException e) {
                if (log.isDebugEnabled())
                    log.debug("Entry with evicted reader was removed [entry=" + entry + ", err=" + e + ']');
            }
        }
    }

    registerResponse(nodeId);
}
/**
 * Sends message to Hadoop process.
 *
 * @param desc Destination process descriptor.
 * @param msg Message to send.
 * @throws IgniteCheckedException If sending failed.
 */
public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg) throws IgniteCheckedException {
    assert desc != null;
    assert msg != null;

    if (log.isTraceEnabled())
        log.trace("Sending message to Hadoop process [desc=" + desc + ", msg=" + msg + ']');

    HadoopCommunicationClient client = null;

    boolean closeOnRelease = true;

    try {
        client = reserveClient(desc);

        client.sendMessage(desc, msg);

        closeOnRelease = false;
    }
    finally {
        if (client != null) {
            if (closeOnRelease) {
                client.forceClose();

                clients.remove(desc.processId(), client);
            }
            else
                client.release();
        }
    }
}
/** {@inheritDoc} */
@Override public boolean onNodeLeft(UUID nodeId) {
    if (log.isDebugEnabled())
        log.debug("Processing node leave event [fut=" + this + ", nodeId=" + nodeId + ']');

    return registerResponse(nodeId);
}
/** Sends local partition map to the oldest node in topology. */
private void sendPartitions() {
    ClusterNode oldestNode = this.oldestNode.get();

    try {
        sendLocalPartitions(oldestNode, exchId);
    }
    catch (ClusterTopologyCheckedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Oldest node left during partition exchange [nodeId=" + oldestNode.id() +
                ", exchId=" + exchId + ']');
    }
    catch (IgniteCheckedException e) {
        scheduleRecheck();

        U.error(log, "Failed to send local partitions to oldest node (will retry after timeout) [oldestNodeId=" +
            oldestNode.id() + ", exchId=" + exchId + ']', e);
    }
}
/**
 * @param nodes Nodes to send the full partition map to.
 * @param id Exchange ID.
 * @throws IgniteCheckedException If failed.
 */
private void sendAllPartitions(Collection<? extends ClusterNode> nodes, GridDhtPartitionExchangeId id)
    throws IgniteCheckedException {
    GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(id, lastVer.get(), id.topologyVersion());

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (!cacheCtx.isLocal()) {
            AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

            boolean ready = startTopVer == null || startTopVer.compareTo(id.topologyVersion()) <= 0;

            if (ready)
                m.addFullPartitionsMap(cacheCtx.cacheId(), cacheCtx.topology().partitionMap(true));
        }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
        m.addFullPartitionsMap(top.cacheId(), top.partitionMap(true));

    if (log.isDebugEnabled())
        log.debug("Sending full partition map [nodeIds=" + F.viewReadOnly(nodes, F.node2id()) +
            ", exchId=" + exchId + ", msg=" + m + ']');

    cctx.io().safeSend(nodes, m, SYSTEM_POOL, null);
}
/** {@inheritDoc} */
@Override public Set<Long> call() throws IgniteCheckedException {
    assert ignite != null;

    if (log.isInfoEnabled())
        log.info("Running GetAndIncrementJob on node: " + ignite.cluster().localNode().id());

    IgniteAtomicSequence seq = ignite.atomicSequence(seqName, 0, true);

    assert seq != null;

    // Result set.
    Set<Long> resSet = new HashSet<>();

    // Get sequence values and put them into the result set.
    for (int i = 0; i < retries; i++) {
        long val = seq.getAndIncrement();

        assert !resSet.contains(val) : "Element already in set: " + val;

        resSet.add(val);
    }

    return resSet;
}
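// For context: a hedged sketch of how such a job might be driven, broadcasting it to every node and
// checking that the per-node result sets are pairwise disjoint (the reason the job returns a Set).
// The GetAndIncrementJob constructor shape is an assumption, not the original test code.
Collection<Set<Long>> perNodeResults =
    ignite.compute().broadcast(new GetAndIncrementJob(seqName, retries));

Set<Long> all = new HashSet<>();

for (Set<Long> nodeRes : perNodeResults)
    for (Long val : nodeRes)
        assert all.add(val) : "Duplicate sequence value across nodes: " + val;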
/** {@inheritDoc} */
@SuppressWarnings({"CatchGenericClass"})
@Override public final void run() {
    try {
        body();
    }
    catch (InterruptedException e) {
        if (log.isDebugEnabled())
            log.debug("Caught interrupted exception: " + e);

        Thread.currentThread().interrupt();
    }
    // Catch everything to make sure that it gets logged properly and
    // not to kill any threads from the underlying thread pool.
    catch (Throwable e) {
        U.error(log, "Runtime error caught during grid runnable execution: " + this, e);

        if (e instanceof Error)
            throw e;
    }
    finally {
        cleanup();

        if (log.isDebugEnabled()) {
            if (isInterrupted())
                log.debug("Grid runnable finished due to interruption without cancellation: " + getName());
            else
                log.debug("Grid runnable finished normally: " + getName());
        }
    }
}
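// The run() wrapper above relies on body() either completing or surfacing an InterruptedException,
// which it then translates into re-setting the interrupt flag. A minimal sketch of a cooperative
// body() under that contract; doWork() is an assumed placeholder, not an actual method of the class.
@Override protected void body() throws InterruptedException {
    while (!Thread.currentThread().isInterrupted()) {
        doWork(); // Assumed unit of work.

        // Block on something interruptible so interruption propagates to run() as an exception.
        Thread.sleep(100);
    }
}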
/** Sends requests to remote nodes. */
public void map() {
    if (!mappings.isEmpty()) {
        for (GridDhtAtomicUpdateRequest req : mappings.values()) {
            try {
                if (log.isDebugEnabled())
                    log.debug("Sending DHT atomic update request [nodeId=" + req.nodeId() + ", req=" + req + ']');

                cctx.io().send(req.nodeId(), req, cctx.ioPolicy());
            }
            catch (ClusterTopologyCheckedException ignored) {
                U.warn(log, "Failed to send update request to backup node because it left grid: " + req.nodeId());

                registerResponse(req.nodeId());
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Failed to send update request to backup node (did node leave the grid?): " +
                    req.nodeId(), e);

                registerResponse(req.nodeId());
            }
        }
    }
    else
        onDone();

    // Send response right away if no ACKs from backups are required.
    // Backups will send ACKs anyway; the future will be completed after all backups have replied.
    if (updateReq.writeSynchronizationMode() != FULL_SYNC)
        completionCb.apply(updateReq, updateRes);
}
/** {@inheritDoc} */
@Override public void spiStart(String gridName) throws IgniteSpiException {
    // Start SPI start stopwatch.
    startStopwatch();

    // Ack start.
    if (log.isInfoEnabled())
        log.info(startInfo());
}
/** @return {@code true} if busy state was entered. */
private boolean enterBusy() {
    if (busyLock.readLock().tryLock())
        return true;

    if (log.isDebugEnabled())
        log.debug("Failed to enter busy state (exchanger is stopping): " + this);

    return false;
}
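// A typical call-site pattern for a guard like enterBusy(), assuming a matching leaveBusy() that
// unlocks busyLock.readLock(); this is a sketch of the usage convention, not the class's own code.
if (!enterBusy())
    return; // Exchanger is stopping; skip the operation.

try {
    // ... do work that must not overlap with stop() ...
}
finally {
    leaveBusy();
}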
/**
 * @param p Partition number.
 * @param topVer Topology version.
 * @param create Create flag.
 * @param updateSeq Update sequence.
 * @return Local partition.
 */
private GridDhtLocalPartition localPartition(int p, AffinityTopologyVersion topVer, boolean create,
    boolean updateSeq) {
    while (true) {
        boolean belongs = cctx.affinity().localNode(p, topVer);

        GridDhtLocalPartition loc = locParts.get(p);

        if (loc != null && loc.state() == EVICTED) {
            locParts.remove(p, loc);

            if (!create)
                return null;

            if (!belongs)
                throw new GridDhtInvalidPartitionException(p, "Adding entry to evicted partition [part=" + p +
                    ", topVer=" + topVer + ", this.topVer=" + this.topVer + ']');

            continue;
        }

        if (loc == null && create) {
            if (!belongs)
                throw new GridDhtInvalidPartitionException(p, "Creating partition which does not belong [part=" + p +
                    ", topVer=" + topVer + ", this.topVer=" + this.topVer + ']');

            lock.writeLock().lock();

            try {
                GridDhtLocalPartition old = locParts.putIfAbsent(p, loc = new GridDhtLocalPartition(cctx, p));

                if (old != null)
                    loc = old;
                else {
                    if (updateSeq)
                        this.updateSeq.incrementAndGet();

                    if (log.isDebugEnabled())
                        log.debug("Created local partition: " + loc);
                }
            }
            finally {
                lock.writeLock().unlock();
            }
        }

        return loc;
    }
}
/** {@inheritDoc} */
@Override public final void onKernalStop(boolean cancel) {
    if (!starting.get())
        // Ignoring attempt to stop manager that has never been started.
        return;

    onKernalStop0(cancel);

    if (log != null && log.isDebugEnabled())
        log.debug(kernalStopInfo());
}
/** {@inheritDoc} */
@Override public final void stop(boolean cancel) {
    if (!starting.get() || !stop.compareAndSet(false, true))
        // Ignoring attempt to stop manager that has never been started or is already stopping.
        return;

    stop0(cancel);

    if (log != null && log.isDebugEnabled())
        log.debug(stopInfo());
}
/** @param cand Remote candidate. */
private void addRemote(GridCacheMvccCandidate cand) {
    assert !cand.local();

    if (log.isDebugEnabled())
        log.debug("Adding remote candidate [mvcc=" + this + ", cand=" + cand + "]");

    cctx.versions().onReceived(cand.nodeId(), cand.version());

    add0(cand);
}
/**
 * @param log Logger.
 * @param time Time.
 * @param msg Message.
 */
private static void log0(@Nullable IgniteLogger log, long time, String msg) {
    if (log != null) {
        if (log.isDebugEnabled())
            log.debug(msg);
        else
            log.warning(msg);
    }
    else
        X.println(String.format("[%s][%s]%s",
            DEBUG_DATE_FMT.get().format(time),
            Thread.currentThread().getName(),
            msg));
}
/** {@inheritDoc} */
@Override public Collection<? extends ComputeJob> split(int gridSize, Serializable arg) {
    if (log.isInfoEnabled())
        log.info("Splitting task [task=" + this + ", gridSize=" + gridSize + ", arg=" + arg + ']');

    Collection<GridCancelTestJob> jobs = new ArrayList<>(SPLIT_COUNT);

    for (int i = 0; i < SPLIT_COUNT; i++)
        jobs.add(new GridCancelTestJob());

    return jobs;
}
/**
 * @param nodeId Sender.
 * @param res Result.
 */
public void onResult(UUID nodeId, GridDhtTxFinishResponse res) {
    if (!isDone()) {
        boolean found = false;

        for (IgniteInternalFuture<IgniteInternalTx> fut : futures()) {
            if (isMini(fut)) {
                MiniFuture f = (MiniFuture)fut;

                if (f.futureId().equals(res.miniId())) {
                    found = true;

                    assert f.node().id().equals(nodeId);

                    f.onResult(res);
                }
            }
        }

        if (!found) {
            if (msgLog.isDebugEnabled()) {
                msgLog.debug("DHT finish fut, failed to find mini future [txId=" + tx.nearXidVersion() +
                    ", dhtTxId=" + tx.xidVersion() +
                    ", node=" + nodeId +
                    ", res=" + res +
                    ", fut=" + this + ']');
            }
        }
    }
    else {
        if (msgLog.isDebugEnabled()) {
            msgLog.debug("DHT finish fut, response for already finished future [txId=" + tx.nearXidVersion() +
                ", dhtTxId=" + tx.xidVersion() +
                ", node=" + nodeId +
                ", res=" + res +
                ", fut=" + this + ']');
        }
    }
}
/**
 * @param rmtReducer Optional reducer.
 * @param rmtTransform Optional transformer.
 * @param args Arguments.
 * @return Future.
 */
@SuppressWarnings("IfMayBeConditional")
private <R> CacheQueryFuture<R> execute(@Nullable IgniteReducer<T, R> rmtReducer,
    @Nullable IgniteClosure<T, R> rmtTransform, @Nullable Object... args) {
    Collection<ClusterNode> nodes = nodes();

    cctx.checkSecurity(SecurityPermission.CACHE_READ);

    if (nodes.isEmpty())
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), new ClusterGroupEmptyCheckedException());

    if (log.isDebugEnabled())
        log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
        try {
            cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
            cctx.deploy().registerClasses(args);
        }
        catch (IgniteCheckedException e) {
            return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
        }
    }

    if (subjId == null)
        subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    final GridCacheQueryBean bean = new GridCacheQueryBean(this, (IgniteReducer<Object, Object>)rmtReducer,
        (IgniteClosure<Object, Object>)rmtTransform, args);

    final GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS || type == SPI)
        return (CacheQueryFuture<R>)(loc ? qryMgr.queryFieldsLocal(bean) :
            qryMgr.queryFieldsDistributed(bean, nodes));
    else if (type == SCAN && part != null && nodes.size() > 1)
        return new CacheQueryFallbackFuture<>(nodes, bean, qryMgr);
    else
        return (CacheQueryFuture<R>)(loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes));
}
/**
 * @param desc Sender process descriptor.
 * @param msg Communication message.
 */
protected void notifyListener(HadoopProcessDescriptor desc, HadoopMessage msg) {
    HadoopMessageListener lsnr = this.lsnr;

    if (lsnr != null)
        // Notify listener of a new message.
        lsnr.onMessageReceived(desc, msg);
    else if (log.isDebugEnabled())
        log.debug("Received communication message without any registered listeners (will ignore) " +
            "[senderProcDesc=" + desc + ", msg=" + msg + ']');
}
/** {@inheritDoc} */
@Override public Object reduce(List<ComputeJobResult> results) {
    if (log.isInfoEnabled())
        log.info("Aggregating job [job=" + this + ", results=" + results + ']');

    int res = 0;

    for (ComputeJobResult result : results) {
        assert result != null;

        if (result.getData() != null)
            res += (Integer)result.getData();
    }

    return res;
}
/** {@inheritDoc} */
@Override public final void start(GridCacheSharedContext<K, V> cctx) throws IgniteCheckedException {
    if (!starting.compareAndSet(false, true))
        assert false : "Method start is called more than once for manager: " + this;

    assert cctx != null;

    this.cctx = cctx;

    log = cctx.logger(getClass());

    start0();

    if (log.isDebugEnabled())
        log.debug(startInfo());
}
/**
 * Sets remote candidate to done.
 *
 * @param ver Version.
 * @param pending Pending versions.
 * @param committed Committed versions.
 * @param rolledback Rolledback versions.
 * @return Lock owner.
 */
@Nullable public CacheLockCandidates doneRemote(
    GridCacheVersion ver,
    Collection<GridCacheVersion> pending,
    Collection<GridCacheVersion> committed,
    Collection<GridCacheVersion> rolledback
) {
    assert ver != null;

    if (log.isDebugEnabled())
        log.debug("Setting remote candidate to done [mvcc=" + this + ", ver=" + ver + "]");

    // Check remote candidate.
    GridCacheMvccCandidate cand = candidate(rmts, ver);

    if (cand != null) {
        assert rmts != null;
        assert !rmts.isEmpty();
        assert !cand.local() : "Remote candidate is marked as local: " + cand;
        assert !cand.nearLocal() : "Remote candidate is marked as near local: " + cand;

        cand.setOwner();
        cand.setUsed();

        List<GridCacheMvccCandidate> mvAfter = null;

        for (ListIterator<GridCacheMvccCandidate> it = rmts.listIterator(); it.hasNext(); ) {
            GridCacheMvccCandidate c = it.next();

            assert !c.nearLocal() : "Remote candidate marked as near local: " + c;

            if (c == cand) {
                if (mvAfter != null)
                    for (GridCacheMvccCandidate mv : mvAfter)
                        it.add(mv);

                break;
            }
            else if (!committed.contains(c.version()) && !rolledback.contains(c.version()) &&
                pending.contains(c.version())) {
                it.remove();

                if (mvAfter == null)
                    mvAfter = new LinkedList<>();

                mvAfter.add(c);
            }
        }
    }

    return allOwners();
}
/** {@inheritDoc} */
@Override public void handle(String target, Request req, HttpServletRequest srvReq, HttpServletResponse res)
    throws IOException, ServletException {
    if (log.isDebugEnabled())
        log.debug("Handling request [target=" + target + ", req=" + req + ", srvReq=" + srvReq + ']');

    if (target.startsWith("/ignite")) {
        processRequest(target, srvReq, res);

        req.setHandled(true);
    }
    else if (target.startsWith("/favicon.ico")) {
        if (favicon == null) {
            res.setStatus(HttpServletResponse.SC_NOT_FOUND);

            req.setHandled(true);

            return;
        }

        res.setStatus(HttpServletResponse.SC_OK);

        res.setContentType("image/x-icon");

        res.getOutputStream().write(favicon);
        res.getOutputStream().flush();

        req.setHandled(true);
    }
    else {
        if (dfltPage == null) {
            res.setStatus(HttpServletResponse.SC_NOT_FOUND);

            req.setHandled(true);

            return;
        }

        res.setStatus(HttpServletResponse.SC_OK);

        res.setContentType("text/html");

        res.getWriter().write(dfltPage);
        res.getWriter().flush();

        req.setHandled(true);
    }
}
/** {@inheritDoc} */
@Override public boolean onDone(AffinityTopologyVersion res, Throwable err) {
    Map<Integer, Boolean> m = null;

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (cacheCtx.config().getTopologyValidator() != null && !CU.isSystemCache(cacheCtx.name())) {
            if (m == null)
                m = new HashMap<>();

            m.put(cacheCtx.cacheId(),
                cacheCtx.config().getTopologyValidator().validate(discoEvt.topologyNodes()));
        }
    }

    cacheValidRes = m != null ? m : Collections.<Integer, Boolean>emptyMap();

    cctx.cache().onExchangeDone(exchId.topologyVersion(), reqs, err);

    cctx.exchange().onExchangeDone(this, err);

    if (super.onDone(res, err) && !dummy && !forcePreload) {
        if (log.isDebugEnabled())
            log.debug("Completed partition exchange [localNode=" + cctx.localNodeId() + ", exchange=" + this + ']');

        initFut.onDone(err == null);

        GridTimeoutObject timeoutObj = this.timeoutObj;

        // Deschedule timeout object.
        if (timeoutObj != null)
            cctx.kernalContext().timeout().removeTimeoutObject(timeoutObj);

        if (exchId.isLeft()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts())
                cacheCtx.config().getAffinity().removeNode(exchId.nodeId());
        }

        return true;
    }

    return dummy;
}