/** {@inheritDoc} */
@Override public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec = new ThreadPoolExecutor(
        threadsCnt,
        threadsCnt,
        0L,
        MILLISECONDS,
        new ArrayBlockingQueue<Runnable>(batchQueueSize),
        new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
        while (iter.hasNext()) {
            if (Thread.currentThread().isInterrupted()) {
                U.warn(log, "Working thread was interrupted while loading data.");

                break;
            }

            buf.add(iter.next());

            if (buf.size() == batchSize) {
                exec.submit(new Worker(c, buf, args));

                buf = new ArrayList<>(batchSize);
            }
        }

        if (!buf.isEmpty())
            exec.submit(new Worker(c, buf, args));
    }
    catch (RejectedExecutionException ignored) {
        // Because of custom RejectedExecutionHandler.
        assert false : "RejectedExecutionException was thrown while it shouldn't.";
    }
    finally {
        exec.shutdown();

        try {
            exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
        }
        catch (InterruptedException ignored) {
            U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

            Thread.currentThread().interrupt();
        }
    }
}
/** {@inheritDoc} */
@SuppressWarnings({"unchecked"})
@Override public void listenAsync(@Nullable final GridInClosure<? super GridFuture<R>> lsnr) {
    if (lsnr != null) {
        checkValid();

        boolean done;

        synchronized (mux) {
            done = this.done;

            if (!done)
                lsnrs.add(lsnr);
        }

        if (done) {
            try {
                if (syncNotify)
                    notifyListener(lsnr);
                else
                    ctx.closure().runLocalSafe(new GPR() {
                        @Override public void run() {
                            notifyListener(lsnr);
                        }
                    }, true);
            }
            catch (IllegalStateException ignore) {
                U.warn(null, "Future notification will not proceed because grid is stopped: " +
                    ctx.gridName());
            }
        }
    }
}
/**
 * Processes cache query response.
 *
 * @param sndId Sender node id.
 * @param res Query response.
 */
@SuppressWarnings("unchecked")
private void processQueryResponse(UUID sndId, GridCacheQueryResponse res) {
    if (log.isDebugEnabled())
        log.debug("Received query response: " + res);

    GridCacheQueryFutureAdapter fut = getQueryFuture(res.requestId());

    if (fut != null)
        if (res.fields())
            ((GridCacheDistributedFieldsQueryFuture)fut).onPage(
                sndId,
                res.metadata(),
                (Collection<Map<String, Object>>)((Collection)res.data()),
                res.error(),
                res.isFinished());
        else
            fut.onPage(sndId, res.data(), res.error(), res.isFinished());
    else if (!cancelled.contains(res.requestId()))
        U.warn(log, "Received response for finished or unknown query [rmtNodeId=" + sndId +
            ", res=" + res + ']');
}
/** {@inheritDoc} */
@Override public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
    try {
        if (executor.isShutdown())
            throw new RejectedExecutionException();
        else
            executor.getQueue().put(r);
    }
    catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while loading data.");

        Thread.currentThread().interrupt();
    }
}
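The handler above is what makes the bounded executor in loadCache() apply backpressure: instead of failing with RejectedExecutionException when the work queue is full, the submitting thread blocks on the queue until a worker frees a slot. Below is a minimal, self-contained sketch of the same pattern using only java.util.concurrent; the class name BlockingHandlerDemo, the QUEUE_CAPACITY constant, and the inline handler are illustrative assumptions, not part of the surrounding code base.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BlockingHandlerDemo {
    private static final int QUEUE_CAPACITY = 4;

    public static void main(String[] args) throws InterruptedException {
        // Bounded queue + blocking rejection handler: submit() blocks instead of throwing
        // when both workers are busy and the queue is full.
        ThreadPoolExecutor exec = new ThreadPoolExecutor(
            2, 2, 0L, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(QUEUE_CAPACITY),
            new RejectedExecutionHandler() {
                @Override public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        if (executor.isShutdown())
                            throw new RejectedExecutionException("Executor is shut down.");

                        // Block the caller until the bounded queue has room, then requeue the task.
                        executor.getQueue().put(r);
                    }
                    catch (InterruptedException e) {
                        Thread.currentThread().interrupt();

                        throw new RejectedExecutionException("Interrupted while waiting for queue space.", e);
                    }
                }
            });

        for (int i = 0; i < 100; i++) {
            final int task = i;

            // With the handler above, this call simply blocks once 2 workers and 4 queue slots are busy.
            exec.submit(new Runnable() {
                @Override public void run() {
                    System.out.println("Processed task " + task);
                }
            });
        }

        exec.shutdown();
        exec.awaitTermination(1, TimeUnit.MINUTES);
    }
}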
/**
 * Dumps pending transactions, explicit locks and cache futures that may be preventing
 * the partition release future from completing.
 */
private void dumpPendingObjects() {
    U.warn(log, "Failed to wait for partition release future. Dumping pending objects that might be the cause: " +
        cctx.localNodeId());

    U.warn(log, "Pending transactions:");

    for (IgniteInternalTx tx : cctx.tm().activeTransactions())
        U.warn(log, ">>> " + tx);

    U.warn(log, "Pending explicit locks:");

    for (GridCacheExplicitLockSpan lockSpan : cctx.mvcc().activeExplicitLocks())
        U.warn(log, ">>> " + lockSpan);

    U.warn(log, "Pending cache futures:");

    for (GridCacheFuture<?> fut : cctx.mvcc().activeFutures())
        U.warn(log, ">>> " + fut);

    U.warn(log, "Pending atomic cache futures:");

    for (GridCacheFuture<?> fut : cctx.mvcc().atomicFutures())
        U.warn(log, ">>> " + fut);
}
/**
 * Send delete message to all meta cache nodes in the grid.
 *
 * @param msg Message to send.
 */
private void sendDeleteMessage(IgfsDeleteMessage msg) {
    assert msg != null;

    Collection<ClusterNode> nodes = meta.metaCacheNodes();

    for (ClusterNode node : nodes) {
        try {
            igfsCtx.send(node, topic, msg, GridIoPolicy.SYSTEM_POOL);
        }
        catch (IgniteCheckedException e) {
            U.warn(log, "Failed to send IGFS delete message to node [nodeId=" + node.id() +
                ", msg=" + msg + ", err=" + e.getMessage() + ']');
        }
    }
}
/**
 * Notifies single listener.
 *
 * @param lsnr Listener.
 */
private void notifyListener(GridInClosure<? super GridFuture<R>> lsnr) {
    assert lsnr != null;

    try {
        lsnr.apply(this);
    }
    catch (IllegalStateException ignore) {
        U.warn(null, "Failed to notify listener (grid is stopped) [grid=" + ctx.gridName() +
            ", lsnr=" + lsnr + ']');
    }
    catch (RuntimeException e) {
        U.error(log, "Failed to notify listener: " + lsnr, e);

        throw e;
    }
    catch (Error e) {
        U.error(log, "Failed to notify listener: " + lsnr, e);

        throw e;
    }
}
/**
 * Starts activity.
 *
 * @throws IgniteInterruptedCheckedException If interrupted.
 */
public void init() throws IgniteInterruptedCheckedException {
    if (isDone())
        return;

    if (init.compareAndSet(false, true)) {
        if (isDone())
            return;

        try {
            // Wait for event to occur to make sure that discovery
            // will return corresponding nodes.
            U.await(evtLatch);

            assert discoEvt != null : this;
            assert !dummy && !forcePreload : this;

            ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

            oldestNode.set(oldest);

            startCaches();

            // True if client node joined or failed.
            boolean clientNodeEvt;

            if (F.isEmpty(reqs)) {
                int type = discoEvt.type();

                assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt;

                clientNodeEvt = CU.clientNode(discoEvt.eventNode());
            }
            else {
                assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

                boolean clientOnlyStart = true;

                for (DynamicCacheChangeRequest req : reqs) {
                    if (!req.clientStartOnly()) {
                        clientOnlyStart = false;

                        break;
                    }
                }

                clientNodeEvt = clientOnlyStart;
            }

            if (clientNodeEvt) {
                ClusterNode node = discoEvt.eventNode();

                // Client need to initialize affinity for local join event or for stated client caches.
                if (!node.isLocal()) {
                    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                        if (cacheCtx.isLocal())
                            continue;

                        GridDhtPartitionTopology top = cacheCtx.topology();

                        top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

                        if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                            initTopology(cacheCtx);

                            top.beforeExchange(this);
                        }
                        else
                            cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
                    }

                    if (exchId.isLeft())
                        cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

                    onDone(exchId.topologyVersion());

                    skipPreload = cctx.kernalContext().clientNode();

                    return;
                }
            }

            if (cctx.kernalContext().clientNode()) {
                skipPreload = true;

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    GridDhtPartitionTopology top = cacheCtx.topology();

                    top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
                }

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    initTopology(cacheCtx);
                }

                if (oldestNode.get() != null) {
                    rmtNodes = new ConcurrentLinkedQueue<>(
                        CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

                    rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

                    ready.set(true);

                    initFut.onDone(true);

                    if (log.isDebugEnabled())
                        log.debug("Initialized future: " + this);

                    sendPartitions();
                }
                else
                    onDone(exchId.topologyVersion());

                return;
            }

            assert oldestNode.get() != null;

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
                    if (cacheCtx.discovery().cacheAffinityNodes(cacheCtx.name(), topologyVersion()).isEmpty())
                        U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
                }

                cacheCtx.preloader().onExchangeFutureAdded();
            }

            List<String> cachesWithoutNodes = null;

            if (exchId.isLeft()) {
                for (String name : cctx.cache().cacheNames()) {
                    if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
                        if (cachesWithoutNodes == null)
                            cachesWithoutNodes = new ArrayList<>();

                        cachesWithoutNodes.add(name);

                        // Fire event even if there is no client cache started.
                        if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                            Event evt = new CacheEvent(name, cctx.localNode(), cctx.localNode(),
                                "All server nodes have left the cluster.", EventType.EVT_CACHE_NODES_LEFT, 0, false,
                                null, null, null, null, false, null, false, null, null, null);

                            cctx.gridEvents().record(evt);
                        }
                    }
                }
            }

            if (cachesWithoutNodes != null) {
                StringBuilder sb =
                    new StringBuilder("All server nodes for the following caches have left the cluster: ");

                for (int i = 0; i < cachesWithoutNodes.size(); i++) {
                    String cache = cachesWithoutNodes.get(i);

                    sb.append('\'').append(cache).append('\'');

                    if (i != cachesWithoutNodes.size() - 1)
                        sb.append(", ");
                }

                U.quietAndWarn(log, sb.toString());
                U.quietAndWarn(log, "Must have server nodes for caches to operate.");
            }

            assert discoEvt != null;
            assert exchId.nodeId().equals(discoEvt.eventNode().id());

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId());

                long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

                // Update before waiting for locks.
                if (!cacheCtx.isLocal())
                    cacheCtx.topology().updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
            }

            // Grab all alive remote nodes with order of equal or less than last joined node.
            rmtNodes = new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
                // If received any messages, process them.
                onReceive(m.getKey(), m.getValue());

            for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
                // If received any messages, process them.
                onReceive(m.getKey(), m.getValue());

            AffinityTopologyVersion topVer = exchId.topologyVersion();

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Must initialize topology after we get discovery event.
                initTopology(cacheCtx);

                cacheCtx.preloader().updateLastExchangeFuture(this);
            }

            IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

            // Assign to class variable so it will be included into toString() method.
            this.partReleaseFut = partReleaseFut;

            if (log.isDebugEnabled())
                log.debug("Before waiting for partition release future: " + this);

            while (true) {
                try {
                    partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    // Print pending transactions and locks that might have led to hang.
                    dumpPendingObjects();
                }
            }

            if (log.isDebugEnabled())
                log.debug("After waiting for partition release future: " + this);

            if (!F.isEmpty(reqs))
                blockGateways();

            if (exchId.isLeft())
                cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

            while (true) {
                try {
                    locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    U.warn(log, "Failed to wait for locks release future. " +
                        "Dumping pending objects that might be the cause: " + cctx.localNodeId());

                    U.warn(log, "Locked entries:");

                    Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                        cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

                    for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
                        U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
                }
            }

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Notify replication manager.
                GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

                if (drCacheCtx.isDrEnabled())
                    drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

                // Partition release future is done so we can flush the write-behind store.
                cacheCtx.store().forceFlush();

                // Process queued undeploys prior to sending/spreading map.
                cacheCtx.preloader().unwindUndeploys();

                GridDhtPartitionTopology top = cacheCtx.topology();

                assert topVer.equals(top.topologyVersion()) :
                    "Topology version is updated only in this class instances inside single ExchangeWorker thread.";

                top.beforeExchange(this);
            }

            for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
                top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

                top.beforeExchange(this);
            }
        }
        catch (IgniteInterruptedCheckedException e) {
            onDone(e);

            throw e;
        }
        catch (Throwable e) {
            U.error(log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e);

            onDone(e);

            if (e instanceof Error)
                throw (Error)e;

            return;
        }

        if (F.isEmpty(rmtIds)) {
            onDone(exchId.topologyVersion());

            return;
        }

        ready.set(true);

        initFut.onDone(true);

        if (log.isDebugEnabled())
            log.debug("Initialized future: " + this);

        // If this node is not oldest.
        if (!oldestNode.get().id().equals(cctx.localNodeId()))
            sendPartitions();
        else {
            boolean allReceived = allReceived();

            if (allReceived && replied.compareAndSet(false, true)) {
                if (spreadPartitions())
                    onDone(exchId.topologyVersion());
            }
        }

        scheduleRecheck();
    }
    else
        assert false : "Skipped init future: " + this;
}
/** {@inheritDoc} */
@SuppressWarnings({"CatchGenericClass", "ThrowableInstanceNeverThrown"})
@Override public void finish(boolean commit) throws GridException {
    if (log.isDebugEnabled())
        log.debug("Finishing near local tx [tx=" + this + ", commit=" + commit + "]");

    if (commit) {
        if (!state(COMMITTING)) {
            GridCacheTxState state = state();

            if (state != COMMITTING && state != COMMITTED)
                throw new GridException("Invalid transaction state for commit [state=" + state() +
                    ", tx=" + this + ']');
            else {
                if (log.isDebugEnabled())
                    log.debug("Invalid transaction state for commit (another thread is committing): " + this);

                return;
            }
        }
    }
    else {
        if (!state(ROLLING_BACK)) {
            if (log.isDebugEnabled())
                log.debug("Invalid transaction state for rollback [state=" + state() + ", tx=" + this + ']');

            return;
        }
    }

    GridException err = null;

    // Commit to DB first. This way if there is a failure, transaction
    // won't be committed.
    try {
        if (commit && !isRollbackOnly())
            userCommit();
        else
            userRollback();
    }
    catch (GridException e) {
        err = e;

        commit = false;

        // If heuristic error.
        if (!isRollbackOnly()) {
            invalidate = true;

            U.warn(log, "Set transaction invalidation flag to true due to error [tx=" + this +
                ", err=" + err + ']');
        }
    }

    if (err != null) {
        state(UNKNOWN);

        throw err;
    }
    else {
        if (!state(commit ? COMMITTED : ROLLED_BACK)) {
            state(UNKNOWN);

            throw new GridException("Invalid transaction state for commit or rollback: " + this);
        }
    }
}
/**
 * Initializes store.
 *
 * @throws GridException If failed to initialize.
 */
private void init() throws GridException {
    if (initGuard.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
            log.debug("Initializing cache store.");

        try {
            if (sesFactory != null)
                // Session factory has been provided - nothing to do.
                return;

            if (!F.isEmpty(hibernateCfgPath)) {
                try {
                    URL url = new URL(hibernateCfgPath);

                    sesFactory = new Configuration().configure(url).buildSessionFactory();

                    if (log.isDebugEnabled())
                        log.debug("Configured session factory using URL: " + url);

                    // Session factory has been successfully initialized.
                    return;
                }
                catch (MalformedURLException e) {
                    if (log.isDebugEnabled())
                        log.debug("Caught malformed URL exception: " + e.getMessage());
                }

                // Provided path is not a valid URL. File?
                File cfgFile = new File(hibernateCfgPath);

                if (cfgFile.exists()) {
                    sesFactory = new Configuration().configure(cfgFile).buildSessionFactory();

                    if (log.isDebugEnabled())
                        log.debug("Configured session factory using file: " + hibernateCfgPath);

                    // Session factory has been successfully initialized.
                    return;
                }

                // Provided path is not a file. Classpath resource?
                sesFactory = new Configuration().configure(hibernateCfgPath).buildSessionFactory();

                if (log.isDebugEnabled())
                    log.debug("Configured session factory using classpath resource: " + hibernateCfgPath);
            }
            else {
                if (hibernateProps == null) {
                    U.warn(log, "No Hibernate configuration has been provided for store (will use default).");

                    hibernateProps = new Properties();

                    hibernateProps.setProperty("hibernate.connection.url", DFLT_CONN_URL);
                    hibernateProps.setProperty("hibernate.show_sql", DFLT_SHOW_SQL);
                    hibernateProps.setProperty("hibernate.hbm2ddl.auto", DFLT_HBM2DDL_AUTO);
                }

                Configuration cfg = new Configuration();

                cfg.setProperties(hibernateProps);

                assert resourceAvailable(MAPPING_RESOURCE);

                cfg.addResource(MAPPING_RESOURCE);

                sesFactory = cfg.buildSessionFactory();

                if (log.isDebugEnabled())
                    log.debug("Configured session factory using properties: " + hibernateProps);
            }
        }
        catch (HibernateException e) {
            throw new GridException("Failed to initialize store.", e);
        }
        finally {
            initLatch.countDown();
        }
    }
    else if (initLatch.getCount() > 0)
        U.await(initLatch);

    if (sesFactory == null)
        throw new GridException("Cache store was not properly initialized.");
}
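The initGuard/initLatch pair above is a common once-only initialization idiom: exactly one thread wins the compareAndSet and performs the work, every other caller parks on the latch until the winner counts it down, and all callers then verify the shared result. The following is a minimal standalone sketch of that idiom using only the JDK; the class name LazyResource and the heavyInit() method are illustrative assumptions, not part of the store implementation.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class LazyResource {
    private final AtomicBoolean initGuard = new AtomicBoolean();
    private final CountDownLatch initLatch = new CountDownLatch(1);

    private volatile Object resource;

    public Object get() throws InterruptedException {
        if (initGuard.compareAndSet(false, true)) {
            // This thread won the race and performs the (possibly failing) initialization.
            try {
                resource = heavyInit();
            }
            finally {
                // Release waiters whether or not initialization succeeded.
                initLatch.countDown();
            }
        }
        else
            // Losing threads wait for the winner to finish.
            initLatch.await();

        if (resource == null)
            throw new IllegalStateException("Resource was not properly initialized.");

        return resource;
    }

    private Object heavyInit() {
        // Placeholder for expensive, one-time setup work.
        return new Object();
    }
}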
/**
 * @param entries Entries to submit.
 * @param curFut Current future.
 * @throws GridInterruptedException If interrupted.
 */
private void submit(final List<Map.Entry<K, V>> entries, final GridFutureAdapter<Object> curFut)
    throws GridInterruptedException {
    assert entries != null;
    assert !entries.isEmpty();
    assert curFut != null;

    incrementActiveTasks();

    GridFuture<Object> fut;

    if (isLocNode) {
        fut = ctx.closure().callLocalSafe(
            new GridDataLoadUpdateJob<>(ctx, log, cacheName, entries, false, updater), false);

        locFuts.add(fut);

        fut.listenAsync(new GridInClosure<GridFuture<Object>>() {
            @Override public void apply(GridFuture<Object> t) {
                try {
                    boolean rmv = locFuts.remove(t);

                    assert rmv;

                    curFut.onDone(t.get());
                }
                catch (GridException e) {
                    curFut.onDone(e);
                }
            }
        });
    }
    else {
        byte[] entriesBytes;

        try {
            entriesBytes = ctx.config().getMarshaller().marshal(entries);

            if (updaterBytes == null) {
                assert updater != null;

                updaterBytes = ctx.config().getMarshaller().marshal(updater);
            }

            if (topicBytes == null)
                topicBytes = ctx.config().getMarshaller().marshal(topic);
        }
        catch (GridException e) {
            U.error(log, "Failed to marshal (request will not be sent).", e);

            return;
        }

        GridDeployment dep = null;
        GridPeerDeployAware jobPda0 = null;

        if (ctx.deploy().enabled()) {
            try {
                jobPda0 = jobPda;

                assert jobPda0 != null;

                dep = ctx.deploy().deploy(jobPda0.deployClass(), jobPda0.classLoader());
            }
            catch (GridException e) {
                U.error(log, "Failed to deploy class (request will not be sent): " + jobPda0.deployClass(), e);

                return;
            }

            if (dep == null)
                U.warn(log, "Failed to deploy class (request will be sent): " + jobPda0.deployClass());
        }

        long reqId = idGen.incrementAndGet();

        fut = curFut;

        reqs.put(reqId, (GridFutureAdapter<Object>)fut);

        GridDataLoadRequest<Object, Object> req = new GridDataLoadRequest<>(
            reqId,
            topicBytes,
            cacheName,
            updaterBytes,
            entriesBytes,
            true,
            dep != null ? dep.deployMode() : null,
            dep != null ? jobPda0.deployClass().getName() : null,
            dep != null ? dep.userVersion() : null,
            dep != null ? dep.participants() : null,
            dep != null ? dep.classLoaderId() : null,
            dep == null);

        try {
            ctx.io().send(node, TOPIC_DATALOAD, req, PUBLIC_POOL);

            if (log.isDebugEnabled())
                log.debug("Sent request to node [nodeId=" + node.id() + ", req=" + req + ']');
        }
        catch (GridException e) {
            if (ctx.discovery().alive(node) && ctx.discovery().pingNode(node.id()))
                ((GridFutureAdapter<Object>)fut).onDone(e);
            else
                ((GridFutureAdapter<Object>)fut).onDone(new GridTopologyException(
                    "Failed to send request (node has left): " + node.id()));
        }
    }
}