/**
 * Creates a new HTTP request handler.
 *
 * @param hnd Handler.
 * @param authChecker Authentication checking closure.
 * @param log Logger.
 */
GridJettyRestHandler(GridRestProtocolHandler hnd, IgniteClosure<String, Boolean> authChecker, IgniteLogger log) {
    assert hnd != null;
    assert log != null;

    this.hnd = hnd;
    this.log = log;
    this.authChecker = authChecker;
    this.jsonMapper = new GridJettyObjectMapper();

    // Init default page and favicon.
    try {
        initDefaultPage();

        if (log.isDebugEnabled())
            log.debug("Initialized default page.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize default page: " + e.getMessage());
    }

    try {
        initFavicon();

        if (log.isDebugEnabled())
            log.debug(favicon != null ? "Initialized favicon, size: " + favicon.length : "Favicon is null.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize favicon: " + e.getMessage());
    }
}
/** Sends requests to remote nodes. */
public void map() {
    if (!mappings.isEmpty()) {
        for (GridDhtAtomicUpdateRequest req : mappings.values()) {
            try {
                if (log.isDebugEnabled())
                    log.debug("Sending DHT atomic update request [nodeId=" + req.nodeId() + ", req=" + req + ']');

                cctx.io().send(req.nodeId(), req, cctx.ioPolicy());
            }
            catch (ClusterTopologyCheckedException ignored) {
                U.warn(log, "Failed to send update request to backup node because it left grid: " + req.nodeId());

                registerResponse(req.nodeId());
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Failed to send update request to backup node (did node leave the grid?): " +
                    req.nodeId(), e);

                registerResponse(req.nodeId());
            }
        }
    }
    else
        onDone();

    // Send the response right away if no ACKs from backups are required.
    // Backups will send ACKs anyway; the future will be completed after all backups have replied.
    if (updateReq.writeSynchronizationMode() != FULL_SYNC)
        completionCb.apply(updateReq, updateRes);
}
/**
 * @param nodes Nodes.
 * @param msg Message.
 * @param partsMap Partitions.
 * @return {@code true} if all messages were sent successfully.
 */
private boolean send(Collection<ClusterNode> nodes, Message msg, Map<ClusterNode, IntArray> partsMap) {
    boolean locNodeFound = false;

    boolean ok = true;

    for (ClusterNode node : nodes) {
        if (node.isLocal()) {
            locNodeFound = true;

            continue;
        }

        try {
            ctx.io().send(node, GridTopic.TOPIC_QUERY, copy(msg, node, partsMap), QUERY_POOL);
        }
        catch (IgniteCheckedException e) {
            ok = false;

            U.warn(log, e.getMessage());
        }
    }

    if (locNodeFound) // Local node goes last to allow parallel execution.
        h2.mapQueryExecutor().onMessage(ctx.localNodeId(), copy(msg, ctx.discovery().localNode(), partsMap));

    return ok;
}
/**
 * Starts communication.
 *
 * @throws IgniteCheckedException If failed.
 */
public void start() throws IgniteCheckedException {
    try {
        locHost = U.getLocalHost();
    }
    catch (IOException e) {
        throw new IgniteCheckedException("Failed to initialize local address.", e);
    }

    try {
        shmemSrv = resetShmemServer();
    }
    catch (IgniteCheckedException e) {
        U.warn(log, "Failed to start shared memory communication server.", e);
    }

    try {
        // This method potentially resets local port to the value
        // local node was bound to.
        nioSrvr = resetNioServer();
    }
    catch (IgniteCheckedException e) {
        throw new IgniteCheckedException("Failed to initialize TCP server: " + locHost, e);
    }

    locProcDesc.address(locHost.getHostAddress());
    locProcDesc.sharedMemoryPort(boundTcpShmemPort);
    locProcDesc.tcpPort(boundTcpPort);

    locIdMsg = new ProcessHandshakeMessage(locProcDesc);

    if (shmemSrv != null) {
        shmemAcceptWorker = new ShmemAcceptWorker(shmemSrv);

        new IgniteThread(shmemAcceptWorker).start();
    }

    nioSrvr.start();
}
/** @return Nodes to execute on. */
private Collection<ClusterNode> nodes() {
    CacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
        case LOCAL:
            if (prj != null)
                U.warn(log, "Ignoring query projection because it's executed over LOCAL cache " +
                    "(only local node will be queried): " + this);

            return Collections.singletonList(cctx.localNode());

        case REPLICATED:
            if (prj != null || partition() != null)
                return nodes(cctx, prj, partition());

            return cctx.affinityNode() ? Collections.singletonList(cctx.localNode()) :
                Collections.singletonList(F.rand(nodes(cctx, null, partition())));

        case PARTITIONED:
            return nodes(cctx, prj, partition());

        default:
            throw new IllegalStateException("Unknown cache distribution mode: " + cacheMode);
    }
}
/**
 * Processes cache query response.
 *
 * @param sndId Sender node id.
 * @param res Query response.
 */
@SuppressWarnings("unchecked")
private void processQueryResponse(UUID sndId, GridCacheQueryResponse res) {
    if (log.isDebugEnabled())
        log.debug("Received query response: " + res);

    GridCacheQueryFutureAdapter fut = getQueryFuture(res.requestId());

    if (fut != null)
        if (res.fields())
            ((GridCacheDistributedFieldsQueryFuture)fut).onPage(
                sndId,
                res.metadata(),
                (Collection<Map<String, Object>>)((Collection)res.data()),
                res.error(),
                res.isFinished());
        else
            fut.onPage(sndId, res.data(), res.error(), res.isFinished());
    else if (!cancelled.contains(res.requestId()))
        U.warn(log, "Received response for finished or unknown query [rmtNodeId=" + sndId + ", res=" + res + ']');
}
/** {@inheritDoc} */
@Override public void onDiscoveryDataReceived(UUID nodeId, UUID rmtNodeId, Serializable data) {
    Map<String, Serializable> discData = (Map<String, Serializable>)data;

    if (discData != null) {
        for (Map.Entry<String, Serializable> e : discData.entrySet()) {
            PluginProvider provider = plugins.get(e.getKey());

            if (provider != null)
                provider.receiveDiscoveryData(nodeId, e.getValue());
            else
                U.warn(log, "Received discovery data for unknown plugin: " + e.getKey());
        }
    }
}
/** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public boolean addAll(final Collection<? extends T> items) { A.notNull(items, "items"); try { boolean retVal; int cnt = 0; while (true) { try (IgniteInternalTx tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) { Long idx = (Long) cache.invoke(queueKey, new AddProcessor(id, items.size())).get(); if (idx != null) { checkRemoved(idx); Map<GridCacheQueueItemKey, T> putMap = new HashMap<>(); for (T item : items) { putMap.put(itemKey(idx), item); idx++; } cache.putAll(putMap); retVal = true; } else retVal = false; tx.commit(); break; } catch (ClusterTopologyCheckedException e) { if (e instanceof ClusterGroupEmptyCheckedException) throw e; if (cnt++ == MAX_UPDATE_RETRIES) throw e; else { U.warn(log, "Failed to add item, will retry [err=" + e + ']'); U.sleep(RETRY_DELAY); } } } return retVal; } catch (IgniteCheckedException e) { throw U.convertException(e); } }
/** Dumps pending transactions, explicit locks and cache futures that may be blocking the partition release future. */
private void dumpPendingObjects() {
    U.warn(log, "Failed to wait for partition release future. Dumping pending objects that might be the cause: " +
        cctx.localNodeId());

    U.warn(log, "Pending transactions:");

    for (IgniteInternalTx tx : cctx.tm().activeTransactions())
        U.warn(log, ">>> " + tx);

    U.warn(log, "Pending explicit locks:");

    for (GridCacheExplicitLockSpan lockSpan : cctx.mvcc().activeExplicitLocks())
        U.warn(log, ">>> " + lockSpan);

    U.warn(log, "Pending cache futures:");

    for (GridCacheFuture<?> fut : cctx.mvcc().activeFutures())
        U.warn(log, ">>> " + fut);

    U.warn(log, "Pending atomic cache futures:");

    for (GridCacheFuture<?> fut : cctx.mvcc().atomicFutures())
        U.warn(log, ">>> " + fut);
}
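// A minimal sketch (hypothetical helper, not in the original class) of the repeated
// "title plus one warning per object" pattern used by dumpPendingObjects() above.
private void dumpCollection(String title, Iterable<?> objs) {
    U.warn(log, title);

    for (Object obj : objs)
        U.warn(log, ">>> " + obj);
}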
/** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public boolean offer(final T item) throws IgniteException { A.notNull(item, "item"); try { boolean retVal; int cnt = 0; while (true) { try { try (IgniteInternalTx tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) { Long idx = (Long) cache.invoke(queueKey, new AddProcessor(id, 1)).get(); if (idx != null) { checkRemoved(idx); cache.getAndPut(itemKey(idx), item); retVal = true; } else retVal = false; tx.commit(); break; } } catch (ClusterTopologyCheckedException e) { if (e instanceof ClusterGroupEmptyCheckedException) throw e; if (cnt++ == MAX_UPDATE_RETRIES) throw e; else { U.warn(log, "Failed to add item, will retry [err=" + e + ']'); U.sleep(RETRY_DELAY); } } } return retVal; } catch (IgniteCheckedException e) { throw U.convertException(e); } }
/** {@inheritDoc} */ @SuppressWarnings("deprecation") @Override public void start() throws IgniteException { if (sesFactory == null && F.isEmpty(hibernateCfgPath)) throw new IgniteException( "Either session factory or Hibernate configuration file is required by " + getClass().getSimpleName() + '.'); if (!F.isEmpty(hibernateCfgPath)) { if (sesFactory == null) { try { URL url = new URL(hibernateCfgPath); sesFactory = new Configuration().configure(url).buildSessionFactory(); } catch (MalformedURLException ignored) { // No-op. } if (sesFactory == null) { File cfgFile = new File(hibernateCfgPath); if (cfgFile.exists()) sesFactory = new Configuration().configure(cfgFile).buildSessionFactory(); } if (sesFactory == null) sesFactory = new Configuration().configure(hibernateCfgPath).buildSessionFactory(); if (sesFactory == null) throw new IgniteException( "Failed to resolve Hibernate configuration file: " + hibernateCfgPath); closeSesOnStop = true; } else U.warn( log, "Hibernate configuration file configured in " + getClass().getSimpleName() + " will be ignored (session factory is already set)."); } }
/** {@inheritDoc} */ @SuppressWarnings("unchecked") @Nullable @Override public T poll() throws IgniteException { try { int cnt = 0; T retVal; while (true) { try (IgniteInternalTx tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) { Long idx = (Long) cache.invoke(queueKey, new PollProcessor(id)).get(); if (idx != null) { checkRemoved(idx); retVal = (T) cache.getAndRemove(itemKey(idx)); assert retVal != null : idx; } else retVal = null; tx.commit(); break; } catch (ClusterTopologyCheckedException e) { if (e instanceof ClusterGroupEmptyCheckedException) throw e; if (cnt++ == MAX_UPDATE_RETRIES) throw e; else { U.warn(log, "Failed to add item, will retry [err=" + e + ']'); U.sleep(RETRY_DELAY); } } } return retVal; } catch (IgniteCheckedException e) { throw U.convertException(e); } }
/**
 * Sends delete message to all meta cache nodes in the grid.
 *
 * @param msg Message to send.
 */
private void sendDeleteMessage(IgfsDeleteMessage msg) {
    assert msg != null;

    Collection<ClusterNode> nodes = meta.metaCacheNodes();

    for (ClusterNode node : nodes) {
        try {
            igfsCtx.send(node, topic, msg, GridIoPolicy.SYSTEM_POOL);
        }
        catch (IgniteCheckedException e) {
            U.warn(log, "Failed to send IGFS delete message to node [nodeId=" + node.id() +
                ", msg=" + msg + ", err=" + e.getMessage() + ']');
        }
    }
}
/** {@inheritDoc} */
@Override public void close() {
    closed = true;

    U.closeQuiet(srvSock);

    if (gcWorker != null) {
        U.cancel(gcWorker);

        // This method may be called from already interrupted thread.
        // Need to ensure cleaning on close.
        boolean interrupted = Thread.interrupted();

        try {
            U.join(gcWorker);
        }
        catch (IgniteInterruptedCheckedException e) {
            U.warn(log, "Interrupted when stopping GC worker.", e);
        }
        finally {
            if (interrupted)
                Thread.currentThread().interrupt();
        }
    }
}
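// A minimal sketch (pure JDK, not part of the original class) of the interrupt-status handling used
// in close() above: remember and clear the current thread's interrupt flag so a blocking join can
// proceed even when called from an already interrupted thread, then restore the flag for the caller.
// The helper name is hypothetical.
private static void joinPreservingInterrupt(Thread worker) {
    boolean interrupted = Thread.interrupted();

    try {
        worker.join();
    }
    catch (InterruptedException ignored) {
        // Treat an interrupt during join as a request to stop waiting.
        interrupted = true;
    }
    finally {
        // Restore the caller's interrupt status.
        if (interrupted)
            Thread.currentThread().interrupt();
    }
}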
/** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override protected void removeItem(final long rmvIdx) throws IgniteCheckedException { try { int cnt = 0; while (true) { try (IgniteInternalTx tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) { Long idx = (Long) cache.invoke(queueKey, new RemoveProcessor(id, rmvIdx)).get(); if (idx != null) { checkRemoved(idx); boolean rmv = cache.remove(itemKey(idx)); assert rmv : idx; } tx.commit(); break; } catch (ClusterTopologyCheckedException e) { if (e instanceof ClusterGroupEmptyCheckedException) throw e; if (cnt++ == MAX_UPDATE_RETRIES) throw e; else { U.warn(log, "Failed to add item, will retry [err=" + e + ']'); U.sleep(RETRY_DELAY); } } } } catch (IgniteCheckedException e) { throw U.convertException(e); } }
/** {@inheritDoc} */ @SuppressWarnings({"CatchGenericClass", "ThrowableInstanceNeverThrown"}) @Override public boolean finish(boolean commit) throws IgniteCheckedException { if (log.isDebugEnabled()) log.debug("Finishing near local tx [tx=" + this + ", commit=" + commit + "]"); if (commit) { if (!state(COMMITTING)) { TransactionState state = state(); if (state != COMMITTING && state != COMMITTED) throw new IgniteCheckedException( "Invalid transaction state for commit [state=" + state() + ", tx=" + this + ']'); else { if (log.isDebugEnabled()) log.debug( "Invalid transaction state for commit (another thread is committing): " + this); return false; } } } else { if (!state(ROLLING_BACK)) { if (log.isDebugEnabled()) log.debug( "Invalid transaction state for rollback [state=" + state() + ", tx=" + this + ']'); return false; } } IgniteCheckedException err = null; // Commit to DB first. This way if there is a failure, transaction // won't be committed. try { if (commit && !isRollbackOnly()) userCommit(); else userRollback(); } catch (IgniteCheckedException e) { err = e; commit = false; // If heuristic error. if (!isRollbackOnly()) { invalidate = true; systemInvalidate(true); U.warn( log, "Set transaction invalidation flag to true due to error [tx=" + this + ", err=" + err + ']'); } } if (err != null) { state(UNKNOWN); throw err; } else { // Committed state will be set in finish future onDone callback. if (commit) { if (!onePhaseCommit()) { if (!state(COMMITTED)) { state(UNKNOWN); throw new IgniteCheckedException("Invalid transaction state for commit: " + this); } } } else { if (!state(ROLLED_BACK)) { state(UNKNOWN); throw new IgniteCheckedException("Invalid transaction state for rollback: " + this); } } } return true; }
/**
 * Starts activity.
 *
 * @throws IgniteInterruptedCheckedException If interrupted.
 */
public void init() throws IgniteInterruptedCheckedException {
    if (isDone())
        return;

    if (init.compareAndSet(false, true)) {
        if (isDone())
            return;

        try {
            // Wait for event to occur to make sure that discovery
            // will return corresponding nodes.
            U.await(evtLatch);

            assert discoEvt != null : this;
            assert !dummy && !forcePreload : this;

            ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

            oldestNode.set(oldest);

            startCaches();

            // True if client node joined or failed.
            boolean clientNodeEvt;

            if (F.isEmpty(reqs)) {
                int type = discoEvt.type();

                assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt;

                clientNodeEvt = CU.clientNode(discoEvt.eventNode());
            }
            else {
                assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

                boolean clientOnlyStart = true;

                for (DynamicCacheChangeRequest req : reqs) {
                    if (!req.clientStartOnly()) {
                        clientOnlyStart = false;

                        break;
                    }
                }

                clientNodeEvt = clientOnlyStart;
            }

            if (clientNodeEvt) {
                ClusterNode node = discoEvt.eventNode();

                // Client needs to initialize affinity for local join event or for started client caches.
                if (!node.isLocal()) {
                    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                        if (cacheCtx.isLocal())
                            continue;

                        GridDhtPartitionTopology top = cacheCtx.topology();

                        top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

                        if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                            initTopology(cacheCtx);

                            top.beforeExchange(this);
                        }
                        else
                            cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
                    }

                    if (exchId.isLeft())
                        cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

                    onDone(exchId.topologyVersion());

                    skipPreload = cctx.kernalContext().clientNode();

                    return;
                }
            }

            if (cctx.kernalContext().clientNode()) {
                skipPreload = true;

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    GridDhtPartitionTopology top = cacheCtx.topology();

                    top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
                }

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    initTopology(cacheCtx);
                }

                if (oldestNode.get() != null) {
                    rmtNodes = new ConcurrentLinkedQueue<>(
                        CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

                    rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

                    ready.set(true);

                    initFut.onDone(true);

                    if (log.isDebugEnabled())
                        log.debug("Initialized future: " + this);

                    sendPartitions();
                }
                else
                    onDone(exchId.topologyVersion());

                return;
            }

            assert oldestNode.get() != null;

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
                    if (cacheCtx.discovery().cacheAffinityNodes(cacheCtx.name(), topologyVersion()).isEmpty())
                        U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
                }

                cacheCtx.preloader().onExchangeFutureAdded();
            }

            List<String> cachesWithoutNodes = null;

            if (exchId.isLeft()) {
                for (String name : cctx.cache().cacheNames()) {
                    if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
                        if (cachesWithoutNodes == null)
                            cachesWithoutNodes = new ArrayList<>();

                        cachesWithoutNodes.add(name);

                        // Fire event even if there is no client cache started.
                        if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                            Event evt = new CacheEvent(
                                name,
                                cctx.localNode(),
                                cctx.localNode(),
                                "All server nodes have left the cluster.",
                                EventType.EVT_CACHE_NODES_LEFT,
                                0,
                                false,
                                null,
                                null,
                                null,
                                null,
                                false,
                                null,
                                false,
                                null,
                                null,
                                null);

                            cctx.gridEvents().record(evt);
                        }
                    }
                }
            }

            if (cachesWithoutNodes != null) {
                StringBuilder sb =
                    new StringBuilder("All server nodes for the following caches have left the cluster: ");

                for (int i = 0; i < cachesWithoutNodes.size(); i++) {
                    String cache = cachesWithoutNodes.get(i);

                    sb.append('\'').append(cache).append('\'');

                    if (i != cachesWithoutNodes.size() - 1)
                        sb.append(", ");
                }

                U.quietAndWarn(log, sb.toString());
                U.quietAndWarn(log, "Must have server nodes for caches to operate.");
            }

            assert discoEvt != null;
            assert exchId.nodeId().equals(discoEvt.eventNode().id());

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId());

                long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

                // Update before waiting for locks.
                if (!cacheCtx.isLocal())
                    cacheCtx.topology().updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
            }

            // Grab all alive remote nodes with order equal to or less than the last joined node.
            rmtNodes = new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            // If received any messages, process them.
            for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
                onReceive(m.getKey(), m.getValue());

            // If received any messages, process them.
            for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
                onReceive(m.getKey(), m.getValue());

            AffinityTopologyVersion topVer = exchId.topologyVersion();

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Must initialize topology after we get discovery event.
                initTopology(cacheCtx);

                cacheCtx.preloader().updateLastExchangeFuture(this);
            }

            IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

            // Assign to class variable so it will be included into toString() method.
            this.partReleaseFut = partReleaseFut;

            if (log.isDebugEnabled())
                log.debug("Before waiting for partition release future: " + this);

            while (true) {
                try {
                    partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    // Print pending transactions and locks that might have led to hang.
                    dumpPendingObjects();
                }
            }

            if (log.isDebugEnabled())
                log.debug("After waiting for partition release future: " + this);

            if (!F.isEmpty(reqs))
                blockGateways();

            if (exchId.isLeft())
                cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

            while (true) {
                try {
                    locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    U.warn(log, "Failed to wait for locks release future. " +
                        "Dumping pending objects that might be the cause: " + cctx.localNodeId());

                    U.warn(log, "Locked entries:");

                    Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                        cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

                    for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
                        U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
                }
            }

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Notify replication manager.
                GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

                if (drCacheCtx.isDrEnabled())
                    drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

                // Partition release future is done so we can flush the write-behind store.
                cacheCtx.store().forceFlush();

                // Process queued undeploys prior to sending/spreading map.
                cacheCtx.preloader().unwindUndeploys();

                GridDhtPartitionTopology top = cacheCtx.topology();

                assert topVer.equals(top.topologyVersion()) :
                    "Topology version is updated only in this class instances inside single ExchangeWorker thread.";

                top.beforeExchange(this);
            }

            for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
                top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

                top.beforeExchange(this);
            }
        }
        catch (IgniteInterruptedCheckedException e) {
            onDone(e);

            throw e;
        }
        catch (Throwable e) {
            U.error(log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e);

            onDone(e);

            if (e instanceof Error)
                throw (Error)e;

            return;
        }

        if (F.isEmpty(rmtIds)) {
            onDone(exchId.topologyVersion());

            return;
        }

        ready.set(true);

        initFut.onDone(true);

        if (log.isDebugEnabled())
            log.debug("Initialized future: " + this);

        // If this node is not the oldest.
        if (!oldestNode.get().id().equals(cctx.localNodeId()))
            sendPartitions();
        else {
            boolean allReceived = allReceived();

            if (allReceived && replied.compareAndSet(false, true)) {
                if (spreadPartitions())
                    onDone(exchId.topologyVersion());
            }
        }

        scheduleRecheck();
    }
    else
        assert false : "Skipped init future: " + this;
}
/**
 * @param cctx Cache context.
 * @param qry Query.
 * @param keepPortable Keep portable.
 * @return Iterator over the result rows.
 */
public Iterator<List<?>> query(GridCacheContext<?, ?> cctx, GridCacheTwoStepQuery qry, boolean keepPortable) {
    for (int attempt = 0; ; attempt++) {
        if (attempt != 0) {
            try {
                Thread.sleep(attempt * 10); // Wait for exchange.
            }
            catch (InterruptedException e) {
                Thread.currentThread().interrupt();

                throw new CacheException("Query was interrupted.", e);
            }
        }

        long qryReqId = reqIdGen.incrementAndGet();

        QueryRun r = new QueryRun();

        r.pageSize = qry.pageSize() <= 0 ? GridCacheTwoStepQuery.DFLT_PAGE_SIZE : qry.pageSize();

        r.idxs = new ArrayList<>(qry.mapQueries().size());

        String space = cctx.name();

        r.conn = (JdbcConnection)h2.connectionForSpace(space);

        AffinityTopologyVersion topVer = h2.readyTopologyVersion();

        List<String> extraSpaces = extraSpaces(space, qry.spaces());

        Collection<ClusterNode> nodes;

        // Explicit partition mapping for unstable topology.
        Map<ClusterNode, IntArray> partsMap = null;

        if (isPreloadingActive(cctx, extraSpaces)) {
            if (cctx.isReplicated())
                nodes = replicatedUnstableDataNodes(cctx, extraSpaces);
            else {
                partsMap = partitionedUnstableDataNodes(cctx, extraSpaces);

                nodes = partsMap == null ? null : partsMap.keySet();
            }
        }
        else
            nodes = stableDataNodes(topVer, cctx, extraSpaces);

        if (nodes == null)
            continue; // Retry.

        assert !nodes.isEmpty();

        if (cctx.isReplicated() || qry.explain()) {
            assert qry.explain() || !nodes.contains(ctx.discovery().localNode()) : "We must be on a client node.";

            // Select a random data node to run the query on replicated data or to get EXPLAIN PLAN
            // from a single node.
            nodes = Collections.singleton(F.rand(nodes));
        }

        int tblIdx = 0;

        final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();

        for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
            GridMergeIndex idx;

            if (!skipMergeTbl) {
                GridMergeTable tbl;

                try {
                    tbl = createMergeTable(r.conn, mapQry, qry.explain());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }

                idx = tbl.getScanIndex(null);

                fakeTable(r.conn, tblIdx++).setInnerTable(tbl);
            }
            else
                idx = GridMergeIndexUnsorted.createDummy();

            for (ClusterNode node : nodes)
                idx.addSource(node.id());

            r.idxs.add(idx);
        }

        r.latch = new CountDownLatch(r.idxs.size() * nodes.size());

        runs.put(qryReqId, r);

        try {
            if (ctx.clientDisconnected()) {
                throw new CacheException("Query was cancelled, client node disconnected.",
                    new IgniteClientDisconnectedException(ctx.cluster().clientReconnectFuture(),
                        "Client node disconnected."));
            }

            Collection<GridCacheSqlQuery> mapQrys = qry.mapQueries();

            if (qry.explain()) {
                mapQrys = new ArrayList<>(qry.mapQueries().size());

                for (GridCacheSqlQuery mapQry : qry.mapQueries())
                    mapQrys.add(new GridCacheSqlQuery("EXPLAIN " + mapQry.query(), mapQry.parameters()));
            }

            if (nodes.size() != 1 || !F.first(nodes).isLocal()) {
                // Marshall params for remotes.
                Marshaller m = ctx.config().getMarshaller();

                for (GridCacheSqlQuery mapQry : mapQrys)
                    mapQry.marshallParams(m);
            }

            boolean retry = false;

            if (send(nodes,
                new GridQueryRequest(qryReqId, r.pageSize, space, mapQrys, topVer, extraSpaces, null),
                partsMap)) {
                awaitAllReplies(r, nodes);

                Object state = r.state.get();

                if (state != null) {
                    if (state instanceof CacheException) {
                        CacheException err = (CacheException)state;

                        if (err.getCause() instanceof IgniteClientDisconnectedException)
                            throw err;

                        throw new CacheException("Failed to run map query remotely.", err);
                    }

                    if (state instanceof AffinityTopologyVersion) {
                        retry = true;

                        // If a remote node asks us to retry then we have an outdated full partition map.
                        h2.awaitForReadyTopologyVersion((AffinityTopologyVersion)state);
                    }
                }
            }
            else // Send failed.
                retry = true;

            Iterator<List<?>> resIter = null;

            if (!retry) {
                if (qry.explain())
                    return explainPlan(r.conn, space, qry);

                if (skipMergeTbl) {
                    List<List<?>> res = new ArrayList<>();

                    assert r.idxs.size() == 1 : r.idxs;

                    GridMergeIndex idx = r.idxs.get(0);

                    Cursor cur = idx.findInStream(null, null);

                    while (cur.next()) {
                        Row row = cur.get();

                        int cols = row.getColumnCount();

                        List<Object> resRow = new ArrayList<>(cols);

                        for (int c = 0; c < cols; c++)
                            resRow.add(row.getValue(c).getObject());

                        res.add(resRow);
                    }

                    resIter = res.iterator();
                }
                else {
                    GridCacheSqlQuery rdc = qry.reduceQuery();

                    // Statement caching is prohibited here because we can't guarantee correct merge index reuse.
                    ResultSet res = h2.executeSqlQueryWithTimer(space,
                        r.conn,
                        rdc.query(),
                        F.asList(rdc.parameters()),
                        false);

                    resIter = new Iter(res);
                }
            }

            for (GridMergeIndex idx : r.idxs) {
                if (!idx.fetchedAll()) // We have to explicitly cancel queries on remote nodes.
                    send(nodes, new GridQueryCancelRequest(qryReqId), null);
            }

            if (retry) {
                if (Thread.currentThread().isInterrupted())
                    throw new IgniteInterruptedCheckedException("Query was interrupted.");

                continue;
            }

            return new GridQueryCacheObjectsIterator(resIter, cctx, keepPortable);
        }
        catch (IgniteCheckedException | RuntimeException e) {
            U.closeQuiet(r.conn);

            if (e instanceof CacheException)
                throw (CacheException)e;

            Throwable cause = e;

            if (e instanceof IgniteCheckedException) {
                Throwable disconnectedErr =
                    ((IgniteCheckedException)e).getCause(IgniteClientDisconnectedException.class);

                if (disconnectedErr != null)
                    cause = disconnectedErr;
            }

            throw new CacheException("Failed to run reduce query locally.", cause);
        }
        finally {
            if (!runs.remove(qryReqId, r))
                U.warn(log, "Query run was already removed: " + qryReqId);

            if (!skipMergeTbl) {
                for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++)
                    fakeTable(null, i).setInnerTable(null); // Drop all merge tables.
            }
        }
    }
}
/** {@inheritDoc} */
@Override public boolean onDone(@Nullable Void res, @Nullable Throwable err) {
    if (super.onDone(res, err)) {
        cctx.mvcc().removeAtomicFuture(version());

        if (err != null) {
            if (!mappings.isEmpty()) {
                Collection<KeyCacheObject> hndKeys = new ArrayList<>(keys.size());

                exit:
                for (GridDhtAtomicUpdateRequest req : mappings.values()) {
                    for (int i = 0; i < req.size(); i++) {
                        KeyCacheObject key = req.key(i);

                        if (!hndKeys.contains(key)) {
                            updateRes.addFailedKey(key, err);

                            cctx.continuousQueries().skipUpdateEvent(key,
                                req.partitionId(i),
                                req.updateCounter(i),
                                updateReq.topologyVersion());

                            hndKeys.add(key);

                            if (hndKeys.size() == keys.size())
                                break exit;
                        }
                    }
                }
            }
            else
                for (KeyCacheObject key : keys)
                    updateRes.addFailedKey(key, err);
        }
        else {
            Collection<KeyCacheObject> hndKeys = new ArrayList<>(keys.size());

            exit:
            for (GridDhtAtomicUpdateRequest req : mappings.values()) {
                for (int i = 0; i < req.size(); i++) {
                    KeyCacheObject key = req.key(i);

                    if (!hndKeys.contains(key)) {
                        try {
                            cctx.continuousQueries().onEntryUpdated(key,
                                req.value(i),
                                req.localPreviousValue(i),
                                key.internal() || !cctx.userCache(),
                                req.partitionId(i),
                                true,
                                false,
                                req.updateCounter(i),
                                updateReq.topologyVersion());
                        }
                        catch (IgniteCheckedException e) {
                            U.warn(log, "Failed to send continuous query message. [key=" + key +
                                ", newVal=" + req.value(i) + ", err=" + e + "]");
                        }

                        hndKeys.add(key);

                        if (hndKeys.size() == keys.size())
                            break exit;
                    }
                }
            }
        }

        if (updateReq.writeSynchronizationMode() == FULL_SYNC)
            completionCb.apply(updateReq, updateRes);

        return true;
    }

    return false;
}
/**
 * @param entry Entry to map.
 * @param val Value to write.
 * @param entryProcessor Entry processor.
 * @param ttl TTL (optional).
 * @param conflictExpireTime Conflict expire time (optional).
 * @param conflictVer Conflict version (optional).
 * @param addPrevVal Flag indicating whether to send the previous value to backups.
 * @param prevVal Previous value.
 * @param updateCntr Partition update counter.
 */
public void addWriteEntry(GridDhtCacheEntry entry,
    @Nullable CacheObject val,
    EntryProcessor<Object, Object, Object> entryProcessor,
    long ttl,
    long conflictExpireTime,
    @Nullable GridCacheVersion conflictVer,
    boolean addPrevVal,
    @Nullable CacheObject prevVal,
    long updateCntr) {
    AffinityTopologyVersion topVer = updateReq.topologyVersion();

    Collection<ClusterNode> dhtNodes = cctx.dht().topology().nodes(entry.partition(), topVer);

    if (log.isDebugEnabled())
        log.debug("Mapping entry to DHT nodes [nodes=" + U.nodeIds(dhtNodes) + ", entry=" + entry + ']');

    CacheWriteSynchronizationMode syncMode = updateReq.writeSynchronizationMode();

    keys.add(entry.key());

    for (ClusterNode node : dhtNodes) {
        UUID nodeId = node.id();

        if (!nodeId.equals(cctx.localNodeId())) {
            GridDhtAtomicUpdateRequest updateReq = mappings.get(nodeId);

            if (updateReq == null) {
                updateReq = new GridDhtAtomicUpdateRequest(
                    cctx.cacheId(),
                    nodeId,
                    futVer,
                    writeVer,
                    syncMode,
                    topVer,
                    forceTransformBackups,
                    this.updateReq.subjectId(),
                    this.updateReq.taskNameHash(),
                    forceTransformBackups ? this.updateReq.invokeArguments() : null,
                    cctx.deploymentEnabled(),
                    this.updateReq.keepBinary());

                mappings.put(nodeId, updateReq);
            }

            updateReq.addWriteValue(entry.key(),
                val,
                entryProcessor,
                ttl,
                conflictExpireTime,
                conflictVer,
                addPrevVal,
                entry.partition(),
                prevVal,
                updateCntr);
        }
        else if (dhtNodes.size() == 1) {
            try {
                cctx.continuousQueries().onEntryUpdated(entry.key(),
                    val,
                    prevVal,
                    entry.key().internal() || !cctx.userCache(),
                    entry.partition(),
                    true,
                    false,
                    updateCntr,
                    updateReq.topologyVersion());
            }
            catch (IgniteCheckedException e) {
                U.warn(log, "Failed to send continuous query message. [key=" + entry.key() +
                    ", newVal=" + val + ", err=" + e + "]");
            }
        }
    }
}