/** * Unmarshalls transfer object from remote node within a given context. * * @param ctx Grid kernal context that provides deployment and marshalling services. * @param sndNodeId {@link UUID} of the sender node. * @param msg Transfer object that contains original serialized object and deployment information. * @return Unmarshalled object. * @throws IgniteCheckedException If node cannot obtain deployment. */ static Object unmarshall(GridKernalContext ctx, UUID sndNodeId, GridAffinityMessage msg) throws IgniteCheckedException { GridDeployment dep = ctx.deploy() .getGlobalDeployment( msg.deploymentMode(), msg.sourceClassName(), msg.sourceClassName(), msg.userVersion(), sndNodeId, msg.classLoaderId(), msg.loaderParticipants(), null); if (dep == null) throw new IgniteDeploymentCheckedException( "Failed to obtain affinity object (is peer class loading turned on?): " + msg); Object src = U.unmarshal(ctx, msg.source(), U.resolveClassLoader(dep.classLoader(), ctx.config())); // Resource injection. ctx.resource().inject(dep, dep.deployedClass(msg.sourceClassName()), src); return src; }
/** {@inheritDoc} */
@Override public void p2pUnmarshal(UUID nodeId, GridKernalContext ctx) throws IgniteCheckedException {
    assert nodeId != null;
    assert ctx != null;
    assert ctx.config().isPeerClassLoadingEnabled();

    // Nothing was peer-deployed for this filter.
    if (rmtFilterDep == null)
        return;

    // Restore the remote filter from its deployable wrapper.
    rmtFilter = rmtFilterDep.unmarshal(nodeId, ctx);
}
/** {@inheritDoc} */
@Override public void p2pMarshal(GridKernalContext ctx) throws IgniteCheckedException {
    assert ctx != null;
    assert ctx.config().isPeerClassLoadingEnabled();

    // Internal (grid) classes are available on all nodes; only user-defined
    // filters need to be wrapped for peer deployment.
    if (rmtFilter == null || U.isGrid(rmtFilter.getClass()))
        return;

    rmtFilterDep = new DeployableObject(rmtFilter, ctx);
}
/**
 * Creates a deployable wrapper: deploys the object's class and stores its serialized
 * form together with the deployment metadata needed to restore it on a remote node.
 *
 * @param obj Object to wrap. Must not be {@code null}.
 * @param ctx Kernal context.
 * @throws IgniteCheckedException If the class cannot be deployed or the object
 *      cannot be marshalled.
 */
private DeployableObject(Object obj, GridKernalContext ctx) throws IgniteCheckedException {
    assert obj != null;
    assert ctx != null;

    // Fix: use a wildcard type instead of a raw Class (avoids unchecked usage);
    // behavior is unchanged.
    Class<?> cls = U.detectClass(obj);

    clsName = cls.getName();

    GridDeployment dep = ctx.deploy().deploy(cls, U.detectClassLoader(cls));

    if (dep == null)
        throw new IgniteDeploymentCheckedException("Failed to deploy object: " + obj);

    depInfo = new GridDeploymentInfoBean(dep);

    bytes = ctx.config().getMarshaller().marshal(obj);
}
/**
 * Deserializes the wrapped object, resolving its class via peer class loading.
 *
 * @param nodeId ID of the node the object originated from.
 * @param ctx Kernal context.
 * @return Deserialized object.
 * @throws IgniteCheckedException If the deployment cannot be obtained or
 *      unmarshalling fails.
 */
<T> T unmarshal(UUID nodeId, GridKernalContext ctx) throws IgniteCheckedException {
    assert ctx != null;

    // Look up the global deployment recorded when this object was marshalled.
    GridDeployment deployment = ctx.deploy().getGlobalDeployment(
        depInfo.deployMode(),
        clsName,
        clsName,
        depInfo.userVersion(),
        nodeId,
        depInfo.classLoaderId(),
        depInfo.participants(),
        null);

    if (deployment == null)
        throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName);

    ClassLoader ldr = deployment.classLoader();

    return ctx.config().getMarshaller().unmarshal(bytes, ldr);
}
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override public void notifyCallback(UUID nodeId, UUID routineId, Collection<?> objs, GridKernalContext ctx) {
    assert nodeId != null;
    assert routineId != null;
    assert objs != null;
    assert ctx != null;

    Collection<CacheContinuousQueryEntry> entries = (Collection<CacheContinuousQueryEntry>)objs;

    final GridCacheContext cctx = cacheContext(ctx);

    // First pass: define the P2P class loading context (if enabled) and unmarshal each entry.
    for (CacheContinuousQueryEntry entry : entries) {
        GridCacheDeploymentManager deployMgr = cctx.deploy();

        ClassLoader clsLdr = deployMgr.globalLoader();

        if (ctx.config().isPeerClassLoadingEnabled()) {
            GridDeploymentInfo depInfo = entry.deployInfo();

            if (depInfo != null) {
                deployMgr.p2pContext(nodeId, depInfo.classLoaderId(), depInfo.userVersion(), depInfo.deployMode(),
                    depInfo.participants(), depInfo.localDeploymentOwner());
            }
        }

        try {
            entry.unmarshal(cctx, clsLdr);
        }
        catch (IgniteCheckedException ex) {
            U.error(ctx.log(getClass()), "Failed to unmarshal entry.", ex);
        }
    }

    final IgniteCache cache = cctx.kernalContext().cache().jcache(cctx.name());

    // Second pass: expand each raw entry into the entries it actually produces.
    Collection<CacheContinuousQueryEntry> handled = new ArrayList<>();

    for (CacheContinuousQueryEntry entry : entries)
        handled.addAll(handleEvent(ctx, entry));

    // Lazily wrap non-filtered entries as cache entry events.
    Iterable<CacheEntryEvent<? extends K, ? extends V>> evts = F.viewReadOnly(handled,
        new C1<CacheContinuousQueryEntry, CacheEntryEvent<? extends K, ? extends V>>() {
            @Override public CacheEntryEvent<? extends K, ? extends V> apply(CacheContinuousQueryEntry e) {
                return new CacheContinuousQueryEvent<>(cache, cctx, e);
            }
        },
        new IgnitePredicate<CacheContinuousQueryEntry>() {
            @Override public boolean apply(CacheContinuousQueryEntry entry) {
                return !entry.isFiltered();
            }
        });

    locLsnr.onUpdated(evts);
}
/**
 * Runs a distributed two-step (map/reduce) SQL query and returns an iterator over the
 * reduced result rows. The whole attempt is retried when the topology is unstable
 * (data nodes cannot be resolved) or when a remote node reports an outdated partition map.
 *
 * NOTE(review): relies on enclosing-class members not visible here (h2, ctx, runs,
 * reqIdGen, log); thread-safety is governed by those members.
 *
 * @param cctx Cache context.
 * @param qry Two-step query (map queries plus optional reduce query).
 * @param keepPortable Keep portable flag, passed through to the result iterator.
 * @return Cursor over result rows.
 */
public Iterator<List<?>> query(GridCacheContext<?, ?> cctx, GridCacheTwoStepQuery qry, boolean keepPortable) {
    // Each loop iteration is one complete query attempt; 'continue' means retry.
    for (int attempt = 0; ; attempt++) {
        // Linear back-off before each retry to let partition exchange complete.
        if (attempt != 0) {
            try {
                Thread.sleep(attempt * 10); // Wait for exchange.
            }
            catch (InterruptedException e) {
                // Restore interrupt status before converting to an unchecked exception.
                Thread.currentThread().interrupt();

                throw new CacheException("Query was interrupted.", e);
            }
        }

        long qryReqId = reqIdGen.incrementAndGet();

        QueryRun r = new QueryRun();

        r.pageSize = qry.pageSize() <= 0 ? GridCacheTwoStepQuery.DFLT_PAGE_SIZE : qry.pageSize();

        r.idxs = new ArrayList<>(qry.mapQueries().size());

        String space = cctx.name();

        r.conn = (JdbcConnection)h2.connectionForSpace(space);

        AffinityTopologyVersion topVer = h2.readyTopologyVersion();

        List<String> extraSpaces = extraSpaces(space, qry.spaces());

        Collection<ClusterNode> nodes;

        // Explicit partition mapping for unstable topology.
        Map<ClusterNode, IntArray> partsMap = null;

        if (isPreloadingActive(cctx, extraSpaces)) {
            if (cctx.isReplicated())
                nodes = replicatedUnstableDataNodes(cctx, extraSpaces);
            else {
                partsMap = partitionedUnstableDataNodes(cctx, extraSpaces);

                nodes = partsMap == null ? null : partsMap.keySet();
            }
        }
        else
            nodes = stableDataNodes(topVer, cctx, extraSpaces);

        // Data nodes could not be resolved for this topology — retry the attempt.
        if (nodes == null)
            continue; // Retry.

        assert !nodes.isEmpty();

        if (cctx.isReplicated() || qry.explain()) {
            assert qry.explain() || !nodes.contains(ctx.discovery().localNode()) : "We must be on a client node.";

            // Select random data node to run query on a replicated data or
            // get EXPLAIN PLAN from a single node.
            nodes = Collections.singleton(F.rand(nodes));
        }

        int tblIdx = 0;

        final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();

        // Create one merge index per map query to accumulate pages streamed back from data nodes.
        for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
            GridMergeIndex idx;

            if (!skipMergeTbl) {
                GridMergeTable tbl;

                try {
                    tbl = createMergeTable(r.conn, mapQry, qry.explain());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }

                idx = tbl.getScanIndex(null);

                fakeTable(r.conn, tblIdx++).setInnerTable(tbl);
            }
            else
                idx = GridMergeIndexUnsorted.createDummy();

            for (ClusterNode node : nodes)
                idx.addSource(node.id());

            r.idxs.add(idx);
        }

        // One reply is expected per (merge index, node) pair.
        r.latch = new CountDownLatch(r.idxs.size() * nodes.size());

        runs.put(qryReqId, r);

        try {
            if (ctx.clientDisconnected()) {
                throw new CacheException("Query was cancelled, client node disconnected.",
                    new IgniteClientDisconnectedException(ctx.cluster().clientReconnectFuture(),
                        "Client node disconnected."));
            }

            Collection<GridCacheSqlQuery> mapQrys = qry.mapQueries();

            // For EXPLAIN, rewrite each map query so that remotes return plans instead of rows.
            if (qry.explain()) {
                mapQrys = new ArrayList<>(qry.mapQueries().size());

                for (GridCacheSqlQuery mapQry : qry.mapQueries())
                    mapQrys.add(new GridCacheSqlQuery("EXPLAIN " + mapQry.query(), mapQry.parameters()));
            }

            if (nodes.size() != 1 || !F.first(nodes).isLocal()) { // Marshall params for remotes.
                Marshaller m = ctx.config().getMarshaller();

                for (GridCacheSqlQuery mapQry : mapQrys)
                    mapQry.marshallParams(m);
            }

            boolean retry = false;

            if (send(nodes,
                new GridQueryRequest(qryReqId, r.pageSize, space, mapQrys, topVer, extraSpaces, null),
                partsMap)) {
                awaitAllReplies(r, nodes);

                // 'state' is set by reply handlers: a CacheException on remote failure,
                // or an AffinityTopologyVersion when a remote asks us to retry.
                Object state = r.state.get();

                if (state != null) {
                    if (state instanceof CacheException) {
                        CacheException err = (CacheException)state;

                        if (err.getCause() instanceof IgniteClientDisconnectedException)
                            throw err;

                        throw new CacheException("Failed to run map query remotely.", err);
                    }

                    if (state instanceof AffinityTopologyVersion) {
                        retry = true;

                        // If remote node asks us to retry then we have outdated full partition map.
                        h2.awaitForReadyTopologyVersion((AffinityTopologyVersion)state);
                    }
                }
            }
            else // Send failed.
                retry = true;

            Iterator<List<?>> resIter = null;

            if (!retry) {
                if (qry.explain())
                    return explainPlan(r.conn, space, qry);

                if (skipMergeTbl) {
                    // No reduce step: read rows straight from the single merge index.
                    List<List<?>> res = new ArrayList<>();

                    assert r.idxs.size() == 1 : r.idxs;

                    GridMergeIndex idx = r.idxs.get(0);

                    Cursor cur = idx.findInStream(null, null);

                    while (cur.next()) {
                        Row row = cur.get();

                        int cols = row.getColumnCount();

                        List<Object> resRow = new ArrayList<>(cols);

                        for (int c = 0; c < cols; c++)
                            resRow.add(row.getValue(c).getObject());

                        res.add(resRow);
                    }

                    resIter = res.iterator();
                }
                else {
                    GridCacheSqlQuery rdc = qry.reduceQuery();

                    // Statement caching is prohibited here because we can't guarantee
                    // correct merge index reuse.
                    ResultSet res = h2.executeSqlQueryWithTimer(space,
                        r.conn,
                        rdc.query(),
                        F.asList(rdc.parameters()),
                        false);

                    resIter = new Iter(res);
                }
            }

            for (GridMergeIndex idx : r.idxs) {
                if (!idx.fetchedAll()) // We have to explicitly cancel queries on remote nodes.
                    send(nodes, new GridQueryCancelRequest(qryReqId), null);
            }

            if (retry) {
                // Thrown inside the 'try', so this checked exception is caught by the
                // catch clause below and wrapped into a CacheException.
                if (Thread.currentThread().isInterrupted())
                    throw new IgniteInterruptedCheckedException("Query was interrupted.");

                continue;
            }

            return new GridQueryCacheObjectsIterator(resIter, cctx, keepPortable);
        }
        catch (IgniteCheckedException | RuntimeException e) {
            U.closeQuiet(r.conn);

            if (e instanceof CacheException)
                throw (CacheException)e;

            Throwable cause = e;

            // Surface the client-disconnected cause, if present, as the reported reason.
            if (e instanceof IgniteCheckedException) {
                Throwable disconnectedErr =
                    ((IgniteCheckedException)e).getCause(IgniteClientDisconnectedException.class);

                if (disconnectedErr != null)
                    cause = disconnectedErr;
            }

            throw new CacheException("Failed to run reduce query locally.", cause);
        }
        finally {
            // Runs for every exit path, including 'continue' on retry.
            if (!runs.remove(qryReqId, r))
                U.warn(log, "Query run was already removed: " + qryReqId);

            if (!skipMergeTbl) {
                for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++)
                    fakeTable(null, i).setInnerTable(null); // Drop all merge tables.
            }
        }
    }
}