/**
 * Creates a new HTTP request handler.
 *
 * @param hnd Handler.
 * @param authChecker Authentication checking closure.
 * @param log Logger.
 */
GridJettyRestHandler(GridRestProtocolHandler hnd, GridClosure<String, Boolean> authChecker, GridLogger log) {
    assert hnd != null;
    assert log != null;

    this.hnd = hnd;
    this.log = log;
    this.authChecker = authChecker;

    // Init default page and favicon.
    try {
        initDefaultPage();

        if (log.isDebugEnabled())
            log.debug("Initialized default page.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize default page: " + e.getMessage());
    }

    try {
        initFavicon();

        if (log.isDebugEnabled())
            log.debug(favicon != null ? "Initialized favicon, size: " + favicon.length : "Favicon is null.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize favicon: " + e.getMessage());
    }
}
/** {@inheritDoc} */
@Override public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec = new ThreadPoolExecutor(
        threadsCnt,
        threadsCnt,
        0L,
        MILLISECONDS,
        new ArrayBlockingQueue<Runnable>(batchQueueSize),
        new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
        while (iter.hasNext()) {
            if (Thread.currentThread().isInterrupted()) {
                U.warn(log, "Working thread was interrupted while loading data.");

                break;
            }

            buf.add(iter.next());

            if (buf.size() == batchSize) {
                exec.submit(new Worker(c, buf, args));

                buf = new ArrayList<>(batchSize);
            }
        }

        if (!buf.isEmpty())
            exec.submit(new Worker(c, buf, args));
    }
    catch (RejectedExecutionException ignored) {
        // Must never happen given the custom blocking RejectedExecutionHandler installed above.
        assert false : "RejectedExecutionException should never be thrown here.";
    }
    finally {
        exec.shutdown();

        try {
            exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
        }
        catch (InterruptedException ignored) {
            U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

            Thread.currentThread().interrupt();
        }
    }
}
/** @return Nodes to execute on. */
private Collection<GridNode> nodes() {
    GridCacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
        case LOCAL:
            if (prj != null)
                U.warn(log, "Ignoring query projection because it's executed over LOCAL cache " +
                    "(only local node will be queried): " + this);

            return Collections.singletonList(cctx.localNode());

        case REPLICATED:
            if (prj != null)
                return nodes(cctx, prj);

            GridCacheDistributionMode mode = cctx.config().getDistributionMode();

            return mode == PARTITIONED_ONLY || mode == NEAR_PARTITIONED ?
                Collections.singletonList(cctx.localNode()) :
                Collections.singletonList(F.rand(nodes(cctx, null)));

        case PARTITIONED:
            return nodes(cctx, prj);

        default:
            throw new IllegalStateException("Unknown cache mode: " + cacheMode);
    }
}
/** Checks reversing. */
private void checkReversing() {
    if (!keepGoing)
        U.warn(log, "Suspect logic - reversing listener return status (was 'true', then 'false', " +
            "now 'true' again).");
}
/** {@inheritDoc} */ @SuppressWarnings({"unchecked"}) @Override public void listenAsync(@Nullable final GridInClosure<? super GridFuture<R>> lsnr) { if (lsnr != null) { checkValid(); boolean done; synchronized (mux) { done = this.done; if (!done) lsnrs.add(lsnr); } if (done) { try { if (syncNotify) notifyListener(lsnr); else ctx.closure() .runLocalSafe( new GPR() { @Override public void run() { notifyListener(lsnr); } }, true); } catch (IllegalStateException ignore) { U.warn( null, "Future notification will not proceed because grid is stopped: " + ctx.gridName()); } } } }
/**
 * @param nodeId Sender node ID.
 * @param msg Finish transaction response.
 */
private void processFinishResponse(UUID nodeId, GridDistributedTxFinishResponse<K, V> msg) {
    GridReplicatedTxCommitFuture<K, V> fut = (GridReplicatedTxCommitFuture<K, V>)ctx.mvcc().
        <GridCacheTx>future(msg.xid().id(), msg.futureId());

    if (fut != null)
        fut.onResult(nodeId);
    else
        U.warn(log, "Received finish response for unknown transaction: " + msg);
}
/**
 * @param nodeId Sender.
 * @param res Result.
 */
void onResult(UUID nodeId, GridNearLockResponse<K, V> res) {
    if (!isDone()) {
        if (log.isDebugEnabled())
            log.debug("Received lock response from node [nodeId=" + nodeId + ", res=" + res +
                ", fut=" + this + ']');

        for (GridFuture<Boolean> fut : pending()) {
            if (isMini(fut)) {
                MiniFuture mini = (MiniFuture)fut;

                if (mini.futureId().equals(res.miniId())) {
                    assert mini.node().id().equals(nodeId);

                    if (log.isDebugEnabled())
                        log.debug("Found mini future for response [mini=" + mini + ", res=" + res + ']');

                    mini.onResult(res);

                    if (log.isDebugEnabled())
                        log.debug("Future after processed lock response [fut=" + this + ", mini=" + mini +
                            ", res=" + res + ']');

                    return;
                }
            }
        }

        U.warn(log, "Failed to find mini future for response (perhaps due to stale message) [res=" + res +
            ", fut=" + this + ']');
    }
    else if (log.isDebugEnabled())
        log.debug("Ignoring lock response from node (future is done) [nodeId=" + nodeId +
            ", res=" + res + ", fut=" + this + ']');
}
/** {@inheritDoc} */
@Override public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
    try {
        if (executor.isShutdown())
            throw new RejectedExecutionException();
        else
            executor.getQueue().put(r);
    }
    catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while loading data.");

        Thread.currentThread().interrupt();
    }
}
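This appears to be the rejectedExecution(...) implementation behind the BlockingRejectedExecutionHandler referenced in loadCache(...) above: instead of throwing RejectedExecutionException when the bounded queue is full, it parks the submitting thread on the queue, turning the executor into a backpressured producer-consumer pipeline. A self-contained sketch of the same wiring using only java.util.concurrent (pool size and queue capacity are arbitrary):

import java.util.concurrent.*;

public class BlockingSubmitExample {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor exec = new ThreadPoolExecutor(
            2, 2, 0L, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(4),
            // Instead of rejecting, block the caller until queue space frees up.
            new RejectedExecutionHandler() {
                @Override public void rejectedExecution(Runnable r, ThreadPoolExecutor ex) {
                    try {
                        if (ex.isShutdown())
                            throw new RejectedExecutionException("Executor is shut down.");

                        ex.getQueue().put(r); // Blocks until space is available.
                    }
                    catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                }
            });

        // Producer naturally slows down to the consumers' pace.
        for (int i = 0; i < 100; i++)
            exec.submit(new Runnable() {
                @Override public void run() { /* Process one batch here. */ }
            });

        exec.shutdown();
        exec.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    }
}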
/**
 * Adds new participant to deployment.
 *
 * @param dep Shared deployment.
 * @param meta Request metadata.
 * @return {@code True} if participant was added.
 */
private boolean addParticipant(SharedDeployment dep, GridDeploymentMetadata meta) {
    assert dep != null;
    assert meta != null;

    assert Thread.holdsLock(mux);

    if (!checkModeMatch(dep, meta))
        return false;

    if (meta.participants() != null) {
        for (Map.Entry<UUID, GridTuple2<GridUuid, Long>> e : meta.participants().entrySet()) {
            dep.addParticipant(e.getKey(), e.getValue().get1(), e.getValue().get2());

            if (log.isDebugEnabled())
                log.debug("Added new participant [nodeId=" + e.getKey() + ", clsLdrId=" +
                    e.getValue().get1() + ", seqNum=" + e.getValue().get2() + ']');
        }
    }

    if (dep.deployMode() == CONTINUOUS || meta.participants() == null) {
        if (!dep.addParticipant(meta.senderNodeId(), meta.classLoaderId(), meta.sequenceNumber())) {
            U.warn(log, "Failed to create shared mode deployment " +
                "(requested class loader was already undeployed, did sender node leave grid?) " +
                "[clsLdrId=" + meta.classLoaderId() + ", senderNodeId=" + meta.senderNodeId() + ']');

            return false;
        }

        if (log.isDebugEnabled())
            log.debug("Added new participant [nodeId=" + meta.senderNodeId() + ", clsLdrId=" +
                meta.classLoaderId() + ", seqNum=" + meta.sequenceNumber() + ']');
    }

    return true;
}
/**
 * Processes lock response.
 *
 * @param nodeId Sender node ID.
 * @param res Lock response.
 */
private void processLockResponse(UUID nodeId, GridDistributedLockResponse<K, V> res) {
    GridReplicatedLockFuture<K, V> fut = futurex(res.lockId(), res.futureId());

    if (fut == null) {
        U.warn(log, "Received lock response for non-existing future (will ignore): " + res);
    }
    else {
        fut.onResult(nodeId, res);

        if (fut.isDone()) {
            ctx.mvcc().removeFuture(fut);

            if (log.isDebugEnabled())
                log.debug("Received all replies for future (future was removed): " + fut);
        }
    }
}
/**
 * Checks if deployment modes match.
 *
 * @param dep Shared deployment.
 * @param meta Request metadata.
 * @return {@code True} if shared deployment modes match.
 */
private boolean checkModeMatch(GridDeploymentInfo dep, GridDeploymentMetadata meta) {
    if (dep.deployMode() != meta.deploymentMode()) {
        U.warn(log, "Received invalid deployment mode (will not deploy, make sure that all nodes " +
            "executing the same classes in shared mode have identical GridDeploymentMode parameter) [mode=" +
            meta.deploymentMode() + ", expected=" + dep.deployMode() + ']');

        return false;
    }

    return true;
}
/** @param e Error. */
void onResult(Throwable e) {
    if (rcvRes.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
            log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']');

        // Fail.
        onDone(e);
    }
    else
        U.warn(log, "Received error after another result has been processed [fut=" +
            GridNearLockFuture.this + ", mini=" + this + ']', e);
}
/** {@inheritDoc} */
@Override public void start() throws GridException {
    if (ctx.config().isDaemon())
        return;

    startSpi();

    if (enabled()) {
        getSpi().setExternalCollisionListener(new GridCollisionExternalListener() {
            @Override public void onExternalCollision() {
                GridCollisionExternalListener lsnr = extLsnr.get();

                if (lsnr != null)
                    lsnr.onExternalCollision();
            }
        });
    }
    else
        U.warn(log, "Collision resolution is disabled (all jobs will be activated upon arrival).");

    if (log.isDebugEnabled())
        log.debug(startInfo());
}
/**
 * Notifies single listener.
 *
 * @param lsnr Listener.
 */
private void notifyListener(GridInClosure<? super GridFuture<R>> lsnr) {
    assert lsnr != null;

    try {
        lsnr.apply(this);
    }
    catch (IllegalStateException ignore) {
        U.warn(null, "Failed to notify listener (grid is stopped) [grid=" + ctx.gridName() +
            ", lsnr=" + lsnr + ']');
    }
    catch (RuntimeException e) {
        U.error(log, "Failed to notify listener: " + lsnr, e);

        throw e;
    }
    catch (Error e) {
        U.error(log, "Failed to notify listener: " + lsnr, e);

        throw e;
    }
}
/** {@inheritDoc} */ @SuppressWarnings({"CatchGenericClass", "ThrowableInstanceNeverThrown"}) @Override public void finish(boolean commit) throws GridException { if (log.isDebugEnabled()) log.debug("Finishing near local tx [tx=" + this + ", commit=" + commit + "]"); if (commit) { if (!state(COMMITTING)) { GridCacheTxState state = state(); if (state != COMMITTING && state != COMMITTED) throw new GridException( "Invalid transaction state for commit [state=" + state() + ", tx=" + this + ']'); else { if (log.isDebugEnabled()) log.debug( "Invalid transaction state for commit (another thread is committing): " + this); return; } } } else { if (!state(ROLLING_BACK)) { if (log.isDebugEnabled()) log.debug( "Invalid transaction state for rollback [state=" + state() + ", tx=" + this + ']'); return; } } GridException err = null; // Commit to DB first. This way if there is a failure, transaction // won't be committed. try { if (commit && !isRollbackOnly()) userCommit(); else userRollback(); } catch (GridException e) { err = e; commit = false; // If heuristic error. if (!isRollbackOnly()) { invalidate = true; U.warn( log, "Set transaction invalidation flag to true due to error [tx=" + this + ", err=" + err + ']'); } } if (err != null) { state(UNKNOWN); throw err; } else { if (!state(commit ? COMMITTED : ROLLED_BACK)) { state(UNKNOWN); throw new GridException("Invalid transaction state for commit or rollback: " + this); } } }
/**
 * Initializes store.
 *
 * @throws GridException If failed to initialize.
 */
private void init() throws GridException {
    if (initGuard.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
            log.debug("Initializing cache store.");

        try {
            if (sesFactory != null)
                // Session factory has been provided - nothing to do.
                return;

            if (!F.isEmpty(hibernateCfgPath)) {
                try {
                    URL url = new URL(hibernateCfgPath);

                    sesFactory = new Configuration().configure(url).buildSessionFactory();

                    if (log.isDebugEnabled())
                        log.debug("Configured session factory using URL: " + url);

                    // Session factory has been successfully initialized.
                    return;
                }
                catch (MalformedURLException e) {
                    if (log.isDebugEnabled())
                        log.debug("Caught malformed URL exception: " + e.getMessage());
                }

                // Provided path is not a valid URL. File?
                File cfgFile = new File(hibernateCfgPath);

                if (cfgFile.exists()) {
                    sesFactory = new Configuration().configure(cfgFile).buildSessionFactory();

                    if (log.isDebugEnabled())
                        log.debug("Configured session factory using file: " + hibernateCfgPath);

                    // Session factory has been successfully initialized.
                    return;
                }

                // Provided path is not a file. Classpath resource?
                sesFactory = new Configuration().configure(hibernateCfgPath).buildSessionFactory();

                if (log.isDebugEnabled())
                    log.debug("Configured session factory using classpath resource: " + hibernateCfgPath);
            }
            else {
                if (hibernateProps == null) {
                    U.warn(log, "No Hibernate configuration has been provided for store (will use default).");

                    hibernateProps = new Properties();

                    hibernateProps.setProperty("hibernate.connection.url", DFLT_CONN_URL);
                    hibernateProps.setProperty("hibernate.show_sql", DFLT_SHOW_SQL);
                    hibernateProps.setProperty("hibernate.hbm2ddl.auto", DFLT_HBM2DDL_AUTO);
                }

                Configuration cfg = new Configuration();

                cfg.setProperties(hibernateProps);

                assert resourceAvailable(MAPPING_RESOURCE);

                cfg.addResource(MAPPING_RESOURCE);

                sesFactory = cfg.buildSessionFactory();

                if (log.isDebugEnabled())
                    log.debug("Configured session factory using properties: " + hibernateProps);
            }
        }
        catch (HibernateException e) {
            throw new GridException("Failed to initialize store.", e);
        }
        finally {
            initLatch.countDown();
        }
    }
    else if (initLatch.getCount() > 0)
        U.await(initLatch);

    if (sesFactory == null)
        throw new GridException("Cache store was not properly initialized.");
}
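init() relies on an init-guard idiom: an AtomicBoolean elects exactly one initializing thread, a CountDownLatch blocks the rest, and the latch is released in finally so waiters are never stranded if initialization fails. A stripped-down sketch of the same pattern using only the JDK (the field names mirror the method above, but the resource and class are placeholders):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class InitGuardExample {
    private final AtomicBoolean initGuard = new AtomicBoolean();
    private final CountDownLatch initLatch = new CountDownLatch(1);

    private volatile Object resource;

    void init() throws InterruptedException {
        if (initGuard.compareAndSet(false, true)) {
            try {
                // Expensive one-time setup goes here.
                resource = new Object();
            }
            finally {
                // Release waiters even if setup threw.
                initLatch.countDown();
            }
        }
        else
            // Every other thread waits for the initializing thread.
            initLatch.await();

        if (resource == null)
            throw new IllegalStateException("Initialization failed in another thread.");
    }
}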
/**
 * @param nodeId Primary node ID.
 * @param req Request.
 * @return Remote transaction.
 * @throws GridException If failed.
 * @throws GridDistributedLockCancelledException If lock has been cancelled.
 */
@SuppressWarnings({"RedundantTypeArguments"})
@Nullable public GridNearTxRemote<K, V> startRemoteTxForFinish(UUID nodeId, GridDhtTxFinishRequest<K, V> req)
    throws GridException, GridDistributedLockCancelledException {
    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
        for (GridCacheTxEntry<K, V> txEntry : req.nearWrites()) {
            GridDistributedCacheEntry<K, V> entry = null;

            while (true) {
                try {
                    entry = peekExx(txEntry.key());

                    if (entry != null) {
                        entry.keyBytes(txEntry.keyBytes());

                        // Handle implicit locks for pessimistic transactions.
                        tx = ctx.tm().tx(req.version());

                        if (tx != null) {
                            if (tx.local())
                                return null;

                            if (tx.markFinalizing())
                                tx.addWrite(txEntry.key(), txEntry.keyBytes());
                            else
                                return null;
                        }
                        else {
                            tx = new GridNearTxRemote<K, V>(
                                nodeId,
                                req.nearNodeId(),
                                req.threadId(),
                                req.version(),
                                null,
                                PESSIMISTIC,
                                req.isolation(),
                                req.isInvalidate(),
                                0,
                                txEntry.key(),
                                txEntry.keyBytes(),
                                txEntry.value(),
                                txEntry.valueBytes(),
                                ctx);

                            if (tx.empty())
                                return tx;

                            tx = ctx.tm().onCreated(tx);

                            if (tx == null || !ctx.tm().onStarted(tx))
                                throw new GridCacheTxRollbackException("Failed to acquire lock " +
                                    "(transaction has been completed): " + req.version());

                            if (!tx.markFinalizing())
                                return null;
                        }

                        // Add remote candidate before reordering.
                        if (txEntry.explicitVersion() == null)
                            entry.addRemote(req.nearNodeId(), nodeId, req.threadId(), req.version(), 0,
                                tx.ec(), /*tx*/true, tx.implicitSingle());

                        // Remote candidates for ordered lock queuing.
                        entry.addRemoteCandidates(
                            Collections.<GridCacheMvccCandidate<K>>emptyList(),
                            req.version(),
                            req.committedVersions(),
                            req.rolledbackVersions());
                    }

                    // Double-check in case if sender node left the grid.
                    if (ctx.discovery().node(req.nearNodeId()) == null) {
                        if (log.isDebugEnabled())
                            log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

                        if (tx != null)
                            tx.rollback();

                        return null;
                    }

                    // Entry is legit.
                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;

                    if (log.isDebugEnabled())
                        log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

                    if (tx != null) {
                        tx.clearEntry(entry.key());

                        if (log.isDebugEnabled())
                            log.debug("Cleared removed entry from remote transaction (will retry) [entry=" +
                                entry + ", tx=" + tx + ']');
                    }
                }
            }
        }
    }
    else {
        String err = "Failed to acquire deployment class loader for message: " + req;

        U.warn(log, err);

        throw new GridException(err);
    }

    return tx;
}
/** {@inheritDoc} */
@Override public GridDeployment getDeployment(GridDeploymentMetadata meta) {
    assert meta != null;
    assert ctx.config().isPeerClassLoadingEnabled();

    // Validate metadata.
    assert meta.classLoaderId() != null;
    assert meta.senderNodeId() != null;
    assert meta.sequenceNumber() >= -1;
    assert meta.parentLoader() == null;

    if (log.isDebugEnabled())
        log.debug("Starting to peer-load class based on deployment metadata: " + meta);

    while (true) {
        List<SharedDeployment> depsToCheck = null;

        SharedDeployment dep = null;

        synchronized (mux) {
            // Check obsolete request.
            if (isDeadClassLoader(meta))
                return null;

            List<SharedDeployment> deps = cache.get(meta.userVersion());

            if (deps != null) {
                assert !deps.isEmpty();

                for (SharedDeployment d : deps) {
                    if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId()) ||
                        meta.senderNodeId().equals(ctx.localNodeId())) {
                        // Done.
                        dep = d;

                        break;
                    }
                }

                if (dep == null) {
                    GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

                    if (!redeployCheck.get1()) {
                        // Checking for redeployment encountered invalid state.
                        if (log.isDebugEnabled())
                            log.debug("Checking for redeployment encountered invalid state: " + meta);

                        return null;
                    }

                    dep = redeployCheck.get2();

                    if (dep == null) {
                        // Find existing deployments that need to be checked
                        // whether they should be reused for this request.
                        for (SharedDeployment d : deps) {
                            if (!d.isPendingUndeploy() && !d.isUndeployed()) {
                                if (depsToCheck == null)
                                    depsToCheck = new LinkedList<SharedDeployment>();

                                if (log.isDebugEnabled())
                                    log.debug("Adding deployment to check: " + d);

                                depsToCheck.add(d);
                            }
                        }

                        // If no deployment can be reused, create a new one.
                        if (depsToCheck == null) {
                            dep = createNewDeployment(meta, false);

                            deps.add(dep);
                        }
                    }
                }
            }
            else {
                GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

                if (!redeployCheck.get1()) {
                    // Checking for redeployment encountered invalid state.
                    if (log.isDebugEnabled())
                        log.debug("Checking for redeployment encountered invalid state: " + meta);

                    return null;
                }

                dep = redeployCheck.get2();

                if (dep == null)
                    // Create peer class loader.
                    dep = createNewDeployment(meta, true);
            }
        }

        if (dep != null) {
            if (log.isDebugEnabled())
                log.debug("Found SHARED or CONTINUOUS deployment after first check: " + dep);

            // Cache the deployed class.
            Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

            if (cls == null) {
                U.warn(log, "Failed to load peer class (ignore if class got undeployed during preloading) " +
                    "[alias=" + meta.alias() + ", dep=" + dep + ']');

                return null;
            }

            return dep;
        }

        assert meta.parentLoader() == null;
        assert depsToCheck != null;
        assert !depsToCheck.isEmpty();

        /*
         * Logic below must be performed outside of synchronization
         * because it involves network calls.
         */

        // Check if class can be loaded from existing nodes.
        // In most cases this loop will find something.
        for (SharedDeployment d : depsToCheck) {
            // Load class. Note, that remote node will not load this class.
            // The class will only be loaded on this node.
            Class<?> cls = d.deployedClass(meta.className(), meta.alias());

            if (cls != null) {
                synchronized (mux) {
                    if (!d.isUndeployed() && !d.isPendingUndeploy()) {
                        if (!addParticipant(d, meta))
                            return null;

                        if (log.isDebugEnabled())
                            log.debug("Acquired deployment after verifying its availability on " +
                                "existing nodes [depCls=" + cls + ", dep=" + d + ", meta=" + meta + ']');

                        return d;
                    }
                }
            }
            else if (log.isDebugEnabled())
                log.debug("Deployment cannot be reused (class does not exist on participating nodes) [dep=" +
                    d + ", meta=" + meta + ']');
        }

        // We are here either because all participant nodes failed
        // or the class indeed should have a separate deployment.
        for (SharedDeployment d : depsToCheck) {
            // Temporary class loader.
            ClassLoader temp = new GridDeploymentClassLoader(
                GridUuid.randomUuid(),
                meta.userVersion(),
                meta.deploymentMode(),
                true,
                ctx,
                ctxLdr,
                meta.classLoaderId(),
                meta.senderNodeId(),
                meta.sequenceNumber(),
                comm,
                ctx.config().getNetworkTimeout(),
                log,
                ctx.config().getPeerClassLoadingClassPathExclude(),
                0,
                false);

            String path = U.classNameToResourceName(d.sampleClassName());

            // We check if any random class from existing deployment can be
            // loaded from sender node. If it can, then we reuse existing
            // deployment.
            InputStream rsrcIn = temp.getResourceAsStream(path);

            if (rsrcIn != null) {
                // We don't need the actual stream.
                U.closeQuiet(rsrcIn);

                synchronized (mux) {
                    if (d.isUndeployed() || d.isPendingUndeploy())
                        continue;

                    // Add new node prior to loading the class, so we attempt
                    // to load the class from the latest node.
                    if (!addParticipant(d, meta)) {
                        if (log.isDebugEnabled())
                            log.debug("Failed to add participant to deployment " +
                                "[meta=" + meta + ", dep=" + d + ']');

                        return null;
                    }
                }

                Class<?> depCls = d.deployedClass(meta.className(), meta.alias());

                if (depCls == null) {
                    U.error(log, "Failed to peer load class after loading it as a resource [alias=" +
                        meta.alias() + ", dep=" + d + ']');

                    return null;
                }

                if (log.isDebugEnabled())
                    log.debug("Acquired deployment class after verifying other class " +
                        "availability on sender node [depCls=" + depCls + ", rndCls=" + d.sampleClass() +
                        ", sampleClsName=" + d.sampleClassName() + ", meta=" + meta + ']');

                return d;
            }
            else if (log.isDebugEnabled())
                log.debug("Deployment cannot be reused (random class could not be loaded from sender node) " +
                    "[dep=" + d + ", meta=" + meta + ']');
        }

        synchronized (mux) {
            if (log.isDebugEnabled())
                log.debug("None of the existing class-loaders fit (will try to create a new one): " + meta);

            // Check obsolete request.
            if (isDeadClassLoader(meta))
                return null;

            // Check that deployment picture has not changed.
            List<SharedDeployment> deps = cache.get(meta.userVersion());

            if (deps != null) {
                assert !deps.isEmpty();

                boolean retry = false;

                for (SharedDeployment d : deps) {
                    // Double check if sender was already added.
                    if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())) {
                        dep = d;

                        retry = false;

                        break;
                    }

                    // New deployment was added while outside of synchronization.
                    // Need to recheck it again.
                    if (!d.isPendingUndeploy() && !d.isUndeployed() && !depsToCheck.contains(d))
                        retry = true;
                }

                if (retry) {
                    if (log.isDebugEnabled())
                        log.debug("Retrying due to concurrency issues: " + meta);

                    // Outer while loop.
                    continue;
                }

                if (dep == null) {
                    // No new deployments were added, so we can safely add ours.
                    dep = createNewDeployment(meta, false);

                    deps.add(dep);

                    if (log.isDebugEnabled())
                        log.debug("Adding new deployment within second check [dep=" + dep +
                            ", meta=" + meta + ']');
                }
            }
            else {
                dep = createNewDeployment(meta, true);

                if (log.isDebugEnabled())
                    log.debug("Created new deployment within second check [dep=" + dep +
                        ", meta=" + meta + ']');
            }
        }

        if (dep != null) {
            // Cache the deployed class.
            Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

            if (cls == null) {
                U.warn(log, "Failed to load peer class (ignore if class got undeployed during preloading) " +
                    "[alias=" + meta.alias() + ", dep=" + dep + ']');

                return null;
            }
        }

        return dep;
    }
}
/**
 * @param entries Entries to submit.
 * @param curFut Current future.
 * @throws GridInterruptedException If interrupted.
 */
private void submit(final List<Map.Entry<K, V>> entries, final GridFutureAdapter<Object> curFut)
    throws GridInterruptedException {
    assert entries != null;
    assert !entries.isEmpty();
    assert curFut != null;

    incrementActiveTasks();

    GridFuture<Object> fut;

    if (isLocNode) {
        fut = ctx.closure().callLocalSafe(
            new GridDataLoadUpdateJob<>(ctx, log, cacheName, entries, false, updater), false);

        locFuts.add(fut);

        fut.listenAsync(new GridInClosure<GridFuture<Object>>() {
            @Override public void apply(GridFuture<Object> t) {
                try {
                    boolean rmv = locFuts.remove(t);

                    assert rmv;

                    curFut.onDone(t.get());
                }
                catch (GridException e) {
                    curFut.onDone(e);
                }
            }
        });
    }
    else {
        byte[] entriesBytes;

        try {
            entriesBytes = ctx.config().getMarshaller().marshal(entries);

            if (updaterBytes == null) {
                assert updater != null;

                updaterBytes = ctx.config().getMarshaller().marshal(updater);
            }

            if (topicBytes == null)
                topicBytes = ctx.config().getMarshaller().marshal(topic);
        }
        catch (GridException e) {
            U.error(log, "Failed to marshal (request will not be sent).", e);

            return;
        }

        GridDeployment dep = null;
        GridPeerDeployAware jobPda0 = null;

        if (ctx.deploy().enabled()) {
            try {
                jobPda0 = jobPda;

                assert jobPda0 != null;

                dep = ctx.deploy().deploy(jobPda0.deployClass(), jobPda0.classLoader());
            }
            catch (GridException e) {
                U.error(log, "Failed to deploy class (request will not be sent): " +
                    jobPda0.deployClass(), e);

                return;
            }

            if (dep == null)
                U.warn(log, "Failed to deploy class (request will be sent): " + jobPda0.deployClass());
        }

        long reqId = idGen.incrementAndGet();

        fut = curFut;

        reqs.put(reqId, (GridFutureAdapter<Object>)fut);

        GridDataLoadRequest<Object, Object> req = new GridDataLoadRequest<>(
            reqId,
            topicBytes,
            cacheName,
            updaterBytes,
            entriesBytes,
            true,
            dep != null ? dep.deployMode() : null,
            dep != null ? jobPda0.deployClass().getName() : null,
            dep != null ? dep.userVersion() : null,
            dep != null ? dep.participants() : null,
            dep != null ? dep.classLoaderId() : null,
            dep == null);

        try {
            ctx.io().send(node, TOPIC_DATALOAD, req, PUBLIC_POOL);

            if (log.isDebugEnabled())
                log.debug("Sent request to node [nodeId=" + node.id() + ", req=" + req + ']');
        }
        catch (GridException e) {
            if (ctx.discovery().alive(node) && ctx.discovery().pingNode(node.id()))
                ((GridFutureAdapter<Object>)fut).onDone(e);
            else
                ((GridFutureAdapter<Object>)fut).onDone(new GridTopologyException("Failed to send " +
                    "request (node has left): " + node.id()));
        }
    }
}
/**
 * @param nodeId Primary node ID.
 * @param req Request.
 * @return Remote transaction.
 * @throws GridException If failed.
 * @throws GridDistributedLockCancelledException If lock has been cancelled.
 */
@SuppressWarnings({"RedundantTypeArguments"})
@Nullable public GridNearTxRemote<K, V> startRemoteTx(UUID nodeId, GridDhtLockRequest<K, V> req)
    throws GridException, GridDistributedLockCancelledException {
    List<byte[]> nearKeyBytes = req.nearKeyBytes();

    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
        for (int i = 0; i < nearKeyBytes.size(); i++) {
            byte[] bytes = nearKeyBytes.get(i);

            if (bytes == null)
                continue;

            K key = req.nearKeys().get(i);

            Collection<GridCacheMvccCandidate<K>> cands = req.candidatesByIndex(i);

            if (log.isDebugEnabled())
                log.debug("Unmarshalled key: " + key);

            GridNearCacheEntry<K, V> entry = null;

            while (true) {
                try {
                    entry = peekExx(key);

                    if (entry != null) {
                        entry.keyBytes(bytes);

                        // Handle implicit locks for pessimistic transactions.
                        if (req.inTx()) {
                            tx = ctx.tm().tx(req.version());

                            if (tx != null)
                                tx.addWrite(key, bytes, null /*Value.*/, null /*Value bytes.*/);
                            else {
                                tx = new GridNearTxRemote<K, V>(
                                    nodeId,
                                    req.nearNodeId(),
                                    req.threadId(),
                                    req.version(),
                                    null,
                                    PESSIMISTIC,
                                    req.isolation(),
                                    req.isInvalidate(),
                                    req.timeout(),
                                    key,
                                    bytes,
                                    null, // Value.
                                    null, // Value bytes.
                                    ctx);

                                if (tx.empty())
                                    return tx;

                                tx = ctx.tm().onCreated(tx);

                                if (tx == null || !ctx.tm().onStarted(tx))
                                    throw new GridCacheTxRollbackException("Failed to acquire lock " +
                                        "(transaction has been completed): " + req.version());
                            }
                        }

                        // Add remote candidate before reordering.
                        entry.addRemote(req.nodeId(), nodeId, req.threadId(), req.version(), req.timeout(),
                            tx != null && tx.ec(), tx != null, tx != null && tx.implicitSingle());

                        // Remote candidates for ordered lock queuing.
                        entry.addRemoteCandidates(
                            cands,
                            req.version(),
                            req.committedVersions(),
                            req.rolledbackVersions());

                        entry.orderOwned(req.version(), req.owned(entry.key()));
                    }

                    // Double-check in case if sender node left the grid.
                    if (ctx.discovery().node(req.nodeId()) == null) {
                        if (log.isDebugEnabled())
                            log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

                        if (tx != null)
                            tx.rollback();

                        return null;
                    }

                    // Entry is legit.
                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;

                    if (log.isDebugEnabled())
                        log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

                    if (tx != null) {
                        tx.clearEntry(entry.key());

                        if (log.isDebugEnabled())
                            log.debug("Cleared removed entry from remote transaction (will retry) [entry=" +
                                entry + ", tx=" + tx + ']');
                    }
                }
            }
        }
    }
    else {
        String err = "Failed to acquire deployment class loader for message: " + req;

        U.warn(log, err);

        throw new GridException(err);
    }

    return tx;
}
/**
 * Removes obsolete deployments in case of redeploy.
 *
 * @param meta Request metadata.
 * @return Tuple where the first value indicates whether the request is valid and the second
 *      holds a newly created deployment, if any.
 */
private GridTuple2<Boolean, SharedDeployment> checkRedeploy(GridDeploymentMetadata meta) {
    assert Thread.holdsLock(mux);

    SharedDeployment newDep = null;

    for (List<SharedDeployment> deps : cache.values()) {
        for (SharedDeployment dep : deps) {
            if (!dep.isUndeployed() && !dep.isPendingUndeploy()) {
                long undeployTimeout = ctx.config().getNetworkTimeout();

                SharedDeployment doomed = null;

                // Only check deployments with no participants.
                if (!dep.hasParticipants()) {
                    // In case of SHARED deployment it is possible to get here if
                    // unmarshalling happens during undeploy. In this case, we
                    // simply don't do anything.
                    if (dep.deployMode() == CONTINUOUS) {
                        if (dep.existingDeployedClass(meta.className()) != null) {
                            // Change from shared deploy to shared undeploy or user version change.
                            // Simply remove all deployments with no participating nodes.
                            if (meta.deploymentMode() == SHARED ||
                                !meta.userVersion().equals(dep.userVersion()))
                                doomed = dep;
                        }
                    }
                }
                // If there are participants, we undeploy if class loader ID on some node changed.
                else if (dep.existingDeployedClass(meta.className()) != null) {
                    GridTuple2<GridUuid, Long> ldr = dep.getClassLoaderId(meta.senderNodeId());

                    if (ldr != null) {
                        if (!ldr.get1().equals(meta.classLoaderId())) {
                            // If deployed sequence number is less, then schedule for undeployment.
                            if (ldr.get2() < meta.sequenceNumber()) {
                                if (log.isDebugEnabled())
                                    log.debug("Received request for a class with newer sequence number " +
                                        "(will schedule current class for undeployment) [newSeq=" +
                                        meta.sequenceNumber() + ", oldSeq=" + ldr.get2() +
                                        ", senderNodeId=" + meta.senderNodeId() + ", newClsLdrId=" +
                                        meta.classLoaderId() + ", oldClsLdrId=" + ldr.get1() + ']');

                                doomed = dep;
                            }
                            else if (ldr.get2() > meta.sequenceNumber()) {
                                long time = System.currentTimeMillis() - dep.timestamp();

                                if (newDep == null && time < ctx.config().getNetworkTimeout()) {
                                    // Set undeployTimeout, so the class will be scheduled
                                    // for undeployment.
                                    undeployTimeout = ctx.config().getNetworkTimeout() - time;

                                    if (log.isDebugEnabled())
                                        log.debug("Received execution request for a stale class " +
                                            "(will deploy and schedule undeployment in " + undeployTimeout +
                                            "ms) [curSeq=" + ldr.get2() + ", staleSeq=" +
                                            meta.sequenceNumber() + ", cls=" + meta.className() +
                                            ", senderNodeId=" + meta.senderNodeId() + ", curLdrId=" +
                                            ldr.get1() + ", staleLdrId=" + meta.classLoaderId() + ']');

                                    // We got the redeployed class before the old one.
                                    // Simply create a temporary deployment for the sender node,
                                    // and schedule undeploy for it.
                                    newDep = createNewDeployment(meta, false);

                                    doomed = newDep;
                                }
                                else {
                                    U.warn(log, "Received execution request for a class that has been " +
                                        "redeployed (will ignore): " + meta.alias());

                                    if (log.isDebugEnabled())
                                        log.debug("Received execution request for a class that has been " +
                                            "redeployed (will ignore) [alias=" + meta.alias() +
                                            ", dep=" + dep + ']');

                                    return F.t(false, null);
                                }
                            }
                            else {
                                U.error(log, "Sequence number does not correspond to class loader ID " +
                                    "[seqNum=" + meta.sequenceNumber() + ", dep=" + dep + ']');

                                return F.t(false, null);
                            }
                        }
                    }
                }

                if (doomed != null) {
                    doomed.onUndeployScheduled();

                    if (log.isDebugEnabled())
                        log.debug("Deployment was scheduled for undeploy: " + doomed);

                    // Lifespan time.
                    final long endTime = System.currentTimeMillis() + undeployTimeout;

                    // Deployment to undeploy.
                    final SharedDeployment undep = doomed;

                    ctx.timeout().addTimeoutObject(new GridTimeoutObject() {
                        @Override public GridUuid timeoutId() {
                            return undep.classLoaderId();
                        }

                        @Override public long endTime() {
                            return endTime < 0 ? Long.MAX_VALUE : endTime;
                        }

                        @Override public void onTimeout() {
                            boolean removed = false;

                            // Hot redeployment.
                            synchronized (mux) {
                                assert undep.isPendingUndeploy();

                                if (!undep.isUndeployed()) {
                                    undep.undeploy();

                                    undep.onRemoved();

                                    removed = true;

                                    Collection<SharedDeployment> deps = cache.get(undep.userVersion());

                                    if (deps != null) {
                                        for (Iterator<SharedDeployment> i = deps.iterator(); i.hasNext();)
                                            if (i.next() == undep)
                                                i.remove();

                                        if (deps.isEmpty())
                                            cache.remove(undep.userVersion());
                                    }

                                    if (log.isInfoEnabled())
                                        log.info("Undeployed class loader due to deployment mode change, " +
                                            "user version change, or hot redeployment: " + undep);
                                }
                            }

                            // Outside synchronization.
                            if (removed)
                                undep.recordUndeployed(null);
                        }
                    });
                }
            }
        }
    }

    if (newDep != null) {
        List<SharedDeployment> list = F.addIfAbsent(cache, meta.userVersion(),
            F.<SharedDeployment>newList());

        assert list != null;

        list.add(newDep);
    }

    return F.t(true, newDep);
}
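The timeout object registered above effectively schedules a deferred undeploy that is re-checked under the lock when it fires, so a deployment revived in the meantime is left alone. A rough JDK analogue using ScheduledExecutorService (GridTimeoutObject and the mux re-check are simplified to a boolean flag; all names here are illustrative, not GridGain API):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DeferredCleanupExample {
    private final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();

    private final Object mux = new Object();

    private boolean undeployed;

    /** Schedules cleanup to run after the given timeout, unless it already happened. */
    void scheduleUndeploy(long timeoutMs) {
        timer.schedule(new Runnable() {
            @Override public void run() {
                // Re-check state under the same lock the rest of the code uses,
                // mirroring the synchronized (mux) block in onTimeout() above.
                synchronized (mux) {
                    if (!undeployed) {
                        undeployed = true;

                        // Actual undeploy work would go here.
                    }
                }
            }
        }, timeoutMs, TimeUnit.MILLISECONDS);
    }
}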
/**
 * @param nodeId Sender node ID.
 * @param req Finish transaction message.
 */
@SuppressWarnings({"CatchGenericClass"})
private void processFinishRequest(UUID nodeId, GridDistributedTxFinishRequest<K, V> req) {
    assert nodeId != null;
    assert req != null;

    GridReplicatedTxRemote<K, V> tx = ctx.tm().tx(req.version());

    try {
        ClassLoader ldr = ctx.deploy().globalLoader();

        if (req.commit()) {
            // If lock was acquired explicitly.
            if (tx == null) {
                // Create transaction and add entries.
                tx = ctx.tm().onCreated(
                    new GridReplicatedTxRemote<K, V>(
                        ldr,
                        nodeId,
                        req.threadId(),
                        req.version(),
                        req.commitVersion(),
                        PESSIMISTIC,
                        READ_COMMITTED,
                        req.isInvalidate(),
                        /*timeout*/0,
                        /*read entries*/null,
                        req.writes(),
                        ctx));

                if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException("Attempt to start a completed " +
                        "transaction: " + req);
            }
            else {
                boolean set = tx.commitVersion(req.commitVersion());

                assert set;
            }

            Collection<GridCacheTxEntry<K, V>> writeEntries = req.writes();

            if (!F.isEmpty(writeEntries)) {
                // In OPTIMISTIC mode, we get the values at PREPARE stage.
                assert tx.concurrency() == PESSIMISTIC;

                for (GridCacheTxEntry<K, V> entry : writeEntries) {
                    // Unmarshal write entries.
                    entry.unmarshal(ctx, ldr);

                    if (log.isDebugEnabled())
                        log.debug("Unmarshalled transaction entry from pessimistic transaction [key=" +
                            entry.key() + ", value=" + entry.value() + ", tx=" + tx + ']');

                    if (!tx.setWriteValue(entry))
                        U.warn(log, "Received entry to commit that was not present in transaction " +
                            "[entry=" + entry + ", tx=" + tx + ']');
                }
            }

            // Add completed versions.
            tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

            if (tx.pessimistic())
                tx.prepare();

            tx.commit();
        }
        else if (tx != null) {
            tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

            tx.rollback();
        }

        if (req.replyRequired()) {
            GridCacheMessage<K, V> res = new GridDistributedTxFinishResponse<K, V>(req.version(),
                req.futureId());

            try {
                ctx.io().send(nodeId, res);
            }
            catch (Throwable e) {
                // Double-check.
                if (ctx.discovery().node(nodeId) == null) {
                    if (log.isDebugEnabled())
                        log.debug("Node left while sending finish response [nodeId=" + nodeId +
                            ", res=" + res + ']');
                }
                else
                    U.error(log, "Failed to send finish response to node [nodeId=" + nodeId +
                        ", res=" + res + ']', e);
            }
        }
    }
    catch (GridCacheTxRollbackException e) {
        if (log.isDebugEnabled())
            log.debug("Attempted to start a completed transaction (will ignore): " + e);
    }
    catch (Throwable e) {
        U.error(log, "Failed completing transaction [commit=" + req.commit() + ", tx=" +
            CU.txString(tx) + ']', e);

        if (tx != null)
            tx.rollback();
    }
}
/** {@inheritDoc} */ @SuppressWarnings("BusyWait") @Override public void start(final GridRestProtocolHandler hnd) throws GridException { assert hnd != null; GridConfiguration cfg = ctx.config(); GridNioServerListener<GridClientMessage> lsnr = new GridTcpRestNioListener(log, hnd); GridNioParser parser = new GridTcpRestParser(log); try { host = resolveRestTcpHost(cfg); SSLContext sslCtx = null; if (cfg.isRestTcpSslEnabled()) { GridSslContextFactory factory = cfg.getRestTcpSslContextFactory(); if (factory == null) // Thrown SSL exception instead of GridException for writing correct warning message into // log. throw new SSLException("SSL is enabled, but SSL context factory is not specified."); sslCtx = factory.createSslContext(); } int lastPort = cfg.getRestTcpPort() + cfg.getRestPortRange() - 1; for (port = cfg.getRestTcpPort(); port <= lastPort; port++) { if (startTcpServer(host, port, lsnr, parser, sslCtx, cfg)) { if (log.isInfoEnabled()) log.info(startInfo()); return; } } U.warn( log, "Failed to start TCP binary REST server (possibly all ports in range are in use) " + "[firstPort=" + cfg.getRestTcpPort() + ", lastPort=" + lastPort + ", host=" + host + ']'); } catch (SSLException e) { U.warn( log, "Failed to start " + name() + " protocol on port " + port + ": " + e.getMessage(), "Failed to start " + name() + " protocol on port " + port + ". Check if SSL context factory is " + "properly configured."); } catch (IOException e) { U.warn( log, "Failed to start " + name() + " protocol on port " + port + ": " + e.getMessage(), "Failed to start " + name() + " protocol on port " + port + ". " + "Check restTcpHost configuration property."); } }
/**
 * Processes lock request.
 *
 * @param nodeId Sender node ID.
 * @param msg Lock request.
 */
@SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) {
    assert !nodeId.equals(locNodeId);

    List<byte[]> keys = msg.keyBytes();

    int cnt = keys.size();

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedLockResponse res;

    ClassLoader ldr = null;

    try {
        ldr = ctx.deploy().globalLoader();

        if (ldr != null) {
            res = new GridDistributedLockResponse(msg.version(), msg.futureId(), cnt);

            for (int i = 0; i < keys.size(); i++) {
                byte[] bytes = keys.get(i);

                K key = msg.keys().get(i);

                Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i);

                if (bytes == null)
                    continue;

                if (log.isDebugEnabled())
                    log.debug("Unmarshalled key: " + key);

                GridDistributedCacheEntry<K, V> entry = null;

                while (true) {
                    try {
                        entry = entryexx(key);

                        // Handle implicit locks for pessimistic transactions.
                        if (msg.inTx()) {
                            tx = ctx.tm().tx(msg.version());

                            if (tx != null) {
                                if (msg.txRead())
                                    tx.addRead(key, bytes);
                                else
                                    tx.addWrite(key, bytes);
                            }
                            else {
                                tx = new GridReplicatedTxRemote<K, V>(
                                    nodeId,
                                    msg.threadId(),
                                    msg.version(),
                                    null,
                                    PESSIMISTIC,
                                    msg.isolation(),
                                    msg.isInvalidate(),
                                    msg.timeout(),
                                    key,
                                    bytes,
                                    msg.txRead(),
                                    ctx);

                                tx = ctx.tm().onCreated(tx);

                                if (tx == null || !ctx.tm().onStarted(tx))
                                    throw new GridCacheTxRollbackException("Failed to acquire lock " +
                                        "(transaction has been completed): " + msg.version());
                            }
                        }

                        // Add remote candidate before reordering.
                        entry.addRemote(msg.nodeId(), null, msg.threadId(), msg.version(), msg.timeout(),
                            tx != null && tx.ec(), tx != null, tx != null && tx.implicitSingle());

                        // Remote candidates for ordered lock queuing.
                        entry.addRemoteCandidates(
                            cands,
                            msg.version(),
                            msg.committedVersions(),
                            msg.rolledbackVersions());

                        // Double-check in case if sender node left the grid.
                        if (ctx.discovery().node(msg.nodeId()) == null) {
                            if (log.isDebugEnabled())
                                log.debug("Node requesting lock left grid (lock request will be ignored): " +
                                    msg);

                            if (tx != null)
                                tx.rollback();

                            return;
                        }

                        res.setCandidates(i, entry.localCandidates(),
                            ctx.tm().committedVersions(msg.version()),
                            ctx.tm().rolledbackVersions(msg.version()));

                        res.addValueBytes(entry.rawGet(),
                            msg.returnValue(i) ? entry.valueBytes(null) : null, ctx);

                        // Entry is legit.
                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " +
                            entry;

                        if (log.isDebugEnabled())
                            log.debug("Received entry removed exception (will retry on renewed entry): " +
                                entry);

                        if (tx != null) {
                            tx.clearEntry(entry.key());

                            if (log.isDebugEnabled())
                                log.debug("Cleared removed entry from remote transaction (will retry) " +
                                    "[entry=" + entry + ", tx=" + tx + ']');
                        }
                    }
                }
            }
        }
        else {
            String err = "Failed to acquire deployment class loader for message: " + msg;

            U.warn(log, err);

            res = new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err));
        }
    }
    catch (GridCacheTxRollbackException e) {
        if (log.isDebugEnabled())
            log.debug("Received lock request for completed transaction (will ignore): " + e);

        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), e);
    }
    catch (GridException e) {
        String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg;

        log.error(err, e);

        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err, e));

        if (tx != null)
            tx.rollback();
    }
    catch (GridDistributedLockCancelledException ignored) {
        // Received lock request for cancelled lock.
        if (log.isDebugEnabled())
            log.debug("Received lock request for canceled lock (will ignore): " + msg);

        if (tx != null)
            tx.rollback();

        // Don't send response back.
        return;
    }

    GridNode node = ctx.discovery().node(msg.nodeId());

    boolean releaseAll = false;

    if (node != null) {
        try {
            // Reply back to sender.
            ctx.io().send(node, res);
        }
        catch (GridException e) {
            U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e);

            releaseAll = ldr != null;
        }
    }
    // If sender left grid, release all locks acquired so far.
    else
        releaseAll = ldr != null;

    // Release all locks because sender node left grid.
    if (releaseAll) {
        for (K key : msg.keys()) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekexx(key);

                try {
                    if (entry != null)
                        entry.removeExplicitNodeLocks(msg.nodeId());

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock on removed entry during failure " +
                            "of replicated lock request handling (will retry): " + entry);
                }
            }
        }

        U.warn(log, "Sender node left grid in the midst of lock acquisition (locks will be released).");
    }
}
/** {@inheritDoc} */
@Nullable
@SuppressWarnings({"UnusedCatchParameter"})
@Override public GridDeployment getDeployment(GridDeploymentMetadata meta) {
    GridDeployment dep;

    Class<?> cls = null;

    String alias = meta.alias();

    synchronized (mux) {
        // Validate metadata.
        assert meta.alias() != null;

        dep = getDeployment(meta.alias());

        if (dep != null) {
            if (log.isDebugEnabled()) {
                log.debug("Acquired deployment class from local cache: " + dep);
            }

            return dep;
        }

        GridDeploymentResource rsrc = spi.findResource(meta.alias());

        if (rsrc != null) {
            dep = deploy(ctx.config().getDeploymentMode(), rsrc.getClassLoader(),
                rsrc.getResourceClass(), alias);

            if (dep == null) {
                return null;
            }

            if (log.isDebugEnabled()) {
                log.debug("Acquired deployment class from SPI: " + dep);
            }
        }
        // Auto-deploy.
        else {
            ClassLoader ldr = meta.classLoader();

            if (ldr == null) {
                ldr = Thread.currentThread().getContextClassLoader();

                // Safety.
                if (ldr == null) {
                    ldr = ctxLdr;
                }
            }

            // Don't auto-deploy locally in case of nested execution.
            if (ldr instanceof GridDeploymentClassLoader) {
                return null;
            }

            try {
                // Check that class can be loaded.
                cls = ldr.loadClass(meta.alias());

                spi.register(ldr, cls);

                rsrc = spi.findResource(alias);

                if (rsrc != null && rsrc.getResourceClass().equals(cls)) {
                    if (log.isDebugEnabled()) {
                        log.debug("Retrieved auto-loaded resource from SPI: " + rsrc);
                    }

                    dep = deploy(ctx.config().getDeploymentMode(), ldr, cls, alias);

                    if (dep == null) {
                        return null;
                    }
                }
                else {
                    U.warn(log, "Failed to find resource from deployment SPI even after registering it: " +
                        meta.alias());

                    return null;
                }
            }
            catch (ClassNotFoundException e) {
                if (log.isDebugEnabled()) {
                    log.debug("Failed to load class for local auto-deployment [ldr=" + ldr +
                        ", meta=" + meta + ']');
                }

                return null;
            }
            catch (GridSpiException e) {
                U.error(log, "Failed to deploy local class: " + meta.alias(), e);

                return null;
            }
        }
    }

    if (cls != null) {
        recordDeploy(cls, alias, meta.isRecord());

        dep.addDeployedClass(cls, meta.className(), meta.alias());
    }

    if (log.isDebugEnabled()) {
        log.debug("Acquired deployment class: " + dep);
    }

    return dep;
}
/** {@inheritDoc} */
@Override public void execute(@Nullable GridProjection prj) throws GridException {
    if (cb == null)
        throw new IllegalStateException("Mandatory local callback is not set for the query: " + this);

    if (prj == null)
        prj = ctx.grid();

    prj = prj.forCache(ctx.name());

    if (prj.nodes().isEmpty())
        throw new GridTopologyException("Failed to execute query (projection is empty): " + this);

    GridCacheMode mode = ctx.config().getCacheMode();

    if (mode == LOCAL || mode == REPLICATED) {
        Collection<GridNode> nodes = prj.nodes();

        GridNode node = nodes.contains(ctx.localNode()) ? ctx.localNode() : F.rand(nodes);

        assert node != null;

        if (nodes.size() > 1 && !ctx.cache().isDrSystemCache()) {
            if (node.id().equals(ctx.localNodeId()))
                U.warn(log, "Continuous query for " + mode + " cache can be run only on local node. " +
                    "Will execute query locally: " + this);
            else
                U.warn(log, "Continuous query for " + mode + " cache can be run only on single node. " +
                    "Will execute query on remote node [qry=" + this + ", node=" + node + ']');
        }

        prj = prj.forNode(node);
    }

    closeLock.lock();

    try {
        if (routineId != null)
            throw new IllegalStateException("Continuous query can't be executed twice.");

        guard.block();

        GridContinuousHandler hnd = new GridCacheContinuousQueryHandler<>(ctx.name(), topic, cb,
            filter, prjPred);

        routineId = ctx.kernalContext().continuous().startRoutine(hnd, bufSize, timeInterval,
            autoUnsubscribe, prj.predicate()).get();
    }
    finally {
        closeLock.unlock();
    }
}
/** {@inheritDoc} */ @SuppressWarnings("BusyWait") @Override public void start(GridRestProtocolHandler hnd) throws GridException { InetAddress locHost; try { locHost = U.resolveLocalHost(ctx.config().getLocalHost()); } catch (IOException e) { throw new GridException( "Failed to resolve local host to bind address: " + ctx.config().getLocalHost(), e); } System.setProperty(GG_JETTY_HOST, locHost.getHostAddress()); jettyHnd = new GridJettyRestHandler( hnd, new C1<String, Boolean>() { @Override public Boolean apply(String tok) { return F.isEmpty(secretKey) || authenticate(tok); } }, log); String jettyPath = ctx.config().getRestJettyPath(); final URL cfgUrl; if (jettyPath == null) { cfgUrl = null; if (log.isDebugEnabled()) log.debug("Jetty configuration file is not provided, using defaults."); } else { cfgUrl = U.resolveGridGainUrl(jettyPath); if (cfgUrl == null) throw new GridSpiException("Invalid Jetty configuration file: " + jettyPath); else if (log.isDebugEnabled()) log.debug("Jetty configuration file: " + cfgUrl); } loadJettyConfiguration(cfgUrl); AbstractNetworkConnector connector = getJettyConnector(); try { host = InetAddress.getByName(connector.getHost()); } catch (UnknownHostException e) { throw new GridException("Failed to resolve Jetty host address: " + connector.getHost(), e); } int initPort = connector.getPort(); int lastPort = initPort + ctx.config().getRestPortRange() - 1; for (port = initPort; port <= lastPort; port++) { connector.setPort(port); if (startJetty()) { if (log.isInfoEnabled()) log.info(startInfo()); return; } } U.warn( log, "Failed to start Jetty REST server (possibly all ports in range are in use) " + "[firstPort=" + initPort + ", lastPort=" + lastPort + ']'); }