/**
 * @param nodeId Node ID.
 * @return Class loader ID for node ID.
 */
GridTuple2<GridUuid, Long> getClassLoaderId(UUID nodeId) {
    assert nodeId != null;
    assert Thread.holdsLock(mux);

    return loader().registeredClassLoaderId(nodeId);
}
/**
 * Checks if node is participating in deployment.
 *
 * @param nodeId Node ID to check.
 * @param ldrId Class loader ID.
 * @return {@code True} if node is participating in deployment.
 */
boolean hasParticipant(UUID nodeId, GridUuid ldrId) {
    assert nodeId != null;
    assert ldrId != null;
    assert Thread.holdsLock(mux);

    return loader().hasRegisteredNode(nodeId, ldrId);
}
/** Stops Jetty. */
private void stopJetty() {
    // Jetty does not really stop the server if port is busy.
    try {
        if (httpSrv != null) {
            // If server was successfully started, deregister ports.
            if (httpSrv.isStarted())
                ctx.ports().deregisterPorts(getClass());

            // Record current interrupted status of calling thread.
            boolean interrupted = Thread.interrupted();

            try {
                httpSrv.stop();
            }
            finally {
                // Reset interrupted flag on calling thread.
                if (interrupted)
                    Thread.currentThread().interrupt();
            }
        }
    }
    catch (InterruptedException ignored) {
        if (log.isDebugEnabled())
            log.debug("Thread has been interrupted.");

        Thread.currentThread().interrupt();
    }
    catch (Exception e) {
        U.error(log, "Failed to stop Jetty HTTP server.", e);
    }
}
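// The save-and-restore of the interrupt flag seen in stopJetty() (and in join(boolean)
// below) is a general pattern for calling blocking APIs without losing the caller's
// interrupt status. A minimal JDK-only sketch; stop() here is a hypothetical blocking
// call used for illustration, not part of the original code:
private void stopPreservingInterrupt() {
    // Clear and remember the caller's interrupt status so the blocking call
    // below does not fail immediately on an already-interrupted thread.
    boolean interrupted = Thread.interrupted();

    try {
        stop(); // Hypothetical blocking operation that may throw InterruptedException.
    }
    catch (InterruptedException ignored) {
        interrupted = true; // The wait itself was interrupted.
    }
    finally {
        // Restore the flag so callers can still observe the interruption.
        if (interrupted)
            Thread.currentThread().interrupt();
    }
}

/** Hypothetical blocking operation used by the sketch above. */
private void stop() throws InterruptedException {
    Thread.sleep(10);
}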
/** @throws Exception If failed. */
@SuppressWarnings({"ObjectEquality"})
public void testUndeployedTask() throws Exception {
    Grid grid1 = null;
    Grid grid2 = null;

    try {
        grid1 = startGrid(1, new GridSpringResourceContextImpl(new GenericApplicationContext()));
        grid2 = startGrid(2, new GridSpringResourceContextImpl(new GenericApplicationContext()));

        // Execute tasks.
        grid1.compute().execute(SharedResourceTask1.class, null).get();
        grid1.compute().execute(SharedResourceTask2.class, null).get();

        grid1.compute().undeployTask(SharedResourceTask1.class.getName());

        // Wait until resources get undeployed remotely,
        // because undeploy is applied asynchronously.
        Thread.sleep(3000);

        // 1 local and 1 remote resource instances.
        checkUsageCount(createClss, UserResource1.class, 4);
        checkUsageCount(deployClss, UserResource1.class, 4);
        checkUsageCount(createClss, UserResource2.class, 4);
        checkUsageCount(deployClss, UserResource2.class, 4);
        checkUsageCount(undeployClss, UserResource1.class, 4);
        checkUsageCount(undeployClss, UserResource2.class, 4);

        grid1.compute().undeployTask(SharedResourceTask2.class.getName());

        // Wait until resources get undeployed remotely,
        // because undeploy is applied asynchronously.
        Thread.sleep(3000);

        // We undeployed the last task for this class loader and resources.
        // All resources should be undeployed.
        checkUsageCount(undeployClss, UserResource1.class, 4);
        checkUsageCount(undeployClss, UserResource2.class, 4);

        // Execute the same tasks.
        grid1.compute().execute(SharedResourceTask1.class, null).get();
        grid1.compute().execute(SharedResourceTask2.class, null).get();

        // 2 new resources.
        checkUsageCount(createClss, UserResource1.class, 8);
        checkUsageCount(deployClss, UserResource1.class, 8);
        checkUsageCount(createClss, UserResource2.class, 8);
        checkUsageCount(deployClss, UserResource2.class, 8);
    }
    finally {
        GridTestUtils.close(grid1, log());
        GridTestUtils.close(grid2, log());
    }

    checkUsageCount(undeployClss, UserResource1.class, 8);
    checkUsageCount(undeployClss, UserResource2.class, 8);
}
/**
 * Waits for all workers to finish.
 *
 * @param cancel Flag to indicate whether workers should be cancelled before waiting for them to finish.
 */
public void join(boolean cancel) {
    if (cancel)
        U.cancel(workers);

    // Record current interrupted status of calling thread.
    boolean interrupted = Thread.interrupted();

    try {
        U.join(workers, log);
    }
    finally {
        // Reset interrupted flag on calling thread.
        if (interrupted)
            Thread.currentThread().interrupt();
    }
}
/** {@inheritDoc} */
@Override public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec = new ThreadPoolExecutor(
        threadsCnt,
        threadsCnt,
        0L,
        MILLISECONDS,
        new ArrayBlockingQueue<Runnable>(batchQueueSize),
        new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
        while (iter.hasNext()) {
            if (Thread.currentThread().isInterrupted()) {
                U.warn(log, "Working thread was interrupted while loading data.");

                break;
            }

            buf.add(iter.next());

            if (buf.size() == batchSize) {
                exec.submit(new Worker(c, buf, args));

                buf = new ArrayList<>(batchSize);
            }
        }

        if (!buf.isEmpty())
            exec.submit(new Worker(c, buf, args));
    }
    catch (RejectedExecutionException ignored) {
        // Should never happen because of the custom RejectedExecutionHandler.
        assert false : "RejectedExecutionException was thrown while it shouldn't.";
    }
    finally {
        exec.shutdown();

        try {
            exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
        }
        catch (InterruptedException ignored) {
            U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

            Thread.currentThread().interrupt();
        }
    }
}
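// loadCache() above relies on a bounded queue plus a rejection handler that blocks the
// producer instead of throwing, which gives natural back-pressure. A self-contained,
// JDK-only sketch of the same pattern; the class name and batch handling are
// illustrative, not GridGain API:
import java.util.concurrent.*;

public class BlockingSubmitExample {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor exec = new ThreadPoolExecutor(
            4, 4, 0L, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(16),
            new RejectedExecutionHandler() {
                @Override public void rejectedExecution(Runnable r, ThreadPoolExecutor ex) {
                    try {
                        if (ex.isShutdown())
                            throw new RejectedExecutionException("Executor is shut down.");

                        // Block the submitting thread until queue space frees up.
                        ex.getQueue().put(r);
                    }
                    catch (InterruptedException e) {
                        Thread.currentThread().interrupt();

                        throw new RejectedExecutionException(e);
                    }
                }
            });

        // Producer loop: submission blocks once the queue is full.
        for (int i = 0; i < 100; i++) {
            final int batch = i;

            exec.submit(new Runnable() {
                @Override public void run() {
                    System.out.println("Processed batch " + batch); // Stand-in for Worker.
                }
            });
        }

        exec.shutdown();
        exec.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    }
}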
/** @throws Exception If failed. */
public void testDisabledRest() throws Exception {
    restEnabled = false;

    final Grid g = startGrid("disabled-rest");

    try {
        Thread.sleep(2 * TOP_REFRESH_FREQ);

        // Since the client uses a round-robin load balancer, this will cause every node to be queried.
        for (int i = 0; i < NODES_CNT + 1; i++)
            assertEquals(NODES_CNT + 1, client.compute().refreshTopology(false, false).size());

        final GridClientData data = client.data(PARTITIONED_CACHE_NAME);

        // Check that the rest-disabled node is unavailable.
        try {
            String affKey;

            do {
                affKey = UUID.randomUUID().toString();
            }
            while (!data.affinity(affKey).equals(g.localNode().id()));

            data.put(affKey, "asdf");

            assertEquals("asdf", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
        }
        catch (GridServerUnreachableException e) {
            // Thrown for direct client-node connections.
            assertTrue("Unexpected exception message: " + e.getMessage(),
                e.getMessage().startsWith("No available endpoints to connect (is rest enabled for this node?)"));
        }
        catch (GridClientException e) {
            // Thrown for routed client-router-node connections.
            String msg = e.getMessage();

            assertTrue("Unexpected exception message: " + msg,
                protocol() == GridClientProtocol.TCP ?
                    // TCP router.
                    msg.contains("No available endpoints to connect (is rest enabled for this node?)") :
                    // HTTP router.
                    msg.startsWith("No available nodes on the router for destination node ID"));
        }

        // Check that rest-enabled nodes are available.
        String affKey;

        do {
            affKey = UUID.randomUUID().toString();
        }
        while (data.affinity(affKey).equals(g.localNode().id()));

        data.put(affKey, "fdsa");

        assertEquals("fdsa", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
    }
    finally {
        restEnabled = true;

        G.stop(g.name(), true);
    }
}
/**
 * @param concurrency Concurrency.
 * @param isolation Isolation.
 * @throws Exception If test failed.
 */
private void checkTransactionTimeout(GridCacheTxConcurrency concurrency, GridCacheTxIsolation isolation)
    throws Exception {
    boolean wasEx = false;

    GridCacheTx tx = null;

    try {
        GridCache<Integer, String> cache = grid.cache(null);

        tx = cache.txStart(concurrency, isolation, 50, 0);

        cache.put(1, "1");

        Thread.sleep(100);

        cache.put(1, "2");

        tx.commit();
    }
    catch (GridCacheTxOptimisticException e) {
        info("Received expected optimistic exception: " + e.getMessage());

        wasEx = true;

        tx.rollback();
    }
    catch (GridCacheTxTimeoutException e) {
        info("Received expected timeout exception: " + e.getMessage());

        wasEx = true;

        tx.rollback();
    }

    assert wasEx;
}
/** {@inheritDoc} */
@Override public Serializable execute() {
    int arg = this.<Integer>argument(0);

    try {
        if (log.isInfoEnabled())
            log.info("Executing job [job=" + this + ", arg=" + arg + ']');

        startSignal.countDown();

        try {
            if (!startSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS))
                fail();

            if (arg == 1) {
                if (log.isInfoEnabled())
                    log.info("Job one is proceeding.");
            }
            else
                Thread.sleep(WAIT_TIME);
        }
        catch (InterruptedException e) {
            if (log.isInfoEnabled())
                log.info("Job got cancelled [arg=" + arg + ", ses=" + ses + ", e=" + e + ']');

            return 0;
        }

        if (log.isInfoEnabled())
            log.info("Completing job: " + ses);

        return argument(0);
    }
    finally {
        stopSignal.countDown();

        processedCnt++;
    }
}
/** @throws Exception If failed. */
public void testSameTaskFromTwoNodesLeft() throws Exception {
    Grid grid1 = null;
    Grid grid2 = null;
    Grid grid3 = null;

    try {
        grid1 = startGrid(1, new GridSpringResourceContextImpl(new GenericApplicationContext()));
        grid2 = startGrid(2, new GridSpringResourceContextImpl(new GenericApplicationContext()));
        grid3 = startGrid(3, new GridSpringResourceContextImpl(new GenericApplicationContext()));

        grid1.compute().execute(SharedResourceTask1.class, null).get();
        grid2.compute().execute(SharedResourceTask1.class, null).get();

        checkUsageCount(createClss, UserResource1.class, 6);
        checkUsageCount(deployClss, UserResource1.class, 6);
        checkUsageCount(createClss, UserResource2.class, 6);
        checkUsageCount(deployClss, UserResource2.class, 6);
        checkUsageCount(undeployClss, UserResource1.class, 0);
        checkUsageCount(undeployClss, UserResource2.class, 0);

        GridTestUtils.close(grid1, log());

        // Wait until other nodes get notified that grid1 left.
        Thread.sleep(1000);

        // Undeployment happened only on grid1.
        checkUsageCount(undeployClss, UserResource1.class, 2);
        checkUsageCount(undeployClss, UserResource2.class, 2);

        GridTestUtils.close(grid2, log());

        // Wait until resources get undeployed remotely,
        // because undeploy is applied asynchronously.
        Thread.sleep(1000);

        // Grid1 and grid2.
        checkUsageCount(undeployClss, UserResource1.class, 4);
        checkUsageCount(undeployClss, UserResource2.class, 4);
    }
    finally {
        GridTestUtils.close(grid1, log());
        GridTestUtils.close(grid2, log());
        GridTestUtils.close(grid3, log());
    }
}
/** @throws Exception If failed. */
public void testSameTaskFromTwoNodesUndeploy() throws Exception {
    Grid grid1 = null;
    Grid grid2 = null;
    Grid grid3 = null;

    try {
        grid1 = startGrid(1, new GridSpringResourceContextImpl(new GenericApplicationContext()));
        grid2 = startGrid(2, new GridSpringResourceContextImpl(new GenericApplicationContext()));
        grid3 = startGrid(3, new GridSpringResourceContextImpl(new GenericApplicationContext()));

        grid1.compute().execute(SharedResourceTask1.class, null).get();
        grid2.compute().execute(SharedResourceTask1.class, null).get();

        checkUsageCount(createClss, UserResource1.class, 6);
        checkUsageCount(deployClss, UserResource1.class, 6);
        checkUsageCount(createClss, UserResource2.class, 6);
        checkUsageCount(deployClss, UserResource2.class, 6);
        checkUsageCount(undeployClss, UserResource1.class, 0);
        checkUsageCount(undeployClss, UserResource2.class, 0);

        grid1.compute().undeployTask(SharedResourceTask1.class.getName());

        // Wait until resources get undeployed remotely,
        // because undeploy is applied asynchronously.
        Thread.sleep(3000);

        checkUsageCount(undeployClss, UserResource1.class, 6);
        checkUsageCount(undeployClss, UserResource2.class, 6);

        grid2.compute().undeployTask(SharedResourceTask1.class.getName());

        // Wait until resources get undeployed remotely,
        // because undeploy is applied asynchronously.
        Thread.sleep(3000);

        // All tasks from originating nodes were undeployed. All resources should be cleaned up.
        checkUsageCount(undeployClss, UserResource1.class, 6);
        checkUsageCount(undeployClss, UserResource2.class, 6);
    }
    finally {
        GridTestUtils.close(grid1, log());
        GridTestUtils.close(grid2, log());
        GridTestUtils.close(grid3, log());
    }
}
/** @throws Exception If failed. */
public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++)
        gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
        String key = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

        assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

        // Must go to primary node only. Since backup count is 0, value must be present on
        // primary node only.
        partitioned.put(key, "val" + key);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

            if (primaryNodeId.equals(entry.getKey()))
                assertEquals("val" + key, val);
            else
                assertNull(val);
        }
    }

    // Now check that we will see value in near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
        String pinnedKey = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

        UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

        GridClientNode node = compute.node(pinnedNodeId);

        partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

            if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
                assertEquals("val" + pinnedKey, val);
            else
                assertNull(val);
        }
    }
}
/** {@inheritDoc} */
@Override public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
    try {
        if (executor.isShutdown())
            throw new RejectedExecutionException();
        else
            executor.getQueue().put(r);
    }
    catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while loading data.");

        Thread.currentThread().interrupt();
    }
}
/** {@inheritDoc} */ @SuppressWarnings("BusyWait") @Override public Boolean reduce(List<GridComputeJobResult> results) throws GridException { assert taskSes != null; assert results != null; assert params != null; assert !params.isEmpty(); assert results.size() == params.size(); Map<String, Integer> receivedParams = new HashMap<>(); boolean allAttrReceived = false; int cnt = 0; while (!allAttrReceived && cnt++ < 3) { allAttrReceived = true; for (Map.Entry<String, Integer> entry : params.entrySet()) { assert taskSes.getAttribute(entry.getKey()) != null; Integer newVal = (Integer) taskSes.getAttribute(entry.getKey()); assert newVal != null; receivedParams.put(entry.getKey(), newVal); if (newVal != entry.getValue() + 1) allAttrReceived = false; } if (!allAttrReceived) { try { Thread.sleep(100); } catch (InterruptedException e) { throw new GridException("Thread interrupted.", e); } } } if (log.isDebugEnabled()) { for (Map.Entry<String, Integer> entry : receivedParams.entrySet()) { log.debug( "Received session attr value [name=" + entry.getKey() + ", val=" + entry.getValue() + ", expected=" + (params.get(entry.getKey()) + 1) + ']'); } } return allAttrReceived; }
/**
 * Adds new participant to deployment.
 *
 * @param dep Shared deployment.
 * @param meta Request metadata.
 * @return {@code True} if participant was added.
 */
private boolean addParticipant(SharedDeployment dep, GridDeploymentMetadata meta) {
    assert dep != null;
    assert meta != null;
    assert Thread.holdsLock(mux);

    if (!checkModeMatch(dep, meta))
        return false;

    if (meta.participants() != null) {
        for (Map.Entry<UUID, GridTuple2<GridUuid, Long>> e : meta.participants().entrySet()) {
            dep.addParticipant(e.getKey(), e.getValue().get1(), e.getValue().get2());

            if (log.isDebugEnabled())
                log.debug("Added new participant [nodeId=" + e.getKey() + ", clsLdrId=" + e.getValue().get1() +
                    ", seqNum=" + e.getValue().get2() + ']');
        }
    }

    if (dep.deployMode() == CONTINUOUS || meta.participants() == null) {
        if (!dep.addParticipant(meta.senderNodeId(), meta.classLoaderId(), meta.sequenceNumber())) {
            U.warn(log, "Failed to create shared mode deployment " +
                "(requested class loader was already undeployed, did sender node leave grid?) " +
                "[clsLdrId=" + meta.classLoaderId() + ", senderNodeId=" + meta.senderNodeId() + ']');

            return false;
        }

        if (log.isDebugEnabled())
            log.debug("Added new participant [nodeId=" + meta.senderNodeId() + ", clsLdrId=" +
                meta.classLoaderId() + ", seqNum=" + meta.sequenceNumber() + ']');
    }

    return true;
}
/** @param nodeId Node ID to remove. */
void removeParticipant(UUID nodeId) {
    assert nodeId != null;
    assert Thread.holdsLock(mux);

    GridUuid ldrId = loader().unregister(nodeId);

    if (log.isDebugEnabled())
        log.debug("Registering dead class loader ID: " + ldrId);

    synchronized (mux) {
        deadClsLdrs.add(ldrId);
    }
}
/**
 * @param meta Request metadata.
 * @return {@code True} if class loader is obsolete.
 */
private boolean isDeadClassLoader(GridDeploymentMetadata meta) {
    assert Thread.holdsLock(mux);

    synchronized (mux) {
        if (deadClsLdrs.contains(meta.classLoaderId())) {
            if (log.isDebugEnabled())
                log.debug("Ignoring request for obsolete class loader: " + meta);

            return true;
        }

        return false;
    }
}
/** Sets removed flag. */
void onRemoved() {
    assert Thread.holdsLock(mux);

    removed = true;

    Collection<GridUuid> deadIds = loader().registeredClassLoaderIds();

    if (log.isDebugEnabled())
        log.debug("Registering dead class loader IDs: " + deadIds);

    synchronized (mux) {
        deadClsLdrs.addAll(deadIds);
    }
}
/**
 * Starts Grid instance. Note that if the grid is already started, then it will be looked up and
 * returned from this method.
 *
 * @return Started grid.
 */
private Grid startGrid() {
    Properties props = System.getProperties();

    gridName = props.getProperty(GRIDGAIN_NAME.name());

    if (!props.containsKey(GRIDGAIN_NAME.name()) || G.state(gridName) != GridFactoryState.STARTED) {
        selfStarted = true;

        // Set class loader for Spring.
        ClassLoader curCl = Thread.currentThread().getContextClassLoader();

        // Add no-op logger to remove no-appender warning.
        Appender app = new NullAppender();

        Logger.getRootLogger().addAppender(app);

        try {
            Thread.currentThread().setContextClassLoader(getClass().getClassLoader());

            Grid grid = G.start(cfgPath);

            gridName = grid.name();

            System.setProperty(GRIDGAIN_NAME.name(), grid.name());

            return grid;
        }
        catch (GridException e) {
            throw new GridRuntimeException("Failed to start grid: " + cfgPath, e);
        }
        finally {
            Logger.getRootLogger().removeAppender(app);

            Thread.currentThread().setContextClassLoader(curCl);
        }
    }

    return G.grid(gridName);
}
/**
 * Executes example.
 *
 * @param args Command line arguments, none required.
 * @throws Exception If example execution failed.
 */
public static void main(String[] args) throws Exception {
    try (Grid grid = GridGain.start("examples/config/example-cache.xml")) {
        System.out.println();
        System.out.println(">>> Events API example started.");

        // Listen to events happening on local node.
        localListen();

        // Listen to events happening on all grid nodes.
        remoteListen();

        // Wait for a while while callback is notified about remaining puts.
        Thread.sleep(1000);
    }
}
/**
 * @param nodeId Grid node ID.
 * @param ldrId Class loader ID.
 * @param seqNum Sequence number for the class loader.
 * @return Whether actually added or not.
 */
boolean addParticipant(UUID nodeId, GridUuid ldrId, long seqNum) {
    assert nodeId != null;
    assert ldrId != null;
    assert Thread.holdsLock(mux);

    synchronized (mux) {
        if (!deadClsLdrs.contains(ldrId)) {
            loader().register(nodeId, ldrId, seqNum);

            return true;
        }

        return false;
    }
}
/**
 * @param alias Class alias.
 * @return Deployed class.
 */
@Nullable private GridDeployment getDeployment(String alias) {
    assert Thread.holdsLock(mux);

    LinkedList<GridDeployment> deps = cache.get(alias);

    if (deps != null) {
        assert !deps.isEmpty();

        GridDeployment dep = deps.getFirst();

        if (!dep.isUndeployed())
            return dep;
    }

    return null;
}
/**
 * Future mapping consists of two parts. First, we must determine the topology version this
 * future will map on. If locking is performed within a user transaction, we must continue to map
 * keys on the same topology version on which the transaction started. If the topology version is
 * undefined, we get the current topology future and wait until it completes, so the topology is
 * ready to use.
 *
 * <p>During the second part we map keys to primary nodes using the topology snapshot obtained
 * during the first part. Note that if a primary node leaves the grid, the future will fail and
 * the transaction will be rolled back.
 */
void map() {
    // Obtain the topology version to use.
    GridDiscoveryTopologySnapshot snapshot = tx != null ? tx.topologySnapshot() :
        cctx.mvcc().lastExplicitLockTopologySnapshot(Thread.currentThread().getId());

    if (snapshot != null) {
        // Continue mapping on the same topology version as it was before.
        topSnapshot.compareAndSet(null, snapshot);

        map(keys);

        markInitialized();

        return;
    }

    // Must get topology snapshot and map on that version.
    mapOnTopology();
}
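// A minimal sketch of the snapshot-pinning step described in the Javadoc above, assuming
// hypothetical stand-ins: TopSnapshot, mapKeys() and currentTopologySnapshot() are NOT
// GridGain API; only the compareAndSet pinning mirrors the code above. Requires
// java.util.concurrent.atomic.AtomicReference:
private final AtomicReference<TopSnapshot> pinnedSnapshot = new AtomicReference<>();

void mapSketch(Collection<Object> keys, @Nullable TopSnapshot txSnapshot) {
    if (txSnapshot != null) {
        // Pin the version the transaction started on; compareAndSet ensures
        // a concurrently installed snapshot is never overwritten.
        pinnedSnapshot.compareAndSet(null, txSnapshot);

        mapKeys(keys, pinnedSnapshot.get());

        return;
    }

    // No transaction snapshot: map on the current topology version instead.
    mapKeys(keys, currentTopologySnapshot());
}

/** Hypothetical topology snapshot placeholder. */
static class TopSnapshot {
    // Version info would live here.
}

/** Hypothetical mapping step: assigns each key to its primary node for the given version. */
void mapKeys(Collection<Object> keys, TopSnapshot snap) {
    // No-op in this sketch.
}

/** Hypothetical accessor for the current, completed topology snapshot. */
TopSnapshot currentTopologySnapshot() {
    return new TopSnapshot();
}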
/**
 * @param cctx Registry.
 * @param keys Keys to lock.
 * @param tx Transaction.
 * @param read Read flag.
 * @param retval Flag to return value or not.
 * @param timeout Lock acquisition timeout.
 * @param filter Filter.
 */
public GridNearLockFuture(
    GridCacheContext<K, V> cctx,
    Collection<? extends K> keys,
    @Nullable GridNearTxLocal<K, V> tx,
    boolean read,
    boolean retval,
    long timeout,
    GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());

    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
        timeoutObj = new LockTimeoutObject();

        cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
}
/**
 * Checks availability of a classpath resource.
 *
 * @param name Resource name.
 * @return {@code true} if resource is available and ready for read, {@code false} otherwise.
 */
private boolean resourceAvailable(String name) {
    InputStream cfgStream = Thread.currentThread().getContextClassLoader().getResourceAsStream(name);

    if (cfgStream == null) {
        log.error("Classpath resource not found: " + name);

        return false;
    }

    try {
        // Read a single byte to force actual content access by JVM.
        cfgStream.read();

        return true;
    }
    catch (IOException e) {
        log.error("Failed to read classpath resource: " + name, e);

        return false;
    }
    finally {
        U.close(cfgStream, log);
    }
}
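// A try-with-resources variant of resourceAvailable() above; a sketch assuming Java 7+
// (which the examples elsewhere in this code already use), not the project's code.
// Logging is omitted for brevity:
private static boolean resourceAvailableSketch(String name) {
    ClassLoader ldr = Thread.currentThread().getContextClassLoader();

    // getResourceAsStream() returns null when the resource is absent.
    try (InputStream in = ldr.getResourceAsStream(name)) {
        if (in == null)
            return false;

        in.read(); // Read a single byte to force actual content access.

        return true;
    }
    catch (IOException e) {
        return false;
    }
}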
/**
 * Called to record all undeployed classes.
 *
 * @param leftNodeId Left node ID.
 */
void recordUndeployed(@Nullable UUID leftNodeId) {
    assert !Thread.holdsLock(mux);

    for (Map.Entry<String, Class<?>> depCls : deployedClassMap().entrySet()) {
        boolean isTask = isTask(depCls.getValue());

        String msg = (isTask ? "Task" : "Class") + " was undeployed in SHARED or CONTINUOUS mode: " +
            depCls.getValue();

        int type = isTask ? EVT_TASK_UNDEPLOYED : EVT_CLASS_UNDEPLOYED;

        if (ctx.event().isRecordable(type)) {
            GridDeploymentEvent evt = new GridDeploymentEvent();

            evt.nodeId(ctx.localNodeId());
            evt.message(msg);
            evt.type(type);
            evt.alias(depCls.getKey());

            ctx.event().record(evt);
        }

        if (log.isInfoEnabled())
            log.info(msg);
    }

    if (isObsolete()) {
        // Resource cleanup.
        ctx.resource().onUndeployed(this);

        ctx.cache().onUndeployed(leftNodeId, loader());

        clearSerializationCaches();
    }
}
/** {@inheritDoc} */
@Override public void onDeployed(Class<?> cls) {
    assert !Thread.holdsLock(mux);

    boolean isTask = isTask(cls);

    String msg = (isTask ? "Task" : "Class") + " was deployed in SHARED or CONTINUOUS mode: " + cls;

    int type = isTask ? EVT_TASK_DEPLOYED : EVT_CLASS_DEPLOYED;

    if (ctx.event().isRecordable(type)) {
        GridDeploymentEvent evt = new GridDeploymentEvent();

        evt.nodeId(ctx.localNodeId());
        evt.message(msg);
        evt.type(type);
        evt.alias(cls.getName());

        ctx.event().record(evt);
    }

    if (log.isInfoEnabled())
        log.info(msg);
}
/**
 * Future adapter.
 *
 * @author 2005-2011 Copyright (C) GridGain Systems, Inc.
 * @version 3.1.1c.19062011
 */
public class GridFutureAdapter<R> extends GridMetadataAwareAdapter implements GridFuture<R>, Externalizable {
    /** Synchronous notification flag. */
    private static final boolean SYNC_NOTIFY = U.isFutureNotificationSynchronous();

    /** Concurrent notification flag. */
    private static final boolean CONCUR_NOTIFY = U.isFutureNotificationConcurrent();

    /** Done flag. */
    private boolean done;

    /** Cancelled flag. */
    private boolean cancelled;

    /** Result. */
    @GridToStringInclude
    private R res;

    /** Error. */
    private Throwable err;

    /** Set to {@code false} on deserialization whenever incomplete future is serialized. */
    private boolean valid = true;

    /** Asynchronous listeners. */
    private final Set<GridInClosure<? super GridFuture<R>>> lsnrs =
        new GridLeanSet<GridInClosure<? super GridFuture<R>>>();

    /** Creator thread. */
    private Thread thread = Thread.currentThread();

    /** Mutex. */
    private final Object mux = new Object();

    /** Context. */
    protected GridKernalContext ctx;

    /** Logger. */
    protected GridLogger log;

    /** Future start time. */
    protected final long startTime = System.currentTimeMillis();

    /** Synchronous notification flag. */
    private volatile boolean syncNotify = SYNC_NOTIFY;

    /** Concurrent notification flag. */
    private volatile boolean concurNotify = CONCUR_NOTIFY;

    /** Future end time. */
    private volatile long endTime;

    /** Watch. */
    protected GridStopwatch watch;

    /** Empty constructor required for {@link Externalizable}. */
    public GridFutureAdapter() {
        // No-op.
    }

    /** @param ctx Kernal context. */
    public GridFutureAdapter(GridKernalContext ctx) {
        assert ctx != null;

        this.ctx = ctx;

        log = ctx.log(getClass());
    }

    /** {@inheritDoc} */
    @Override public long startTime() {
        return startTime;
    }

    /** {@inheritDoc} */
    @Override public long duration() {
        long endTime = this.endTime;

        return endTime == 0 ? System.currentTimeMillis() - startTime : endTime - startTime;
    }

    /** {@inheritDoc} */
    @Override public boolean concurrentNotify() {
        return concurNotify;
    }

    /** {@inheritDoc} */
    @Override public void concurrentNotify(boolean concurNotify) {
        this.concurNotify = concurNotify;
    }

    /** {@inheritDoc} */
    @Override public boolean syncNotify() {
        return syncNotify;
    }

    /** {@inheritDoc} */
    @Override public void syncNotify(boolean syncNotify) {
        this.syncNotify = syncNotify;
    }

    /**
     * Adds a watch to this future.
     *
     * @param name Name of the watch.
     */
    public void addWatch(String name) {
        assert name != null;

        watch = W.stopwatch(name);
    }

    /**
     * Adds a watch to this future.
     *
     * @param watch Watch to add.
     */
    public void addWatch(GridStopwatch watch) {
        assert watch != null;

        this.watch = watch;
    }

    /** Checks that future is in usable state. */
    protected void checkValid() {
        if (!valid)
            throw new IllegalStateException("Incomplete future was serialized and cannot " +
                "be used after deserialization.");
    }

    /** @return Valid flag. */
    protected boolean isValid() {
        return valid;
    }

    /**
     * Gets internal mutex.
     *
     * @return Internal mutex.
     */
    protected Object mutex() {
        checkValid();

        return mux;
    }

    /** @return Value of error. */
    protected Throwable error() {
        checkValid();

        synchronized (mux) {
            return err;
        }
    }

    /** @return Value of result. */
    protected R result() {
        checkValid();

        synchronized (mux) {
            return res;
        }
    }

    /** {@inheritDoc} */
    @Override public R call() throws Exception {
        return get();
    }

    /** {@inheritDoc} */
    @Override public R get(long timeout) throws GridException {
        return get(timeout, MILLISECONDS);
    }

    /** {@inheritDoc} */
    @Override public R get() throws GridException {
        checkValid();

        try {
            synchronized (mux) {
                while (!done && !cancelled)
                    mux.wait();

                if (done) {
                    if (err != null)
                        throw U.cast(err);

                    return res;
                }

                throw new GridFutureCancelledException("Future was cancelled: " + this);
            }
        }
        catch (InterruptedException e) {
            throw new GridInterruptedException(e);
        }
    }

    /** {@inheritDoc} */
    @Override public R get(long timeout, TimeUnit unit) throws GridException {
        A.ensure(timeout >= 0, "timeout cannot be negative: " + timeout);
        A.notNull(unit, "unit");

        checkValid();

        try {
            long now = System.currentTimeMillis();

            long end = timeout == 0 ? Long.MAX_VALUE : now + MILLISECONDS.convert(timeout, unit);

            // Account for overflow.
            if (end < 0)
                end = Long.MAX_VALUE;

            synchronized (mux) {
                while (!done && !cancelled && now < end) {
                    mux.wait(end - now);

                    if (!done)
                        now = System.currentTimeMillis();
                }

                if (done) {
                    if (err != null)
                        throw U.cast(err);

                    return res;
                }

                if (cancelled)
                    throw new GridFutureCancelledException("Future was cancelled: " + this);

                throw new GridFutureTimeoutException("Timeout was reached before computation completed [duration=" +
                    duration() + "ms, timeout=" + unit.toMillis(timeout) + "ms]");
            }
        }
        catch (InterruptedException e) {
            throw new GridInterruptedException("Got interrupted while waiting for future to complete [duration=" +
                duration() + "ms, timeout=" + unit.toMillis(timeout) + "ms]", e);
        }
    }

    /** {@inheritDoc} */
    @SuppressWarnings({"unchecked"})
    @Override public void listenAsync(@Nullable final GridInClosure<? super GridFuture<R>> lsnr) {
        if (lsnr != null) {
            checkValid();

            boolean done;

            synchronized (mux) {
                done = this.done;

                if (!done)
                    lsnrs.add(lsnr);
            }

            if (done) {
                try {
                    if (syncNotify)
                        notifyListener(lsnr);
                    else
                        ctx.closure().runLocalSafe(new GPR() {
                            @Override public void run() {
                                notifyListener(lsnr);
                            }
                        }, true);
                }
                catch (IllegalStateException ignore) {
                    U.warn(null, "Future notification will not proceed because grid is stopped: " +
                        ctx.gridName());
                }
            }
        }
    }

    /** {@inheritDoc} */
    @Override public void stopListenAsync(@Nullable GridInClosure<? super GridFuture<R>>... lsnr) {
        if (F.isEmpty(lsnr))
            synchronized (mux) {
                lsnrs.clear();
            }
        else
            synchronized (mux) {
                lsnrs.removeAll(F.asList(lsnr));
            }
    }

    /** Notifies all registered listeners. */
    private void notifyListeners() {
        final Collection<GridInClosure<? super GridFuture<R>>> tmp;

        synchronized (mux) {
            tmp = new ArrayList<GridInClosure<? super GridFuture<R>>>(lsnrs);
        }

        boolean concurNotify = this.concurNotify;
        boolean syncNotify = this.syncNotify;

        if (concurNotify) {
            for (final GridInClosure<? super GridFuture<R>> lsnr : tmp)
                ctx.closure().runLocalSafe(new GPR() {
                    @Override public void run() {
                        notifyListener(lsnr);
                    }
                }, true);
        }
        else {
            // Always notify in the thread different from start thread.
            if (Thread.currentThread() == thread && !syncNotify) {
                ctx.closure().runLocalSafe(new GPR() {
                    @Override public void run() {
                        // Since concurrent notifications are off, we notify
                        // all listeners in one thread.
                        for (GridInClosure<? super GridFuture<R>> lsnr : tmp)
                            notifyListener(lsnr);
                    }
                }, true);
            }
            else
                for (GridInClosure<? super GridFuture<R>> lsnr : tmp)
                    notifyListener(lsnr);
        }
    }

    /**
     * Notifies single listener.
     *
     * @param lsnr Listener.
     */
    private void notifyListener(GridInClosure<? super GridFuture<R>> lsnr) {
        assert lsnr != null;

        try {
            lsnr.apply(this);
        }
        catch (IllegalStateException ignore) {
            U.warn(null, "Failed to notify listener (grid is stopped) [grid=" + ctx.gridName() +
                ", lsnr=" + lsnr + ']');
        }
        catch (RuntimeException e) {
            U.error(log, "Failed to notify listener: " + lsnr, e);

            throw e;
        }
        catch (Error e) {
            U.error(log, "Failed to notify listener: " + lsnr, e);

            throw e;
        }
    }

    /**
     * Default no-op implementation that always returns {@code false}. Futures that do support
     * cancellation should override this method and call {@link #onCancelled()} callback explicitly
     * if cancellation indeed did happen.
     */
    @Override public boolean cancel() throws GridException {
        checkValid();

        return false;
    }

    /** {@inheritDoc} */
    @Override public boolean isDone() {
        // Don't check for "valid" here, as "done" flag can be read
        // even in invalid state.
        synchronized (mux) {
            return done || cancelled;
        }
    }

    /** {@inheritDoc} */
    @Override public GridAbsPredicate predicate() {
        return new PA() {
            @Override public boolean apply() {
                return isDone();
            }
        };
    }

    /** {@inheritDoc} */
    @Override public boolean isCancelled() {
        checkValid();

        synchronized (mux) {
            return cancelled;
        }
    }

    /**
     * Callback to notify that future is finished with {@code null} result. This method must
     * delegate to {@link #onDone(Object, Throwable)} method.
     *
     * @return {@code True} if result was set by this call.
     */
    public final boolean onDone() {
        return onDone(null, null);
    }

    /**
     * Callback to notify that future is finished. This method must delegate to
     * {@link #onDone(Object, Throwable)} method.
     *
     * @param res Result.
     * @return {@code True} if result was set by this call.
     */
    public final boolean onDone(@Nullable R res) {
        return onDone(res, null);
    }

    /**
     * Callback to notify that future is finished. This method must delegate to
     * {@link #onDone(Object, Throwable)} method.
     *
     * @param err Error.
     * @return {@code True} if result was set by this call.
     */
    public final boolean onDone(@Nullable Throwable err) {
        return onDone(null, err);
    }

    /**
     * Callback to notify that future is finished. Note that if a non-{@code null} exception is
     * passed in, the result value will be ignored.
     *
     * @param res Optional result.
     * @param err Optional error.
     * @return {@code True} if result was set by this call.
     */
    public boolean onDone(@Nullable R res, @Nullable Throwable err) {
        checkValid();

        boolean notify = false;

        boolean gotDone = false;

        try {
            synchronized (mux) {
                if (!done) {
                    gotDone = true;

                    endTime = System.currentTimeMillis();

                    this.res = res;
                    this.err = err;

                    done = true;

                    notify = true;

                    mux.notifyAll(); // Notify possibly waiting child classes.

                    return true;
                }

                return false;
            }
        }
        finally {
            if (gotDone) {
                GridStopwatch w = watch;

                if (w != null)
                    w.stop();
            }

            if (notify)
                notifyListeners();
        }
    }

    /**
     * Callback to notify that future is cancelled.
     *
     * @return {@code True} if cancel flag was set by this call.
     */
    public boolean onCancelled() {
        checkValid();

        synchronized (mux) {
            if (cancelled || done)
                return false;

            cancelled = true;

            mux.notifyAll(); // Notify possibly waiting child classes.
        }

        return true;
    }

    /** {@inheritDoc} */
    @Override public void writeExternal(ObjectOutput out) throws IOException {
        boolean done;
        boolean cancelled;
        Object res;
        Throwable err;
        boolean syncNotify;
        boolean concurNotify;

        synchronized (mux) {
            done = this.done;
            cancelled = this.cancelled;
            res = this.res;
            err = this.err;
            syncNotify = this.syncNotify;
            concurNotify = this.concurNotify;
        }

        out.writeBoolean(done);
        out.writeBoolean(syncNotify);
        out.writeBoolean(concurNotify);

        // Don't write any further if not done, as deserialized future
        // will be invalid anyways.
        if (done) {
            out.writeBoolean(cancelled);
            out.writeObject(res);
            out.writeObject(err);
        }
    }

    /** {@inheritDoc} */
    @SuppressWarnings({"unchecked"})
    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
        boolean done = in.readBoolean();

        syncNotify = in.readBoolean();
        concurNotify = in.readBoolean();

        if (!done)
            valid = false;
        else {
            boolean cancelled = in.readBoolean();

            R res = (R)in.readObject();
            Throwable err = (Throwable)in.readObject();

            synchronized (mux) {
                this.done = done;
                this.cancelled = cancelled;
                this.res = res;
                this.err = err;
            }
        }
    }

    /** {@inheritDoc} */
    @Override public String toString() {
        return S.toString(GridFutureAdapter.class, this);
    }
}
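// A hypothetical usage sketch for GridFutureAdapter above; `ctx` is assumed to be an
// already-initialized GridKernalContext (not shown here). The anonymous GridInClosure
// mirrors the listener type the adapter itself uses:
GridFutureAdapter<String> fut = new GridFutureAdapter<String>(ctx);

// The listener fires on completion (or immediately, if the future is already done).
fut.listenAsync(new GridInClosure<GridFuture<String>>() {
    @Override public void apply(GridFuture<String> f) {
        try {
            System.out.println("Future completed with: " + f.get());
        }
        catch (GridException e) {
            System.err.println("Future failed: " + e.getMessage());
        }
    }
});

// Completing the future unblocks get() and notifies registered listeners.
fut.onDone("done");

assert fut.isDone();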
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys,
    GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty())
        return;

    try {
        GridCacheVersion ver = null;

        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

        Collection<K> locKeys = new LinkedList<K>();

        GridCacheVersion obsoleteVer = ctx.versions().next();

        for (K key : keys) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                if (entry == null || !ctx.isAll(entry.wrap(false), filter))
                    break; // While.

                try {
                    GridCacheMvccCandidate<K> cand = entry.candidate(ctx.nodeId(),
                        Thread.currentThread().getId());

                    if (cand != null) {
                        ver = cand.version();

                        if (affNodes == null) {
                            affNodes = CU.allNodes(ctx, cand.topologyVersion());

                            keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                            map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                        }

                        // Send request to remove from remote nodes.
                        GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                        GridNearUnlockRequest<K, V> req = map.get(primary);

                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                            req.version(ver);
                        }

                        // Remove candidate from local node first.
                        GridCacheMvccCandidate<K> rmv = entry.removeLock();

                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new GridException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);

                                if (!primary.isLocal()) {
                                    assert req != null;

                                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                                }
                                else
                                    locKeys.add(key);

                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes) [lock=" +
                                    rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }

                        // Try to evict near entry if it's dht-mapped locally.
                        evictNearEntry(entry, obsoleteVer);
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }

        if (ver == null)
            return;

        for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridRichNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!req.keyBytes().isEmpty())
                // We don't wait for reply to this message.
                ctx.io().send(n, req);
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}