/** @throws Exception If failed. */
public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++)
        gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
        String key = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

        assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

        // Must go to the primary node only. Since backup count is 0, the value must be
        // present on the primary node only.
        partitioned.put(key, "val" + key);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

            if (primaryNodeId.equals(entry.getKey()))
                assertEquals("val" + key, val);
            else
                assertNull(val);
        }
    }

    // Now check that we will see the value in the near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
        String pinnedKey = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

        UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

        GridClientNode node = compute.node(pinnedNodeId);

        partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

            if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
                assertEquals("val" + pinnedKey, val);
            else
                assertNull(val);
        }
    }
}
/** {@inheritDoc} */
@Override public void stopListenAsync(@Nullable GridInClosure<? super GridFuture<R>>... lsnr) {
    if (F.isEmpty(lsnr))
        synchronized (mux) {
            lsnrs.clear();
        }
    else
        synchronized (mux) {
            lsnrs.removeAll(F.asList(lsnr));
        }
}
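// A minimal usage sketch (hypothetical future and listeners, not from the original
// source): per the implementation above, an empty varargs call clears every
// registered listener, while passing specific closures removes only those.
void stopListenExample(GridFuture<String> fut,
    GridInClosure<? super GridFuture<String>> lsnr1,
    GridInClosure<? super GridFuture<String>> lsnr2) {
    fut.stopListenAsync(lsnr1, lsnr2); // Remove only these two listeners.

    fut.stopListenAsync(); // Empty varargs: remove all remaining listeners.
}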
/** {@inheritDoc} */
@Override public GridDeployment getDeployment(final GridUuid ldrId) {
    synchronized (mux) {
        return F.find(F.flat(cache.values()), null, new P1<SharedDeployment>() {
            @Override public boolean apply(SharedDeployment d) {
                return d.classLoaderId().equals(ldrId);
            }
        });
    }
}
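// A minimal sketch of the F.flat + F.find idiom used above (hypothetical data, not
// from the original source): F.flat flattens a collection of collections into a
// single iterable, and F.find returns the first element matching the predicate, or
// the supplied default (null here) if none matches.
Integer findExample(Map<String, List<Integer>> m) {
    return F.find(F.flat(m.values()), null, new P1<Integer>() {
        @Override public boolean apply(Integer i) {
            return i > 10;
        }
    });
}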
/** @throws Exception If failed. */
public void testTopologyListener() throws Exception {
    final Collection<UUID> added = new ArrayList<>(1);
    final Collection<UUID> rmvd = new ArrayList<>(1);

    final CountDownLatch addedLatch = new CountDownLatch(1);
    final CountDownLatch rmvLatch = new CountDownLatch(1);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    GridClientTopologyListener lsnr = new GridClientTopologyListener() {
        @Override public void onNodeAdded(GridClientNode node) {
            added.add(node.nodeId());

            addedLatch.countDown();
        }

        @Override public void onNodeRemoved(GridClientNode node) {
            rmvd.add(node.nodeId());

            rmvLatch.countDown();
        }
    };

    client.addTopologyListener(lsnr);

    try {
        Grid g = startGrid(NODES_CNT + 1);

        UUID id = g.localNode().id();

        assertTrue(addedLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

        assertEquals(1, added.size());
        assertEquals(id, F.first(added));

        stopGrid(NODES_CNT + 1);

        assertTrue(rmvLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

        assertEquals(1, rmvd.size());
        assertEquals(id, F.first(rmvd));
    }
    finally {
        client.removeTopologyListener(lsnr);

        // Ensure the extra grid is stopped even if the test failed before stopping it.
        stopGrid(NODES_CNT + 1);
    }
}
/**
 * Adds information about a key and version to the request.
 * <p>
 * The other version has to be provided if the suspect lock is DHT-local.
 *
 * @param key Key.
 * @param cand Candidate.
 */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
void addCandidate(K key, GridCacheDgcLockCandidate cand) {
    Collection<GridCacheDgcLockCandidate> col =
        F.addIfAbsent(map, key, new ArrayList<GridCacheDgcLockCandidate>());

    assert col != null;

    col.add(cand);
}
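// A minimal sketch of the F.addIfAbsent idiom used above and elsewhere in this
// section (hypothetical map and values, not from the original source): if the key
// is absent, the supplied default is stored and returned; otherwise the existing
// mapped value is returned, so the result is never null.
void addIfAbsentExample(Map<String, Collection<Integer>> m) {
    Collection<Integer> col = F.addIfAbsent(m, "key", new ArrayList<Integer>());

    assert col != null; // Either the existing or the newly added collection.

    col.add(1); // Mutates the collection that is now stored in the map.
}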
/**
 * Checks the {@link GridSystemProperties#GG_JETTY_PORT} system property and overrides the
 * default connector port if it is present; similarly applies {@code GG_JETTY_HOST} to the
 * connector host. Then initializes {@code port} with the resulting value.
 *
 * @param con Jetty connector.
 */
private void override(AbstractNetworkConnector con) {
    String host = System.getProperty(GG_JETTY_HOST);

    if (!F.isEmpty(host))
        con.setHost(host);

    int currPort = con.getPort();

    Integer overridePort = Integer.getInteger(GG_JETTY_PORT);

    if (overridePort != null && overridePort != 0)
        currPort = overridePort;

    con.setPort(currPort);

    port = currPort;
}
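// A minimal sketch (hypothetical values, not from the original source): setting
// these system properties before the connector is configured makes override(...)
// pick them up. The same effect can be achieved with -D JVM flags; the actual
// property names are the values of the GG_JETTY_HOST / GG_JETTY_PORT constants.
void jettyOverrideExample() {
    System.setProperty(GG_JETTY_HOST, "127.0.0.1");

    // Integer.getInteger(GG_JETTY_PORT) in override(...) parses this value.
    System.setProperty(GG_JETTY_PORT, "8081");
}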
/** {@inheritDoc} */
@Override public void isolated(boolean isolated) throws GridException {
    if (isolated())
        return;

    GridNode node = F.first(ctx.grid().forCache(cacheName).nodes());

    if (node == null)
        throw new GridException("Failed to get node for cache: " + cacheName);

    GridCacheAttributes a = U.cacheAttributes(node, cacheName);

    assert a != null;

    updater = a.atomicityMode() == GridCacheAtomicityMode.ATOMIC ?
        GridDataLoadCacheUpdaters.<K, V>batched() :
        GridDataLoadCacheUpdaters.<K, V>groupLocked();
}
/** {@inheritDoc} */
@Override public void prepareMarshal(GridCacheContext<K, V> ctx) throws GridException {
    super.prepareMarshal(ctx);

    if (err != null)
        errBytes = ctx.marshaller().marshal(err);

    metaDataBytes = marshalCollection(metadata, ctx);
    dataBytes = fields ? marshalFieldsCollection(data, ctx) : marshalCollection(data, ctx);

    if (ctx.deploymentEnabled() && !F.isEmpty(data)) {
        for (Object o : data) {
            if (o instanceof Map.Entry) {
                Map.Entry e = (Map.Entry)o;

                prepareObject(e.getKey(), ctx);
                prepareObject(e.getValue(), ctx);
            }
        }
    }
}
/**
 * Add key to request.
 *
 * @param key Key to evict.
 * @param ver Entry version.
 * @param near {@code true} if key should be evicted from near cache.
 */
void addKey(K key, GridCacheVersion ver, boolean near) {
    assert key != null;
    assert ver != null;

    entries.add(F.t(key, ver, near));
}
/** {@inheritDoc} */
@Override public GridFuture<?> addData(Map.Entry<K, V> entry) throws GridException, IllegalStateException {
    A.notNull(entry, "entry");

    return addData(F.asList(entry));
}
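// A minimal usage sketch (the GridDataLoader type name and entry values are
// assumptions, not from the original source): a single entry is simply wrapped
// via F.asList(entry) and delegated to the collection-based overload.
void addDataExample(GridDataLoader<String, String> ldr) throws GridException {
    // java.util.AbstractMap.SimpleEntry is a convenient standalone Map.Entry.
    Map.Entry<String, String> entry = new AbstractMap.SimpleEntry<>("key1", "val1");

    GridFuture<?> fut = ldr.addData(entry); // Completes when the entry is loaded.
}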
/**
 * @param nodeId Node ID to send message to.
 * @param msg Message to send.
 * @throws GridException If send failed.
 */
private void send0(UUID nodeId, Object msg) throws GridException {
    GridNode node = ctx.kernalContext().discovery().node(nodeId);

    ctx.kernalContext().io().sendUserMessage(F.asList(node), msg, GridTopic.TOPIC_HADOOP, false, 0);
}
/**
 * @param depMode Deployment mode.
 * @param ldr Class loader to deploy.
 * @param cls Class.
 * @param alias Class alias.
 * @return Deployment, or {@code null} if more than one active deployment
 *      was found for the same resource.
 */
@SuppressWarnings({"ConstantConditions"})
private GridDeployment deploy(GridDeploymentMode depMode, ClassLoader ldr, Class<?> cls, String alias) {
    assert Thread.holdsLock(mux);

    LinkedList<GridDeployment> cachedDeps = null;

    GridDeployment dep = null;

    // Find existing class loader info.
    for (LinkedList<GridDeployment> deps : cache.values()) {
        for (GridDeployment d : deps) {
            if (d.classLoader() == ldr) {
                // Cache class and alias.
                d.addDeployedClass(cls, alias);

                cachedDeps = deps;

                dep = d;

                break;
            }
        }

        if (cachedDeps != null)
            break;
    }

    if (cachedDeps != null) {
        assert dep != null;

        cache.put(alias, cachedDeps);

        if (!cls.getName().equals(alias))
            // Cache by class name as well.
            cache.put(cls.getName(), cachedDeps);

        return dep;
    }

    GridUuid ldrId = GridUuid.randomUuid();

    long seqNum = seq.incrementAndGet();

    String userVer = getUserVersion(ldr);

    dep = new GridDeployment(depMode, ldr, ldrId, seqNum, userVer, cls.getName(), true);

    dep.addDeployedClass(cls, alias);

    LinkedList<GridDeployment> deps = F.addIfAbsent(cache, alias, F.<GridDeployment>newLinkedList());

    if (!deps.isEmpty()) {
        for (GridDeployment d : deps) {
            if (!d.isUndeployed()) {
                U.error(log, "Found more than one active deployment for the same resource " +
                    "[cls=" + cls + ", depMode=" + depMode + ", dep=" + d + ']');

                return null;
            }
        }
    }

    // Add at the beginning of the list for future fast access.
    deps.addFirst(dep);

    if (!cls.getName().equals(alias))
        // Cache by class name as well.
        cache.put(cls.getName(), deps);

    if (log.isDebugEnabled())
        log.debug("Created new deployment: " + dep);

    return dep;
}
/**
 * Creates and caches new deployment.
 *
 * @param meta Deployment metadata.
 * @param isCache Whether or not to cache.
 * @return New deployment.
 */
private SharedDeployment createNewDeployment(GridDeploymentMetadata meta, boolean isCache) {
    assert Thread.holdsLock(mux);
    assert meta.parentLoader() == null;

    GridUuid ldrId = GridUuid.randomUuid();

    GridDeploymentClassLoader clsLdr;

    if (meta.deploymentMode() == CONTINUOUS || meta.participants() == null) {
        // Create peer class loader.
        // Note that we are passing an empty list for local P2P exclude, as it really
        // does not make sense with shared deployment.
        clsLdr = new GridDeploymentClassLoader(
            ldrId,
            meta.userVersion(),
            meta.deploymentMode(),
            false,
            ctx,
            ctxLdr,
            meta.classLoaderId(),
            meta.senderNodeId(),
            meta.sequenceNumber(),
            comm,
            ctx.config().getNetworkTimeout(),
            log,
            ctx.config().getPeerClassLoadingClassPathExclude(),
            ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
            meta.deploymentMode() == CONTINUOUS /* Enable class byte cache in CONTINUOUS mode. */);

        if (meta.participants() != null)
            for (Map.Entry<UUID, GridTuple2<GridUuid, Long>> e : meta.participants().entrySet())
                clsLdr.register(e.getKey(), e.getValue().get1(), e.getValue().get2());

        if (log.isDebugEnabled())
            log.debug("Created class loader in CONTINUOUS mode or without participants " +
                "[ldr=" + clsLdr + ", meta=" + meta + ']');
    }
    else {
        assert meta.deploymentMode() == SHARED;

        // Create peer class loader.
        // Note that we are passing an empty list for local P2P exclude, as it really
        // does not make sense with shared deployment.
        clsLdr = new GridDeploymentClassLoader(
            ldrId,
            meta.userVersion(),
            meta.deploymentMode(),
            false,
            ctx,
            ctxLdr,
            meta.participants(),
            comm,
            ctx.config().getNetworkTimeout(),
            log,
            ctx.config().getPeerClassLoadingClassPathExclude(),
            ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
            false);

        if (log.isDebugEnabled())
            log.debug("Created class loader in SHARED mode with participants " +
                "[ldr=" + clsLdr + ", meta=" + meta + ']');
    }

    // Give this deployment a unique class loader ID to emphasize that this
    // ID is unique to this shared deployment and is not the ID of the loader on
    // the sender node.
    SharedDeployment dep = new SharedDeployment(meta.deploymentMode(), clsLdr, ldrId, -1,
        meta.userVersion(), meta.alias());

    if (log.isDebugEnabled())
        log.debug("Created new deployment: " + dep);

    if (isCache) {
        List<SharedDeployment> deps =
            F.addIfAbsent(cache, meta.userVersion(), new LinkedList<SharedDeployment>());

        assert deps != null;

        deps.add(dep);

        if (log.isDebugEnabled())
            log.debug("Added deployment to cache: " + cache);
    }

    return dep;
}
/**
 * Removes obsolete deployments in case of redeploy.
 *
 * @param meta Request metadata.
 * @return Tuple of a flag indicating whether processing should proceed and
 *      the new shared deployment, if one was created.
 */
private GridTuple2<Boolean, SharedDeployment> checkRedeploy(GridDeploymentMetadata meta) {
    assert Thread.holdsLock(mux);

    SharedDeployment newDep = null;

    for (List<SharedDeployment> deps : cache.values()) {
        for (SharedDeployment dep : deps) {
            if (!dep.isUndeployed() && !dep.isPendingUndeploy()) {
                long undeployTimeout = ctx.config().getNetworkTimeout();

                SharedDeployment doomed = null;

                // Only check deployments with no participants.
                if (!dep.hasParticipants()) {
                    // In case of SHARED deployment it is possible to get here if
                    // unmarshalling happens during undeploy. In this case, we
                    // simply don't do anything.
                    if (dep.deployMode() == CONTINUOUS) {
                        if (dep.existingDeployedClass(meta.className()) != null) {
                            // Change from shared deploy to shared undeploy or user version change.
                            // Simply remove all deployments with no participating nodes.
                            if (meta.deploymentMode() == SHARED || !meta.userVersion().equals(dep.userVersion()))
                                doomed = dep;
                        }
                    }
                }
                // If there are participants, we undeploy if class loader ID on some node changed.
                else if (dep.existingDeployedClass(meta.className()) != null) {
                    GridTuple2<GridUuid, Long> ldr = dep.getClassLoaderId(meta.senderNodeId());

                    if (ldr != null) {
                        if (!ldr.get1().equals(meta.classLoaderId())) {
                            // If deployed sequence number is less, then schedule for undeployment.
                            if (ldr.get2() < meta.sequenceNumber()) {
                                if (log.isDebugEnabled())
                                    log.debug("Received request for a class with newer sequence number " +
                                        "(will schedule current class for undeployment) [newSeq=" +
                                        meta.sequenceNumber() + ", oldSeq=" + ldr.get2() +
                                        ", senderNodeId=" + meta.senderNodeId() + ", newClsLdrId=" +
                                        meta.classLoaderId() + ", oldClsLdrId=" + ldr.get1() + ']');

                                doomed = dep;
                            }
                            else if (ldr.get2() > meta.sequenceNumber()) {
                                long time = System.currentTimeMillis() - dep.timestamp();

                                if (newDep == null && time < ctx.config().getNetworkTimeout()) {
                                    // Set undeployTimeout, so the class will be scheduled
                                    // for undeployment.
                                    undeployTimeout = ctx.config().getNetworkTimeout() - time;

                                    if (log.isDebugEnabled())
                                        log.debug("Received execution request for a stale class (will deploy and " +
                                            "schedule undeployment in " + undeployTimeout + "ms) " +
                                            "[curSeq=" + ldr.get2() + ", staleSeq=" + meta.sequenceNumber() +
                                            ", cls=" + meta.className() + ", senderNodeId=" + meta.senderNodeId() +
                                            ", curLdrId=" + ldr.get1() + ", staleLdrId=" +
                                            meta.classLoaderId() + ']');

                                    // We got the redeployed class before the old one.
                                    // Simply create a temporary deployment for the sender node,
                                    // and schedule undeploy for it.
                                    newDep = createNewDeployment(meta, false);

                                    doomed = newDep;
                                }
                                else {
                                    U.warn(log, "Received execution request for a class that has been redeployed " +
                                        "(will ignore): " + meta.alias());

                                    if (log.isDebugEnabled())
                                        log.debug("Received execution request for a class that has been redeployed " +
                                            "(will ignore) [alias=" + meta.alias() + ", dep=" + dep + ']');

                                    return F.t(false, null);
                                }
                            }
                            else {
                                U.error(log, "Sequence number does not correspond to class loader ID [seqNum=" +
                                    meta.sequenceNumber() + ", dep=" + dep + ']');

                                return F.t(false, null);
                            }
                        }
                    }
                }

                if (doomed != null) {
                    doomed.onUndeployScheduled();

                    if (log.isDebugEnabled())
                        log.debug("Deployment was scheduled for undeploy: " + doomed);

                    // Lifespan time.
                    final long endTime = System.currentTimeMillis() + undeployTimeout;

                    // Deployment to undeploy.
                    final SharedDeployment undep = doomed;

                    ctx.timeout().addTimeoutObject(new GridTimeoutObject() {
                        @Override public GridUuid timeoutId() {
                            return undep.classLoaderId();
                        }

                        @Override public long endTime() {
                            return endTime < 0 ? Long.MAX_VALUE : endTime;
                        }

                        @Override public void onTimeout() {
                            boolean removed = false;

                            // Hot redeployment.
                            synchronized (mux) {
                                assert undep.isPendingUndeploy();

                                if (!undep.isUndeployed()) {
                                    undep.undeploy();

                                    undep.onRemoved();

                                    removed = true;

                                    Collection<SharedDeployment> deps = cache.get(undep.userVersion());

                                    if (deps != null) {
                                        for (Iterator<SharedDeployment> i = deps.iterator(); i.hasNext();)
                                            if (i.next() == undep)
                                                i.remove();

                                        if (deps.isEmpty())
                                            cache.remove(undep.userVersion());
                                    }

                                    if (log.isInfoEnabled())
                                        log.info("Undeployed class loader due to deployment mode change, " +
                                            "user version change, or hot redeployment: " + undep);
                                }
                            }

                            // Outside of synchronization.
                            if (removed)
                                undep.recordUndeployed(null);
                        }
                    });
                }
            }
        }
    }

    if (newDep != null) {
        List<SharedDeployment> list = F.addIfAbsent(cache, meta.userVersion(), F.<SharedDeployment>newList());

        assert list != null;

        list.add(newDep);
    }

    return F.t(true, newDep);
}
/**
 * Records all undeployed tasks.
 *
 * @param nodeId Left node ID.
 * @param undeployed Undeployed deployments.
 */
private void recordUndeployed(@Nullable UUID nodeId, Collection<SharedDeployment> undeployed) {
    if (!F.isEmpty(undeployed))
        for (SharedDeployment d : undeployed)
            d.recordUndeployed(nodeId);
}