/** @throws Exception If failed. */ public void testCreateFileColocated() throws Exception { GridGgfsPath path = new GridGgfsPath("/colocated"); UUID uuid = UUID.randomUUID(); GridUuid affKey; long idx = 0; while (true) { affKey = new GridUuid(uuid, idx); if (grid(0).mapKeyToNode(DATA_CACHE_NAME, affKey).id().equals(grid(0).localNode().id())) break; idx++; } try (GridGgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) { // Write 15M, should be enough to test distribution. for (int i = 0; i < 15; i++) out.write(new byte[1024 * 1024]); } GridGgfsFile info = fs.info(path); Collection<GridGgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length()); assertEquals(1, affNodes.size()); Collection<UUID> nodeIds = F.first(affNodes).nodeIds(); assertEquals(1, nodeIds.size()); assertEquals(grid(0).localNode().id(), F.first(nodeIds)); }
/** * @param nodes Nodes. * @param id ID. * @throws IgniteCheckedException If failed. */ private void sendAllPartitions( Collection<? extends ClusterNode> nodes, GridDhtPartitionExchangeId id) throws IgniteCheckedException { GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(id, lastVer.get(), id.topologyVersion()); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (!cacheCtx.isLocal()) { AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion(); boolean ready = startTopVer == null || startTopVer.compareTo(id.topologyVersion()) <= 0; if (ready) m.addFullPartitionsMap(cacheCtx.cacheId(), cacheCtx.topology().partitionMap(true)); } } // It is important that client topologies be added after contexts. for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) m.addFullPartitionsMap(top.cacheId(), top.partitionMap(true)); if (log.isDebugEnabled()) log.debug( "Sending full partition map [nodeIds=" + F.viewReadOnly(nodes, F.node2id()) + ", exchId=" + exchId + ", msg=" + m + ']'); cctx.io().safeSend(nodes, m, SYSTEM_POOL, null); }
/** @param node Node to remove. */ public void removeMappedNode(GridNode node) { if (mappedDhtNodes.contains(node)) mappedDhtNodes = new ArrayList<>(F.view(mappedDhtNodes, F.notEqualTo(node))); if (mappedNearNodes != null && mappedNearNodes.contains(node)) mappedNearNodes = new ArrayList<>(F.view(mappedNearNodes, F.notEqualTo(node))); }
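The method above swaps in a fresh list instead of mutating the existing one, so readers that already obtained the old reference are never affected. A minimal standalone sketch of that copy-on-write removal pattern (the class and field names below are illustrative, not from the source):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class CopyOnRemoveList<T> {
    /** Volatile reference so readers always see a fully built list (illustrative assumption). */
    private volatile List<T> elems = Collections.emptyList();

    void set(List<T> initial) {
        elems = new ArrayList<>(initial);
    }

    /** Removes an element by building a new list, mirroring removeMappedNode() above. */
    void remove(T el) {
        if (elems.contains(el)) {
            List<T> copy = new ArrayList<>(elems);

            copy.remove(el);

            elems = copy;
        }
    }

    List<T> view() {
        return elems; // Readers iterate a snapshot that is never modified afterwards.
    }
}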
/** @throws Exception If failed. */ public void testAffinityPut() throws Exception { Thread.sleep(2 * TOP_REFRESH_FREQ); assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size()); Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT); GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME); GridClientCompute compute = client.compute(); for (int i = 0; i < NODES_CNT; i++) gridsByLocNode.put(grid(i).localNode().id(), grid(i)); for (int i = 0; i < 100; i++) { String key = "key" + i; UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id(); assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key)); // Must go to the primary node only. Since backup count is 0, the value must be present on the primary node only. partitioned.put(key, "val" + key); for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) { Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key); if (primaryNodeId.equals(entry.getKey())) assertEquals("val" + key, val); else assertNull(val); } } // Now check that we will see the value in the near cache in pinned mode. for (int i = 100; i < 200; i++) { String pinnedKey = "key" + i; UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id(); UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId))); GridClientNode node = compute.node(pinnedNodeId); partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey); for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) { Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey); if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey())) assertEquals("val" + pinnedKey, val); else assertNull(val); } } }
/** * Checks for explicit task events configuration. * * @param ignite Grid instance. * @return {@code true} if all task events are explicitly specified in the configuration. */ public static boolean checkExplicitTaskMonitoring(Ignite ignite) { int[] evts = ignite.configuration().getIncludeEventTypes(); if (F.isEmpty(evts)) return false; for (int evt : VISOR_TASK_EVTS) { if (!F.contains(evts, evt)) return false; } return true; }
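A hedged usage sketch of a node configuration under which checkExplicitTaskMonitoring() would return true, assuming VISOR_TASK_EVTS is a subset of the standard task execution events; the class name is illustrative:

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.EventType;

public class ExplicitTaskEventsExample {
    public static void main(String[] args) {
        IgniteConfiguration cfg = new IgniteConfiguration();

        // Explicitly enable all task execution events; by default no events are recorded.
        cfg.setIncludeEventTypes(EventType.EVTS_TASK_EXECUTION);

        try (Ignite ignite = Ignition.start(cfg)) {
            // checkExplicitTaskMonitoring(ignite) is expected to return true here,
            // assuming VISOR_TASK_EVTS contains only task execution event types.
        }
    }
}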
/** * @param cacheId Cache ID. * @return {@code True} if local client has been added. */ public boolean isLocalClientAdded(int cacheId) { if (!F.isEmpty(reqs)) { for (DynamicCacheChangeRequest req : reqs) { if (req.start() && F.eq(req.initiatingNodeId(), cctx.localNodeId())) { if (CU.cacheId(req.cacheName()) == cacheId) return true; } } } return false; }
/** * @param cacheId Cache ID to check. * @param topVer Topology version. * @return {@code True} if cache was added during this exchange. */ public boolean isCacheAdded(int cacheId, AffinityTopologyVersion topVer) { if (!F.isEmpty(reqs)) { for (DynamicCacheChangeRequest req : reqs) { if (req.start() && !req.clientStartOnly()) { if (CU.cacheId(req.cacheName()) == cacheId) return true; } } } GridCacheContext<?, ?> cacheCtx = cctx.cacheContext(cacheId); return cacheCtx != null && F.eq(cacheCtx.startTopologyVersion(), topVer); }
/** @throws Exception If failed. */ public void testTopologyListener() throws Exception { final Collection<UUID> added = new ArrayList<>(1); final Collection<UUID> rmvd = new ArrayList<>(1); final CountDownLatch addedLatch = new CountDownLatch(1); final CountDownLatch rmvLatch = new CountDownLatch(1); assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size()); GridClientTopologyListener lsnr = new GridClientTopologyListener() { @Override public void onNodeAdded(GridClientNode node) { added.add(node.nodeId()); addedLatch.countDown(); } @Override public void onNodeRemoved(GridClientNode node) { rmvd.add(node.nodeId()); rmvLatch.countDown(); } }; client.addTopologyListener(lsnr); try { Grid g = startGrid(NODES_CNT + 1); UUID id = g.localNode().id(); assertTrue(addedLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS)); assertEquals(1, added.size()); assertEquals(id, F.first(added)); stopGrid(NODES_CNT + 1); assertTrue(rmvLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS)); assertEquals(1, rmvd.size()); assertEquals(id, F.first(rmvd)); } finally { client.removeTopologyListener(lsnr); stopGrid(NODES_CNT + 1); } }
/** * @param mapping Mapping to order. * @param committedVers Committed versions. * @param rolledbackVers Rolled back versions. */ void orderCompleted( GridDistributedTxMapping<K, V> mapping, Collection<GridCacheVersion> committedVers, Collection<GridCacheVersion> rolledbackVers) { for (GridCacheTxEntry<K, V> txEntry : F.concat(false, mapping.reads(), mapping.writes())) { while (true) { GridDistributedCacheEntry<K, V> entry = (GridDistributedCacheEntry<K, V>) txEntry.cached(); try { // Handle explicit locks. GridCacheVersion base = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer; entry.doneRemote(xidVer, base, committedVers, rolledbackVers); if (ec()) entry.recheck(); break; } catch (GridCacheEntryRemovedException ignored) { assert entry.obsoleteVersion() != null; if (log.isDebugEnabled()) log.debug( "Replacing obsolete entry in remote transaction [entry=" + entry + ", tx=" + this + ']'); // Replace the entry. txEntry.cached(cctx.cache().entryEx(txEntry.key()), entry.keyBytes()); } } } }
/** * @param pid PID of the other party. * @param size Size of the space. * @return Token pair. */ private IgnitePair<String> inOutToken(int pid, int size) { while (true) { long idx = tokIdxGen.get(); if (tokIdxGen.compareAndSet(idx, idx + 2)) return F.pair( new File(tokDir, TOKEN_FILE_NAME + idx + "-" + pid + "-" + size).getAbsolutePath(), new File(tokDir, TOKEN_FILE_NAME + (idx + 1) + "-" + pid + "-" + size) .getAbsolutePath()); } }
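The CAS loop above reserves two consecutive indices per call, so concurrent callers never receive overlapping token file names. A minimal standalone sketch of the same reservation pattern (names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

class TokenIndexReserver {
    private final AtomicLong idxGen = new AtomicLong();

    /** Returns the first index of a reserved pair (idx, idx + 1). */
    long reservePair() {
        while (true) {
            long idx = idxGen.get();

            // CAS guarantees that only one thread advances the counter from this value,
            // so each caller owns a distinct pair. Equivalent to idxGen.getAndAdd(2);
            // the explicit loop mirrors the original code.
            if (idxGen.compareAndSet(idx, idx + 2))
                return idx;
        }
    }
}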
/** * Starts dynamic caches. * * @throws IgniteCheckedException If failed. */ private void startCaches() throws IgniteCheckedException { cctx.cache() .prepareCachesStart( F.view( reqs, new IgnitePredicate<DynamicCacheChangeRequest>() { @Override public boolean apply(DynamicCacheChangeRequest req) { return req.start(); } }), exchId.topologyVersion()); }
/** * Runs a command in a separate console. * * @param workFolder Work folder for the command. * @param args A string array containing the program and its arguments. * @return Started process. * @throws IOException If failed to start the process. */ public static Process openInConsole(@Nullable File workFolder, String... args) throws IOException { String[] commands = args; String cmd = F.concat(Arrays.asList(args), " "); if (U.isWindows()) commands = F.asArray("cmd", "/c", String.format("start %s", cmd)); if (U.isMacOs()) commands = F.asArray( "osascript", "-e", String.format("tell application \"Terminal\" to do script \"%s\"", cmd)); if (U.isUnix()) commands = F.asArray("xterm", "-sl", "1024", "-geometry", "200x50", "-e", cmd); ProcessBuilder pb = new ProcessBuilder(commands); if (workFolder != null) pb.directory(workFolder); return pb.start(); }
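A hedged usage sketch for openInConsole(); it assumes the method is visible (same utility class or static import), and the command and work folder are purely illustrative:

import java.io.File;
import java.io.IOException;

public class OpenInConsoleExample {
    public static void main(String[] args) throws IOException {
        // Opens a platform-specific terminal (cmd, Terminal.app or xterm) running the command.
        Process proc = openInConsole(new File(System.getProperty("user.home")), "java", "-version");

        System.out.println("Console process started: " + proc.isAlive());
    }
}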
/** @return Involved nodes. */ @Override public Collection<? extends GridNode> nodes() { return F.viewReadOnly( futures(), new GridClosure<GridFuture<?>, GridRichNode>() { @Nullable @Override public GridRichNode apply(GridFuture<?> f) { if (isMini(f)) return ((MiniFuture) f).node(); return cctx.rich().rich(cctx.discovery().localNode()); } }); }
/** * @param cacheId Cache ID to check. * @return {@code True} if cache is stopping by this exchange. */ private boolean stopping(int cacheId) { boolean stopping = false; if (!F.isEmpty(reqs)) { for (DynamicCacheChangeRequest req : reqs) { if (cacheId == CU.cacheId(req.cacheName())) { stopping = req.stop(); break; } } } return stopping; }
/** @param mappings Mappings. */ void addEntryMapping(@Nullable Map<UUID, GridDistributedTxMapping<K, V>> mappings) { if (!F.isEmpty(mappings)) { this.mappings.putAll(mappings); if (log.isDebugEnabled()) log.debug( "Added mappings to transaction [locId=" + cctx.nodeId() + ", mappings=" + mappings + ", tx=" + this + ']'); } }
/** * @param nodeId Node ID. * @param retryCnt Number of retries. */ private void sendAllPartitions(final UUID nodeId, final int retryCnt) { ClusterNode n = cctx.node(nodeId); try { if (n != null) sendAllPartitions(F.asList(n), exchId); } catch (IgniteCheckedException e) { if (e instanceof ClusterTopologyCheckedException || !cctx.discovery().alive(n)) { log.debug( "Failed to send full partition map to node, node left grid " + "[rmtNode=" + nodeId + ", exchangeId=" + exchId + ']'); return; } if (retryCnt > 0) { long timeout = cctx.gridConfig().getNetworkSendRetryDelay(); LT.error( log, e, "Failed to send full partition map to node (will retry after timeout) " + "[node=" + nodeId + ", exchangeId=" + exchId + ", timeout=" + timeout + ']'); cctx.time() .addTimeoutObject( new GridTimeoutObjectAdapter(timeout) { @Override public void onTimeout() { sendAllPartitions(nodeId, retryCnt - 1); } }); } else U.error( log, "Failed to send full partition map [node=" + n + ", exchangeId=" + exchId + ']', e); } }
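The method above retries by rescheduling itself through the grid timeout processor using the configured network send retry delay. A standalone sketch of the same bounded retry-with-delay pattern, expressed with a plain ScheduledExecutorService instead of the grid API (all names here are illustrative):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class RetrySender {
    private final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();

    /** Runs the send action, rescheduling it up to retryCnt more times on failure. */
    void sendWithRetry(Runnable send, int retryCnt, long delayMs) {
        try {
            send.run();
        }
        catch (RuntimeException e) {
            if (retryCnt > 0)
                // Reschedule the same operation after the configured delay.
                timer.schedule(() -> sendWithRetry(send, retryCnt - 1, delayMs), delayMs, TimeUnit.MILLISECONDS);
            else
                System.err.println("Failed to send after retries: " + e);
        }
    }
}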
/** * @param cctx Context. * @param tx Transaction. * @param commit Commit flag. */ public GridNearTxFinishFuture( GridCacheContext<K, V> cctx, GridNearTxLocal<K, V> tx, boolean commit) { super(cctx.kernalContext(), F.<GridCacheTx>identityReducer(tx)); assert cctx != null; this.cctx = cctx; this.tx = tx; this.commit = commit; mappings = tx.mappings(); futId = GridUuid.randomUuid(); log = U.logger(ctx, logRef, GridNearTxFinishFuture.class); }
/** {@inheritDoc} */ @Override public void isolated(boolean isolated) throws GridException { if (isolated()) return; GridNode node = F.first(ctx.grid().forCache(cacheName).nodes()); if (node == null) throw new GridException("Failed to get node for cache: " + cacheName); GridCacheAttributes a = U.cacheAttributes(node, cacheName); assert a != null; updater = a.atomicityMode() == GridCacheAtomicityMode.ATOMIC ? GridDataLoadCacheUpdaters.<K, V>batched() : GridDataLoadCacheUpdaters.<K, V>groupLocked(); }
/** {@inheritDoc} */ @Override public void start() throws IgniteCheckedException { IpcSharedMemoryNativeLoader.load(log); pid = IpcSharedMemoryUtils.pid(); if (pid == -1) throw new IpcEndpointBindException("Failed to get PID of the current process."); if (size <= 0) throw new IpcEndpointBindException("Space size should be positive: " + size); String tokDirPath = this.tokDirPath; if (F.isEmpty(tokDirPath)) throw new IpcEndpointBindException("Token directory path is empty."); tokDirPath = tokDirPath + '/' + locNodeId.toString() + '-' + IpcSharedMemoryUtils.pid(); tokDir = U.resolveWorkDirectory(tokDirPath, false); if (port <= 0 || port >= 0xffff) throw new IpcEndpointBindException("Port value is illegal: " + port); try { srvSock = new ServerSocket(); // Always bind to loopback. srvSock.bind(new InetSocketAddress("127.0.0.1", port)); } catch (IOException e) { // Although empty socket constructor never throws exception, close it just in case. U.closeQuiet(srvSock); throw new IpcEndpointBindException( "Failed to bind shared memory IPC endpoint (is port already " + "in use?): " + port, e); } gcWorker = new GcWorker(gridName, "ipc-shmem-gc", log); new IgniteThread(gcWorker).start(); if (log.isInfoEnabled()) log.info( "IPC shared memory server endpoint started [port=" + port + ", tokDir=" + tokDir.getAbsolutePath() + ']'); }
/** {@inheritDoc} */ @Override public void testRemoteNodes() throws Exception { int size = remoteNodeIds().size(); String name = "oneMoreGrid"; try { Ignite g = startGrid(name); UUID joinedId = g.cluster().localNode().id(); assert projection().forRemotes().nodes().size() == size + 1; assert F.nodeIds(projection().forRemotes().nodes()).contains(joinedId); } finally { stopGrid(name); } }
/** * Finds all files in a folder and in its sub-tree of specified depth. * * @param file Starting folder. * @param maxDepth Depth of the tree. If 1, look only in this folder without sub-folders. * @param filter File filter. * @return List of found files. */ public static List<VisorLogFile> fileTree(File file, int maxDepth, @Nullable FileFilter filter) { if (file.isDirectory()) { File[] files = (filter == null) ? file.listFiles() : file.listFiles(filter); if (files == null) return Collections.emptyList(); List<VisorLogFile> res = new ArrayList<>(files.length); for (File f : files) { if (f.isFile() && f.length() > 0) res.add(new VisorLogFile(f)); else if (maxDepth > 1) res.addAll(fileTree(f, maxDepth - 1, filter)); } return res; } return F.asList(new VisorLogFile(file)); }
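A hedged usage sketch for fileTree(); it assumes fileTree and VisorLogFile are visible as above, and the starting folder is illustrative. Note that the filter is also applied to directories, so it must accept them for sub-folders to be traversed:

import java.io.File;
import java.io.FileFilter;
import java.util.List;

public class FileTreeExample {
    public static void main(String[] args) {
        // Accept directories so recursion still descends into sub-folders.
        FileFilter logsOnly = f -> f.isDirectory() || f.getName().endsWith(".log");

        // maxDepth = 2: the folder itself plus one level of sub-folders.
        List<VisorLogFile> logs = fileTree(new File("work/log"), 2, logsOnly);

        System.out.println("Found log files: " + logs.size());
    }
}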
/** @return {@code True} if succeeded. */ private boolean spreadPartitions() { try { sendAllPartitions(rmtNodes, exchId); return true; } catch (IgniteCheckedException e) { scheduleRecheck(); if (!X.hasCause(e, InterruptedException.class)) U.error( log, "Failed to send full partition map to nodes (will retry after timeout) [nodes=" + F.nodeId8s(rmtNodes) + ", exchangeId=" + exchId + ']', e); return false; } }
/** {@inheritDoc} */ @Override public GridFuture<?> addData(Map.Entry<K, V> entry) throws GridException, IllegalStateException { A.notNull(entry, "entry"); return addData(F.asList(entry)); }
/** Clears values for this partition. */ private void clearAll() { GridCacheVersion clearVer = cctx.versions().next(); boolean swap = cctx.isSwapOrOffheapEnabled(); boolean rec = cctx.events().isRecordable(EVT_CACHE_REBALANCE_OBJECT_UNLOADED); Iterator<GridDhtCacheEntry> it = map.values().iterator(); GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> swapIt = null; if (swap && GridQueryProcessor.isEnabled(cctx.config())) { // Indexing needs to unswap cache values. Iterator<GridDhtCacheEntry> unswapIt = null; try { swapIt = cctx.swap().iterator(id); unswapIt = unswapIterator(swapIt); } catch (Exception e) { U.error(log, "Failed to clear swap for evicted partition: " + this, e); } if (unswapIt != null) it = F.concat(it, unswapIt); } try { while (it.hasNext()) { GridDhtCacheEntry cached = it.next(); try { if (cached.clearInternal(clearVer, swap)) { map.remove(cached.key(), cached); if (!cached.isInternal()) { mapPubSize.decrement(); if (rec) cctx.events() .addEvent( cached.partition(), cached.key(), cctx.localNodeId(), (IgniteUuid) null, null, EVT_CACHE_REBALANCE_OBJECT_UNLOADED, null, false, cached.rawGet(), cached.hasValue(), null, null, null); } } } catch (IgniteCheckedException e) { U.error(log, "Failed to clear cache entry for evicted partition: " + cached, e); } } } finally { U.close(swapIt, log); } }
/** @throws Exception If failed. */ public void testCreateFileFragmented() throws Exception { GridGgfsEx impl = (GridGgfsEx) grid(0).ggfs("ggfs"); GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer(); GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false); GridGgfsPath path = new GridGgfsPath("/file"); try { GridGgfs fs0 = grid(0).ggfs("ggfs"); GridGgfs fs1 = grid(1).ggfs("ggfs"); GridGgfs fs2 = grid(2).ggfs("ggfs"); try (GridGgfsOutputStream out = fs0.create( path, 128, false, 1, CFG_GRP_SIZE, F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) { // 1.5 blocks byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2]; Arrays.fill(data, (byte) 1); out.write(data); } try (GridGgfsOutputStream out = fs1.append(path, false)) { // 1.5 blocks. byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2]; Arrays.fill(data, (byte) 2); out.write(data); } // After this we should have first two block colocated with grid 0 and last block colocated // with grid 1. GridGgfsFileImpl fileImpl = (GridGgfsFileImpl) fs.info(path); GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME); GridGgfsFileInfo fileInfo = (GridGgfsFileInfo) metaCache.get(fileImpl.fileId()); GridGgfsFileMap map = fileInfo.fileMap(); List<GridGgfsFileAffinityRange> ranges = map.ranges(); assertEquals(2, ranges.size()); assertTrue(ranges.get(0).startOffset() == 0); assertTrue(ranges.get(0).endOffset() == 2 * CFG_BLOCK_SIZE - 1); assertTrue(ranges.get(1).startOffset() == 2 * CFG_BLOCK_SIZE); assertTrue(ranges.get(1).endOffset() == 3 * CFG_BLOCK_SIZE - 1); // Validate data read after colocated writes. try (GridGgfsInputStream in = fs2.open(path)) { // Validate first part of file. for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read()); // Validate second part of file. for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read()); assertEquals(-1, in.read()); } } finally { GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true); boolean hasData = false; for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty(); assertTrue(hasData); fs.delete(path, true); } GridTestUtils.retryAssert( log, ASSERT_RETRIES, ASSERT_RETRY_INTERVAL, new CAX() { @Override public void applyx() { for (int i = 0; i < NODES_CNT; i++) assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty()); } }); }
/** {@inheritDoc} */ @Override public Map<String, Object> getNodeAttributes() throws GridSpiException { return F.<String, Object>asMap( createSpiAttributeName(PRIORITY_ATTRIBUTE_KEY), getPriorityAttributeKey()); }
/** * Starts activity. * * @throws IgniteInterruptedCheckedException If interrupted. */ public void init() throws IgniteInterruptedCheckedException { if (isDone()) return; if (init.compareAndSet(false, true)) { if (isDone()) return; try { // Wait for event to occur to make sure that discovery // will return corresponding nodes. U.await(evtLatch); assert discoEvt != null : this; assert !dummy && !forcePreload : this; ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion()); oldestNode.set(oldest); startCaches(); // True if client node joined or failed. boolean clientNodeEvt; if (F.isEmpty(reqs)) { int type = discoEvt.type(); assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt; clientNodeEvt = CU.clientNode(discoEvt.eventNode()); } else { assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt; boolean clientOnlyStart = true; for (DynamicCacheChangeRequest req : reqs) { if (!req.clientStartOnly()) { clientOnlyStart = false; break; } } clientNodeEvt = clientOnlyStart; } if (clientNodeEvt) { ClusterNode node = discoEvt.eventNode(); // Client need to initialize affinity for local join event or for stated client caches. if (!node.isLocal()) { for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; GridDhtPartitionTopology top = cacheCtx.topology(); top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId())); if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) { initTopology(cacheCtx); top.beforeExchange(this); } else cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion()); } if (exchId.isLeft()) cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion()); onDone(exchId.topologyVersion()); skipPreload = cctx.kernalContext().clientNode(); return; } } if (cctx.kernalContext().clientNode()) { skipPreload = true; for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; GridDhtPartitionTopology top = cacheCtx.topology(); top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId())); } for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; initTopology(cacheCtx); } if (oldestNode.get() != null) { rmtNodes = new ConcurrentLinkedQueue<>( CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion())); rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes))); ready.set(true); initFut.onDone(true); if (log.isDebugEnabled()) log.debug("Initialized future: " + this); sendPartitions(); } else onDone(exchId.topologyVersion()); return; } assert oldestNode.get() != null; for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) { if (cacheCtx .discovery() .cacheAffinityNodes(cacheCtx.name(), topologyVersion()) .isEmpty()) U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex()); } cacheCtx.preloader().onExchangeFutureAdded(); } List<String> cachesWithoutNodes = null; if (exchId.isLeft()) { for (String name : cctx.cache().cacheNames()) { if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) { if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>(); cachesWithoutNodes.add(name); // Fire event even if there is no client cache started. 
if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) { Event evt = new CacheEvent( name, cctx.localNode(), cctx.localNode(), "All server nodes have left the cluster.", EventType.EVT_CACHE_NODES_LEFT, 0, false, null, null, null, null, false, null, false, null, null, null); cctx.gridEvents().record(evt); } } } } if (cachesWithoutNodes != null) { StringBuilder sb = new StringBuilder( "All server nodes for the following caches have left the cluster: "); for (int i = 0; i < cachesWithoutNodes.size(); i++) { String cache = cachesWithoutNodes.get(i); sb.append('\'').append(cache).append('\''); if (i != cachesWithoutNodes.size() - 1) sb.append(", "); } U.quietAndWarn(log, sb.toString()); U.quietAndWarn(log, "Must have server nodes for caches to operate."); } assert discoEvt != null; assert exchId.nodeId().equals(discoEvt.eventNode().id()); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId()); long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence(); // Update before waiting for locks. if (!cacheCtx.isLocal()) cacheCtx .topology() .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId())); } // Grab all alive remote nodes with order of equal or less than last joined node. rmtNodes = new ConcurrentLinkedQueue<>( CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion())); rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes))); for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet()) // If received any messages, process them. onReceive(m.getKey(), m.getValue()); for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet()) // If received any messages, process them. onReceive(m.getKey(), m.getValue()); AffinityTopologyVersion topVer = exchId.topologyVersion(); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; // Must initialize topology after we get discovery event. initTopology(cacheCtx); cacheCtx.preloader().updateLastExchangeFuture(this); } IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer); // Assign to class variable so it will be included into toString() method. this.partReleaseFut = partReleaseFut; if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this); while (true) { try { partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS); break; } catch (IgniteFutureTimeoutCheckedException ignored) { // Print pending transactions and locks that might have led to hang. dumpPendingObjects(); } } if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this); if (!F.isEmpty(reqs)) blockGateways(); if (exchId.isLeft()) cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion()); IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion()); while (true) { try { locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS); break; } catch (IgniteFutureTimeoutCheckedException ignored) { U.warn( log, "Failed to wait for locks release future. 
" + "Dumping pending objects that might be the cause: " + cctx.localNodeId()); U.warn(log, "Locked entries:"); Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks = cctx.mvcc().unfinishedLocks(exchId.topologyVersion()); for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet()) U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']'); } } for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; // Notify replication manager. GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx; if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft()); // Partition release future is done so we can flush the write-behind store. cacheCtx.store().forceFlush(); // Process queued undeploys prior to sending/spreading map. cacheCtx.preloader().unwindUndeploys(); GridDhtPartitionTopology top = cacheCtx.topology(); assert topVer.equals(top.topologyVersion()) : "Topology version is updated only in this class instances inside single ExchangeWorker thread."; top.beforeExchange(this); } for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) { top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId())); top.beforeExchange(this); } } catch (IgniteInterruptedCheckedException e) { onDone(e); throw e; } catch (Throwable e) { U.error( log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e); onDone(e); if (e instanceof Error) throw (Error) e; return; } if (F.isEmpty(rmtIds)) { onDone(exchId.topologyVersion()); return; } ready.set(true); initFut.onDone(true); if (log.isDebugEnabled()) log.debug("Initialized future: " + this); // If this node is not oldest. if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions(); else { boolean allReceived = allReceived(); if (allReceived && replied.compareAndSet(false, true)) { if (spreadPartitions()) onDone(exchId.topologyVersion()); } } scheduleRecheck(); } else assert false : "Skipped init future: " + this; }
/** {@inheritDoc} */ @Override public void onCollision( Collection<GridCollisionJobContext> waitJobs, Collection<GridCollisionJobContext> activeJobs) { assert waitJobs != null; assert activeJobs != null; int activeSize = F.size(activeJobs, RUNNING_JOBS); waitingCnt.set(waitJobs.size()); runningCnt.set(activeSize); heldCnt.set(activeJobs.size() - activeSize); int waitSize = waitJobs.size(); int activateCnt = parallelJobsNum - activeSize; if (activateCnt > 0 && !waitJobs.isEmpty()) { if (waitJobs.size() <= activateCnt) { for (GridCollisionJobContext waitJob : waitJobs) { waitJob.activate(); waitSize--; } } else { List<GridCollisionJobContext> passiveList = new ArrayList<GridCollisionJobContext>(waitJobs); Collections.sort( passiveList, new Comparator<GridCollisionJobContext>() { /** {@inheritDoc} */ @Override public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) { int p1 = getJobPriority(o1); int p2 = getJobPriority(o2); return p1 < p2 ? 1 : p1 == p2 ? 0 : -1; } }); if (preventStarvation) bumpPriority(waitJobs, passiveList); for (int i = 0; i < activateCnt; i++) { passiveList.get(i).activate(); waitSize--; } } } if (waitSize > waitJobsNum) { List<GridCollisionJobContext> waitList = new ArrayList<GridCollisionJobContext>(waitJobs); // Put jobs with highest priority first. Collections.sort( waitList, new Comparator<GridCollisionJobContext>() { /** {@inheritDoc} */ @Override public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) { int p1 = getJobPriority(o1); int p2 = getJobPriority(o2); return p1 < p2 ? 1 : p1 == p2 ? 0 : -1; } }); int skip = waitJobs.size() - waitSize; int i = 0; for (GridCollisionJobContext waitCtx : waitList) { if (++i >= skip) { waitCtx.cancel(); if (--waitSize <= waitJobsNum) break; } } } }
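The comparator used twice above orders job contexts by descending priority (larger priority value first). A small self-contained check of that ordering with plain integers:

import java.util.Arrays;
import java.util.List;

public class PriorityOrderExample {
    public static void main(String[] args) {
        List<Integer> priorities = Arrays.asList(3, 7, 5);

        // Same ternary as in onCollision(): returns 1 when p1 < p2, so larger values sort first.
        priorities.sort((p1, p2) -> p1 < p2 ? 1 : p1.equals(p2) ? 0 : -1);

        System.out.println(priorities); // Prints [7, 5, 3].
    }
}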
/** * Initializes store. * * @throws GridException If failed to initialize. */ private void init() throws GridException { if (initGuard.compareAndSet(false, true)) { if (log.isDebugEnabled()) log.debug("Initializing cache store."); try { if (sesFactory != null) // Session factory has been provided - nothing to do. return; if (!F.isEmpty(hibernateCfgPath)) { try { URL url = new URL(hibernateCfgPath); sesFactory = new Configuration().configure(url).buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using URL: " + url); // Session factory has been successfully initialized. return; } catch (MalformedURLException e) { if (log.isDebugEnabled()) log.debug("Caught malformed URL exception: " + e.getMessage()); } // Provided path is not a valid URL. File? File cfgFile = new File(hibernateCfgPath); if (cfgFile.exists()) { sesFactory = new Configuration().configure(cfgFile).buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using file: " + hibernateCfgPath); // Session factory has been successfully initialized. return; } // Provided path is not a file. Classpath resource? sesFactory = new Configuration().configure(hibernateCfgPath).buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using classpath resource: " + hibernateCfgPath); } else { if (hibernateProps == null) { U.warn( log, "No Hibernate configuration has been provided for store (will use default)."); hibernateProps = new Properties(); hibernateProps.setProperty("hibernate.connection.url", DFLT_CONN_URL); hibernateProps.setProperty("hibernate.show_sql", DFLT_SHOW_SQL); hibernateProps.setProperty("hibernate.hbm2ddl.auto", DFLT_HBM2DDL_AUTO); } Configuration cfg = new Configuration(); cfg.setProperties(hibernateProps); assert resourceAvailable(MAPPING_RESOURCE); cfg.addResource(MAPPING_RESOURCE); sesFactory = cfg.buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using properties: " + hibernateProps); } } catch (HibernateException e) { throw new GridException("Failed to initialize store.", e); } finally { initLatch.countDown(); } } else if (initLatch.getCount() > 0) U.await(initLatch); if (sesFactory == null) throw new GridException("Cache store was not properly initialized."); }
/** @return Remaining node IDs. */ Collection<UUID> remaining() { if (rmtIds == null) return Collections.emptyList(); return F.lose(rmtIds, true, rcvdIds); }
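Assuming F.lose(rmtIds, true, rcvdIds) returns a copy of rmtIds without the elements of rcvdIds, the call amounts to the plain set difference sketched below (the helper name is illustrative):

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;

class RemainingNodes {
    /** Sketch of the assumed semantics: remote IDs minus the IDs already received. */
    static Collection<UUID> remaining(Collection<UUID> rmtIds, Collection<UUID> rcvdIds) {
        Set<UUID> res = new HashSet<>(rmtIds);

        // Drop every node ID from which a response has already been received.
        res.removeAll(rcvdIds);

        return res;
    }
}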