/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testPrepareQueue() throws Exception {
    // Random queue names.
    String queueName1 = UUID.randomUUID().toString();
    String queueName2 = UUID.randomUUID().toString();

    CollectionConfiguration colCfg = config(false);

    IgniteQueue queue1 = grid(0).queue(queueName1, 0, colCfg);
    IgniteQueue queue2 = grid(0).queue(queueName2, 0, colCfg);
    IgniteQueue queue3 = grid(0).queue(queueName1, 0, colCfg);

    assertNotNull(queue1);
    assertNotNull(queue2);
    assertNotNull(queue3);

    assert queue1.equals(queue3);
    assert queue3.equals(queue1);
    assert !queue3.equals(queue2);

    queue1.close();
    queue2.close();
    queue3.close();

    assertNull(grid(0).queue(queueName1, 0, null));
    assertNull(grid(0).queue(queueName2, 0, null));
}
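/*
 * The config(boolean) helper used by the queue tests in this listing is not shown. Below is a
 * minimal sketch of what it might look like; the cache mode and backup count are assumptions made
 * for illustration, not the actual test fixture.
 */
// Hypothetical sketch of the config(collocated) helper assumed by the queue tests.
protected CollectionConfiguration config(boolean collocated) {
    CollectionConfiguration colCfg = new CollectionConfiguration();

    // Whether all collection items should be stored on a single node.
    colCfg.setCollocated(collocated);

    // Assumed defaults: partitioned cache with one backup.
    colCfg.setCacheMode(CacheMode.PARTITIONED);
    colCfg.setBackups(1);

    return colCfg;
}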
/**
 * @param meta Job metadata.
 * @return {@code true} If local node is participating in job execution.
 */
public boolean isParticipating(HadoopJobMetadata meta) {
    UUID locNodeId = localNodeId();

    if (locNodeId.equals(meta.submitNodeId()))
        return true;

    HadoopMapReducePlan plan = meta.mapReducePlan();

    return plan.mapperNodeIds().contains(locNodeId) || plan.reducerNodeIds().contains(locNodeId) ||
        jobUpdateLeader();
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testAddUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    String val = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    assert queue.add(val);

    assert val.equals(queue.poll());
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testAddDeleteUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    String val = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    assert queue.add(val);

    assert queue.remove(val);

    assert queue.isEmpty();
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testPutGetMultithreadUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    final IgniteQueue<String> queue = grid(0).queue(queueName, QUEUE_CAPACITY, config(false));

    multithreaded(
        new Callable<Void>() {
            @Override public Void call() throws Exception {
                String thName = Thread.currentThread().getName();

                for (int i = 0; i < 5; i++) {
                    queue.put(thName);
                    queue.peek();
                    queue.take();
                }

                return null;
            }
        },
        THREAD_NUM);

    assert queue.isEmpty() : queue.size();
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testGetAndIncrement() throws Exception {
    Collection<Long> res = new HashSet<>();

    String seqName = UUID.randomUUID().toString();

    for (int i = 0; i < GRID_CNT; i++) {
        Set<Long> retVal = compute(grid(i).cluster().forLocal()).call(new GetAndIncrementJob(seqName, RETRIES));

        for (Long l : retVal)
            assert !res.contains(l) : "Value already was used " + l;

        res.addAll(retVal);
    }

    assert res.size() == GRID_CNT * RETRIES;

    int gapSize = 0;

    for (long i = 0; i < GRID_CNT * RETRIES; i++) {
        if (!res.contains(i))
            gapSize++;
        else
            gapSize = 0;

        assert gapSize <= BATCH_SIZE + 1 :
            "Gap above id " + i + " is " + gapSize + " more than batch size: " + (BATCH_SIZE + 1);
    }
}
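/*
 * GetAndIncrementJob is referenced above but not shown in this listing. The sketch below is a
 * guess at its shape: an IgniteCallable that calls getAndIncrement() on the named sequence a fixed
 * number of times and returns the values it observed. The field names and the injected Ignite
 * instance are assumptions, not the original implementation.
 */
// Hypothetical sketch of the job used by testGetAndIncrement().
private static class GetAndIncrementJob implements IgniteCallable<Set<Long>> {
    /** Auto-injected Ignite instance. */
    @IgniteInstanceResource
    private Ignite ignite;

    /** Sequence name. */
    private final String seqName;

    /** Number of increments to perform. */
    private final int retries;

    /**
     * @param seqName Sequence name.
     * @param retries Number of increments to perform.
     */
    GetAndIncrementJob(String seqName, int retries) {
        this.seqName = seqName;
        this.retries = retries;
    }

    /** {@inheritDoc} */
    @Override public Set<Long> call() {
        Set<Long> vals = new HashSet<>();

        // Get (or create) the named sequence and record each value handed out to this node.
        IgniteAtomicSequence seq = ignite.atomicSequence(seqName, 0, true);

        for (int i = 0; i < retries; i++)
            vals.add(seq.getAndIncrement());

        return vals;
    }
}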
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testPutRemoveMultiThreadedUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    final IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    multithreaded(
        new Callable<String>() {
            @Override public String call() throws Exception {
                String thread = Thread.currentThread().getName();

                for (int i = 0; i < QUEUE_CAPACITY; i++)
                    queue.put(thread);

                info("Finished loop 1: " + thread);

                queue.clear();

                info("Cleared queue 1: " + thread);

                return "";
            }
        },
        THREAD_NUM);

    assert queue.isEmpty() : "Queue must be empty. " + queue.size();
}
/** {@inheritDoc} */
@SuppressWarnings({"unchecked"})
@Override public boolean equals(Object obj) {
    if (obj == this)
        return true;

    // Guard against foreign types before casting.
    if (!(obj instanceof CancelMessageId))
        return false;

    CancelMessageId other = (CancelMessageId)obj;

    return reqId == other.reqId && nodeId.equals(other.nodeId);
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testIterator() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    for (int i = 0; i < 100; i++)
        assert queue.add(Integer.toString(i));

    Iterator<String> iter1 = queue.iterator();

    int cnt = 0;

    for (int i = 0; i < 100; i++) {
        assertNotNull(iter1.next());

        cnt++;
    }

    assertEquals(100, queue.size());
    assertEquals(100, cnt);

    assertNotNull(queue.take());
    assertNotNull(queue.take());
    assertTrue(queue.remove("33"));
    assertTrue(queue.remove("77"));

    assertEquals(96, queue.size());

    Iterator<String> iter2 = queue.iterator();

    try {
        iter2.remove();
    }
    catch (IllegalStateException e) {
        info("Caught expected exception: " + e);
    }

    iter2.next();
    iter2.remove();

    cnt = 0;

    while (iter2.hasNext()) {
        assertNotNull(iter2.next());

        cnt++;
    }

    assertEquals(95, cnt);
    assertEquals(95, queue.size());

    iter2.remove();
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testAddPollUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    assert queue.add("1");
    assert queue.add("2");
    assert queue.add("3");

    assertEquals("1", queue.poll());
    assertEquals("2", queue.poll());
    assertEquals("3", queue.poll());
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testPutGetUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, QUEUE_CAPACITY, config(false));

    String thName = Thread.currentThread().getName();

    for (int i = 0; i < 5; i++) {
        queue.put(thName);
        queue.peek();
        queue.take();
    }

    assert queue.isEmpty() : queue.size();
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testMarshalling() throws Exception {
    String seqName = UUID.randomUUID().toString();

    final IgniteAtomicSequence seq = grid(0).atomicSequence(seqName, 0, true);

    grid(1).compute().run(new CAX() {
        @Override public void applyx() {
            assertNotNull(seq);

            for (int i = 0; i < RETRIES; i++)
                seq.incrementAndGet();
        }
    });
}
/** {@inheritDoc} */
@Override public void start() throws IgniteCheckedException {
    IpcSharedMemoryNativeLoader.load(log);

    pid = IpcSharedMemoryUtils.pid();

    if (pid == -1)
        throw new IpcEndpointBindException("Failed to get PID of the current process.");

    if (size <= 0)
        throw new IpcEndpointBindException("Space size should be positive: " + size);

    String tokDirPath = this.tokDirPath;

    if (F.isEmpty(tokDirPath))
        throw new IpcEndpointBindException("Token directory path is empty.");

    tokDirPath = tokDirPath + '/' + locNodeId.toString() + '-' + IpcSharedMemoryUtils.pid();

    tokDir = U.resolveWorkDirectory(tokDirPath, false);

    if (port <= 0 || port >= 0xffff)
        throw new IpcEndpointBindException("Port value is illegal: " + port);

    try {
        srvSock = new ServerSocket();

        // Always bind to loopback.
        srvSock.bind(new InetSocketAddress("127.0.0.1", port));
    }
    catch (IOException e) {
        // Although empty socket constructor never throws exception, close it just in case.
        U.closeQuiet(srvSock);

        throw new IpcEndpointBindException("Failed to bind shared memory IPC endpoint (is port already " +
            "in use?): " + port, e);
    }

    gcWorker = new GcWorker(gridName, "ipc-shmem-gc", log);

    new IgniteThread(gcWorker).start();

    if (log.isInfoEnabled())
        log.info("IPC shared memory server endpoint started [port=" + port +
            ", tokDir=" + tokDir.getAbsolutePath() + ']');
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testPutRemoveUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    String thread = Thread.currentThread().getName();

    for (int i = 0; i < QUEUE_CAPACITY; i++)
        queue.put(thread);

    info("Finished loop 1: " + thread);

    queue.clear();

    info("Cleared queue 1: " + thread);

    assert queue.isEmpty() : "Queue must be empty. " + queue.size();
}
/** @throws Exception If an error occurs. */
@SuppressWarnings("unchecked")
private void checkGar() throws Exception {
    initGar = true;

    String garDir = "modules/extdata/p2p/deploy";
    String garFileName = "p2p.gar";

    File origGarPath = U.resolveIgnitePath(garDir + '/' + garFileName);

    File tmpPath = new File(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());

    if (!tmpPath.mkdir())
        throw new IOException("Cannot create temp directory");

    try {
        File newGarFile = new File(tmpPath, garFileName);

        U.copy(origGarPath, newGarFile, false);

        assert newGarFile.exists();

        try {
            garFile = "file:///" + tmpPath.getAbsolutePath();

            try {
                Ignite ignite1 = startGrid(1);
                Ignite ignite2 = startGrid(2);

                Integer res = ignite1.compute().<UUID, Integer>execute(TASK_NAME, ignite2.cluster().localNode().id());

                assert res != null;
            }
            finally {
                stopGrid(1);
                stopGrid(2);
            }
        }
        finally {
            if (newGarFile != null && !newGarFile.delete())
                error("Cannot delete temp GAR file");
        }
    }
    finally {
        if (!tmpPath.delete())
            error("Cannot delete temp directory");
    }
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testAddPeekUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    String item1 = "1";
    assert queue.add(item1);

    String item2 = "2";
    assert queue.add(item2);

    String item3 = "3";
    assert queue.add(item3);

    assert item1.equals(queue.peek());
    assert item1.equals(queue.peek());
    assert !item2.equals(queue.peek());
}
/** {@inheritDoc} */
@Override protected Object executeJob(int gridSize, String type) {
    log.info(">>> Starting new grid node [currGridSize=" + gridSize + ", arg=" + type + "]");

    if (type == null)
        throw new IllegalArgumentException("Node type to start should be specified.");

    IgniteConfiguration cfg = getConfig(type);

    // Generate a grid name that is unique within this VM.
    String gridName = cfg.getGridName() + " (" + UUID.randomUUID() + ")";

    // Update grid name (required to be unique).
    cfg.setGridName(gridName);

    // Start new node in current VM.
    Ignite g = G.start(cfg);

    log.info(">>> Grid started [nodeId=" + g.cluster().localNode().id() + ", name='" + g.name() + "']");

    return true;
}
/** {@inheritDoc} */
@Override public ClusterStartNodeResult call() {
    JSch ssh = new JSch();

    Session ses = null;

    try {
        if (spec.key() != null)
            ssh.addIdentity(spec.key().getAbsolutePath());

        ses = ssh.getSession(spec.username(), spec.host(), spec.port());

        if (spec.password() != null)
            ses.setPassword(spec.password());

        ses.setConfig("StrictHostKeyChecking", "no");

        ses.connect(timeout);

        boolean win = isWindows(ses);

        char separator = win ? '\\' : '/';

        spec.fixPaths(separator);

        String igniteHome = spec.igniteHome();

        if (igniteHome == null)
            igniteHome = win ? DFLT_IGNITE_HOME_WIN : DFLT_IGNITE_HOME_LINUX;

        String script = spec.script();

        if (script == null)
            script = DFLT_SCRIPT_LINUX;

        String cfg = spec.configuration();

        if (cfg == null)
            cfg = "";

        String startNodeCmd;

        String scriptOutputFileName = FILE_NAME_DATE_FORMAT.format(new Date()) + '-' +
            UUID.randomUUID().toString().substring(0, 8) + ".log";

        if (win)
            throw new UnsupportedOperationException(
                "Apache Ignite cannot be auto-started on Windows from IgniteCluster.startNodes(…) API.");
        else {
            // Assume Unix.
            int spaceIdx = script.indexOf(' ');

            String scriptPath = spaceIdx > -1 ? script.substring(0, spaceIdx) : script;
            String scriptArgs = spaceIdx > -1 ? script.substring(spaceIdx + 1) : "";

            String rmtLogArgs = buildRemoteLogArguments(spec.username(), spec.host());

            String tmpDir = env(ses, "$TMPDIR", "/tmp/");

            String scriptOutputDir = tmpDir + "ignite-startNodes";

            shell(ses, "mkdir " + scriptOutputDir);

            // Mac OS does not support ~ inside double quotes, so try to get the home path from the remote system.
            if (igniteHome.startsWith("~")) {
                String homeDir = env(ses, "$HOME", "~");

                igniteHome = igniteHome.replaceFirst("~", homeDir);
            }

            startNodeCmd = new SB()
                // Console output is consumed, started nodes must use Ignite file appenders for log.
                .a("nohup ")
                .a("\"").a(igniteHome).a('/').a(scriptPath).a("\"")
                .a(" ").a(scriptArgs)
                .a(!cfg.isEmpty() ? " \"" : "").a(cfg).a(!cfg.isEmpty() ? "\"" : "")
                .a(rmtLogArgs)
                .a(" > ").a(scriptOutputDir).a("/").a(scriptOutputFileName).a(" 2>& 1 &")
                .toString();
        }

        info("Starting remote node with SSH command: " + startNodeCmd, spec.logger(), log);

        shell(ses, startNodeCmd);

        return new ClusterStartNodeResultImpl(spec.host(), true, null);
    }
    catch (IgniteInterruptedCheckedException e) {
        return new ClusterStartNodeResultImpl(spec.host(), false, e.getMessage());
    }
    catch (Exception e) {
        return new ClusterStartNodeResultImpl(spec.host(), false, X.getFullStackTrace(e));
    }
    finally {
        if (ses != null && ses.isConnected())
            ses.disconnect();
    }
}
/** {@inheritDoc} */
@Override public int hashCode() {
    return 31 * ((int)(reqId ^ (reqId >>> 32))) + nodeId.hashCode();
}
/**
 * @param updateSeq Update sequence.
 * @return {@code true} if any local partitions were evicted.
 */
private boolean checkEvictions(long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();

    boolean changed = false;

    UUID locId = cctx.nodeId();

    for (GridDhtLocalPartition part : locParts.values()) {
        GridDhtPartitionState state = part.state();

        if (state.active()) {
            int p = part.id();

            List<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

            if (!affNodes.contains(cctx.localNode())) {
                Collection<UUID> nodeIds = F.nodeIds(nodes(p, topVer, OWNING));

                // If all affinity nodes are owners, then evict partition from local node.
                if (nodeIds.containsAll(F.nodeIds(affNodes))) {
                    part.rent(false);

                    updateLocal(part.id(), locId, part.state(), updateSeq);

                    changed = true;

                    if (log.isDebugEnabled())
                        log.debug("Evicted local partition (all affinity nodes are owners): " + part);
                }
                else {
                    int ownerCnt = nodeIds.size();
                    int affCnt = affNodes.size();

                    if (ownerCnt > affCnt) {
                        List<ClusterNode> sorted = new ArrayList<>(cctx.discovery().nodes(nodeIds));

                        // Sort by node orders in ascending order.
                        Collections.sort(sorted, CU.nodeComparator(true));

                        int diff = sorted.size() - affCnt;

                        for (int i = 0; i < diff; i++) {
                            ClusterNode n = sorted.get(i);

                            if (locId.equals(n.id())) {
                                part.rent(false);

                                updateLocal(part.id(), locId, part.state(), updateSeq);

                                changed = true;

                                if (log.isDebugEnabled())
                                    log.debug("Evicted local partition (this node is oldest non-affinity node): " +
                                        part);

                                break;
                            }
                        }
                    }
                }
            }
        }
    }

    return changed;
}
/**
 * Updates value for single partition.
 *
 * @param p Partition.
 * @param nodeId Node ID.
 * @param state State.
 * @param updateSeq Update sequence.
 */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
private void updateLocal(int p, UUID nodeId, GridDhtPartitionState state, long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();
    assert nodeId.equals(cctx.nodeId());

    // In case if node joins, get topology at the time of joining node.
    ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx.shared(), topVer);

    assert oldest != null;

    // If this node became the oldest node.
    if (oldest.id().equals(cctx.nodeId())) {
        long seq = node2part.updateSequence();

        if (seq != updateSeq) {
            if (seq > updateSeq) {
                if (this.updateSeq.get() < seq) {
                    // Update global counter if necessary.
                    boolean b = this.updateSeq.compareAndSet(this.updateSeq.get(), seq + 1);

                    assert b : "Invalid update sequence [updateSeq=" + updateSeq + ", seq=" + seq +
                        ", curUpdateSeq=" + this.updateSeq.get() + ", node2part=" + node2part.toFullString() + ']';

                    updateSeq = seq + 1;
                }
                else
                    updateSeq = seq;
            }

            node2part.updateSequence(updateSeq);
        }
    }

    GridDhtPartitionMap map = node2part.get(nodeId);

    if (map == null)
        node2part.put(nodeId, map = new GridDhtPartitionMap(nodeId, updateSeq,
            Collections.<Integer, GridDhtPartitionState>emptyMap(), false));

    map.updateSequence(updateSeq);

    map.put(p, state);

    Set<UUID> ids = part2node.get(p);

    if (ids == null)
        part2node.put(p, ids = U.newHashSet(3));

    ids.add(nodeId);
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testQueueRemoveMultithreadBounded() throws Exception {
    // Random queue name.
    final String queueName = UUID.randomUUID().toString();

    final IgniteQueue<String> queue = grid(0).queue(queueName, QUEUE_CAPACITY, config(false));

    final CountDownLatch putLatch = new CountDownLatch(THREAD_NUM);
    final CountDownLatch clearLatch = new CountDownLatch(THREAD_NUM);

    for (int t = 0; t < THREAD_NUM; t++) {
        Thread th = new Thread(new Runnable() {
            @Override public void run() {
                if (log.isDebugEnabled())
                    log.debug("Thread has been started." + Thread.currentThread().getName());

                try {
                    // Thread must be blocked on put operation.
                    for (int i = 0; i < (QUEUE_CAPACITY * THREAD_NUM); i++)
                        queue.offer("anything", 3, TimeUnit.MINUTES);

                    fail("Queue failed");
                }
                catch (IgniteException | IllegalStateException e) {
                    putLatch.countDown();

                    assert e.getMessage().contains("removed");

                    assert queue.removed();
                }

                if (log.isDebugEnabled())
                    log.debug("Thread has been stopped." + Thread.currentThread().getName());
            }
        });

        th.start();
    }

    for (int t = 0; t < THREAD_NUM; t++) {
        Thread th = new Thread(new Runnable() {
            @Override public void run() {
                try {
                    IgniteQueue<String> queue = grid(0).queue(queueName, 0, null);

                    if (queue != null)
                        queue.close();
                }
                catch (Exception e) {
                    fail("Unexpected exception: " + e);
                }
                finally {
                    clearLatch.countDown();
                }
            }
        });

        th.start();
    }

    assert putLatch.await(3, TimeUnit.MINUTES);
    assert clearLatch.await(3, TimeUnit.MINUTES);

    try {
        assert queue.isEmpty() : queue.size();

        fail("Queue must be removed.");
    }
    catch (IgniteException | IllegalStateException e) {
        assert e.getMessage().contains("removed");

        assert queue.removed();
    }
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
public void testCollectionMethods() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<SameHashItem> queue = grid(0).queue(queueName, 0, config(false));

    int retries = 100;

    // Initialize queue.
    for (int i = 0; i < retries; i++)
        queue.addAll(Arrays.asList(new SameHashItem(Integer.toString(i)), new SameHashItem(Integer.toString(i))));

    // Get arrays from queue.
    assertEquals(retries * 2, queue.toArray().length);

    SameHashItem[] arr2 = new SameHashItem[retries * 3];

    Object[] arr3 = queue.toArray(arr2);

    assertEquals(arr2, arr3);
    assertEquals(arr3[0], new SameHashItem("0"));

    // Check queue items.
    assertEquals(retries * 2, queue.size());

    assertTrue(queue.contains(new SameHashItem(Integer.toString(14))));

    assertFalse(queue.contains(new SameHashItem(Integer.toString(144))));

    Collection<SameHashItem> col1 = Arrays.asList(new SameHashItem(Integer.toString(14)),
        new SameHashItem(Integer.toString(14)), new SameHashItem(Integer.toString(18)));

    assertTrue(queue.containsAll(col1));

    Collection<SameHashItem> col2 = Arrays.asList(new SameHashItem(Integer.toString(245)),
        new SameHashItem(Integer.toString(14)), new SameHashItem(Integer.toString(18)));

    assertFalse(queue.containsAll(col2));

    // Try to remove item.
    assertTrue(queue.remove(new SameHashItem(Integer.toString(14))));

    assertEquals((retries * 2) - 1, queue.size());

    assertTrue(queue.contains(new SameHashItem(Integer.toString(14))));

    assertTrue(queue.remove(new SameHashItem(Integer.toString(14))));

    assertEquals((retries - 1) * 2, queue.size());

    assertFalse(queue.remove(new SameHashItem(Integer.toString(14))));

    // Try to remove some items.
    assertTrue(queue.contains(new SameHashItem(Integer.toString(33))));

    assertTrue(queue.removeAll(Arrays.asList(new SameHashItem(Integer.toString(15)),
        new SameHashItem(Integer.toString(14)), new SameHashItem(Integer.toString(33)),
        new SameHashItem(Integer.toString(1)))));

    assertFalse(queue.contains(new SameHashItem(Integer.toString(33))));

    // Try to retain all items.
    assertTrue(queue.retainAll(Arrays.asList(new SameHashItem(Integer.toString(15)),
        new SameHashItem(Integer.toString(14)), new SameHashItem(Integer.toString(33)),
        new SameHashItem(Integer.toString(1)))));

    assertFalse(queue.contains(new SameHashItem(Integer.toString(2))));

    assert queue.isEmpty();
}
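/*
 * SameHashItem is used above to exercise hash-collision handling but is not shown in this listing.
 * Below is a minimal sketch, assuming the item wraps a single string value, bases equals() on that
 * value, and deliberately returns a constant hashCode() so every instance collides.
 */
// Hypothetical sketch of the colliding-hash test item assumed by testCollectionMethods().
private static class SameHashItem implements Serializable {
    /** Wrapped value. */
    private final String val;

    /**
     * @param val Value to wrap.
     */
    SameHashItem(String val) {
        this.val = val;
    }

    /** {@inheritDoc} */
    @Override public boolean equals(Object o) {
        if (o == this)
            return true;

        return o instanceof SameHashItem && val.equals(((SameHashItem)o).val);
    }

    /** {@inheritDoc} */
    @Override public int hashCode() {
        // Intentionally constant: all items land in the same hash bucket.
        return 0;
    }

    /** {@inheritDoc} */
    @Override public String toString() {
        return "SameHashItem [val=" + val + ']';
    }
}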