/* * Launches against the agent & main */ public void testAgentAndMain() throws Exception { Project project = workspace.getProject("p1"); Run bndrun = new Run(workspace, project.getBase(), project.getFile("one.bndrun")); bndrun.setProperty("-runpath", "biz.aQute.remote.launcher"); bndrun.setProperty("-runbundles", "bsn-1,bsn-2"); bndrun.setProperty("-runremote", "agent,main;agent=1090"); final RemoteProjectLauncherPlugin pl = (RemoteProjectLauncherPlugin) bndrun.getProjectLauncher(); pl.prepare(); List<? extends RunSession> sessions = pl.getRunSessions(); assertEquals(2, sessions.size()); RunSession agent = sessions.get(0); RunSession main = sessions.get(1); CountDownLatch agentLatch = launch(agent); CountDownLatch mainLatch = launch(main); agent.waitTillStarted(1000); main.waitTillStarted(1000); Thread.sleep(500); agent.cancel(); main.cancel(); agentLatch.await(); mainLatch.await(); assertEquals(-3, agent.getExitCode()); assertEquals(-3, main.getExitCode()); bndrun.close(); }
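/*
 * A minimal sketch of what the launch(...) helper used above could look like; its definition is
 * not shown in this section, so the thread name and error handling here are assumptions. It runs
 * RunSession.launch() on a background thread (as testSimpleLauncher below does inline) and
 * returns a latch that opens once the session has exited.
 */
private CountDownLatch launch(final RunSession session) {
  final CountDownLatch latch = new CountDownLatch(1);
  Thread t = new Thread("test-launch") {
    public void run() {
      try {
        session.launch(); // blocks until the remote framework exits or the session is cancelled
      } catch (Exception e) {
        e.printStackTrace();
      } finally {
        latch.countDown();
      }
    }
  };
  t.start();
  return latch;
}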
/** {@inheritDoc} */ @Override public Serializable execute() { int arg = this.<Integer>argument(0); try { if (log.isInfoEnabled()) log.info("Executing job [job=" + this + ", arg=" + arg + ']'); startSignal.countDown(); try { if (!startSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS)) fail(); if (arg == 1) { if (log.isInfoEnabled()) log.info("Job one is proceeding."); } else Thread.sleep(WAIT_TIME); } catch (InterruptedException e) { if (log.isInfoEnabled()) log.info("Job got cancelled [arg=" + arg + ", ses=" + ses + ", e=" + e + ']'); return 0; } if (log.isInfoEnabled()) log.info("Completing job: " + ses); return argument(0); } finally { stopSignal.countDown(); processedCnt++; } }
@Test public void testPublishLast() throws InterruptedException { final AtomicInteger count = new AtomicInteger(); ConnectableObservable<String> connectable = Observable.<String>create( observer -> { observer.onSubscribe(EmptySubscription.INSTANCE); count.incrementAndGet(); new Thread( () -> { observer.onNext("first"); observer.onNext("last"); observer.onComplete(); }) .start(); }) .takeLast(1) .publish(); // subscribe once final CountDownLatch latch = new CountDownLatch(1); connectable.subscribe( value -> { assertEquals("last", value); latch.countDown(); }); // subscribe twice connectable.subscribe(); Disposable subscription = connectable.connect(); assertTrue(latch.await(1000, TimeUnit.MILLISECONDS)); assertEquals(1, count.get()); subscription.dispose(); }
@Test public void testSubmitToMemberRunnable() throws InterruptedException { final int k = simpleTestNodeCount; TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(k); final HazelcastInstance[] instances = factory.newInstances(new Config()); final AtomicInteger count = new AtomicInteger(0); final CountDownLatch latch = new CountDownLatch(k); final ExecutionCallback callback = new ExecutionCallback() { public void onResponse(Object response) { if (response == null) { count.incrementAndGet(); } latch.countDown(); } public void onFailure(Throwable t) {} }; for (int i = 0; i < k; i++) { final HazelcastInstance instance = instances[i]; final IExecutorService service = instance.getExecutorService("testSubmitToMemberRunnable"); final String script = "if(!hazelcast.getCluster().getLocalMember().equals(member)) " + "hazelcast.getAtomicLong('testSubmitToMemberRunnable').incrementAndGet();"; final HashMap map = new HashMap(); map.put("member", instance.getCluster().getLocalMember()); service.submitToMember( new ScriptRunnable(script, map), instance.getCluster().getLocalMember(), callback); } latch.await(10, TimeUnit.SECONDS); assertEquals(0, instances[0].getAtomicLong("testSubmitToMemberRunnable").get()); assertEquals(k, count.get()); }
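/*
 * Several of the executor tests in this section (testSubmitToMemberRunnable,
 * testSubmitToKeyOwnerCallable, testExecutionCallback) share the same shape: the asynchronous
 * callback counts a latch down and the test thread awaits it with a timeout before asserting.
 * A library-agnostic sketch of that pattern; AsyncService is a hypothetical interface, not a
 * Hazelcast API.
 */
interface AsyncService { void submit(Runnable task, Runnable onDone); }

static void runAndAwait(AsyncService service, int tasks) throws InterruptedException {
  final CountDownLatch done = new CountDownLatch(tasks);
  final AtomicInteger completed = new AtomicInteger();
  for (int i = 0; i < tasks; i++) {
    service.submit(() -> { /* work */ }, () -> { completed.incrementAndGet(); done.countDown(); });
  }
  // Await with a timeout so a lost callback fails the test instead of hanging it.
  assertTrue(done.await(10, TimeUnit.SECONDS));
  assertEquals(tasks, completed.get());
}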
/** @throws Exception If failed. */ @SuppressWarnings("unchecked") public void testCancel() throws Exception { Grid grid = G.grid(getTestGridName()); grid.compute() .localDeployTask(GridCancelTestTask.class, U.detectClassLoader(GridCancelTestTask.class)); GridComputeTaskFuture<?> fut = grid.compute().execute(GridCancelTestTask.class.getName(), null); // Wait until jobs begin execution. boolean await = startSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS); assert await : "Jobs did not start."; info("Test task result: " + fut); assert fut != null; // Only first job should successfully complete. Object res = fut.get(); assert (Integer) res == 1; // Wait for all jobs to finish. await = stopSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS); assert await : "Jobs did not stop."; // One is definitely processed. But there might be some more processed or cancelled or processed // and cancelled. // Thus total number should be at least SPLIT_COUNT and at most (SPLIT_COUNT - 1) *2 +1 assert (cancelCnt + processedCnt) >= SPLIT_COUNT && (cancelCnt + processedCnt) <= (SPLIT_COUNT - 1) * 2 + 1 : "Invalid cancel count value: " + cancelCnt; }
@Test public void testSubmitToKeyOwnerCallable() throws Exception { final int k = simpleTestNodeCount; TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(k); final HazelcastInstance[] instances = factory.newInstances(new Config()); final AtomicInteger count = new AtomicInteger(0); final CountDownLatch latch = new CountDownLatch(k / 2); final ExecutionCallback callback = new ExecutionCallback() { public void onResponse(Object response) { if ((Boolean) response) count.incrementAndGet(); latch.countDown(); } public void onFailure(Throwable t) {} }; for (int i = 0; i < k; i++) { final HazelcastInstance instance = instances[i]; final IExecutorService service = instance.getExecutorService("testSubmitToKeyOwnerCallable"); final String script = "hazelcast.getCluster().getLocalMember().equals(member)"; final HashMap map = new HashMap(); final Member localMember = instance.getCluster().getLocalMember(); map.put("member", localMember); int key = 0; while (!localMember.equals(instance.getPartitionService().getPartition(++key).getOwner())) ; if (i % 2 == 0) { final Future f = service.submitToKeyOwner(new ScriptCallable(script, map), key); assertTrue((Boolean) f.get(5, TimeUnit.SECONDS)); } else { service.submitToKeyOwner(new ScriptCallable(script, map), key, callback); } } assertTrue(latch.await(30, TimeUnit.SECONDS)); assertEquals(k / 2, count.get()); }
/** @param log Logger. */ private void execute(GridLogger log) { try { log.info("Started execute."); // Count down the shared job latch so that the main thread knows that all jobs are // inside the "execute" routine. jobLatch.countDown(); log.info("After job latch."); // Wait for the main thread to allow jobs to proceed. latch.await(); log.info("After latch."); if (awaitMasterLeaveCallback) { latch0.await(); log.info("After latch0."); } else log.info("Latch 0 skipped."); } catch (InterruptedException e) { // We do not expect any interruptions here, hence this statement. fail("Unexpected exception: " + e); } }
/** * Ensure that {@link GridComputeJobMasterLeaveAware} callback is invoked on job which is * initiated by master and is currently running on it. * * @throws Exception If failed. */ public void testLocalJobOnMaster() throws Exception { invokeLatch = new CountDownLatch(1); jobLatch = new CountDownLatch(1); Grid g = startGrid(0); g.compute().execute(new TestTask(1), null); jobLatch.await(); // Count down the latch in a separate thread. new Thread( new Runnable() { @Override public void run() { try { U.sleep(500); } catch (GridInterruptedException ignore) { // No-op. } latch.countDown(); } }) .start(); stopGrid(0, true); latch.countDown(); assert invokeLatch.await(5000, MILLISECONDS); }
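/*
 * The thread spawned above only exists to open `latch` after a 500 ms pause. A hedged sketch of
 * the same delayed release as a reusable helper built on ScheduledExecutorService; this is a
 * stylistic assumption, not part of the original suite.
 */
private void releaseLatchLater(final CountDownLatch latch, long delayMs) {
  ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
  scheduler.schedule(latch::countDown, delayMs, TimeUnit.MILLISECONDS); // open the gate after the delay
  scheduler.shutdown(); // the already-scheduled task still runs; no new tasks are accepted
}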
/** * Ensure that {@link GridComputeJobMasterLeaveAware} callback is invoked when a worker fails to send * {@link GridJobExecuteResponse} to the master node. * * @throws Exception If failed. */ public void testCannotSendJobExecuteResponse() throws Exception { awaitMasterLeaveCallback = false; // Start grids. for (int i = 0; i < GRID_CNT; i++) startGrid(i); int lastGridIdx = GRID_CNT - 1; grid(lastGridIdx) .forPredicate(excludeLastPredicate()) .compute() .execute(new TestTask(GRID_CNT - 1), null); jobLatch.await(); for (int i = 0; i < lastGridIdx; i++) ((CommunicationSpi) grid(i).configuration().getCommunicationSpi()).waitLatch(); latch.countDown(); // Ensure that all worker nodes have already started sending the job response. for (int i = 0; i < lastGridIdx; i++) ((CommunicationSpi) grid(i).configuration().getCommunicationSpi()).awaitResponse(); // Now we stop the master grid. stopGrid(lastGridIdx, true); // Release communication SPI wait latches. As the master node is stopped, the job worker will receive // an exception. for (int i = 0; i < lastGridIdx; i++) ((CommunicationSpi) grid(i).configuration().getCommunicationSpi()).releaseWaitLatch(); assert invokeLatch.await(5000, MILLISECONDS); }
/** @throws Exception If failed. */ public void testInternalTaskMetrics() throws Exception { Ignite ignite = grid(); // Visor task is internal and should not affect metrics. ignite.compute().withName("visor-test-task").execute(new TestInternalTask(), "testArg"); // Let metrics update twice. final CountDownLatch latch = new CountDownLatch(2); ignite .events() .localListen( new IgnitePredicate<Event>() { @Override public boolean apply(Event evt) { assert evt.type() == EVT_NODE_METRICS_UPDATED; latch.countDown(); return true; } }, EVT_NODE_METRICS_UPDATED); // Wait for metrics update. latch.await(); ClusterMetrics metrics = ignite.cluster().localNode().metrics(); info("Node metrics: " + metrics); assert metrics.getAverageActiveJobs() == 0; assert metrics.getAverageCancelledJobs() == 0; assert metrics.getAverageJobExecuteTime() == 0; assert metrics.getAverageJobWaitTime() == 0; assert metrics.getAverageRejectedJobs() == 0; assert metrics.getAverageWaitingJobs() == 0; assert metrics.getCurrentActiveJobs() == 0; assert metrics.getCurrentCancelledJobs() == 0; assert metrics.getCurrentJobExecuteTime() == 0; assert metrics.getCurrentJobWaitTime() == 0; assert metrics.getCurrentWaitingJobs() == 0; assert metrics.getMaximumActiveJobs() == 0; assert metrics.getMaximumCancelledJobs() == 0; assert metrics.getMaximumJobExecuteTime() == 0; assert metrics.getMaximumJobWaitTime() == 0; assert metrics.getMaximumRejectedJobs() == 0; assert metrics.getMaximumWaitingJobs() == 0; assert metrics.getTotalCancelledJobs() == 0; assert metrics.getTotalExecutedJobs() == 0; assert metrics.getTotalRejectedJobs() == 0; assert metrics.getTotalExecutedTasks() == 0; assertTrue( "MaximumJobExecuteTime=" + metrics.getMaximumJobExecuteTime() + " is less than AverageJobExecuteTime=" + metrics.getAverageJobExecuteTime(), metrics.getMaximumJobExecuteTime() >= metrics.getAverageJobExecuteTime()); }
@Ignore("Non-positive requests are relayed to the plugin and is a no-op otherwise") @Test public void testNegativeRequestThrowsIllegalArgumentException() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final AtomicReference<Throwable> exception = new AtomicReference<>(); Observable.just(1, 2, 3, 4) .subscribe( new Observer<Integer>() { @Override public void onStart() { request(1); } @Override public void onComplete() {} @Override public void onError(Throwable e) { exception.set(e); latch.countDown(); } @Override public void onNext(Integer t) { request(-1); request(1); } }); Assert.assertTrue(latch.await(10, TimeUnit.SECONDS)); Assert.assertTrue(exception.get() instanceof IllegalArgumentException); }
/** Test for issue #39 */ @Test public void testIsMapKeyLocked() throws InterruptedException { HazelcastClient hClient = getHazelcastClient(); final IMap map = hClient.getMap("testIsMapKeyLocked"); assertFalse(map.isLocked("key")); map.lock("key"); assertTrue(map.isLocked("key")); final CountDownLatch latch = new CountDownLatch(1); Thread thread = new Thread( new Runnable() { public void run() { assertTrue(map.isLocked("key")); try { while (map.isLocked("key")) { Thread.sleep(100); } } catch (InterruptedException e) { throw new RuntimeException(e); } latch.countDown(); } }); thread.start(); Thread.sleep(100); map.unlock("key"); assertTrue(latch.await(3, TimeUnit.SECONDS)); }
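/*
 * JDK-only analogy (not the Hazelcast client API) for the observe-until-unlocked pattern above:
 * one thread holds a ReentrantLock while another polls isLocked() and opens a latch once the lock
 * has been released.
 */
@Test
public void lockObservedFromSecondThread_jdkAnalogy() throws InterruptedException {
  final ReentrantLock lock = new ReentrantLock();
  final CountDownLatch released = new CountDownLatch(1);
  lock.lock();
  new Thread(() -> {
    try {
      while (lock.isLocked()) {
        Thread.sleep(100); // poll until the owner releases the lock
      }
    } catch (InterruptedException e) {
      throw new RuntimeException(e);
    }
    released.countDown();
  }).start();
  Thread.sleep(100);
  lock.unlock();
  assertTrue(released.await(3, TimeUnit.SECONDS));
}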
@Test public void lockMap() throws InterruptedException { HazelcastClient hClient = getHazelcastClient(); final IMap<String, String> map = hClient.getMap("lockMap"); final CountDownLatch unlockLatch = new CountDownLatch(1); final CountDownLatch latch = new CountDownLatch(1); map.put("a", "b"); map.lockMap(1, TimeUnit.SECONDS); assertTrue(map.tryPut("a", "c", 10, TimeUnit.MILLISECONDS)); new Thread( new Runnable() { public void run() { assertFalse(map.lockMap(10, TimeUnit.MILLISECONDS)); unlockLatch.countDown(); assertTrue(map.lockMap(Long.MAX_VALUE, TimeUnit.SECONDS)); latch.countDown(); // map.unlockMap(); } }) .start(); assertTrue(unlockLatch.await(10, TimeUnit.SECONDS)); Thread.sleep(2000); map.unlockMap(); assertEquals("c", map.getMapEntry("a").getValue()); assertTrue(latch.await(10, TimeUnit.SECONDS)); }
@Test(timeout = 1000L) public void testSynchronousError() { final Observable<Observable<String>> o1 = Observable.error(new RuntimeException("unit test")); final CountDownLatch latch = new CountDownLatch(1); Observable.mergeDelayError(o1) .subscribe( new Observer<String>() { @Override public void onComplete() { fail("Expected onError path"); } @Override public void onError(Throwable e) { latch.countDown(); } @Override public void onNext(String s) { fail("Expected onError path"); } }); try { latch.await(); } catch (InterruptedException ex) { fail("interrupted"); } }
/** * https://github.com/ReactiveX/RxJava/issues/198 * * <p>Rx Design Guidelines 5.2 * * <p>"when calling the Subscribe method that only has an onNext argument, the OnError behavior * will be to rethrow the exception on the thread that the message comes out from the Observable. * The OnCompleted behavior in this case is to do nothing." * * @throws InterruptedException */ @Test @Ignore("Subscribers can't throw") public void testErrorThrownWithoutErrorHandlerAsynchronous() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final AtomicReference<Throwable> exception = new AtomicReference<>(); Observable.create( observer -> { new Thread( () -> { try { observer.onError(new Error("failure")); } catch (Throwable e) { // without an onError handler it has to just throw on whatever thread // invokes it exception.set(e); } latch.countDown(); }) .start(); }) .subscribe(); // wait for exception latch.await(3000, TimeUnit.MILLISECONDS); assertNotNull(exception.get()); assertEquals("failure", exception.get().getMessage()); }
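/*
 * The test above relies on a common idiom for asserting on an exception raised on another thread:
 * the worker records the Throwable in an AtomicReference and opens a latch, and the test thread
 * awaits (with a timeout) before asserting. A condensed JDK-only sketch of that idiom; the Error
 * thrown here is just a stand-in for the code under test.
 */
@Test
public void exceptionCapturedAcrossThreads_sketch() throws InterruptedException {
  final CountDownLatch done = new CountDownLatch(1);
  final AtomicReference<Throwable> thrown = new AtomicReference<>();
  new Thread(() -> {
    try {
      throw new Error("failure"); // stand-in for the operation that fails on the worker thread
    } catch (Throwable e) {
      thrown.set(e);
    } finally {
      done.countDown();
    }
  }).start();
  assertTrue(done.await(3, TimeUnit.SECONDS));
  assertEquals("failure", thrown.get().getMessage());
}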
/** * @param expJobs Expected number of jobs. * @param taskStarter Task starter. * @throws Exception If failed. */ private void testMasterLeaveAwareCallback( int expJobs, GridClosure<GridProjection, GridFuture<?>> taskStarter) throws Exception { jobLatch = new CountDownLatch(expJobs); invokeLatch = new CountDownLatch(expJobs); for (int i = 0; i < GRID_CNT; i++) startGrid(i); int lastGridIdx = GRID_CNT - 1; GridFuture<?> fut = taskStarter.apply(grid(lastGridIdx).forPredicate(excludeLastPredicate())); jobLatch.await(); stopGrid(lastGridIdx, true); latch.countDown(); assert invokeLatch.await(5000, MILLISECONDS); try { fut.get(); } catch (GridException e) { log.debug("Task failed: " + e); } }
@Test public void testSubmitToAllMembersCallable() throws InterruptedException { final int k = simpleTestNodeCount; TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(k); final HazelcastInstance[] instances = factory.newInstances(new Config()); final AtomicInteger count = new AtomicInteger(0); final CountDownLatch countDownLatch = new CountDownLatch(k * k); final MultiExecutionCallback callback = new MultiExecutionCallback() { public void onResponse(Member member, Object value) { count.incrementAndGet(); countDownLatch.countDown(); } public void onComplete(Map<Member, Object> values) {} }; for (int i = 0; i < k; i++) { final IExecutorService service = instances[i].getExecutorService("testSubmitToAllMembersCallable"); final String script = "hazelcast.getAtomicLong('testSubmitToAllMembersCallable').incrementAndGet();"; service.submitToAllMembers(new ScriptCallable(script, null), callback); } countDownLatch.await(30, TimeUnit.SECONDS); final IAtomicLong result = instances[0].getAtomicLong("testSubmitToAllMembersCallable"); assertEquals(k * k, result.get()); assertEquals(k * k, count.get()); }
/** * @param log Logger. * @param job Actual job. */ private void onMasterLeave(GridLogger log, Object job) { log.info("Callback executed: " + job); latch0.countDown(); invokeLatch.countDown(); }
/** @throws Exception If failed. */ public void testIoMetrics() throws Exception { Ignite ignite0 = grid(); Ignite ignite1 = startGrid(1); Object msg = new TestMessage(); int size = ignite0.configuration().getMarshaller().marshal(msg).length; assert size > MSG_SIZE; final CountDownLatch latch = new CountDownLatch(MSG_CNT); ignite0 .message() .localListen( null, new MessagingListenActor<TestMessage>() { @Override protected void receive(UUID nodeId, TestMessage rcvMsg) throws Throwable { latch.countDown(); } }); ignite1 .message() .localListen( null, new MessagingListenActor<TestMessage>() { @Override protected void receive(UUID nodeId, TestMessage rcvMsg) throws Throwable { respond(rcvMsg); } }); for (int i = 0; i < MSG_CNT; i++) message(ignite0.cluster().forRemotes()).send(null, msg); latch.await(); ClusterMetrics metrics = ignite0.cluster().localNode().metrics(); info("Node 0 metrics: " + metrics); // Time sync messages are being sent. assert metrics.getSentMessagesCount() >= MSG_CNT; assert metrics.getSentBytesCount() > size * MSG_CNT; assert metrics.getReceivedMessagesCount() >= MSG_CNT; assert metrics.getReceivedBytesCount() > size * MSG_CNT; metrics = ignite1.cluster().localNode().metrics(); info("Node 1 metrics: " + metrics); // Time sync messages are being sent. assert metrics.getSentMessagesCount() >= MSG_CNT; assert metrics.getSentBytesCount() > size * MSG_CNT; assert metrics.getReceivedMessagesCount() >= MSG_CNT; assert metrics.getReceivedBytesCount() > size * MSG_CNT; }
@Test public void testPostregisteredExecutionCallbackCompletableFuture() throws Exception { HazelcastInstanceProxy proxy = (HazelcastInstanceProxy) createHazelcastInstance(); Field originalField = HazelcastInstanceProxy.class.getDeclaredField("original"); originalField.setAccessible(true); HazelcastInstanceImpl hz = (HazelcastInstanceImpl) originalField.get(proxy); NodeEngine nodeEngine = hz.node.nodeEngine; ExecutionService es = nodeEngine.getExecutionService(); final CountDownLatch latch1 = new CountDownLatch(1); final CountDownLatch latch2 = new CountDownLatch(1); final ExecutorService executorService = Executors.newSingleThreadExecutor(); try { Future future = executorService.submit( new Callable<String>() { @Override public String call() { try { return "success"; } finally { latch1.countDown(); } } }); final ICompletableFuture completableFuture = es.asCompletableFuture(future); latch1.await(30, TimeUnit.SECONDS); final AtomicReference reference = new AtomicReference(); completableFuture.andThen( new ExecutionCallback() { @Override public void onResponse(Object response) { reference.set(response); latch2.countDown(); } @Override public void onFailure(Throwable t) { reference.set(t); latch2.countDown(); } }); latch2.await(30, TimeUnit.SECONDS); if (reference.get() instanceof Throwable) { ((Throwable) reference.get()).printStackTrace(); } assertEquals("success", reference.get()); } finally { executorService.shutdown(); } }
@Test public void removeListener() throws InterruptedException, IOException { HazelcastClient hClient = getHazelcastClient(); final IMap<String, String> map = hClient.getMap("removeListener"); final CountDownLatch entryAddLatch = new CountDownLatch(5); final CountDownLatch entryUpdatedLatch = new CountDownLatch(5); final CountDownLatch entryRemovedLatch = new CountDownLatch(5); CountDownLatchEntryListener<String, String> listener1 = new CountDownLatchEntryListener<String, String>( entryAddLatch, entryUpdatedLatch, entryRemovedLatch); CountDownLatchEntryListener<String, String> listener2 = new CountDownLatchEntryListener<String, String>( entryAddLatch, entryUpdatedLatch, entryRemovedLatch); map.addEntryListener(listener1, true); map.put("hello", "world"); map.put("hello", "new world"); map.remove("hello"); Thread.sleep(100); assertEquals(4, entryAddLatch.getCount()); assertEquals(4, entryRemovedLatch.getCount()); assertEquals(4, entryUpdatedLatch.getCount()); map.removeEntryListener(listener1); map.put("hello", "world"); map.put("hello", "new world"); map.remove("hello"); Thread.sleep(100); assertEquals(4, entryAddLatch.getCount()); assertEquals(4, entryRemovedLatch.getCount()); assertEquals(4, entryUpdatedLatch.getCount()); }
@Test public void testReplay() throws InterruptedException { final AtomicInteger counter = new AtomicInteger(); ConnectableObservable<String> o = Observable.<String>create( observer -> { observer.onSubscribe(EmptySubscription.INSTANCE); new Thread( new Runnable() { @Override public void run() { counter.incrementAndGet(); observer.onNext("one"); observer.onComplete(); } }) .start(); }) .replay(); // we connect immediately and it will emit the value Disposable s = o.connect(); try { // we then expect the following 2 subscriptions to get that same value final CountDownLatch latch = new CountDownLatch(2); // subscribe once o.subscribe( v -> { assertEquals("one", v); latch.countDown(); }); // subscribe again o.subscribe( v -> { assertEquals("one", v); latch.countDown(); }); if (!latch.await(1000, TimeUnit.MILLISECONDS)) { fail("subscriptions did not receive values"); } assertEquals(1, counter.get()); } finally { s.dispose(); } }
/** @throws Exception If failed. */ public void testTopologyListener() throws Exception { final Collection<UUID> added = new ArrayList<>(1); final Collection<UUID> rmvd = new ArrayList<>(1); final CountDownLatch addedLatch = new CountDownLatch(1); final CountDownLatch rmvLatch = new CountDownLatch(1); assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size()); GridClientTopologyListener lsnr = new GridClientTopologyListener() { @Override public void onNodeAdded(GridClientNode node) { added.add(node.nodeId()); addedLatch.countDown(); } @Override public void onNodeRemoved(GridClientNode node) { rmvd.add(node.nodeId()); rmvLatch.countDown(); } }; client.addTopologyListener(lsnr); try { Grid g = startGrid(NODES_CNT + 1); UUID id = g.localNode().id(); assertTrue(addedLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS)); assertEquals(1, added.size()); assertEquals(id, F.first(added)); stopGrid(NODES_CNT + 1); assertTrue(rmvLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS)); assertEquals(1, rmvd.size()); assertEquals(id, F.first(rmvd)); } finally { client.removeTopologyListener(lsnr); stopGrid(NODES_CNT + 1); } }
/* * Launches against the agent */ public void testSimpleLauncher() throws Exception { Project project = workspace.getProject("p1"); Run bndrun = new Run(workspace, project.getBase(), project.getFile("one.bndrun")); bndrun.setProperty("-runpath", "biz.aQute.remote.launcher"); bndrun.setProperty("-runbundles", "bsn-1,bsn-2"); bndrun.setProperty("-runremote", "test"); final RemoteProjectLauncherPlugin pl = (RemoteProjectLauncherPlugin) bndrun.getProjectLauncher(); pl.prepare(); final CountDownLatch latch = new CountDownLatch(1); final AtomicInteger exitCode = new AtomicInteger(-1); List<? extends RunSession> sessions = pl.getRunSessions(); assertEquals(1, sessions.size()); final RunSession session = sessions.get(0); Thread t = new Thread("test-launch") { public void run() { try { exitCode.set(session.launch()); } catch (Exception e) { e.printStackTrace(); } finally { latch.countDown(); } } }; t.start(); Thread.sleep(500); for (Bundle b : context.getBundles()) { System.out.println(b.getLocation()); } assertEquals(4, context.getBundles().length); String p1 = t1.getAbsolutePath(); System.out.println(p1); assertNotNull(context.getBundle(p1)); assertNotNull(context.getBundle(t2.getAbsolutePath())); pl.cancel(); latch.await(); assertEquals(-3, exitCode.get()); bndrun.close(); }
/** * Event callback. * * @param exchId Exchange ID. * @param discoEvt Discovery event. */ public void onEvent(GridDhtPartitionExchangeId exchId, DiscoveryEvent discoEvt) { assert exchId.equals(this.exchId); this.discoEvt = discoEvt; evtLatch.countDown(); }
public void makeSnapshots(Collection<InetAddress> endpoints) { try { snapshotLatch = new CountDownLatch(endpoints.size()); IAsyncCallback callback = new IAsyncCallback() { @Override public boolean isLatencyForSnitch() { return false; } @Override public void response(Message msg) { RepairJob.this.snapshotLatch.countDown(); } }; for (InetAddress endpoint : endpoints) MessagingService.instance() .sendRR( new SnapshotCommand(tablename, cfname, sessionName, false), endpoint, callback); snapshotLatch.await(); snapshotLatch = null; } catch (InterruptedException e) { throw new RuntimeException(e); } }
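/*
 * makeSnapshots above blocks on an untimed snapshotLatch.await(), so a lost response would block
 * the repair thread indefinitely. A hedged sketch of a timed variant; sendSnapshotRequest and the
 * 60-second bound are illustrative assumptions, not Cassandra's actual API or policy.
 */
void makeSnapshotsWithTimeout(Collection<InetAddress> endpoints) throws InterruptedException {
  CountDownLatch pending = new CountDownLatch(endpoints.size());
  for (InetAddress endpoint : endpoints) {
    sendSnapshotRequest(endpoint, pending::countDown); // hypothetical async send with a completion hook
  }
  if (!pending.await(60, TimeUnit.SECONDS)) {
    throw new RuntimeException("Timed out waiting for snapshot responses");
  }
}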
protected void dispatch(Message m) { // Pull off the connection management messages we're // interested in and then pass on the rest. if (m instanceof ClientRegistrationMessage) { // Then we've gotten our real id this.id = (int) ((ClientRegistrationMessage) m).getId(); log.log(Level.INFO, "Connection established, id:{0}.", this.id); connecting.countDown(); fireConnected(); return; } if (m instanceof DisconnectMessage) { // Can't do too much else yet String reason = ((DisconnectMessage) m).getReason(); log.log(Level.SEVERE, "Connection terminated, reason:{0}.", reason); DisconnectInfo info = new DisconnectInfo(); info.reason = reason; fireDisconnected(info); close(); } // Make sure client MessageListeners are called single-threaded // since it could receive messages from the TCP and UDP // thread simultaneously. synchronized (this) { messageListeners.messageReceived(this, m); } }
@Test public void testManagedPreregisteredExecutionCallbackCompletableFuture() throws Exception { HazelcastInstanceProxy proxy = (HazelcastInstanceProxy) createHazelcastInstance(); Field originalField = HazelcastInstanceProxy.class.getDeclaredField("original"); originalField.setAccessible(true); HazelcastInstanceImpl hz = (HazelcastInstanceImpl) originalField.get(proxy); NodeEngine nodeEngine = hz.node.nodeEngine; ExecutionService es = nodeEngine.getExecutionService(); final CountDownLatch latch1 = new CountDownLatch(1); final CountDownLatch latch2 = new CountDownLatch(1); Future future = es.submit( "default", new Callable<String>() { @Override public String call() { try { latch1.await(30, TimeUnit.SECONDS); return "success"; } catch (Exception e) { throw new RuntimeException(e); } } }); final AtomicReference reference = new AtomicReference(); final ICompletableFuture completableFuture = es.asCompletableFuture(future); completableFuture.andThen( new ExecutionCallback() { @Override public void onResponse(Object response) { reference.set(response); latch2.countDown(); } @Override public void onFailure(Throwable t) { reference.set(t); latch2.countDown(); } }); latch1.countDown(); latch2.await(30, TimeUnit.SECONDS); assertEquals("success", reference.get()); }
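/*
 * The two ICompletableFuture tests above (post- and pre-registered callbacks) assert that the
 * callback fires exactly once regardless of when it is registered relative to completion. The
 * JDK's CompletableFuture gives the same contract; the sketch below is a JDK analogy for that
 * behavior, not the Hazelcast implementation.
 */
@Test
public void completableFutureCallback_jdkAnalogy() throws Exception {
  // Callback registered before completion fires once the future completes.
  CompletableFuture<String> pre = new CompletableFuture<>();
  final CountDownLatch preDone = new CountDownLatch(1);
  pre.whenComplete((value, error) -> preDone.countDown());
  pre.complete("success");
  assertTrue(preDone.await(5, TimeUnit.SECONDS));
  // Callback registered after completion still fires exactly once.
  CompletableFuture<String> post = CompletableFuture.completedFuture("success");
  final CountDownLatch postDone = new CountDownLatch(1);
  post.whenComplete((value, error) -> postDone.countDown());
  assertTrue(postDone.await(5, TimeUnit.SECONDS));
}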
public StatementIteratorConsumer( BoundedCQLStatementIterator statementIterator, CQLExecutor cqlExecutor, long timeout) { this.statementIterator = statementIterator; this.cqlExecutor = cqlExecutor; this.timeout = timeout; this.shutdownLatch = new CountDownLatch((int) statementIterator.size()); logger.trace("Created consumer with countdown {}", shutdownLatch.getCount()); }
/** Test the Execution Callback */ @Test public void testExecutionCallback() throws Exception { Callable<String> task = new BasicTestTask(); IExecutorService executor = createSingleNodeExecutorService("testExecutionCallback"); final CountDownLatch latch = new CountDownLatch(1); final ExecutionCallback executionCallback = new ExecutionCallback() { public void onResponse(Object response) { latch.countDown(); } public void onFailure(Throwable t) {} }; executor.submit(task, executionCallback); assertTrue(latch.await(2, TimeUnit.SECONDS)); }