@Test
public void testSharedServersRoundRobinButFirstStartAndStopServer() throws Exception {
  // Start and stop a server on the same port/host beforehand to make sure it
  // doesn't interfere with the round-robin test below.
  CountDownLatch latch = new CountDownLatch(1);
  HttpServer theServer = vertx.createHttpServer(new HttpServerOptions().setPort(4321));
  theServer
      .websocketHandler(ws -> {
        fail("Should not connect");
      })
      .listen(ar -> {
        if (ar.succeeded()) {
          latch.countDown();
        } else {
          fail("Failed to bind server");
        }
      });
  awaitLatch(latch);
  CountDownLatch closeLatch = new CountDownLatch(1);
  theServer.close(ar -> {
    assertTrue(ar.succeeded());
    closeLatch.countDown();
  });
  assertTrue(closeLatch.await(10, TimeUnit.SECONDS));
  testSharedServersRoundRobin();
}
/** Process acknowledgments, if requested. */
@Override
public void run() {
  try {
    this.ackThreadRunning = true;
    ackLatch.countDown();
    DatagramPacket ackPack = new DatagramPacket(new byte[100], 100);
    while (true) {
      this.getSocket().receive(ackPack);
      String id = new String(ackPack.getData(), ackPack.getOffset(), ackPack.getLength());
      if (logger.isDebugEnabled()) {
        logger.debug("Received ack for " + id + " from " + ackPack.getAddress().getHostAddress());
      }
      CountDownLatch latch = this.ackControl.get(id);
      if (latch != null) {
        latch.countDown();
      }
    }
  } catch (IOException e) {
    if (this.socket != null && !this.socket.isClosed()) {
      logger.error("Error on UDP acknowledge thread: " + e.getMessage());
    }
  } finally {
    this.ackThreadRunning = false;
  }
}
@Test
public void testProgressAndErrorReported() throws Exception {
  openDialog();
  final CountDownLatch latch = new CountDownLatch(1);
  try {
    AbstractUIRunner.syncRun(
        new ThrowableRunnable() {
          @Override
          public void run() throws Throwable {
            mEngine.setConnection(
                new MockUIConnection() {
                  @Override
                  public void update(DeployedStrategy strategy, Strategy newConfiguration)
                      throws Exception {
                    latch.await();
                    throw new Exception("Update Failed");
                  }
                });
          }
        });
    mBot.button("OK").click();
    ProgressDialogFixture fixture = new ProgressDialogFixture();
    fixture.assertTask("Updating strategy configuration on 'Engine'...");
    latch.countDown();
    ErrorDialogFixture errorDialog = new ErrorDialogFixture();
    errorDialog.assertError("Update Failed");
    errorDialog.dismiss();
  } finally {
    // Counting down again is a no-op if the latch is already released; this
    // just ensures the mock connection cannot block after a test failure.
    latch.countDown();
  }
}
/**
 * @param log Logger.
 * @param job Actual job.
 */
private void onMasterLeave(GridLogger log, Object job) {
  log.info("Callback executed: " + job);
  latch0.countDown();
  invokeLatch.countDown();
}
public boolean checkForcedConsumer(ServerMessage message) {
  if (message.containsProperty(ClientConsumerImpl.FORCED_DELIVERY_MESSAGE)) {
    System.out.println("MessagePullHandler.checkForcedConsumer");
    if (next >= 0) {
      if (timeout <= 0) {
        latch.countDown();
      } else {
        messagePullFuture =
            scheduledPool.schedule(
                new Runnable() {
                  @Override
                  public void run() {
                    if (next >= 0) {
                      handleDeliverNullDispatch();
                    }
                  }
                },
                timeout,
                TimeUnit.MILLISECONDS);
      }
    }
    return false;
  } else {
    next = -1;
    if (messagePullFuture != null) {
      messagePullFuture.cancel(true);
    }
    latch.countDown();
    return true;
  }
}
@Override
public void run() {
  locator.setReconnectAttempts(0);
  final ClientSessionFactory sessionFactory;
  ClientSession session;
  try {
    sessionFactory = locator.createSessionFactory(tc);
    if (sessionFactory != null) {
      session = sessionFactory.createSession();
      if (session != null) {
        if (nodeIsDown(total, count.incrementAndGet())) {
          while (latch.getCount() > 0) {
            latch.countDown();
          }
        }
        session.close();
        sessionFactory.close();
      }
    }
  } catch (Exception e) {
    // no-op
  } finally {
    latch.countDown();
    locator.close();
  }
}
@Test
public void unsubscribeAllTest() throws InterruptedException {
  Key k1 = Key.wrap("key1");
  Key k2 = Key.wrap("key2");
  Key k3 = Key.wrap("_key3");

  final CountDownLatch latch = new CountDownLatch(2);
  final CountDownLatch latch2 = new CountDownLatch(1);

  firehose.on(k1, (i) -> latch.countDown());
  firehose.on(k2, (i) -> latch.countDown());
  firehose.on(k3, (i) -> latch2.countDown());

  // Unregister every handler whose key starts with "key" (k1 and k2).
  firehose.unregister(k -> ((String) k.getPart(0)).startsWith("key"));

  firehose.notify(k1, 1);
  firehose.notify(k2, 1);
  firehose.notify(k3, 1);

  // Only k3's handler is still registered, so latch2 fires and latch does not.
  assertThat(latch2.getCount(), is(0L));
  assertThat(latch.getCount(), is(2L));
}
@Override
public void handle(
    CacheRpcCommand cmd,
    Address origin,
    org.jgroups.blocks.Response response,
    boolean preserveOrder)
    throws Throwable {
  boolean notifyRehashStarted = false;
  if (cmd instanceof CacheTopologyControlCommand) {
    CacheTopologyControlCommand rcc = (CacheTopologyControlCommand) cmd;
    log.debugf("Intercepted command: %s", cmd);
    switch (rcc.getType()) {
      case REBALANCE_START:
        txsReady.await();
        notifyRehashStarted = true;
        break;
      case CH_UPDATE:
        // TODO Use another type instead, e.g. REBASE_END
        joinEnded.countDown();
        break;
    }
  }

  delegate.handle(cmd, origin, response, preserveOrder);

  if (notifyRehashStarted) {
    rehashStarted.countDown();
  }
}
@Test
public void testRemoveUpdateExpiration() throws Exception {
  CyclicBarrier loadBarrier = new CyclicBarrier(2);
  CountDownLatch preFlushLatch = new CountDownLatch(1);
  CountDownLatch flushLatch = new CountDownLatch(1);
  CountDownLatch commitLatch = new CountDownLatch(1);

  Future<Boolean> first = removeFlushWait(itemId, loadBarrier, null, flushLatch, commitLatch);
  Future<Boolean> second = updateFlushWait(itemId, loadBarrier, preFlushLatch, null, commitLatch);
  awaitOrThrow(flushLatch);

  Map contents = Caches.entrySet(entityCache).toMap();
  assertEquals(1, contents.size());
  assertEquals(Tombstone.class, contents.get(itemId).getClass());

  preFlushLatch.countDown();
  commitLatch.countDown();
  first.get(WAIT_TIMEOUT, TimeUnit.SECONDS);
  second.get(WAIT_TIMEOUT, TimeUnit.SECONDS);

  contents = Caches.entrySet(entityCache).toMap();
  assertEquals(1, contents.size());
  assertEquals(Tombstone.class, contents.get(itemId).getClass());

  TIME_SERVICE.advance(timeout + 1);
  assertNull(entityCache.get(itemId)); // force expiration
  contents = Caches.entrySet(entityCache).toMap();
  assertEquals(Collections.EMPTY_MAP, contents);
}
/** {@inheritDoc} */
@Override
public Serializable execute() {
  int arg = this.<Integer>argument(0);

  try {
    if (log.isInfoEnabled()) {
      log.info("Executing job [job=" + this + ", arg=" + arg + ']');
    }

    startSignal.countDown();

    try {
      if (!startSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS)) {
        fail();
      }

      if (arg == 1) {
        if (log.isInfoEnabled()) {
          log.info("Job one is proceeding.");
        }
      } else {
        Thread.sleep(WAIT_TIME);
      }
    } catch (InterruptedException e) {
      if (log.isInfoEnabled()) {
        log.info("Job got cancelled [arg=" + arg + ", ses=" + ses + ", e=" + e + ']');
      }
      return 0;
    }

    if (log.isInfoEnabled()) {
      log.info("Completing job: " + ses);
    }

    return argument(0);
  } finally {
    stopSignal.countDown();
    processedCnt++;
  }
}
@Override
public void onCoordinateEvent(Event event, String message) {
  System.err.println("Got unit test event " + event.toString() + " " + message);
  events.add(event);
  latch1.countDown();
  latch2.countDown();
}
@Test
public void testEvictUpdateExpiration() throws Exception {
  CyclicBarrier loadBarrier = new CyclicBarrier(2);
  CountDownLatch preFlushLatch = new CountDownLatch(1);
  CountDownLatch postEvictLatch = new CountDownLatch(1);
  CountDownLatch flushLatch = new CountDownLatch(1);
  CountDownLatch commitLatch = new CountDownLatch(1);

  Future<Boolean> first = evictWait(itemId, loadBarrier, null, postEvictLatch);
  Future<Boolean> second =
      updateFlushWait(itemId, loadBarrier, preFlushLatch, flushLatch, commitLatch);
  awaitOrThrow(postEvictLatch);

  Map contents = Caches.entrySet(entityCache).toMap();
  assertEquals(Collections.EMPTY_MAP, contents);
  assertNull(contents.get(itemId));

  preFlushLatch.countDown();
  awaitOrThrow(flushLatch);
  contents = Caches.entrySet(entityCache).toMap();
  assertEquals(1, contents.size());
  assertEquals(FutureUpdate.class, contents.get(itemId).getClass());

  commitLatch.countDown();
  first.get(WAIT_TIMEOUT, TimeUnit.SECONDS);
  second.get(WAIT_TIMEOUT, TimeUnit.SECONDS);

  contents = Caches.entrySet(entityCache).toMap();
  assertEquals(1, contents.size());
  Object value = contents.get(itemId);
  assertNotNull(value);
  assertEquals(StandardCacheEntryImpl.class, value.getClass());

  TIME_SERVICE.advance(timeout + 1);
  assertEquals(value, entityCache.get(itemId));
}
@Subscribe
public void lifecycleChanged(Lifecycle lifecycle) {
  LOG.debug("Lifecycle is now {}", lifecycle);
  // If we switch to RUNNING from STARTING (or unknown), the server is ready to accept
  // connections on inputs. We deliberately postponed opening the inputs until now, so we
  // don't get swamped with messages before we can actually process them.
  if ((lifecycle == Lifecycle.RUNNING)
      && (previousLifecycle.get() == Lifecycle.STARTING
          || previousLifecycle.get() == Lifecycle.UNINITIALIZED)) {
    LOG.info(
        "Triggering launching persisted inputs, node transitioned from {} to {}",
        previousLifecycle.get(),
        lifecycle);

    // Set lifecycle BEFORE counting down the latch to avoid race conditions!
    previousLifecycle.set(lifecycle);
    startLatch.countDown();
  }

  // If we failed to start up because some other service aborted, we still need to get
  // over the barrier.
  if (lifecycle == Lifecycle.FAILED) {
    startLatch.countDown();
  }
}
@Override
public void run() {
  try {
    m_channelManager = new ClientChannelManager();

    long expireTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(1);

    while (m_channelManager.getActiveChannel() == null
        && System.currentTimeMillis() < expireTime) {
      TimeUnit.MILLISECONDS.sleep(1);
    }

    m_warmup.countDown();
    run0();
  } catch (Throwable e) {
    m_logger.error(e.getMessage(), e);
    m_warmup.countDown();
  } finally {
    if (m_channelManager != null) {
      m_channelManager.close();
    }
    m_latch.countDown();
  }
}
public void store(Object key, Object value) {
  store.put(key, value);
  callCount.incrementAndGet();
  latchStore.countDown();
  if (latchStoreOpCount != null) {
    latchStoreOpCount.countDown();
  }
}
private void verifyMetadataConsistency(int numEntries, LedgerHandle lh) throws Exception {
  final CountDownLatch addDoneLatch = new CountDownLatch(1);
  final CountDownLatch deadIOLatch = new CountDownLatch(1);
  final CountDownLatch recoverDoneLatch = new CountDownLatch(1);
  final CountDownLatch failedLatch = new CountDownLatch(1);

  // kill the first bookie and replace it with an unauthorized bookie
  BookieSocketAddress bookie = lh.getLedgerMetadata().currentEnsemble.get(0);
  ServerConfiguration conf = killBookie(bookie);
  startUnauthorizedBookie(conf, addDoneLatch);

  // kill the second bookie and replace it with a slow, dead bookie
  bookie = lh.getLedgerMetadata().currentEnsemble.get(1);
  conf = killBookie(bookie);
  startDeadBookie(conf, deadIOLatch);

  // try to add entries
  for (int i = 0; i < numEntries; i++) {
    lh.asyncAddEntry(
        "data".getBytes(),
        new AddCallback() {
          @Override
          public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
            if (BKException.Code.OK != rc) {
              failedLatch.countDown();
              deadIOLatch.countDown();
            }
            if (0 == entryId) {
              try {
                recoverDoneLatch.await();
              } catch (InterruptedException ie) {
                // ignore
              }
            }
          }
        },
        null);
  }
  // adds finished
  addDoneLatch.countDown();
  // wait until entries fail due to UnauthorizedAccessException
  failedLatch.await();
  // simulate transferring ownership of this ledger to another host
  LOG.info("Recover ledger {}.", lh.getId());
  ClientConfiguration newConf = new ClientConfiguration();
  newConf.addConfiguration(baseClientConf);
  BookKeeper newBkc = new BookKeeperTestClient(newConf.setReadTimeout(1));
  LedgerHandle recoveredLh = newBkc.openLedger(lh.getId(), digestType, "".getBytes());
  LOG.info("Recover ledger {} done.", lh.getId());
  recoverDoneLatch.countDown();
  // wait a bit until add operations fail on the second bookie due to an IOException
  TimeUnit.SECONDS.sleep(5);
  // open the ledger again to make sure we get the right last-add-confirmed
  LedgerHandle newLh = newBkc.openLedger(lh.getId(), digestType, "".getBytes());
  assertEquals(
      "Metadata should be consistent across different opened ledgers",
      recoveredLh.getLastAddConfirmed(),
      newLh.getLastAddConfirmed());
}
private void removeIntent(IntentService intentService, Intent intent) {
  IntentListener listener = null;
  Key key = intent.key();

  final CountDownLatch withdrawLatch, purgeLatch;
  if (purgeAfterRemove || sync) {
    // set up latch and listener to track uninstall progress
    withdrawLatch = new CountDownLatch(1);
    purgeLatch = purgeAfterRemove ? new CountDownLatch(1) : null;
    listener =
        (IntentEvent event) -> {
          if (Objects.equals(event.subject().key(), key)) {
            if (event.type() == IntentEvent.Type.WITHDRAWN
                || event.type() == IntentEvent.Type.FAILED) {
              withdrawLatch.countDown();
            } else if (purgeAfterRemove && event.type() == IntentEvent.Type.PURGED) {
              purgeLatch.countDown();
            }
          }
        };
    intentService.addListener(listener);
  } else {
    purgeLatch = null;
    withdrawLatch = null;
  }

  // request the withdraw
  intentService.withdraw(intent);

  if (purgeAfterRemove || sync) {
    try {
      // wait for withdraw event
      withdrawLatch.await(5, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      print("Timed out waiting for intent {} withdraw", key);
    }
    // double-check the state
    IntentState state = intentService.getIntentState(key);
    if (purgeAfterRemove && (state == WITHDRAWN || state == FAILED)) {
      intentService.purge(intent);
    }
    if (sync) {
      // wait for purge event
      /* TODO Technically, the event comes before map.remove() is called.
         If we depend on sync and purge working together, we will need to
         address this. */
      try {
        purgeLatch.await(5, TimeUnit.SECONDS);
      } catch (InterruptedException e) {
        print("Timed out waiting for intent {} purge", key);
      }
    }
  }

  if (listener != null) {
    // clean up the listener
    intentService.removeListener(listener);
  }
}
@Override
public synchronized void doTick(final int tps) {
  if (this.groups.isEmpty()) {
    if (!CoreMain.isClient()) {
      SpammyError.err(
          "There are no tick groups; the server doesn't have anything to do. Do you have any worlds?",
          10,
          key);
    }
    return;
  }
  if (this.groups.size() == 1) {
    // TODO: count time of execution and split if needed.
    this.groups.iterator().next().doTick(tps);
    return;
  }
  final AtomicInteger i = new AtomicInteger(0);
  final ForkJoinPool pool =
      new ForkJoinPool(
          this.groups.size(),
          p -> new NamedForkJoinWorkerThread(p, i.getAndIncrement()),
          (t, e) -> {
            // TODO: maybe add some prettier error printing
            System.err.println("Error in tick thread: " + t.getName());
            e.printStackTrace();
          },
          false);
  /*
   * TODO: count time of execution for all groups. If any group is creating lag, try to
   * split it (should single-time lag spikes be ignored?). If two groups can be joined,
   * try to join them.
   */
  final CountDownLatch latch = new CountDownLatch(this.groups.size());
  for (final Iterator<TickGroupImpl> it = this.groups.iterator(); it.hasNext(); ) {
    final TickGroupImpl tickGroup = it.next();
    if (tickGroup.isEmpty()) {
      it.remove();
      latch.countDown();
      continue;
    }
    pool.submit(
        () -> {
          try {
            tickGroup.doTick(tps);
            this.core.runScheduler(false);
            this.core.runSync();
          } finally {
            latch.countDown();
          }
        });
  }
  try {
    latch.await();
  } catch (final InterruptedException e) {
    e.printStackTrace();
  }
}
@Override
protected void doStart() throws Exception {
  super.doStart();

  if (vertx == null) {
    // we are creating vertx so we should handle its lifecycle
    createdVertx = true;

    final CountDownLatch latch = new CountDownLatch(1);

    // use a host/port if a host name is specified
    if (host != null && host.length() > 0) {
      LOG.info("Creating Clustered Vertx {}:{}", host, port);
      // use the async API as we want to wait for the event bus to be ready before we
      // are in the started state
      VertxFactory.newVertx(
          port,
          host,
          new AsyncResultHandler<Vertx>() {
            @Override
            public void handle(AsyncResult<Vertx> event) {
              if (event.cause() != null) {
                LOG.warn(
                    "Error creating Clustered Vertx " + host + ":" + port
                        + " due to " + event.cause().getMessage(),
                    event.cause());
              } else if (event.succeeded()) {
                vertx = event.result();
                LOG.info("EventBus is ready: {}", vertx);
              }
              latch.countDown();
            }
          });
    } else if (host != null) {
      LOG.info("Creating Clustered Vertx {}", host);
      vertx = VertxFactory.newVertx(host);
      LOG.info("EventBus is ready: {}", vertx);
      latch.countDown();
    } else {
      LOG.info("Creating Non-Clustered Vertx");
      vertx = VertxFactory.newVertx();
      LOG.info("EventBus is ready: {}", vertx);
      latch.countDown();
    }

    if (latch.getCount() > 0) {
      LOG.info("Waiting for EventBus to be ready using {} sec as timeout", timeout);
      latch.await(timeout, TimeUnit.SECONDS);
    }
  }
}
private void waitForTestVerificationAndCleanup(Future future) {
  try {
    queryNotifyLatch.await(25, TimeUnit.MILLISECONDS);
    queryWaitYieldLatch.countDown();
    queryWaitLatch.countDown();
    future.get();
    factory.clearAdapters();
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
public void storeAll(Map map) {
  store.putAll(map);
  callCount.incrementAndGet();
  latchStoreAll.countDown();
  if (latchStoreAllOpCount != null) {
    for (int i = 0; i < map.size(); i++) {
      latchStoreAllOpCount.countDown();
    }
  }
}
@Stop(priority = 20)
public void stop() {
  notifier.removeListener(listener);
  synchronized (rehashInProgressMonitor) {
    rehashInProgressMonitor.notifyAll();
  }
  rehashExecutor.shutdownNow();
  joinStartedLatch.countDown();
  joinCompletedLatch.countDown();
  joinComplete = true;
}
@Test
public void testConcatNonBlockingObservables() {
  final CountDownLatch okToContinueW1 = new CountDownLatch(1);
  final CountDownLatch okToContinueW2 = new CountDownLatch(1);

  final TestObservable<String> w1 =
      new TestObservable<String>(null, okToContinueW1, "one", "two", "three");
  final TestObservable<String> w2 =
      new TestObservable<String>(null, okToContinueW2, "four", "five", "six");

  @SuppressWarnings("unchecked")
  Observer<String> observer = mock(Observer.class);

  Observable<Observable<String>> observableOfObservables =
      Observable.create(
          new Observable.OnSubscribeFunc<Observable<String>>() {
            @Override
            public Subscription onSubscribe(Observer<? super Observable<String>> observer) {
              // simulate what would happen in an observable
              observer.onNext(Observable.create(w1));
              observer.onNext(Observable.create(w2));
              observer.onCompleted();
              return Subscriptions.empty();
            }
          });
  Observable<String> concat = Observable.concat(observableOfObservables);
  concat.subscribe(observer);

  verify(observer, times(0)).onCompleted();

  try {
    // release both threads
    okToContinueW1.countDown();
    okToContinueW2.countDown();
    // wait for both to finish
    w1.t.join();
    w2.t.join();
  } catch (InterruptedException e) {
    e.printStackTrace();
  }

  InOrder inOrder = inOrder(observer);
  inOrder.verify(observer, times(1)).onNext("one");
  inOrder.verify(observer, times(1)).onNext("two");
  inOrder.verify(observer, times(1)).onNext("three");
  inOrder.verify(observer, times(1)).onNext("four");
  inOrder.verify(observer, times(1)).onNext("five");
  inOrder.verify(observer, times(1)).onNext("six");
  verify(observer, times(1)).onCompleted();
}
@Override
public void nodeUP(TopologyMember topologyMember, boolean last) {
  if (topologyMember.getLive() != null
      && !liveNode.contains(topologyMember.getLive().getName())) {
    liveNode.add(topologyMember.getLive().getName());
    latch.countDown();
  }
  if (topologyMember.getBackup() != null
      && !backupNode.contains(topologyMember.getBackup().getName())) {
    backupNode.add(topologyMember.getBackup().getName());
    latch.countDown();
  }
}
@Override
public void actionCanceled() throws Exception {
  this.isCancelled = true;
  try {
    writer.flush();
    writer.close();
    latch.countDown();
    log.info("Writing operation cancelled by user.");
  } catch (Exception e) {
    latch.countDown();
    DialogUtils.showQuickErrorDialog(null, e, "Error during cancel action.");
  }
}
@Test
public void testAllMethods()
    throws IOException, DeploymentException, InterruptedException, EncodeException {
  Server server = startServer(SessionBuilderEncDecTestEndpoint.class);

  CountDownLatch messageLatch = new CountDownLatch(1);
  CountDownLatch onOpenLatch = new CountDownLatch(1);
  CountDownLatch onCloseLatch = new CountDownLatch(1);
  CountDownLatch onErrorLatch = new CountDownLatch(1);

  final ClientEndpointConfig clientEndpointConfig =
      ClientEndpointConfig.Builder.create()
          .encoders(Collections.singletonList(AClassCoder.class))
          .decoders(Collections.singletonList(AClassCoder.class))
          .build();

  try {
    Session session =
        new SessionBuilder()
            .uri(getURI(SessionBuilderEncDecTestEndpoint.class))
            .clientEndpointConfig(clientEndpointConfig)
            .messageHandler(
                AClass.class,
                aClass -> {
                  if (MESSAGE.equals(aClass.toString())) {
                    messageLatch.countDown();
                  }
                })
            .onOpen((session1, endpointConfig) -> onOpenLatch.countDown())
            .onError((session1, throwable) -> onErrorLatch.countDown())
            .onClose(
                (session1, closeReason) -> {
                  onCloseLatch.countDown();
                  throw new RuntimeException("onErrorTrigger");
                })
            .connect();

    session.getBasicRemote().sendObject(new AClass());

    assertTrue(onOpenLatch.await(3, TimeUnit.SECONDS));
    assertTrue(messageLatch.await(3, TimeUnit.SECONDS));

    session.close();
    assertTrue(onCloseLatch.await(3, TimeUnit.SECONDS));
    assertTrue(onErrorLatch.await(3, TimeUnit.SECONDS));
  } finally {
    stopServer(server);
  }
}
@Test
public void testReplay() throws InterruptedException {
  final AtomicInteger counter = new AtomicInteger();
  ConnectableObservable<String> o =
      Observable.<String>create(
              observer -> {
                observer.onSubscribe(EmptySubscription.INSTANCE);
                new Thread(
                        new Runnable() {
                          @Override
                          public void run() {
                            counter.incrementAndGet();
                            observer.onNext("one");
                            observer.onComplete();
                          }
                        })
                    .start();
              })
          .replay();

  // we connect immediately and it will emit the value
  Disposable s = o.connect();
  try {
    // we then expect the following 2 subscriptions to get that same value
    final CountDownLatch latch = new CountDownLatch(2);

    // subscribe once
    o.subscribe(
        v -> {
          assertEquals("one", v);
          latch.countDown();
        });

    // subscribe again
    o.subscribe(
        v -> {
          assertEquals("one", v);
          latch.countDown();
        });

    if (!latch.await(1000, TimeUnit.MILLISECONDS)) {
      fail("subscriptions did not receive values");
    }
    assertEquals(1, counter.get());
  } finally {
    s.dispose();
  }
}
@Test
public void testContainerWithDestNameNoCorrelation() throws Exception {
  BeanFactory beanFactory = mock(BeanFactory.class);
  when(beanFactory.containsBean(IntegrationContextUtils.TASK_SCHEDULER_BEAN_NAME))
      .thenReturn(true);
  ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler();
  scheduler.initialize();
  when(beanFactory.getBean(IntegrationContextUtils.TASK_SCHEDULER_BEAN_NAME, TaskScheduler.class))
      .thenReturn(scheduler);

  final JmsOutboundGateway gateway = new JmsOutboundGateway();
  gateway.setBeanFactory(beanFactory);
  gateway.setConnectionFactory(getConnectionFactory());
  gateway.setRequestDestination(requestQueue4);
  gateway.setReplyDestinationName("reply4");
  gateway.setUseReplyContainer(true);
  gateway.afterPropertiesSet();
  gateway.start();

  final AtomicReference<Object> reply = new AtomicReference<Object>();
  final CountDownLatch latch1 = new CountDownLatch(1);
  final CountDownLatch latch2 = new CountDownLatch(1);
  Executors.newSingleThreadExecutor()
      .execute(
          () -> {
            latch1.countDown();
            try {
              reply.set(gateway.handleRequestMessage(new GenericMessage<String>("foo")));
            } finally {
              latch2.countDown();
            }
          });

  assertTrue(latch1.await(10, TimeUnit.SECONDS));

  JmsTemplate template = new JmsTemplate();
  template.setConnectionFactory(getConnectionFactory());
  template.setReceiveTimeout(10000);
  javax.jms.Message request = template.receive(requestQueue4);
  assertNotNull(request);
  final javax.jms.Message jmsReply = request;
  template.send(
      request.getJMSReplyTo(),
      (MessageCreator)
          session -> {
            jmsReply.setJMSCorrelationID(jmsReply.getJMSMessageID());
            return jmsReply;
          });

  assertTrue(latch2.await(10, TimeUnit.SECONDS));
  assertNotNull(reply.get());

  gateway.stop();
  scheduler.destroy();
}
public void nodeUP(
    final long uniqueEventID,
    String nodeID,
    Pair<TransportConfiguration, TransportConfiguration> connectorPair,
    boolean last) {
  if (connectorPair.getA() != null && !liveNode.contains(connectorPair.getA().getName())) {
    liveNode.add(connectorPair.getA().getName());
    latch.countDown();
  }
  if (connectorPair.getB() != null && !backupNode.contains(connectorPair.getB().getName())) {
    backupNode.add(connectorPair.getB().getName());
    latch.countDown();
  }
}
@GET @Path("1") @ManagedAsync public void get1(final @Suspended AsyncResponse asyncResponse) throws IOException, InterruptedException { if (asyncResponse.cancel()) { callbackCalledSignal1.countDown(); } if (asyncResponse.cancel()) { callbackCalledSignal1.countDown(); } if (asyncResponse.cancel()) { callbackCalledSignal1.countDown(); } }