private void cancelFuture(final NamespaceImplData implDatum) {
  synchronized (implDatum.enabled) {
    final CountDownLatch latch = new CountDownLatch(1);
    final ListenableFuture<?> future = implDatum.future;
    Futures.addCallback(
        future,
        new FutureCallback<Object>() {
          @Override
          public void onSuccess(Object result) {
            latch.countDown();
          }

          @Override
          public void onFailure(Throwable t) {
            // Expect CancellationException
            latch.countDown();
            if (!(t instanceof CancellationException)) {
              log.error(t, "Error in namespace [%s]", implDatum.name);
            }
          }
        });
    // Interrupt to make sure we don't pollute stuff after we've already cleaned up
    if (!future.isDone() && !future.cancel(true)) {
      throw new ISE("Future for namespace [%s] was not able to be canceled", implDatum.name);
    }
    try {
      latch.await();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw Throwables.propagate(e);
    }
  }
}
private synchronized void scheduleNextRequest() {
  // stopped or done?
  TaskInfo taskInfo = HttpRemoteTask.this.taskInfo.get();
  if (!running || taskInfo.getState().isDone()) {
    return;
  }

  // outstanding request?
  if (future != null && !future.isDone()) {
    // this should never happen
    log.error("Can not reschedule update because an update is already running");
    return;
  }

  // if throttled due to error, asynchronously wait for timeout and try again
  ListenableFuture<?> errorRateLimit = getErrorTracker.acquireRequestPermit();
  if (!errorRateLimit.isDone()) {
    errorRateLimit.addListener(this::scheduleNextRequest, executor);
    return;
  }

  Request request =
      prepareGet()
          .setUri(uriBuilderFrom(taskInfo.getSelf()).addParameter("summarize").build())
          .setHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8.toString())
          .setHeader(PrestoHeaders.PRESTO_CURRENT_STATE, taskInfo.getState().toString())
          .setHeader(PrestoHeaders.PRESTO_MAX_WAIT, refreshMaxWait.toString())
          .build();

  getErrorTracker.startRequest();

  future = httpClient.executeAsync(request, createFullJsonResponseHandler(taskInfoCodec));
  Futures.addCallback(future, new SimpleHttpResponseHandler<>(this, request.getUri()), executor);
}
@Test
public void pingPong() throws Exception {
  connect();
  Utils.rollMockClock(0);
  // No ping pong happened yet.
  assertEquals(Long.MAX_VALUE, peer.getLastPingTime());
  assertEquals(Long.MAX_VALUE, peer.getPingTime());
  ListenableFuture<Long> future = peer.ping();
  assertEquals(Long.MAX_VALUE, peer.getLastPingTime());
  assertEquals(Long.MAX_VALUE, peer.getPingTime());
  assertFalse(future.isDone());
  Ping pingMsg = (Ping) outbound(writeTarget);
  Utils.rollMockClock(5);
  // The pong is returned.
  inbound(writeTarget, new Pong(pingMsg.getNonce()));
  pingAndWait(writeTarget);
  assertTrue(future.isDone());
  long elapsed = future.get();
  assertTrue("" + elapsed, elapsed > 1000);
  assertEquals(elapsed, peer.getLastPingTime());
  assertEquals(elapsed, peer.getPingTime());
  // Do it again and make sure it affects the average.
  future = peer.ping();
  pingMsg = (Ping) outbound(writeTarget);
  Utils.rollMockClock(50);
  inbound(writeTarget, new Pong(pingMsg.getNonce()));
  elapsed = future.get();
  assertEquals(elapsed, peer.getLastPingTime());
  assertEquals(7250, peer.getPingTime());
}
private static void send(PaymentSession session) {
  try {
    System.out.println("Payment Request");
    System.out.println("Amount: " + session.getValue().doubleValue() / 100000 + "mDOGE");
    System.out.println("Date: " + session.getDate());
    System.out.println("Memo: " + session.getMemo());
    if (session.pkiVerificationData != null) {
      System.out.println("Pki-Verified Name: " + session.pkiVerificationData.name);
      if (session.pkiVerificationData.orgName != null)
        System.out.println("Pki-Verified Org: " + session.pkiVerificationData.orgName);
      System.out.println("PKI data verified by: " + session.pkiVerificationData.rootAuthorityName);
    }
    final Wallet.SendRequest req = session.getSendRequest();
    if (password != null) {
      if (!wallet.checkPassword(password)) {
        System.err.println("Password is incorrect.");
        return;
      }
      req.aesKey = wallet.getKeyCrypter().deriveKey(password);
    }
    wallet.completeTx(req); // may throw InsufficientMoneyException.
    if (options.has("offline")) {
      wallet.commitTx(req.tx);
      return;
    }
    setup();
    // No refund address specified, no user-specified memo field.
    ListenableFuture<PaymentSession.Ack> future =
        session.sendPayment(ImmutableList.of(req.tx), null, null);
    if (future == null) {
      // No payment_url for submission, so broadcast and wait.
      peers.startAndWait();
      peers.broadcastTransaction(req.tx).get();
    } else {
      PaymentSession.Ack ack = future.get();
      wallet.commitTx(req.tx);
      System.out.println("Memo from server: " + ack.getMemo());
    }
  } catch (PaymentRequestException e) {
    System.err.println("Failed to send payment " + e.getMessage());
    System.exit(1);
  } catch (VerificationException e) {
    System.err.println("Failed to send payment " + e.getMessage());
    System.exit(1);
  } catch (ExecutionException e) {
    System.err.println("Failed to send payment " + e.getMessage());
    System.exit(1);
  } catch (IOException e) {
    System.err.println("Invalid payment " + e.getMessage());
    System.exit(1);
  } catch (InterruptedException e1) {
    // Ignore.
  } catch (InsufficientMoneyException e) {
    System.err.println(
        "Insufficient funds: have " + Utils.bitcoinValueToFriendlyString(wallet.getBalance()));
  } catch (BlockStoreException e) {
    throw new RuntimeException(e);
  }
}
@Test
public void testBufferNotCloseOnFail() throws Exception {
  SqlTask sqlTask = createInitialTask();

  updateTask(
      sqlTask,
      EMPTY_SOURCES,
      INITIAL_EMPTY_OUTPUT_BUFFERS.withBuffer("out", new UnpartitionedPagePartitionFunction()));

  ListenableFuture<BufferResult> bufferResult =
      sqlTask.getTaskResults("out", 0, new DataSize(1, MEGABYTE));
  assertFalse(bufferResult.isDone());

  TaskState taskState = sqlTask.getTaskInfo().getState();
  sqlTask.failed(new Exception("test"));
  assertEquals(sqlTask.getTaskInfo(taskState).get(200, MILLISECONDS).getState(), TaskState.FAILED);

  // buffer will not be closed by fail event. event is async so wait a bit for event to fire
  try {
    assertTrue(bufferResult.get(200, MILLISECONDS).isBufferClosed());
    fail("expected TimeoutException");
  } catch (TimeoutException expected) {
  }
  assertFalse(sqlTask.getTaskResults("out", 0, new DataSize(1, MEGABYTE)).isDone());
}
@Override
protected void doSubscribe(String eventName, final SettableFuture<?> future) {
  List<SettableFuture<?>> futures = new ArrayList<SettableFuture<?>>();
  for (RedisConnection conn : connections) {
    SettableFuture<?> newFuture = SettableFuture.create();
    conn.subscribe(eventName, newFuture);
    futures.add(newFuture);
  }
  final ListenableFuture<?> combined = Futures.allAsList(futures);
  combined.addListener(
      new Runnable() {
        @Override
        public void run() {
          try {
            combined.get();
            future.set(null);
          } catch (InterruptedException e) {
            future.setException(e);
          } catch (ExecutionException e) {
            future.setException(e.getCause());
          }
        }
      },
      getExecutorService());
}
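The same fan-in can also be expressed with Futures.addCallback, which hands the listener the unwrapped failure cause and so avoids the manual ExecutionException handling. A minimal sketch, assuming the same futures list, SettableFuture parameter, and getExecutorService() as the snippet above:

final ListenableFuture<?> combined = Futures.allAsList(futures);
Futures.addCallback(
    combined,
    new FutureCallback<Object>() {
      @Override
      public void onSuccess(Object result) {
        // All per-connection subscriptions completed.
        future.set(null);
      }

      @Override
      public void onFailure(Throwable t) {
        // addCallback delivers the cause directly, so no unwrapping is needed here.
        future.setException(t);
      }
    },
    getExecutorService());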
@Test
public void testBufferCloseOnFinish() throws Exception {
  SqlTask sqlTask = createInitialTask();

  OutputBuffers outputBuffers =
      INITIAL_EMPTY_OUTPUT_BUFFERS
          .withBuffer("out", new UnpartitionedPagePartitionFunction())
          .withNoMoreBufferIds();
  updateTask(sqlTask, EMPTY_SOURCES, outputBuffers);

  ListenableFuture<BufferResult> bufferResult =
      sqlTask.getTaskResults("out", 0, new DataSize(1, MEGABYTE));
  assertFalse(bufferResult.isDone());

  // finish the task by closing the sources (no splits will ever be added)
  updateTask(
      sqlTask,
      ImmutableList.of(
          new TaskSource(TABLE_SCAN_NODE_ID, ImmutableSet.<ScheduledSplit>of(), true)),
      outputBuffers);
  assertEquals(sqlTask.getTaskInfo().getState(), TaskState.FINISHED);

  // buffer will be closed by the cancel event (wait up to 200 ms for the event to fire)
  assertTrue(bufferResult.get(200, MILLISECONDS).isBufferClosed());
  assertEquals(sqlTask.getTaskInfo().getOutputBuffers().getState(), BufferState.FINISHED);

  // verify the buffer is closed
  bufferResult = sqlTask.getTaskResults("out", 0, new DataSize(1, MEGABYTE));
  assertTrue(bufferResult.isDone());
  assertTrue(bufferResult.get().isBufferClosed());
}
@Test
public void fourPeers() throws Exception {
  InboundMessageQueuer[] channels = {
    connectPeer(1), connectPeer(2), connectPeer(3), connectPeer(4)
  };
  Transaction tx = new Transaction(params);
  TransactionBroadcast broadcast = new TransactionBroadcast(peerGroup, tx);
  ListenableFuture<Transaction> future = broadcast.broadcast();
  assertFalse(future.isDone());
  // We expect two peers to receive a tx message, and at least one of the others must announce
  // for the future to complete successfully.
  Message[] messages = {
    (Message) outbound(channels[0]),
    (Message) outbound(channels[1]),
    (Message) outbound(channels[2]),
    (Message) outbound(channels[3])
  };
  // 0 and 3 are randomly selected to receive the broadcast.
  assertEquals(tx, messages[0]);
  assertEquals(tx, messages[3]);
  assertNull(messages[1]);
  assertNull(messages[2]);
  Threading.waitForUserCode();
  assertFalse(future.isDone());
  inbound(channels[1], InventoryMessage.with(tx));
  pingAndWait(channels[1]);
  Threading.waitForUserCode();
  assertTrue(future.isDone());
}
/**
 * Sends a {@link Message} to the remote service. Returns a future that will be completed when
 * the message has been processed.
 *
 * @param message The message to send.
 * @param result Object to set into the future when message is being processed.
 * @param <V> Type of the result.
 * @return A {@link ListenableFuture} that will be completed when the message has been processed.
 */
protected final synchronized <V> ListenableFuture<V> sendMessage(Message message, V result) {
  if (!isRunning()) {
    return Futures.immediateFailedFuture(
        new IllegalStateException("Cannot send message to non-running application"));
  }

  final ListenableFuture<V> messageFuture =
      ZKMessages.sendMessage(zkClient, getMessagePrefix(), message, result);
  messageFutures.add(messageFuture);
  messageFuture.addListener(
      new Runnable() {
        @Override
        public void run() {
          // If the completion is triggered when stopping, do nothing.
          if (state() == State.STOPPING) {
            return;
          }
          synchronized (AbstractZKServiceController.this) {
            messageFutures.remove(messageFuture);
          }
        }
      },
      Threads.SAME_THREAD_EXECUTOR);

  return messageFuture;
}
@Override
public ListenableFuture<?> isBlocked() {
  ListenableFuture<?> blocked = exchangeClient.isBlocked();
  if (blocked.isDone()) {
    return NOT_BLOCKED;
  }
  return blocked;
}
@Override
public ListenableFuture<?> isBlocked() {
  ListenableFuture<?> blocked = inMemoryExchange.waitForWriting();
  if (blocked.isDone()) {
    return NOT_BLOCKED;
  }
  return blocked;
}
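Both isBlocked() implementations above return a shared NOT_BLOCKED sentinel once the underlying future has completed. A minimal sketch of such a constant, as an assumption rather than the surrounding class's actual definition:

// Pre-completed future: callers can test isDone() and skip registering listeners
// when the operator is already unblocked.
public static final ListenableFuture<?> NOT_BLOCKED = Futures.immediateFuture(null);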
private void copyToS3(String fileName) {
  String bucketName = (String) properties.get(BUCKET_PROPNAME);
  String accessId = (String) properties.get(ACCESS_ID_PROPNAME);
  String secretKey = (String) properties.get(SECRET_KEY_PROPNAME);

  Properties overrides = new Properties();
  overrides.setProperty("s3" + ".identity", accessId);
  overrides.setProperty("s3" + ".credential", secretKey);

  final Iterable<? extends Module> MODULES =
      ImmutableSet.of(
          new JavaUrlHttpCommandExecutorServiceModule(),
          new Log4JLoggingModule(),
          new NettyPayloadModule());

  BlobStoreContext context =
      ContextBuilder.newBuilder("s3")
          .credentials(accessId, secretKey)
          .modules(MODULES)
          .overrides(overrides)
          .buildView(BlobStoreContext.class);

  // Create the container (the bucket in S3)
  try {
    AsyncBlobStore blobStore = context.getAsyncBlobStore(); // it can be changed to sync BlobStore
    // createContainerInLocation returns false if the container already exists
    ListenableFuture<Boolean> container = blobStore.createContainerInLocation(null, bucketName);
    if (container.get()) {
      LOG.info("Created bucket " + bucketName);
    }
  } catch (Exception ex) {
    LOG.error("Could not start binary service: {}", ex.getMessage());
    throw new RuntimeException(ex);
  }

  try {
    File file = new File(fileName);
    AsyncBlobStore blobStore = context.getAsyncBlobStore();
    BlobBuilder blobBuilder =
        blobStore
            .blobBuilder(file.getName())
            .payload(file)
            .calculateMD5()
            .contentType("text/plain")
            .contentLength(file.length());

    Blob blob = blobBuilder.build();
    ListenableFuture<String> futureETag =
        blobStore.putBlob(bucketName, blob, PutOptions.Builder.multipart());
    LOG.info("Uploaded file etag=" + futureETag.get());
  } catch (Exception e) {
    LOG.error("Error uploading to blob store", e);
  }
}
private static void sendPaymentRequest(String location, boolean verifyPki) {
  if (location.startsWith("http") || location.startsWith("defcoin")) {
    try {
      ListenableFuture<PaymentSession> future;
      if (location.startsWith("http")) {
        future = PaymentSession.createFromUrl(location, verifyPki);
      } else {
        BitcoinURI paymentRequestURI = new BitcoinURI(location);
        future = PaymentSession.createFromBitcoinUri(paymentRequestURI, verifyPki);
      }
      PaymentSession session = future.get();
      if (session != null) {
        send(session);
      } else {
        System.err.println("Server returned null session");
        System.exit(1);
      }
    } catch (PaymentRequestException e) {
      System.err.println("Error creating payment session " + e.getMessage());
      System.exit(1);
    } catch (BitcoinURIParseException e) {
      System.err.println("Invalid defcoin uri: " + e.getMessage());
      System.exit(1);
    } catch (InterruptedException e) {
      // Ignore.
    } catch (ExecutionException e) {
      throw new RuntimeException(e);
    }
  } else {
    // Try to open the payment request as a file.
    FileInputStream stream = null;
    try {
      File paymentRequestFile = new File(location);
      stream = new FileInputStream(paymentRequestFile);
    } catch (Exception e) {
      System.err.println("Failed to open file: " + e.getMessage());
      System.exit(1);
    }
    try {
      paymentRequest =
          org.bitcoin.protocols.payments.Protos.PaymentRequest.newBuilder()
              .mergeFrom(stream)
              .build();
    } catch (IOException e) {
      System.err.println("Failed to parse payment request from file " + e.getMessage());
      System.exit(1);
    }
    PaymentSession session = null;
    try {
      session = new PaymentSession(paymentRequest, verifyPki);
    } catch (PaymentRequestException e) {
      System.err.println("Error creating payment session " + e.getMessage());
      System.exit(1);
    }
    send(session);
  }
}
@Override
public boolean cancel(final boolean mayInterruptIfRunning) {
  // stop the last future
  final ListenableFuture<V> future = lastFuture.get();
  if (future != null) {
    future.cancel(true);
  }
  return super.cancel(mayInterruptIfRunning);
}
private synchronized void scheduleUpdate() {
  // don't update if the task hasn't been started yet or if it is already finished
  if (!needsUpdate.get() || taskInfo.get().getState().isDone()) {
    return;
  }

  // if we have an old request outstanding, cancel it
  if (currentRequest != null
      && Duration.nanosSince(currentRequestStartNanos).compareTo(new Duration(2, SECONDS)) >= 0) {
    needsUpdate.set(true);
    currentRequest.cancel(true);
    currentRequest = null;
    currentRequestStartNanos = 0;
  }

  // if there is a request already running, wait for it to complete
  if (this.currentRequest != null && !this.currentRequest.isDone()) {
    return;
  }

  // if throttled due to error, asynchronously wait for timeout and try again
  ListenableFuture<?> errorRateLimit = updateErrorTracker.acquireRequestPermit();
  if (!errorRateLimit.isDone()) {
    errorRateLimit.addListener(this::scheduleUpdate, executor);
    return;
  }

  List<TaskSource> sources = getSources();
  TaskUpdateRequest updateRequest =
      new TaskUpdateRequest(
          session.toSessionRepresentation(), planFragment, sources, outputBuffers.get());

  Request request =
      preparePost()
          .setUri(uriBuilderFrom(taskInfo.get().getSelf()).addParameter("summarize").build())
          .setHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8.toString())
          .setBodyGenerator(jsonBodyGenerator(taskUpdateRequestCodec, updateRequest))
          .build();

  updateErrorTracker.startRequest();

  ListenableFuture<JsonResponse<TaskInfo>> future =
      httpClient.executeAsync(request, createFullJsonResponseHandler(taskInfoCodec));
  currentRequest = future;
  currentRequestStartNanos = System.nanoTime();

  // The needsUpdate flag needs to be set to false BEFORE adding the Future callback since the
  // callback might change the flag value and does so without grabbing the instance lock.
  needsUpdate.set(false);

  Futures.addCallback(
      future,
      new SimpleHttpResponseHandler<>(new UpdateResponseHandler(sources), request.getUri()),
      executor);
}
@Test
public void testMasterAwareExecution() throws Exception {
  Settings settings = settingsBuilder().put("discovery.type", "local").build();

  ListenableFuture<String> master = internalCluster().startNodeAsync(settings);
  ListenableFuture<String> nonMaster =
      internalCluster()
          .startNodeAsync(settingsBuilder().put(settings).put("node.master", false).build());
  master.get();
  ensureGreen(); // make sure we have a cluster

  ClusterService clusterService =
      internalCluster().getInstance(ClusterService.class, nonMaster.get());

  final boolean[] taskFailed = {false};
  final CountDownLatch latch1 = new CountDownLatch(1);
  clusterService.submitStateUpdateTask(
      "test",
      new ClusterStateUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
          latch1.countDown();
          return currentState;
        }

        @Override
        public void onFailure(String source, Throwable t) {
          taskFailed[0] = true;
          latch1.countDown();
        }
      });

  latch1.await();
  assertTrue("cluster state update task was executed on a non-master", taskFailed[0]);

  taskFailed[0] = true;
  final CountDownLatch latch2 = new CountDownLatch(1);
  clusterService.submitStateUpdateTask(
      "test",
      new ClusterStateNonMasterUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
          taskFailed[0] = false;
          latch2.countDown();
          return currentState;
        }

        @Override
        public void onFailure(String source, Throwable t) {
          taskFailed[0] = true;
          latch2.countDown();
        }
      });

  latch2.await();
  assertFalse("non-master cluster state update task was not executed", taskFailed[0]);
}
@Nullable
@Override
public BuildResult getBuildRuleResult(BuildTarget buildTarget)
    throws ExecutionException, InterruptedException {
  ListenableFuture<BuildResult> result = results.get(buildTarget);
  if (result == null) {
    return null;
  }
  return result.get();
}
@Override
public ListenableFuture<ServiceFilterResponse> handleRequest(
    final ServiceFilterRequest request, final NextServiceFilterCallback nextServiceFilterCallback) {
  // In this example, if authentication is already in progress we block the request
  // until authentication is complete to avoid unnecessary authentications as
  // a result of HTTP status code 401.
  // If authentication was detected, add the token to the request.
  waitAndUpdateRequestToken(request);

  // Send the request down the filter chain, retrying up to 5 times on 401 response codes.
  ListenableFuture<ServiceFilterResponse> future = null;
  ServiceFilterResponse response = null;
  int responseCode = 401;
  for (int i = 0; (i < 5) && (responseCode == 401); i++) {
    future = nextServiceFilterCallback.onNext(request);
    try {
      response = future.get();
      responseCode = response.getStatus().getStatusCode();
    } catch (InterruptedException e) {
      e.printStackTrace();
    } catch (ExecutionException e) {
      if (e.getCause().getClass() == MobileServiceException.class) {
        MobileServiceException mEx = (MobileServiceException) e.getCause();
        responseCode = mEx.getResponse().getStatus().getStatusCode();
        if (responseCode == 401) {
          // Two simultaneous requests from independent threads could get HTTP status 401.
          // Protecting against that right here so multiple authentication requests are
          // not set up to run on the UI thread.
          // We only want to authenticate once. Requests should just wait and retry
          // with the new token.
          if (mAtomicAuthenticatingFlag.compareAndSet(false, true)) {
            // Authenticate on the UI thread
            runOnUiThread(
                new Runnable() {
                  @Override
                  public void run() {
                    // Force a token refresh during authentication.
                    authenticate(true);
                  }
                });
          }

          // Wait for authentication to complete, then update the token in the request.
          waitAndUpdateRequestToken(request);
          mAtomicAuthenticatingFlag.set(false);
        }
      }
    }
  }
  return future;
}
public void shutdown(int exitStatusCode) {
  try {
    if (_systemConfig != null) {
      ListenableFuture<Void> closeResult = _systemConfig.closeAsync();
      closeResult.get(30000L, TimeUnit.MILLISECONDS);
    }
  } catch (TimeoutException | InterruptedException | ExecutionException e) {
    LOGGER.warn("Attempting to cleanly shutdown took too long, exiting immediately");
  } finally {
    cleanUp(exitStatusCode);
  }
}
@Override
public JsonObject visit(UpdateOperation operation) throws Throwable {
  MobileServiceJsonTable table = this.getRemoteTable(operation.getTableName());
  table.setSystemProperties(getSystemProperties(this.mItem));

  ListenableFuture<JsonObject> future = table.update(this.mItem);

  try {
    return future.get();
  } catch (ExecutionException ex) {
    throw ex.getCause();
  }
}
@Override
public void run() {
  int numSourcesDrained = m_drainedSources.incrementAndGet();
  exportLog.info(
      "Drained source in generation "
          + m_timestamp
          + " with "
          + numSourcesDrained
          + " of "
          + m_numSources
          + " drained");

  if (numSourcesDrained == m_numSources) {
    if (m_partitionLeaderZKName.isEmpty()) {
      m_onAllSourcesDrained.run();
    } else {
      ListenableFuture<?> removeLeadership =
          m_childUpdatingThread.submit(
              new Runnable() {
                @Override
                public void run() {
                  for (Map.Entry<Integer, String> entry : m_partitionLeaderZKName.entrySet()) {
                    m_zk.delete(
                        m_leadersZKPath + "/" + entry.getKey() + "/" + entry.getValue(),
                        -1,
                        new AsyncCallback.VoidCallback() {
                          @Override
                          public void processResult(int rc, String path, Object ctx) {
                            KeeperException.Code code = KeeperException.Code.get(rc);
                            if (code != KeeperException.Code.OK) {
                              VoltDB.crashLocalVoltDB(
                                  "Error in export leader election giving up leadership of " + path,
                                  true,
                                  KeeperException.create(code));
                            }
                          }
                        },
                        null);
                  }
                }
              },
              null);
      removeLeadership.addListener(m_onAllSourcesDrained, MoreExecutors.sameThreadExecutor());
    }
  }
}
/**
 * Creates a zip file of the metadata and recorded artifacts and stores it in the artifact cache.
 */
public void performUploadToArtifactCache(
    ImmutableSet<RuleKey> ruleKeys, ArtifactCache artifactCache, BuckEventBus eventBus)
    throws InterruptedException {
  // Skip all of this if caching is disabled. Although artifactCache.store() will be a noop,
  // building up the zip is wasted I/O.
  if (!artifactCache.isStoreSupported()) {
    return;
  }

  ArtifactCompressionEvent.Started started =
      ArtifactCompressionEvent.started(ArtifactCompressionEvent.Operation.COMPRESS, ruleKeys);
  eventBus.post(started);

  final Path zip;
  ImmutableSet<Path> pathsToIncludeInZip = ImmutableSet.of();
  ImmutableMap<String, String> buildMetadata;
  try {
    pathsToIncludeInZip = getRecordedDirsAndFiles();
    zip =
        Files.createTempFile(
            "buck_artifact_" + MoreFiles.sanitize(buildTarget.getShortName()), ".zip");
    buildMetadata = getBuildMetadata();
    projectFilesystem.createZip(pathsToIncludeInZip, zip, ImmutableMap.<Path, String>of());
  } catch (IOException e) {
    eventBus.post(
        ConsoleEvent.info(
            "Failed to create zip for %s containing:\n%s",
            buildTarget,
            Joiner.on('\n').join(ImmutableSortedSet.copyOf(pathsToIncludeInZip))));
    e.printStackTrace();
    return;
  } finally {
    eventBus.post(ArtifactCompressionEvent.finished(started));
  }

  // Store the artifact, including any additional metadata.
  ListenableFuture<Void> storeFuture =
      artifactCache.store(ruleKeys, buildMetadata, BorrowablePath.notBorrowablePath(zip));
  storeFuture.addListener(
      new Runnable() {
        @Override
        public void run() {
          try {
            Files.deleteIfExists(zip);
          } catch (IOException e) {
            throw new RuntimeException(e);
          }
        }
      },
      directExecutor());
}
public static void main(String[] args) {
  ExecutorService executorService = Executors.newFixedThreadPool(1);
  ListeningExecutorService executor = MoreExecutors.listeningDecorator(executorService);

  ListenableFuture<String> future =
      executor.submit(
          () -> {
            Thread.sleep(1000);
            return "Task completed";
          });

  future.addListener(() -> runOnCompletion(), executor);
}
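addListener only signals completion and gives no access to the result, so a common companion is Futures.addCallback. Below is a hedged, self-contained variant of the example above; the class name is arbitrary, runOnCompletion() is replaced with an inline callback, and the executor shutdown is added so the sample can exit — none of these are part of the original snippet.

import java.util.concurrent.Executors;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class ListenableFutureCallbackExample {
  public static void main(String[] args) {
    ListeningExecutorService executor =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));

    ListenableFuture<String> future =
        executor.submit(
            () -> {
              Thread.sleep(1000);
              return "Task completed";
            });

    // directExecutor() runs the callback on the thread that completes the future,
    // so it still fires even though the pool is shut down right after submission.
    Futures.addCallback(
        future,
        new FutureCallback<String>() {
          @Override
          public void onSuccess(String result) {
            System.out.println(result);
          }

          @Override
          public void onFailure(Throwable t) {
            t.printStackTrace();
          }
        },
        MoreExecutors.directExecutor());

    executor.shutdown();
  }
}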
@Test
public void testAsyncOutOfOrder() throws Exception {
  ListenableFuture<String> getFuture;
  ListenableFuture<Void> putFuture;
  try (DelayedMap.AsyncClient client =
      createClient(DelayedMap.AsyncClient.class, syncServer).get()) {
    getFuture = client.getValueSlowly(500, TimeUnit.MILLISECONDS, "testKey");
    putFuture = client.putValueSlowly(250, TimeUnit.MILLISECONDS, "testKey", "testValue");
    assertEquals(getFuture.get(1, TimeUnit.SECONDS), "testValue");
    putFuture.get(1, TimeUnit.SECONDS);
  }
}
@Override
public JsonObject visit(InsertOperation operation) throws Throwable {
  MobileServiceJsonTable table = this.getRemoteTable(operation.getTableName());
  table.setSystemProperties(EnumSet.allOf(MobileServiceSystemProperty.class));

  JsonObject item = removeSystemProperties(this.mItem);

  ListenableFuture<JsonObject> future = table.insert(item);

  try {
    return future.get();
  } catch (ExecutionException ex) {
    throw ex.getCause();
  }
}
@Override
protected final synchronized void shutDown() {
  if (stopMessageFuture == null) {
    stopMessageFuture =
        ZKMessages.sendMessage(
            zkClient, getMessagePrefix(), SystemMessages.stopApplication(), State.TERMINATED);
  }

  // Cancel all pending message futures.
  for (ListenableFuture<?> future : messageFutures) {
    future.cancel(true);
  }

  doShutDown();
}
@Override
public ListenableFuture<?> isBlocked() {
  if (blocked != NOT_BLOCKED && blocked.isDone()) {
    blocked = NOT_BLOCKED;
  }
  return blocked;
}
// Call a method that will sleep for longer than the channel timeout, and expect a
// TimeoutException (wrapped in a TTransportException)
@Test
public void testAsyncTimeout() throws Exception {
  try (DelayedMap.AsyncClient client =
      createClient(DelayedMap.AsyncClient.class, syncServer).get()) {
    ListenableFuture<String> getFuture =
        client.getValueSlowly(1500, TimeUnit.MILLISECONDS, "testKey");
    try {
      getFuture.get(2000, TimeUnit.MILLISECONDS);
      fail("Call did not timeout as expected");
    } catch (java.util.concurrent.TimeoutException e) {
      fail("Waited too long for channel timeout");
    } catch (ExecutionException e) {
      checkTransportException(e.getCause(), ReadTimeoutException.class);
    }
  }
}
private ClientResponse extractWebSocketUriFromRpc(final String methodName)
    throws ExecutionException, InterruptedException, UnsupportedEncodingException {
  ListenableFuture<ClientResponse> clientFuture =
      restconfClient.get(
          ResourceUri.STREAM.getPath() + "/" + encodeUri(this.streamInfo.getIdentifier()),
          MediaType.APPLICATION_XML,
          new Function<ClientResponse, ClientResponse>() {
            @Override
            public ClientResponse apply(final ClientResponse clientResponse) {
              return clientResponse;
            }
          });
  return clientFuture.get();
}
protected synchronized <T> ListenableFuture<T> register(ListenableFuture<T> future) {
  if (state.get() == State.CANCELED) {
    future.cancel(true);
  } else {
    asyncTasks.add(future);
  }
  return future;
}
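A hypothetical companion to register(...) above, sketching the cancellation side under the assumption that state is an atomic reference to the same State enum and asyncTasks is the collection used above; this is not taken from the original class.

protected synchronized void cancelAllTasks() {
  // Once the state is CANCELED, register(...) cancels any future handed to it,
  // so nothing registered after this point can slip through uncancelled.
  state.set(State.CANCELED);
  for (ListenableFuture<?> task : asyncTasks) {
    task.cancel(true);
  }
  asyncTasks.clear();
}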