Example 1
  @Test
  public void testNotificationExecutor() throws Exception {
    ListeningExecutorService executor = SingletonHolder.getDefaultNotificationExecutor();
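    // Reflectively unwrap the decorator's private "delegate" field so the test can
    // inspect the backing ThreadPoolExecutor's queue size and active thread count.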
    ThreadPoolExecutor tpExecutor =
        (ThreadPoolExecutor)
            setAccessible(executor.getClass().getDeclaredField("delegate")).get(executor);
    BlockingQueue<Runnable> queue = tpExecutor.getQueue();

    for (int idx = 0; idx < 100; idx++) {
      final int idx2 = idx;
      logger.info("Adding {}\t{}\t{}", idx, queue.size(), tpExecutor.getActiveCount());
      executor.execute(
          new Runnable() {

            @Override
            public void run() {
              logger.info("in  {}", idx2);
              try {
                Thread.sleep(1000);
              } catch (InterruptedException e) {
                e.printStackTrace();
              }
              logger.info("out {}", idx2);
            }
          });
    }
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.SECONDS);
  }
Example 2
 public void shutDown(boolean awaitTermination) {
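   // getAndSet makes the shutdown idempotent: only the first caller proceeds,
   // concurrent or repeated calls see true and return immediately.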
   if (!isShutdown.getAndSet(true)) {
     if (awaitTermination) {
       if (isDebugEnabled) {
         LOG.debug(
             "awaitTermination: "
                 + awaitTermination
                 + " shutting down task executor"
                 + " service gracefully");
       }
       shutdownExecutor(waitQueueExecutorService);
       shutdownExecutor(executorService);
       shutdownExecutor(executionCompletionExecutorService);
     } else {
       if (isDebugEnabled) {
         LOG.debug(
             "awaitTermination: "
                 + awaitTermination
                 + " shutting down task executor"
                 + " service immediately");
       }
       executorService.shutdownNow();
       waitQueueExecutorService.shutdownNow();
       executionCompletionExecutorService.shutdownNow();
     }
   }
 }
Example 3
  @Nonnull
  @VisibleForTesting
  public Map<Path, String> parsePackageStrings(
      @Nonnull List<Path> absolutePaths, @Nonnull List<Path> executionPaths) throws Exception {

    ListeningExecutorService executorService =
        MoreExecutors.listeningDecorator(
            Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()));

    Map<Path, ListenableFuture<String>> futures = Maps.newHashMap();
    for (int i = 0; i < absolutePaths.size(); i++) {
      final Path source = executionPaths.get(i);
      futures.put(
          absolutePaths.get(i),
          executorService.submit(
              new Callable<String>() {
                @Override
                public String call() throws Exception {
                  return getDeclaredPackageOfJavaFile(source);
                }
              }));
    }
    Map<Path, String> map = Maps.newHashMap();
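    // Block on each future in turn; a failed parse surfaces as an ExecutionException
    // from get(), which propagates through this method's throws clause.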
    for (Entry<Path, ListenableFuture<String>> entry : futures.entrySet()) {
      String value = entry.getValue().get();
      if (value != null) {
        map.put(entry.getKey(), value);
      }
    }
    return map;
  }
Example 4
  public void collectQueueStats(
      final Azure azure,
      final String namespaceName,
      Set<String> queueNames,
      Set<String> queueStats,
      int queueThreads)
      throws TaskExecutionException {

    final Map<String, String> valueMap = createValueMap(azure, namespaceName, QUEUES, queueStats);

    ListeningExecutorService queueService =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(queueThreads));
    final CountDownLatch countDownLatch = new CountDownLatch(queueNames.size());
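    // The latch is decremented exactly once per queue, from onSuccess or onFailure,
    // so the await() at the end returns only after every callback has fired.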

    try {
      for (final String queueName : queueNames) {
        try {
          ListenableFuture getQueueNames =
              queueService.submit(
                  new Runnable() {
                    public void run() {
                      getStatsFromAzure(azure, namespaceName, valueMap, queueName, QUEUES);
                    }
                  });

          Futures.addCallback(
              getQueueNames,
              new FutureCallback<Void>() {
                public void onSuccess(Void nothing) {
                  countDownLatch.countDown();
                }

                public void onFailure(Throwable thrown) {
                  countDownLatch.countDown();
                  logger.error(
                      "Unable to get stats for queue ["
                          + queueName
                          + "] in namespace ["
                          + namespaceName
                          + "]",
                      thrown);
                }
              });

        } catch (Exception e) {
          logger.error(
              "Error getting stats for queue [" + namespaceName + "/" + queueName + "]", e);
          throw new TaskExecutionException(
              "Error getting stats for queue [" + namespaceName + "/" + queueName + "]", e);
        }
      }
    } finally {
      queueService.shutdown();
    }
    try {
      countDownLatch.await();
    } catch (InterruptedException e) {
      logger.error("Unable to wait till getting the queue stats", e);
    }
  }
Example 5
  public void run() throws IOException {
    Preconditions.checkState(inputManager != null, "InputManager must be configured");

    ListenableFuture<Void> runShuffleFuture = schedulerExecutor.submit(schedulerCallable);
    Futures.addCallback(runShuffleFuture, new SchedulerFutureCallback());
    // Shut down this executor once this task and the callback complete.
    schedulerExecutor.shutdown();
  }
Example 6
  private void migrate(Set<Integer> scheduleIds, PreparedStatement query, final Bucket bucket) {
    log.info("Migrating " + bucket + " data for " + scheduleIds.size() + " schedules");

    CountDownLatch latch = new CountDownLatch(scheduleIds.size());
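    // One latch count per schedule id, released either when a schedule is skipped
    // (already migrated) or when its migration callback completes.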
    MigrationProgressLogger progressLogger = new MigrationProgressLogger(bucket, latch);
    File logFile = new File(dataDir, bucket + "_migration.log");
    MigrationLog migrationLog = null;
    try {
      migrationLog = new MigrationLog(logFile);
      Set<Integer> migratedScheduleIds = migrationLog.read();
      threadPool.submit(progressLogger);
      for (Integer scheduleId : scheduleIds) {
        if (migratedScheduleIds.contains(scheduleId)) {
          log.debug(
              bucket
                  + " data for schedule id "
                  + scheduleId
                  + " has already been migrated. It will "
                  + "be skipped.");
          latch.countDown();
        } else {
          readPermits.acquire();
          ResultSet resultSet = session.execute(query.bind(scheduleId));
          ListenableFuture<Integer> migrationFuture =
              threadPool.submit(new MetricsWriter(scheduleId, bucket, resultSet));
          Futures.addCallback(
              migrationFuture, migrationFinished(scheduleId, bucket, latch, migrationLog));
        }
      }
      latch.await();
      log.info("Finished migrating " + bucket + " data");
    } catch (InterruptedException e) {
      threadPool.shutdownNow();
      throw new RuntimeException(
          "Migration of "
              + bucket
              + " data did not complete due to an interrupt. The "
              + "upgrade will have to be run again to finish the migration",
          e);
    } catch (IOException e) {
      throw new RuntimeException(
          "Migration of "
              + bucket
              + " data did not complete due to an I/O error. The "
              + "upgrade will have to be run again to finish the migration",
          e);
    } finally {
      progressLogger.finished();
      try {
        // migrationLog can still be null here if its constructor threw above
        if (migrationLog != null) {
          migrationLog.close();
        }
      } catch (IOException e) {
        log.warn("There was an error closing " + logFile.getAbsolutePath(), e);
      }
    }
  }
Example 7
 @Override
 public void close() {
   source.close();
   cachedAuths.invalidateAll();
   executorService.shutdown();
   try {
     executorService.awaitTermination(5, TimeUnit.SECONDS);
   } catch (InterruptedException e) {
     ConsoleLogger.writeStackTrace(e);
   }
 }
Example 8
  private void runCallables(Set<Target> unfinished) {
    Set<PipelineCallable<?>> oldCallables = activePipelineCallables;
    activePipelineCallables = Sets.newHashSet();
    List<PipelineCallable<?>> callablesToRun = Lists.newArrayList();
    List<PipelineCallable<?>> failedCallables = Lists.newArrayList();
    for (PipelineCallable<?> pipelineCallable : oldCallables) {
      if (Sets.intersection(allPipelineCallables.get(pipelineCallable), unfinished).isEmpty()) {
        if (pipelineCallable.runSingleThreaded()) {
          try {
            if (pipelineCallable.call() != PipelineCallable.Status.SUCCESS) {
              failedCallables.add(pipelineCallable);
            }
          } catch (Throwable t) {
            pipelineCallable.setMessage(t.getLocalizedMessage());
            failedCallables.add(pipelineCallable);
          }
        } else {
          callablesToRun.add(pipelineCallable);
        }
      } else {
        // Still need to run this one
        activePipelineCallables.add(pipelineCallable);
      }
    }

    ListeningExecutorService es = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
    try {
      List<Future<PipelineCallable.Status>> res = es.invokeAll(callablesToRun);
      for (int i = 0; i < res.size(); i++) {
        if (res.get(i).get() != PipelineCallable.Status.SUCCESS) {
          failedCallables.add((PipelineCallable) callablesToRun.get(i));
        }
      }
    } catch (Throwable t) {
      t.printStackTrace();
      failedCallables.addAll((List) callablesToRun);
    } finally {
      es.shutdownNow();
    }

    if (!failedCallables.isEmpty()) {
      LOG.error("{} callable failure(s) occurred:", failedCallables.size());
      for (PipelineCallable<?> c : failedCallables) {
        LOG.error("{} : {}", c.getName(), c.getMessage());
      }
      status.set(Status.FAILED);
      set(PipelineResult.EMPTY);
      doneSignal.countDown();
    }
  }
Example 9
  public static void main(String[] args) {

    ExecutorService executorService = Executors.newFixedThreadPool(1);
    ListeningExecutorService executor = MoreExecutors.listeningDecorator(executorService);

    ListenableFuture<String> future =
        executor.submit(
            () -> {
              Thread.sleep(1000);
              return "Task completed";
            });

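    // addListener only signals completion; unlike Futures.addCallback, the Runnable
    // receives no result, so runOnCompletion would have to read the future itself.
    // Note the executor is never shut down here, so a standalone run will not exit.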
    future.addListener(() -> runOnCompletion(), executor);
  }
Example 10
 @Override
 public boolean shutdown(Thread mainSiteThread) throws InterruptedException {
   if (m_faultDistributor != null) {
     m_faultDistributor.shutDown();
   }
   VoltDB.wasCrashCalled = false;
   VoltDB.crashMessage = null;
   m_snapshotCompletionMonitor.shutdown();
   m_es.shutdown();
   m_es.awaitTermination(1, TimeUnit.DAYS);
   m_statsAgent.shutdown();
   m_hostMessenger.shutdown();
   return true;
 }
Example 11
 @Override
 public void process(final WatchedEvent event) {
   try {
     if (!m_shutdown.get()) {
       m_es.submit(new ChildEvent(event));
     }
   } catch (RejectedExecutionException e) {
     if (m_es.isShutdown()) {
       return;
     } else {
       org.voltdb.VoltDB.crashLocalVoltDB(
           "Unexpected rejected execution exception", false, e);
     }
   }
 }
Example 12
  @Override
  public Iterable<Provisionable> listImages() {
    // fetch images..
    ListenableFuture<List<Image>> images =
        executorService.submit(
            new Callable<List<Image>>() {

              @Override
              public List<Image> call() throws Exception {
                logger.trace("<< fetching images..");
                // Filter HDD types only, since JClouds doesn't have a concept of "CD-ROM" anyway
                Iterable<Image> filteredImages =
                    Iterables.filter(
                        api.imageApi().getList(new DepthOptions().depth(1)),
                        new Predicate<Image>() {

                          @Override
                          public boolean apply(Image image) {
                            return image.properties().imageType() == Image.Type.HDD;
                          }
                        });
                logger.trace(">> images fetched.");

                return ImmutableList.copyOf(filteredImages);
              }
            });
    // and snapshots at the same time
    ListenableFuture<List<Snapshot>> snapshots =
        executorService.submit(
            new Callable<List<Snapshot>>() {

              @Override
              public List<Snapshot> call() throws Exception {
                logger.trace("<< fetching snapshots");
                List<Snapshot> remoteSnapshots =
                    api.snapshotApi().list(new DepthOptions().depth(1));
                logger.trace(">> snapshots feched.");

                return remoteSnapshots;
              }
            });

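    // getUnchecked blocks until each future completes and rethrows failures unchecked,
    // so the image and snapshot requests effectively run in parallel and join here.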
    ImmutableList.Builder<Provisionable> provisionables = ImmutableList.builder();
    provisionables.addAll(getUnchecked(images));
    provisionables.addAll(getUnchecked(snapshots));

    return provisionables.build();
  }
Example 13
  private ListenableFuture<MacAddress> waitForMacAddress(final Ipv4Address gatewayIp) {

    return arpWatcherWall.submit(
        new Callable<MacAddress>() {

          @Override
          public MacAddress call() throws Exception {
            for (int cycle = 0; cycle < WAIT_CYCLES; cycle++) {
              // Sleep before checking mac address, so meanwhile ARP request packets
              // will be broadcasted on the bridge.
              Thread.sleep(PER_CYCLE_WAIT_DURATION);
              ArpResolverMetadata arpResolverMetadata = gatewayToArpMetadataMap.get(gatewayIp);
              if (arpResolverMetadata != null
                  && arpResolverMetadata.getGatewayMacAddress() != null) {
                if (!arpResolverMetadata.isPeriodicRefresh()) {
                  resetFlowToRemove(gatewayIp, arpResolverMetadata);
                  return gatewayToArpMetadataMap.remove(gatewayIp).getGatewayMacAddress();
                }
                return arpResolverMetadata.getGatewayMacAddress();
              }
            }
            return null;
          }
        });
  }
Example 14
  private void trySchedule(final TaskWrapper taskWrapper) throws RejectedExecutionException {

    synchronized (lock) {
      boolean canFinish = taskWrapper.getTaskRunnerCallable().canFinish();
      LOG.info("Attempting to execute {}", taskWrapper);
      ListenableFuture<TaskRunner2Result> future =
          executorService.submit(taskWrapper.getTaskRunnerCallable());
      taskWrapper.setIsInWaitQueue(false);
      FutureCallback<TaskRunner2Result> wrappedCallback =
          createInternalCompletionListener(taskWrapper);
      // Callback on a separate thread so that when a task completes, the thread in the main queue
      // is actually available for execution and will not potentially result in a RejectedExecution
      Futures.addCallback(future, wrappedCallback, executionCompletionExecutorService);

      if (isDebugEnabled) {
        LOG.debug(
            "{} scheduled for execution. canFinish={}", taskWrapper.getRequestId(), canFinish);
      }

      // Only tasks that cannot finish immediately are pre-emptable. In other words, if all
      // inputs to the task are not ready yet, the task is eligible for pre-emption.
      if (enablePreemption) {
        if (!canFinish) {
          if (isInfoEnabled) {
            LOG.info(
                "{} is not finishable. Adding it to pre-emption queue", taskWrapper.getRequestId());
          }
          addToPreemptionQueue(taskWrapper);
        }
      }
    }
    numSlotsAvailable.decrementAndGet();
  }
Example 15
  @VisibleForTesting
  protected ListenableFuture<PaymentProtocol.Ack> sendPayment(
      final URL url, final Protos.Payment payment) {
    return executor.submit(
        new Callable<PaymentProtocol.Ack>() {
          @Override
          public PaymentProtocol.Ack call() throws Exception {
            HttpURLConnection connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("POST");
            connection.setRequestProperty("Content-Type", PaymentProtocol.MIMETYPE_PAYMENT);
            connection.setRequestProperty("Accept", PaymentProtocol.MIMETYPE_PAYMENTACK);
            connection.setRequestProperty(
                "Content-Length", Integer.toString(payment.getSerializedSize()));
            connection.setUseCaches(false);
            connection.setDoInput(true);
            connection.setDoOutput(true);

            // Send request.
            DataOutputStream outStream = new DataOutputStream(connection.getOutputStream());
            payment.writeTo(outStream);
            outStream.flush();
            outStream.close();

            // Get response.
            Protos.PaymentACK paymentAck = Protos.PaymentACK.parseFrom(connection.getInputStream());
            return PaymentProtocol.parsePaymentAck(paymentAck);
          }
        });
  }
Example 16
  @Override
  public ListenableFuture<RpcResult<Void>> startListening() {

    ClientResponse response = null;
    try {
      response = extractWebSocketUriFromRpc(this.streamInfo.getIdentifier());
    } catch (ExecutionException e) {
      logger.trace("Execution exception while extracting stream name {}", e);
      throw new IllegalStateException(e);
    } catch (InterruptedException e) {
      logger.trace("InterruptedException while extracting stream name {}", e);
      throw new IllegalStateException(e);
    } catch (UnsupportedEncodingException e) {
      logger.trace("UnsupportedEncodingException while extracting stream name {}", e);
      throw new IllegalStateException(e);
    }
    boolean success = true;
    if (response.getStatus() != STATUS_OK) {
      success = false;
    }

    final RestRpcResult rpcResult = new RestRpcResult(success, response.getLocation());
    createWebsocketClient(response.getLocation());

    ListenableFuture<RpcResult<Void>> future =
        pool.submit(
            new Callable<RpcResult<Void>>() {
              @Override
              public RpcResult<Void> call() {
                return rpcResult;
              }
            });

    return future;
  }
Example 17
  @Override
  public void read(
      final PentoQuery query,
      final Distribution distribution,
      final PentoCallback handler,
      final OperationContext operationContext) {
    List<PentoStoreWorker> workers = readWorkerFactory.getWorkers(operationContext, distribution);
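    // Fan out one callable per worker; results and failures are relayed asynchronously
    // to the caller-supplied handler rather than being collected in this method.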
    for (PentoStoreWorker worker : workers) {
      Callable callable = worker.execute(query);
      ListenableFuture future = ioExecutor.submit(callable);

      Futures.addCallback(
          future,
          new FutureCallback() {
            public void onSuccess(Object response) {
              handler.callback(response);
            }

            public void onFailure(Throwable thrown) {
              logger.error(thrown.getMessage());
              handler.error(thrown);
            }
          });
    }
  }
Example 18
  @VisibleForTesting
  protected ListenableFuture<Ack> sendPayment(final URL url, final Protos.Payment payment) {
    return executor.submit(
        new Callable<Ack>() {
          @Override
          public Ack call() throws Exception {
            HttpURLConnection connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("POST");
            connection.setRequestProperty("Content-Type", "application/bitcoin-payment");
            connection.setRequestProperty("Accept", "application/bitcoin-paymentack");
            connection.setRequestProperty(
                "Content-Length", Integer.toString(payment.getSerializedSize()));
            connection.setUseCaches(false);
            connection.setDoInput(true);
            connection.setDoOutput(true);

            // Send request.
            DataOutputStream outStream = new DataOutputStream(connection.getOutputStream());
            payment.writeTo(outStream);
            outStream.flush();
            outStream.close();

            // Get response.
            InputStream inStream = connection.getInputStream();
            Protos.PaymentACK.Builder paymentAckBuilder =
                Protos.PaymentACK.newBuilder().mergeFrom(inStream);
            Protos.PaymentACK paymentAck = paymentAckBuilder.build();
            String memo = null;
            if (paymentAck.hasMemo()) memo = paymentAck.getMemo();
            return new Ack(memo);
          }
        });
  }
Example 19
  @After
  public void tearDown() {
    reporter.stop();
    reporter.report();

    executor.shutdownNow();
  }
Example 20
  private List<CassandraPartition> getCassandraPartitions(
      CassandraTable table, TupleDomain<ColumnHandle> tupleDomain) {
    if (tupleDomain.isNone()) {
      return ImmutableList.of();
    }

    Set<List<Comparable<?>>> partitionKeysSet = getPartitionKeysSet(table, tupleDomain);

    // An empty filter means all partitions.
    if (partitionKeysSet.isEmpty()) {
      return schemaProvider.getAllPartitions(table);
    }

    ImmutableList.Builder<ListenableFuture<List<CassandraPartition>>> getPartitionResults =
        ImmutableList.builder();
    for (List<Comparable<?>> partitionKeys : partitionKeysSet) {
      getPartitionResults.add(
          executor.submit(() -> schemaProvider.getPartitions(table, partitionKeys)));
    }

    ImmutableList.Builder<CassandraPartition> partitions = ImmutableList.builder();
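    // Wait for the partition lookups in submission order; an interrupt restores the
    // thread's interrupt flag before propagating, while task failures become PrestoExceptions.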
    for (ListenableFuture<List<CassandraPartition>> result : getPartitionResults.build()) {
      try {
        partitions.addAll(result.get());
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw Throwables.propagate(e);
      } catch (ExecutionException e) {
        throw new PrestoException(EXTERNAL, "Error fetching cassandra partitions", e);
      }
    }

    return partitions.build();
  }
Example 21
 /**
  * Attempt to complete submitted requests on close so that as much information is recorded as
  * possible. This aids debugging when close is called during exception processing.
  */
 @Override
 public void close() {
   requestService.shutdown();
   try {
     if (!requestService.awaitTermination(timeoutMillis, TimeUnit.MILLISECONDS)) {
       LOG.warn(
           Joiner.on(System.lineSeparator())
               .join(
                   "A BlockingHttpEndpoint failed to shut down within the standard timeout.",
                   "Your build might have succeeded, but some requests made to ",
                   this.url + " were probably lost.",
                   "Here's some debugging information:",
                   requestService.toString()));
     }
   } catch (InterruptedException e) {
     Thread.currentThread().interrupt();
   }
 }
Example 22
 @Override
 public void close() {
   LOG.info("Closing LlapIoImpl..");
   if (buddyAllocatorMXBean != null) {
     MBeans.unregister(buddyAllocatorMXBean);
     buddyAllocatorMXBean = null;
   }
   executor.shutdownNow();
 }
Example 23
  /**
   * Perform an asynchronous parse of the MultiBit.org Atom XML feed using JAXB
   *
   * @return A listenable future containing the result of the asynchronous read
   */
  public static ListenableFuture<AtomFeed> parseMultiBitOrgFeed() {

    ListeningExecutorService executorService =
        SafeExecutors.newSingleThreadExecutor("atom-feed-check");

    return executorService.submit(
        new Callable<AtomFeed>() {
          @Override
          public AtomFeed call() throws Exception {

            URL url = new URL("https://multibit.org/atom.xml");
            URLConnection connection = url.openConnection();

            try (InputStream is = connection.getInputStream()) {
              return JAXB.unmarshal(is, AtomFeed.class);
            }
          }
        });
  }
Example 24
  @After
  public void tearDown() {
    if (executor != null) {
      executor.shutdownNow();
    }

    if (futureExecutor != null) {
      futureExecutor.shutdownNow();
    }
  }
Example 25
 public void shutdown() {
   if (executor != null) {
     executor.shutdownNow();
   }
   if (taskReporter != null) {
     taskReporter.shutdown();
   }
   if (umbilical != null) {
     RPC.stopProxy(umbilical);
   }
 }
Example 26
  @Test(enabled = true, dependsOnMethods = "testCompareSizes")
  public void testConcurrentUseOfComputeServiceToCreateNodes() throws Exception {
    final long timeoutMs = 20 * 60 * 1000;
    List<String> groups = Lists.newArrayList();
    List<ListenableFuture<NodeMetadata>> futures = Lists.newArrayList();
    ListeningExecutorService userExecutor =
        context
            .utils()
            .injector()
            .getInstance(
                Key.get(ListeningExecutorService.class, Names.named(PROPERTY_USER_THREADS)));

    try {
      for (int i = 0; i < 2; i++) {
        final int groupNum = i;
        final String group = "twin" + groupNum;
        groups.add(group);
        template = buildTemplate(client.templateBuilder());
        template.getOptions().inboundPorts(22, 8080).blockOnPort(22, 300 + groupNum);
        ListenableFuture<NodeMetadata> future =
            userExecutor.submit(
                new Callable<NodeMetadata>() {
                  public NodeMetadata call() throws Exception {
                    NodeMetadata node =
                        getOnlyElement(client.createNodesInGroup(group, 1, template));
                    getAnonymousLogger().info("Started node " + node.getId());
                    return node;
                  }
                });
        futures.add(future);
      }

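      // allAsList fails fast if either node creation fails; the finally block still
      // destroys whatever nodes were started in both groups.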
      ListenableFuture<List<NodeMetadata>> compoundFuture = Futures.allAsList(futures);
      compoundFuture.get(timeoutMs, TimeUnit.MILLISECONDS);

    } finally {
      for (String group : groups) {
        client.destroyNodesMatching(inGroup(group));
      }
    }
  }
Example 27
  @Override
  public void startStream() {

    client = getFacebookClient();

    if (configuration.getInfo() != null && configuration.getInfo().size() > 0) {
      for (String id : configuration.getInfo()) {
        executor.submit(new FacebookFeedPollingTask(this, id));
      }
      running.set(true);
    } else {
      try {
        String id = client.getMe().getId();
        executor.submit(new FacebookFeedPollingTask(this, id));
        running.set(true);
      } catch (FacebookException e) {
        LOGGER.error(e.getMessage());
        running.set(false);
      }
    }
  }
Example 28
 public ListenableFuture<?> execute(
     final BlockingQueue<String> queue, final CountDownLatch remainingQueries) {
   return executor.submit(
       new Runnable() {
         @Override
         public void run() {
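            // Drain the shared queue until it is empty, counting down the latch
            // once per executed query so callers can wait for overall completion.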
           for (String query = queue.poll(); query != null; query = queue.poll()) {
             execute(query);
             remainingQueries.countDown();
           }
         }
       });
 }
Example 29
 private static ListenableFuture<PaymentSession> fetchPaymentRequest(
     final URI uri, final boolean verifyPki, @Nullable final String trustStorePath) {
   return executor.submit(
       new Callable<PaymentSession>() {
         @Override
         public PaymentSession call() throws Exception {
           HttpURLConnection connection = (HttpURLConnection) uri.toURL().openConnection();
           connection.setRequestProperty("Accept", "application/bitcoin-paymentrequest");
           connection.setUseCaches(false);
           Protos.PaymentRequest paymentRequest =
               Protos.PaymentRequest.parseFrom(connection.getInputStream());
           return new PaymentSession(paymentRequest, verifyPki, trustStorePath);
         }
       });
 }
Example 30
    @AfterClass
    public void tearDownClass() throws Throwable {
      listeningExecutorService.shutdown();
      FileUtils.deleteDirectory(storageDirectory);

      if (sourceCloudStore != null) {
        sourceCloudStore.stop();
        sourceCloudStore = null;
      }

      if (destinationCloudStore != null) {
        destinationCloudStore.stop();
        destinationCloudStore = null;
      }
    }