@Override
  public void schedulePush(final PullMessageTask task) {
    if (log.isDebugEnabled()) {
      log.debug(
          "Schedule push(correlation id: {}) for client: {}",
          task.getCorrelationId(),
          task.getTpg());
    }

    if (m_stopped.get() || (task.isWithOffset() && task.getStartOffset() == null)) {
      response(task, null, null);
    } else {
      m_scheduledThreadPool.submit(
          new Runnable() {
            @Override
            public void run() {
              executeTask(
                  task,
                  new ExponentialSchedulePolicy( //
                      m_config.getLongPollingCheckIntervalBaseMillis(), //
                      m_config.getLongPollingCheckIntervalMaxMillis()));
            }
          });
    }
  }
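A minimal sketch of what an exponential schedule policy like the one above might look like, assuming it doubles the wait from a base interval up to a cap; the class below is illustrative, not the project's actual ExponentialSchedulePolicy:

  // Illustrative backoff: the interval doubles from the base up to the max on each call.
  class ExponentialBackoff {
    private final long baseMillis;
    private final long maxMillis;
    private long current;

    ExponentialBackoff(long baseMillis, long maxMillis) {
      this.baseMillis = baseMillis;
      this.maxMillis = maxMillis;
      this.current = baseMillis;
    }

    // Returns the next wait interval, doubling until it reaches the cap.
    long nextIntervalMillis() {
      long interval = current;
      current = Math.min(current * 2, maxMillis);
      return interval;
    }

    // Resets the interval, e.g. after a successful check.
    void reset() {
      current = baseMillis;
    }
  }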
 private Future<Object> exec(
     final Map<String, Object> userCtx,
     final ITemplate tmpl,
     final String template,
     final File file,
     final Object... args) {
   return scheduler.submit(
       new Callable<Object>() {
         @Override
         public Object call() throws Exception {
           try {
             engine.prepare(userCtx);
              ITemplate t = tmpl;
              if (null == t) {
                if (null != template) {
                  t = engine.getTemplate(template, args);
                } else if (null != file) {
                  t = engine.getTemplate(file, args);
                } else {
                  throw new NullPointerException("one of tmpl, template or file must be non-null");
                }
              }
             return t.__setSecureCode(code).render();
           } catch (Exception e) {
             return e;
           }
         }
       });
 }
Example #3
  private void registerInactiveTask(final IJobKey jobKey, final String taskId, long timeRemaining) {

    LOG.fine("Prune task " + taskId + " in " + timeRemaining + " ms.");
    executor.schedule(
        new Runnable() {
          @Override
          public void run() {
            LOG.info("Pruning expired inactive task " + taskId);
            deleteTasks(ImmutableSet.of(taskId));
          }
        },
        timeRemaining,
        TimeUnit.MILLISECONDS);

    executor.submit(
        new Runnable() {
          @Override
          public void run() {
            Iterable<IScheduledTask> inactiveTasks =
                Storage.Util.fetchTasks(storage, jobHistoryQuery(jobKey));
            int numInactiveTasks = Iterables.size(inactiveTasks);
            int tasksToPrune = numInactiveTasks - settings.perJobHistoryGoal;
            // a positive tasksToPrune already implies the history goal is exceeded
            if (tasksToPrune > 0) {
              Set<String> toPrune =
                  FluentIterable.from(Tasks.LATEST_ACTIVITY.sortedCopy(inactiveTasks))
                      .filter(safeToDelete)
                      .limit(tasksToPrune)
                      .transform(Tasks::id)
                      .toSet();
              deleteTasks(toPrune);
            }
          }
        });
  }
Example #4
 public void scheduleImmediateUpdate() {
   if (!reportStatus.isInProgress()) {
     executorService.submit(new DredgeTask());
     LOGGER.trace(
         PwmConstants.REPORTING_SESSION_LABEL,
         "submitted new ldap dredge task to executorService");
   }
 }
Example #5
 public void run(Runnable runnable) {
    // If we are already on the client thread, run the task inline instead of re-submitting it.
   if (runningOnClientThread()) {
     runnable.run();
   } else {
     service.submit(errorHandling(runnable));
   }
 }
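A minimal sketch of how runningOnClientThread() might be implemented, assuming service is a single-threaded executor whose worker thread is captured at creation time; the names below are illustrative:

  // Illustrative only: capture the single worker thread via the ThreadFactory,
  // then compare the current thread against it.
  private final AtomicReference<Thread> clientThread = new AtomicReference<>();
  private final ExecutorService service =
      Executors.newSingleThreadExecutor(
          r -> {
            Thread t = new Thread(r, "client");
            clientThread.set(t);
            return t;
          });

  private boolean runningOnClientThread() {
    return Thread.currentThread() == clientThread.get();
  }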
  public void start() {
    exec.submit(
        new Runnable() {

          public void run() {
            doScheduleNextCheck();
          }
        });
  }
Example #7
  private void addToScheduler(Runnable r) {
    if (gaEnabled) {
      synchronized (schedulerLock) {
        initializeSchedulerIfNeeded();

        scheduler.submit(r);
      }
    }
  }
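A plausible shape for initializeSchedulerIfNeeded(), given that every caller already holds schedulerLock; this is a sketch, not the project's actual code:

  // Illustrative: lazily create the executor; no extra synchronization is
  // needed because callers always hold schedulerLock.
  private void initializeSchedulerIfNeeded() {
    if (scheduler == null) {
      scheduler = Executors.newSingleThreadExecutor();
    }
  }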
 @ViewChanged
 public void onViewChange(final ViewChangedEvent e) {
   executorService.submit(
       new Callable<Void>() {
         public Void call() {
           cleanupLeaverTransactions(e.getNewMembers());
           return null;
         }
       });
 }
 @Test(expected = SafeScheduledExecutorServiceRethrowsException.class)
 public void testSubmitReturnFutureThrowsException() throws Throwable {
   ScheduledExecutorService executorService = new SafeScheduledExecutorService(1, "test");
   Future<?> future = executorService.submit(new RunnableWhichThrows());
   try {
     future.get();
   } catch (ExecutionException e) {
     throw e.getCause();
   }
 }
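The test relies on the JDK contract that an exception thrown inside a submitted task surfaces from Future.get() wrapped in an ExecutionException. A self-contained, plain-JDK illustration of that contract:

import java.util.concurrent.*;

// Plain-JDK demonstration of the contract the test above exercises.
public class FutureExceptionDemo {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    Runnable failing =
        () -> {
          throw new IllegalStateException("boom");
        };
    Future<?> future = pool.submit(failing);
    try {
      future.get();
    } catch (ExecutionException e) {
      // prints: java.lang.IllegalStateException: boom
      System.out.println(e.getCause());
    } finally {
      pool.shutdown();
    }
  }
}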
Example #10
 // Fail 3 times then succeed
 CompletableFuture<Boolean> connect() {
   CompletableFuture<Boolean> future = new CompletableFuture<>();
   executor.submit(
       () -> {
          if (failures.getAndIncrement() < 3) {
            future.completeExceptionally(new RuntimeException());
          } else {
            future.complete(true);
          }
       });
   return future;
 }
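A hypothetical caller for the connect() stub above, retrying until the returned future completes successfully; connectWithRetry is illustrative and not part of the original test:

  // Illustrative caller: retries connect() until it succeeds or attempts run out.
  boolean connectWithRetry(int maxAttempts) throws InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return connect().get();
      } catch (ExecutionException e) {
        // the future completed exceptionally; fall through and retry
      }
    }
    return false;
  }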
Example #11
 public void flush(long fileId) {
   final Future<Void> future = commitExecutor.submit(new FileFlushTask(fileId));
   try {
     future.get();
    } catch (InterruptedException e) {
      // restore the interrupt flag; Thread.interrupted() would clear it
      Thread.currentThread().interrupt();
      throw new OException("File flush was interrupted", e);
   } catch (Exception e) {
     throw new OException("File flush was abnormally terminated", e);
   }
 }
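The submit-then-get shape above (and in removeCachedPages below) can be factored into one helper. A sketch, assuming OException is the project's wrapper exception; everything else here is hypothetical:

  // Illustrative refactor of the submit-and-wait pattern.
  private <T> T submitAndWait(Callable<T> task, String description) {
    try {
      return commitExecutor.submit(task).get();
    } catch (InterruptedException e) {
      // restore the interrupt flag so callers can still observe the interruption
      Thread.currentThread().interrupt();
      throw new OException(description + " was interrupted", e);
    } catch (Exception e) {
      throw new OException(description + " was abnormally terminated", e);
    }
  }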
Example #12
  private void doCheck() {
    exec.submit(
        new Runnable() {

          public void run() {
            doScheduleNextCheck();
          }
        });

    lockManager.acquireLock();
  }
Example #13
 private void removeCachedPages(long fileId) {
   Future<Void> future = commitExecutor.submit(new RemoveFilePagesTask(fileId));
   try {
     future.get();
    } catch (InterruptedException e) {
      // restore the interrupt flag; Thread.interrupted() would clear it
      Thread.currentThread().interrupt();
      throw new OException("File data removal was interrupted", e);
   } catch (Exception e) {
     throw new OException("File data removal was abnormally terminated", e);
   }
 }
Example #14
 /**
  * Submits a task to run in the logic service.
  *
  * @param runnable The runnable.
  */
 public void submitLogic(final Runnable runnable) {
   logicService.submit(
       new Runnable() {
         public void run() {
           try {
             runnable.run();
           } catch (Throwable t) {
             World.getWorld().handleError(t);
           }
         }
       });
 }
Example #15
  @Override
  public void init(PwmApplication pwmApplication) throws PwmException {
    status = STATUS.OPENING;
    this.pwmApplication = pwmApplication;

    if (pwmApplication.getApplicationMode() == PwmApplication.MODE.READ_ONLY) {
      LOGGER.debug(
          PwmConstants.REPORTING_SESSION_LABEL,
          "application mode is read-only, will remain closed");
      status = STATUS.CLOSED;
      return;
    }

    if (pwmApplication.getLocalDB() == null
        || LocalDB.Status.OPEN != pwmApplication.getLocalDB().status()) {
      LOGGER.debug(PwmConstants.REPORTING_SESSION_LABEL, "LocalDB is not open, will remain closed");
      status = STATUS.CLOSED;
      return;
    }

    if (!pwmApplication.getConfig().readSettingAsBoolean(PwmSetting.REPORTING_ENABLE)) {
      LOGGER.debug(
          PwmConstants.REPORTING_SESSION_LABEL,
          "reporting module is not enabled, will remain closed");
      status = STATUS.CLOSED;
      clear();
      return;
    }

    try {
      userCacheService = new UserCacheService();
      userCacheService.init(pwmApplication);
    } catch (Exception e) {
      LOGGER.error(PwmConstants.REPORTING_SESSION_LABEL, "unable to init cache service");
      status = STATUS.CLOSED;
      return;
    }

    settings = ReportSettings.readSettingsFromConfig(pwmApplication.getConfig());
    summaryData = ReportSummaryData.newSummaryData(settings.getTrackDays());

    executorService =
        Executors.newSingleThreadScheduledExecutor(
            Helper.makePwmThreadFactory(
                Helper.makeThreadName(pwmApplication, this.getClass()) + "-", true));

    LOGGER.debug("report service started");

    executorService.submit(new InitializationTask());

    status = STATUS.OPEN;
  }
  /** Called after a new set of properties has been configured on the service */
  private void doUpdate(Map<String, Object> properties) {
    try {

      for (String s : properties.keySet()) {
        s_logger.info("Update - " + s + ": " + properties.get(s));
      }

      // cancel the current worker handle if one is active
      if (m_handle != null) {
        m_handle.cancel(true);
      }

      String topic = (String) m_properties.get(PUBLISH_TOPIC_PROP_NAME);
      if (topic != null) {
        try {
          m_cloudClient.unsubscribe(topic);
        } catch (KuraException e) {
          s_logger.error("Unsubscribe failed", e);
        }
      }

      closePort();

      m_properties.clear();
      m_properties.putAll(properties);

      openPort();

      // re-read the topic from the updated properties before re-subscribing
      topic = (String) m_properties.get(PUBLISH_TOPIC_PROP_NAME);

      Boolean cloudEcho = (Boolean) m_properties.get(SERIAL_CLOUD_ECHO_PROP_NAME);
      // guard against a missing property: unboxing a null Boolean would throw an NPE
      if (Boolean.TRUE.equals(cloudEcho) && topic != null) {
        try {
          m_cloudClient.subscribe(topic, 0);
        } catch (KuraException e) {
          s_logger.error("Subscribe failed", e);
        }
      }

      m_handle =
          m_worker.submit(
              new Runnable() {
                @Override
                public void run() {
                  doSerial();
                }
              });
    } catch (Throwable t) {
      s_logger.error("Unexpected Throwable", t);
    }
  }
 @Override
 public GlobalLock getLock(String lockKey, long aliveMill) {
   GlobalLock lock = locks.get(lockKey);
   if (lock != null) {
     return lock;
   }
    // yaogao000: add the "gl_" prefix to the global lock's key
   lock = new GlobalLock(globalLockRedisTemplate, GLOBAL_LOCK_KEY_PREFIX + lockKey, aliveMill);
   GlobalLock oldLock = locks.putIfAbsent(lockKey, lock);
   if (oldLock != null) {
     lock = oldLock;
   } else {
     scheduler.submit(newCleanLockTask(lockKey, aliveMill));
   }
   return lock;
 }
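On Java 8+, the putIfAbsent race above can also be expressed with computeIfAbsent. A sketch, assuming locks is a ConcurrentHashMap<String, GlobalLock>; the method name is illustrative:

  // Java 8+ alternative: the cleanup task is scheduled only by the thread that
  // actually creates the lock. Keep the mapping function cheap, since
  // ConcurrentHashMap holds an internal lock while it runs.
  public GlobalLock getLockJava8(String lockKey, long aliveMill) {
    return locks.computeIfAbsent(
        lockKey,
        key -> {
          scheduler.submit(newCleanLockTask(key, aliveMill));
          return new GlobalLock(
              globalLockRedisTemplate, GLOBAL_LOCK_KEY_PREFIX + key, aliveMill);
        });
  }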
 @Override
 public void process(WatchedEvent event) {
   LOG.debug("Received watched event {} from zookeeper based ledger manager.", event);
   if (Event.EventType.None == event.getType()) {
     if (Event.KeeperState.Expired == event.getState()) {
       LOG.info("ZooKeeper client expired on ledger manager.");
       Set<Long> keySet = new HashSet<Long>(listeners.keySet());
       for (Long lid : keySet) {
         scheduler.submit(new ReadLedgerMetadataTask(lid));
         LOG.info("Re-read ledger metadata for {} after zookeeper session expired.", lid);
       }
     }
     return;
   }
   String path = event.getPath();
   if (null == path) {
     return;
   }
   final long ledgerId;
   try {
     ledgerId = getLedgerId(event.getPath());
   } catch (IOException ioe) {
     LOG.info("Received invalid ledger path {} : ", event.getPath(), ioe);
     return;
   }
   switch (event.getType()) {
     case NodeDeleted:
       Set<LedgerMetadataListener> listenerSet = listeners.remove(ledgerId);
       if (null != listenerSet) {
         LOG.debug("Removed ledger metadata listeners on ledger {} : {}", ledgerId, listenerSet);
       } else {
         LOG.debug(
             "No ledger metadata listeners to remove from ledger {} after it's deleted.",
             ledgerId);
       }
       break;
     case NodeDataChanged:
       new ReadLedgerMetadataTask(ledgerId).run();
       break;
     default:
       LOG.debug("Received event {} on {}.", event.getType(), event.getPath());
       break;
   }
 }
  private synchronized void startModeSwitching(
      Runnable switcher, CancellationHandle cancellationHandle) {
    if (modeSwitcherFuture != null) {
      // Cancel any delayed previous switching
      this.cancellationHandle.cancel();
      // Wait for it to actually stop what it was doing
      try {
        modeSwitcherFuture.get();
      } catch (UnableToCopyStoreFromOldMasterException
          | InconsistentlyUpgradedClusterException
          | UnavailableMembersException e) {
        throw e;
      } catch (Exception e) {
        msgLog.warn("Got exception from cancelled task", e);
      }
    }

    this.cancellationHandle = cancellationHandle;
    modeSwitcherFuture = modeSwitcherExecutor.submit(switcher);
  }
Example #20
  public Future<?> store(final long fileId, final long pageIndex, final OCachePointer dataPointer) {
    Future<?> future = null;

    synchronized (syncObject) {
      // pages are grouped 16 per write group (pageIndex / 16)
      final GroupKey groupKey = new GroupKey(fileId, pageIndex >>> 4);
      lockManager.acquireLock(Thread.currentThread(), groupKey, OLockManager.LOCK.EXCLUSIVE);
      try {
        WriteGroup writeGroup = writeGroups.get(groupKey);
        if (writeGroup == null) {
          writeGroup = new WriteGroup(System.currentTimeMillis());
          writeGroups.put(groupKey, writeGroup);
        }

        // slot of this page within its 16-page group (pageIndex % 16)
        int entryIndex = (int) (pageIndex & 15);

        if (writeGroup.pages[entryIndex] == null) {
          dataPointer.incrementReferrer();
          writeGroup.pages[entryIndex] = dataPointer;

          cacheSize.incrementAndGet();
        } else {
          if (!writeGroup.pages[entryIndex].equals(dataPointer)) {
            writeGroup.pages[entryIndex].decrementReferrer();
            dataPointer.incrementReferrer();

            writeGroup.pages[entryIndex] = dataPointer;
          }
        }

        writeGroup.recencyBit = true;
      } finally {
        lockManager.releaseLock(Thread.currentThread(), groupKey, OLockManager.LOCK.EXCLUSIVE);
      }

      if (cacheSize.get() > cacheMaxSize) {
        future = commitExecutor.submit(new PeriodicFlushTask());
      }

      return future;
    }
  }
 @Test
 public void testCorePoolSize() throws Exception {
   long start = System.currentTimeMillis();
   ScheduledExecutorService executorService = new SafeScheduledExecutorService(1, "test");
   Runnable sleepy =
       new Runnable() {
         @Override
         public void run() {
           Utils.checkState(ThreadUtil.sleep(110), "Interrupted while sleeping");
         }
       };
   Future<?> future1 = executorService.submit(sleepy);
   Future<?> future2 = executorService.submit(sleepy);
   future1.get();
   future2.get();
   long elapsed = System.currentTimeMillis() - start;
    // with a single core thread the two 110 ms tasks must run serially, so the
    // elapsed time should exceed 220 ms; the generous upper bound guards against hangs
   Assert.assertTrue(
       "Elapsed was " + elapsed + " expected between 200 and 30000",
       elapsed > 200 && elapsed < 30000);
 }
  /** Test to make sure we only execute the task once no matter how many times we schedule it. */
  public void testExecuteOnlyOnce() throws InterruptedException {
    ScheduledExecutorService ex = Executors.newScheduledThreadPool(1);

    MyConflationListener listener = new MyConflationListener();
    OneTaskOnlyExecutor decorator = new OneTaskOnlyExecutor(ex, listener);

    final CountDownLatch latch = new CountDownLatch(1);
    ex.submit(
        new Callable<Object>() {

          public Object call() throws Exception {
            latch.await();
            return null;
          }
        });

    final AtomicInteger counter = new AtomicInteger();

    Runnable increment =
        new Runnable() {

          public void run() {
            counter.incrementAndGet();
          }
        };

    for (int i = 0; i < 50; i++) {
      decorator.schedule(increment, 0, TimeUnit.SECONDS);
    }

    assertEquals(0, counter.get());
    latch.countDown();
    ex.shutdown();
    ex.awaitTermination(60, TimeUnit.SECONDS);
    assertEquals(1, counter.get());
    assertEquals(49, listener.getDropCount());
  }
 public void run() {
   _numberOfUsers = _dao.getNumberOfUsers();
    for (int i = 0; i < _numberOfUsers; i += FETCH_LIMIT) {
      _executor.submit(new Go(i));
    }
 }
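A plausible shape for the Go task: each instance processes one page of users starting at its offset. The getUsers and process methods and the User type below are hypothetical, inferred from the pagination loop above:

  // Hypothetical page-processing task; _dao.getUsers and process() are
  // illustrative names, not the project's actual API.
  private class Go implements Runnable {
    private final int offset;

    Go(int offset) {
      this.offset = offset;
    }

    @Override
    public void run() {
      for (User user : _dao.getUsers(offset, FETCH_LIMIT)) {
        process(user);
      }
    }
  }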
  /**
   * This logic is only executed if we have to retry redelivery asynchronously, which has to be
   * done from the callback.
   *
   * <p>The logic is therefore a bit different from the synchronous <tt>processErrorHandler</tt>
   * method, which can use a loop-based redelivery technique. This means the two methods must be
   * kept in <b>sync</b> in terms of logic.
   */
  protected void processAsyncErrorHandler(
      final Exchange exchange, final AsyncCallback callback, final RedeliveryData data) {
    // can we still run
    if (!isRunAllowed()) {
      if (exchange.getException() == null) {
        exchange.setException(new RejectedExecutionException());
      }
      callback.done(data.sync);
      return;
    }

    // did previous processing cause an exception?
    boolean handle = shouldHandleException(exchange);
    if (handle) {
      handleException(exchange, data);
    }

    // compute if we are exhausted or not
    boolean exhausted = isExhausted(exchange, data);
    if (exhausted) {
      Processor target = null;
      boolean deliver = true;

      // the unit of work may have an optional callback associated that we need to leverage
      SubUnitOfWorkCallback uowCallback = exchange.getUnitOfWork().getSubUnitOfWorkCallback();
      if (uowCallback != null) {
        // signal to the callback we are exhausted
        uowCallback.onExhausted(exchange);
        // do not deliver to the failure processor as it's been handled by the callback instead
        deliver = false;
      }

      if (deliver) {
        // should deliver to failure processor (either from onException or the dead letter channel)
        target = data.failureProcessor != null ? data.failureProcessor : data.deadLetterProcessor;
      }
      // we should always invoke deliverToFailureProcessor as it prepares, logs and does a fair
      // bit of work for exhausted exchanges (it's only the target processor which may be null if
      // handled by a savepoint)
      deliverToFailureProcessor(target, exchange, data, callback);
      // we are breaking out
      return;
    }

    if (data.redeliveryCounter > 0) {
      // let the RedeliverTask hold the logic that tries to redeliver the Exchange; a scheduler
      // lets us execute it in the future, or immediately
      // Note: data.redeliverFromSync should be kept as-is, in case it was enabled previously,
      // to ensure the callback continues routing from where we left off
      AsyncRedeliveryTask task = new AsyncRedeliveryTask(exchange, callback, data);

      // calculate the redelivery delay
      data.redeliveryDelay =
          data.currentRedeliveryPolicy.calculateRedeliveryDelay(
              data.redeliveryDelay, data.redeliveryCounter);
      if (data.redeliveryDelay > 0) {
        // schedule the redelivery task
        if (log.isTraceEnabled()) {
          log.trace(
              "Scheduling redelivery task to run in {} millis for exchangeId: {}",
              data.redeliveryDelay,
              exchange.getExchangeId());
        }
        executorService.schedule(task, data.redeliveryDelay, TimeUnit.MILLISECONDS);
      } else {
        // execute the task immediately
        executorService.submit(task);
      }
    }
  }
  void runMirrorCommand(MirrorSettings settings, final Repository repository) {
    if (repositoryMetadataService.isEmpty(repository)) {
      return;
    }

    try {
      final String password = passwordEncryptor.decrypt(settings.password);
      final String authenticatedUrl =
          getAuthenticatedUrl(settings.mirrorRepoUrl, settings.username, password);

      executor.submit(
          new Callable<Void>() {

            int attempts = 0;

            @Override
            public Void call() throws Exception {
              try {
                GitScmCommandBuilder builder =
                    gitScm.getCommandBuilderFactory().builder(repository);
                PasswordHandler passwordHandler = getPasswordHandler(builder, password);

                // Call push command with the prune flag and refspecs for heads and tags
                // Do not use the mirror flag as pull-request refs are included
                String result =
                    builder
                        .command("push")
                        .argument("--prune") // this deletes locally deleted branches
                        .argument(authenticatedUrl)
                        .argument(
                            "--force") // Canonical repository should always take precedence over
                                       // mirror
                        .argument("+refs/heads/*:refs/heads/*") // Only mirror heads
                        .argument("+refs/tags/*:refs/tags/*") // and tags
                        .errorHandler(passwordHandler)
                        .exitHandler(passwordHandler)
                        .build(passwordHandler)
                        .call();

                logger.debug(
                    "MirrorRepositoryHook: postReceive completed with result '{}'.", result);

              } catch (Exception e) {
                if (++attempts >= MAX_ATTEMPTS) {
                  logger.error(
                      "Failed to mirror repository "
                          + repository.getName()
                          + " after "
                          + attempts
                          + " attempts.",
                      e);
                } else {
                  logger.warn(
                      "Failed to mirror repository "
                          + repository.getName()
                          + ", "
                          + "retrying in 1 minute (attempt {} of {}).",
                      attempts,
                      MAX_ATTEMPTS);
                  executor.schedule(this, 1, TimeUnit.MINUTES);
                }
              }

              return null;
            }
          });

    } catch (Exception e) {
      logger.error("MirrorRepositoryHook: Error running mirror hook", e);
    }
  }
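The retry above works because the anonymous Callable re-submits itself, so its attempts counter survives across runs. A generic, illustrative form of that pattern (submitWithRetry is hypothetical):

  // Generic self-rescheduling retry: the Callable keeps its own attempt counter
  // and re-submits itself on failure, preserving state across attempts.
  static <T> void submitWithRetry(
      final ScheduledExecutorService executor, final Callable<T> work, final int maxAttempts) {
    executor.submit(
        new Callable<T>() {
          int attempts = 0;

          @Override
          public T call() throws Exception {
            try {
              return work.call();
            } catch (Exception e) {
              if (++attempts < maxAttempts) {
                executor.schedule(this, 1, TimeUnit.MINUTES);
              }
              return null;
            }
          }
        });
  }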
Example #26
 @VisibleForTesting
 void runMonitor() throws ExecutionException, InterruptedException {
    Future<?> f = executor.submit(monitor);
   f.get();
 }
  /** flushes pending updates to disk */
  public synchronized void flush() {

    if (closed || activityStorage == null) {
      return;
    }
    final boolean flushDeletesNeeded = pendingDeletes.updates.size() > 0;
    final UpdateBatch<Update> batchToDelete = (flushDeletesNeeded) ? pendingDeletes : null;
    if (flushDeletesNeeded) {
      pendingDeletes = new UpdateBatch<Update>(activityConfig);
    }
    final boolean flushUpdatesNeeded =
        updateBatch.updates.size() > 0
            || versionComparator.compare(lastVersion, metadata.version) != 0;
    if (!flushUpdatesNeeded && !flushDeletesNeeded) {
      return;
    }
    final UpdateBatch<Update> batchToPersist = flushUpdatesNeeded ? updateBatch : null;
    final List<Runnable> underlyingFlushes = new ArrayList<Runnable>(valuesMap.size());
    // if we don't need to flush updates, keep the persisted version as it is
    final String version = flushUpdatesNeeded ? lastVersion : metadata.version;
    if (flushUpdatesNeeded) {
      updateBatch = new UpdateBatch<CompositeActivityStorage.Update>(activityConfig);
    }
    for (ActivityValues activityIntValues : valuesMap.values()) {
      underlyingFlushes.add(activityIntValues.prepareFlush());
    }
    executor.submit(
        new Runnable() {
          @Override
          public void run() {
            if (closed) {
              return;
            }
            if (flushUpdatesNeeded) {
              activityStorage.flush(batchToPersist.updates);
            }
            if (flushDeletesNeeded) {
              Collections.reverse(batchToDelete.updates);
              activityStorage.flush(batchToDelete.updates);
              synchronized (deletedIndexes) {
                for (Update update : batchToDelete.updates) {
                  deletedIndexes.add(update.index);
                }
              }
            }
            int count = 0;
            globalLock.readLock().lock();
            try {
              synchronized (deletedIndexes) {
                count = uidToArrayIndex.size() + deletedIndexes.size();
                currentDocumentsCounter.clear();
                currentDocumentsCounter.inc(uidToArrayIndex.size());
                reclaimedDocumentsCounter.clear();
                reclaimedDocumentsCounter.inc(deletedIndexes.size());
                logger.info(
                    "Flush compositeActivityValues. Documents = "
                        + uidToArrayIndex.size()
                        + ", Deletes = "
                        + deletedIndexes.size());
              }
            } finally {
              globalLock.readLock().unlock();
            }
            for (Runnable runnable : underlyingFlushes) {
              runnable.run();
            }
            metadata.update(version, count);
          }
        });
  }
  @Test
  public void testGracefulShutdown() throws Exception {
    ObjectMapper objectMapper = Jackson.newObjectMapper();
    Validator validator = Validation.buildDefaultValidatorFactory().getValidator();
    MetricRegistry metricRegistry = new MetricRegistry();
    Environment environment =
        new Environment(
            "test", objectMapper, validator, metricRegistry, ClassLoader.getSystemClassLoader());

    CountDownLatch requestReceived = new CountDownLatch(1);
    CountDownLatch shutdownInvoked = new CountDownLatch(1);

    environment.jersey().register(new TestResource(requestReceived, shutdownInvoked));

    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(3);
    final Server server = http.build(environment);

    ((AbstractNetworkConnector) server.getConnectors()[0]).setPort(0);

    ScheduledFuture<Void> cleanup =
        executor.schedule(
            new Callable<Void>() {
              @Override
              public Void call() throws Exception {
                if (!server.isStopped()) {
                  server.stop();
                }
                executor.shutdownNow();
                return null;
              }
            },
            5,
            TimeUnit.SECONDS);

    server.start();

    final int port = ((AbstractNetworkConnector) server.getConnectors()[0]).getLocalPort();

    Future<String> futureResult =
        executor.submit(
            new Callable<String>() {
              @Override
              public String call() throws Exception {
                URL url = new URL("http://localhost:" + port + "/app/test");
                URLConnection connection = url.openConnection();
                connection.connect();
                return CharStreams.toString(new InputStreamReader(connection.getInputStream()));
              }
            });

    requestReceived.await();

    Future<Void> serverStopped =
        executor.submit(
            new Callable<Void>() {
              @Override
              public Void call() throws Exception {
                server.stop();
                return null;
              }
            });

    Connector[] connectors = server.getConnectors();
    assertThat(connectors).isNotEmpty();
    assertThat(connectors[0]).isInstanceOf(NetworkConnector.class);
    NetworkConnector connector = (NetworkConnector) connectors[0];

    // wait for server to close the connectors
    while (true) {
      if (!connector.isOpen()) {
        shutdownInvoked.countDown();
        break;
      }
      Thread.sleep(5);
    }

    String result = futureResult.get();
    assertThat(result).isEqualTo("test");

    serverStopped.get();

    // cancel the cleanup future since everything succeeded
    cleanup.cancel(false);
    executor.shutdownNow();
  }