  /**
   * Reads S3 upload metadata from {@code filename}. If an uploader already exists for the
   * metadata, its finish state is toggled when it differs; otherwise a new uploader is created
   * and registered.
   *
   * @return true if an uploader was created or its finish state changed, false otherwise
   */
  private boolean handleNewOrModifiedS3Metadata(Path filename) throws IOException {
    Optional<S3UploadMetadata> maybeMetadata = readS3UploadMetadata(filename);

    if (!maybeMetadata.isPresent()) {
      return false;
    }

    final S3UploadMetadata metadata = maybeMetadata.get();

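    // If an uploader already tracks this metadata, only a change in its finished flag matters here.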
    SingularityS3Uploader existingUploader = metadataToUploader.get(metadata);

    if (existingUploader != null) {
      if (existingUploader.getUploadMetadata().isFinished() == metadata.isFinished()) {
        LOG.debug(
            "Ignoring metadata {} from {} because there was already one present",
            metadata,
            filename);
        return false;
      } else {
        LOG.info(
            "Toggling uploader {} finish state to {}", existingUploader, metadata.isFinished());

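        // The finished flag changed: move the uploader into or out of the expiring set.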
        if (metadata.isFinished()) {
          expiring.add(existingUploader);
        } else {
          expiring.remove(existingUploader);
        }

        return true;
      }
    }

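    // No uploader exists for this metadata yet; create and register one.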
    try {
      metrics.getUploaderCounter().inc();

      SingularityS3Uploader uploader =
          new SingularityS3Uploader(s3Service, metadata, fileSystem, metrics, filename);

      if (metadata.isFinished()) {
        expiring.add(uploader);
      }

      LOG.info("Created new uploader {}", uploader);

      metadataToUploader.put(metadata, uploader);
      uploaderLastHadFilesAt.put(uploader, System.currentTimeMillis());
      return true;
    } catch (Throwable t) {
      LOG.info("Ignoring metadata {} because uploader couldn't be created", metadata, t);
      return false;
    }
  }

  /**
   * Runs each registered uploader once on the executor, expires uploaders that are finished or
   * have gone too long without new files, and cleans up their tracking state and metadata files.
   *
   * @return the total number of uploads performed across all uploaders
   */
  private int checkUploads() {
    if (metadataToUploader.isEmpty()) {
      return 0;
    }

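    // Files picked up this cycle; the set is concurrent because it is shared by every upload task submitted below.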
    final Set<Path> filesToUpload =
        Collections.newSetFromMap(
            new ConcurrentHashMap<Path, Boolean>(
                metadataToUploader.size() * 2, 0.75f, metadataToUploader.size()));
    final Map<SingularityS3Uploader, Future<Integer>> futures =
        Maps.newHashMapWithExpectedSize(metadataToUploader.size());

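    // Submit one upload task per uploader; a failed task logs the error and reports zero files.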
    for (final SingularityS3Uploader uploader : metadataToUploader.values()) {
      futures.put(
          uploader,
          executorService.submit(
              new Callable<Integer>() {
                @Override
                public Integer call() {
                  Integer returnValue = 0;
                  try {
                    returnValue = uploader.upload(filesToUpload);
                  } catch (Throwable t) {
                    metrics.error();
                    LOG.error("Error while processing uploader {}", uploader, t);
                  }
                  return returnValue;
                }
              }));
    }

    LOG.info("Waiting on {} future(s)", futures.size());
    int totalUploads = 0;

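    // Collect each task's result; uploaders with no files that are finished or idle past the configured limit are marked for expiry.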
    final long now = System.currentTimeMillis();
    final Set<SingularityS3Uploader> expiredUploaders =
        Sets.newHashSetWithExpectedSize(metadataToUploader.size());

    // TODO cancel/timeouts?
    for (Entry<SingularityS3Uploader, Future<Integer>> uploaderToFuture : futures.entrySet()) {
      final SingularityS3Uploader uploader = uploaderToFuture.getKey();
      try {
        final int foundFiles = uploaderToFuture.getValue().get();

        if (foundFiles == 0) {
          final long durationSinceLastFile = now - uploaderLastHadFilesAt.get(uploader);
          final boolean isFinished = isFinished(uploader);

          if ((durationSinceLastFile > configuration.getStopCheckingAfterMillisWithoutNewFile())
              || isFinished) {
            LOG.info("Expiring uploader {}", uploader);
            expiredUploaders.add(uploader);
          } else {
            LOG.trace(
                "Not expiring uploader {}, duration {} (max {}), isFinished: {})",
                uploader,
                durationSinceLastFile,
                configuration.getStopCheckingAfterMillisWithoutNewFile(),
                isFinished);
          }
        } else {
          LOG.trace("Updating uploader {} last expire time", uploader);
          uploaderLastHadFilesAt.put(uploader, now);
        }

        totalUploads += foundFiles;
      } catch (Throwable t) {
        metrics.error();
        LOG.error("Waiting on future", t);
      }
    }

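    // Drop expired uploaders from tracking and delete their metadata files.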
    for (SingularityS3Uploader expiredUploader : expiredUploaders) {
      metrics.getUploaderCounter().dec();

      metadataToUploader.remove(expiredUploader.getUploadMetadata());
      uploaderLastHadFilesAt.remove(expiredUploader);
      expiring.remove(expiredUploader);

      try {
        Files.delete(expiredUploader.getMetadataPath());
      } catch (IOException e) {
        LOG.warn("Couldn't delete {}", expiredUploader.getMetadataPath(), e);
      }
    }

    return totalUploads;
  }