Example 1
 private static void logError(Throwable t, String format, Object... args) {
   if (isExpectedError(t)) {
     log.error(format + ": %s", ObjectArrays.concat(args, t));
   } else {
     log.error(t, format, args);
   }
 }
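These examples use printf-style format strings, consistent with io.airlift.log.Logger (the Logger.get factory appears in later examples). A minimal sketch of the context this method assumes; the enclosing class and the isExpectedError predicate are hypothetical:

 import com.google.common.collect.ObjectArrays;
 import io.airlift.log.Logger;

 public final class ErrorLogging {
   private static final Logger log = Logger.get(ErrorLogging.class);

   // Hypothetical predicate: errors expected during normal operation,
   // for which a full stack trace would be noise.
   private static boolean isExpectedError(Throwable t) {
     return t instanceof java.util.concurrent.CancellationException;
   }
 }

Guava's ObjectArrays.concat(args, t) appends the throwable as one extra format argument, so the expected-error branch logs only the throwable's toString() (via the trailing "%s") rather than a full stack trace.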
Example 2
  private static Map<String, Class<?>> defineClasses(
      List<ClassDefinition> classDefinitions, DynamicClassLoader classLoader) {
    ClassInfoLoader classInfoLoader =
        ClassInfoLoader.createClassInfoLoader(classDefinitions, classLoader);

    if (DUMP_BYTE_CODE_TREE) {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      DumpByteCodeVisitor dumpByteCode = new DumpByteCodeVisitor(new PrintStream(out));
      for (ClassDefinition classDefinition : classDefinitions) {
        dumpByteCode.visitClass(classDefinition);
      }
      System.out.println(new String(out.toByteArray(), StandardCharsets.UTF_8));
    }

    Map<String, byte[]> byteCodes = new LinkedHashMap<>();
    for (ClassDefinition classDefinition : classDefinitions) {
      ClassWriter cw = new SmartClassWriter(classInfoLoader);
      classDefinition.visit(cw);
      byte[] byteCode = cw.toByteArray();
      if (RUN_ASM_VERIFIER) {
        ClassReader reader = new ClassReader(byteCode);
        CheckClassAdapter.verify(reader, classLoader, true, new PrintWriter(System.out));
      }
      byteCodes.put(classDefinition.getType().getJavaClassName(), byteCode);
    }

    String dumpClassPath = DUMP_CLASS_FILES_TO.get();
    if (dumpClassPath != null) {
      for (Map.Entry<String, byte[]> entry : byteCodes.entrySet()) {
        File file =
            new File(
                dumpClassPath,
                ParameterizedType.typeFromJavaClassName(entry.getKey()).getClassName() + ".class");
        try {
          log.debug("ClassFile: " + file.getAbsolutePath());
          Files.createParentDirs(file);
          Files.write(entry.getValue(), file);
        } catch (IOException e) {
          log.error(e, "Failed to write generated class file to: %s" + file.getAbsolutePath());
        }
      }
    }
    if (DUMP_BYTE_CODE_RAW) {
      for (byte[] byteCode : byteCodes.values()) {
        ClassReader classReader = new ClassReader(byteCode);
        classReader.accept(
            new TraceClassVisitor(new PrintWriter(System.err)), ClassReader.SKIP_FRAMES);
      }
    }
    Map<String, Class<?>> classes = classLoader.defineClasses(byteCodes);
    try {
      for (Class<?> clazz : classes.values()) {
        Reflection.initialize(clazz);
      }
    } catch (VerifyError e) {
      throw new RuntimeException(e);
    }
    return classes;
  }
Example 3
 // TODO: make wait time configurable?
 public static void shutdownExecutor(ExecutorService executor, final String name) {
   executor.shutdown();
   try {
     log.info("Waiting for %s to shutdown", name);
     if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
       log.warn("%s did not shutdown properly", name);
     }
   } catch (InterruptedException e) {
     log.warn("Interrupted while waiting for %s to shutdown", name);
     Thread.currentThread().interrupt();
   }
 }
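A brief usage sketch (the pool and its name are arbitrary):

 ExecutorService workers = Executors.newFixedThreadPool(4);
 // ... submit work, then shut down with a bounded wait ...
 shutdownExecutor(workers, "worker pool");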
Example 4
 public static void closeChannels(ChannelGroup allChannels) {
   if (!allChannels.isEmpty()) {
     // TODO: add an option to drain connections and wait instead of killing them all
     try {
       log.info("Closing %s open client connections", allChannels.size());
       if (!allChannels.close().await(5, TimeUnit.SECONDS)) {
         log.warn("Failed to close all open client connections");
       }
     } catch (InterruptedException e) {
       log.warn("Interrupted while closing client connections");
       Thread.currentThread().interrupt();
     }
   }
 }
Example 5
 private void dropTable(SchemaTableName table) {
   try {
     metastoreClient.dropTable(table.getSchemaName(), table.getTableName());
   } catch (RuntimeException e) {
     Logger.get(getClass()).warn(e, "Failed to drop table: %s", table);
   }
 }
Example 6
  public ShardIterator(
      long tableId,
      boolean merged,
      Optional<Map<Integer, String>> bucketToNode,
      TupleDomain<RaptorColumnHandle> effectivePredicate,
      IDBI dbi) {
    this.merged = merged;
    this.bucketToNode = bucketToNode.orElse(null);
    ShardPredicate predicate = ShardPredicate.create(effectivePredicate, bucketToNode.isPresent());

    String sql;
    if (bucketToNode.isPresent()) {
      sql = "SELECT shard_uuid, bucket_number FROM %s WHERE %s ORDER BY bucket_number";
    } else {
      sql = "SELECT shard_uuid, node_ids FROM %s WHERE %s";
    }
    sql = format(sql, shardIndexTable(tableId), predicate.getPredicate());

    dao = onDemandDao(dbi, ShardDao.class);
    fetchNodes();

    try {
      connection = dbi.open().getConnection();
      statement = connection.prepareStatement(sql);
      enableStreamingResults(statement);
      predicate.bind(statement);
      log.debug("Running query: %s", statement);
      resultSet = statement.executeQuery();
    } catch (SQLException e) {
      close();
      throw metadataError(e);
    }
  }
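The enableStreamingResults helper is not shown; a plausible sketch, assuming the MySQL driver convention where a fetch size of Integer.MIN_VALUE requests row-by-row streaming instead of buffering the entire result set:

  private static void enableStreamingResults(Statement statement) throws SQLException {
    statement.setFetchSize(Integer.MIN_VALUE);
  }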
Example 7
 private void partialCancel() {
   try {
     client.cancelLeafStage(new Duration(1, SECONDS));
   } catch (RuntimeException e) {
     log.debug(e, "error canceling leaf stage");
   }
 }
Example 8
  public HiveMetadata(
      HiveConnectorId connectorId,
      HiveMetastore metastore,
      HdfsEnvironment hdfsEnvironment,
      DateTimeZone timeZone,
      boolean allowDropTable,
      boolean allowRenameTable,
      boolean allowCorruptWritesForTesting,
      HiveStorageFormat hiveStorageFormat,
      TypeManager typeManager) {
    this.connectorId = checkNotNull(connectorId, "connectorId is null").toString();

    this.allowDropTable = allowDropTable;
    this.allowRenameTable = allowRenameTable;
    this.allowCorruptWritesForTesting = allowCorruptWritesForTesting;

    this.metastore = checkNotNull(metastore, "metastore is null");
    this.hdfsEnvironment = checkNotNull(hdfsEnvironment, "hdfsEnvironment is null");
    this.timeZone = checkNotNull(timeZone, "timeZone is null");
    this.hiveStorageFormat = hiveStorageFormat;
    this.typeManager = checkNotNull(typeManager, "typeManager is null");

    if (!allowCorruptWritesForTesting && !timeZone.equals(DateTimeZone.getDefault())) {
      log.warn(
          "Hive writes are disabled. "
              + "To write data to Hive, your JVM timezone must match the Hive storage timezone. "
              + "Add -Duser.timezone=%s to your JVM arguments",
          timeZone.getID());
    }
  }
Example 9
public final class ResumableTasks {
  private static final Logger log = Logger.get(ResumableTasks.class);

  private ResumableTasks() {}

  public static void submit(Executor executor, ResumableTask task) {
    AtomicReference<Runnable> runnableReference = new AtomicReference<>();
    Runnable runnable =
        () -> {
          ResumableTask.TaskStatus status = safeProcessTask(task);
          if (!status.isFinished()) {
            status.getContinuationFuture().thenRun(() -> executor.execute(runnableReference.get()));
          }
        };
    runnableReference.set(runnable);
    executor.execute(runnable);
  }

  private static ResumableTask.TaskStatus safeProcessTask(ResumableTask task) {
    try {
      return task.process();
    } catch (Throwable t) {
      log.warn(t, "ResumableTask completed exceptionally");
      return ResumableTask.TaskStatus.finished();
    }
  }
}
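A usage sketch, assuming ResumableTask is a functional interface (only process() is abstract) and that TaskStatus offers a continuation factory alongside finished():

 Executor executor = Executors.newSingleThreadExecutor();
 ResumableTasks.submit(executor, () -> {
   // do one slice of work, then report completion
   return ResumableTask.TaskStatus.finished();
 });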
Example 10
  /** Remove completed queries after a waiting period */
  public void removeExpiredQueries() {
    List<QueryExecution> sortedQueries =
        IterableTransformer.on(queries.values())
            .select(compose(not(isNull()), endTimeGetter()))
            .orderBy(Ordering.natural().onResultOf(endTimeGetter()))
            .list();

    int toRemove = Math.max(sortedQueries.size() - maxQueryHistory, 0);
    DateTime oldestAllowedQuery = DateTime.now().minus(maxQueryAge.toMillis());

    for (QueryExecution queryExecution : sortedQueries) {
      try {
        DateTime endTime = queryExecution.getQueryInfo().getQueryStats().getEndTime();
        if ((endTime.isBefore(oldestAllowedQuery) || toRemove > 0) && isAbandoned(queryExecution)) {
          removeQuery(queryExecution.getQueryInfo().getQueryId());
          --toRemove;
        }
      } catch (RuntimeException e) {
        log.warn(
            e,
            "Error while inspecting age of query %s",
            queryExecution.getQueryInfo().getQueryId());
      }
    }
  }
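IterableTransformer is an internal helper; the same select-and-sort step written against java.util.stream would look roughly like this (an equivalent sketch, not the original code):

    List<QueryExecution> sortedQueries =
        queries.values().stream()
            .filter(query -> query.getQueryInfo().getQueryStats().getEndTime() != null)
            .sorted(Comparator.comparing(
                (QueryExecution query) -> query.getQueryInfo().getQueryStats().getEndTime()))
            .collect(Collectors.toList());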
Example 11
  public void runAllBenchmarks() throws IOException {
    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("test"));
    try {
      List<AbstractBenchmark> benchmarks = createBenchmarks(executor);

      LOGGER.info("=== Pre-running all benchmarks for JVM warmup ===");
      for (AbstractBenchmark benchmark : benchmarks) {
        benchmark.runBenchmark();
      }

      LOGGER.info("=== Actually running benchmarks for metrics ===");
      for (AbstractBenchmark benchmark : benchmarks) {
        try (OutputStream jsonOut =
                new FileOutputStream(
                    createOutputFile(
                        String.format(
                            "%s/json/%s.json", outputDirectory, benchmark.getBenchmarkName())));
            OutputStream jsonAvgOut =
                new FileOutputStream(
                    createOutputFile(
                        String.format(
                            "%s/json-avg/%s.json",
                            outputDirectory, benchmark.getBenchmarkName())));
            OutputStream csvOut =
                new FileOutputStream(
                    createOutputFile(
                        String.format(
                            "%s/csv/%s.csv", outputDirectory, benchmark.getBenchmarkName())));
            OutputStream odsOut =
                new FileOutputStream(
                    createOutputFile(
                        String.format(
                            "%s/ods/%s.json", outputDirectory, benchmark.getBenchmarkName())))) {
          benchmark.runBenchmark(
              new ForwardingBenchmarkResultWriter(
                  ImmutableList.of(
                      new JsonBenchmarkResultWriter(jsonOut),
                      new JsonAvgBenchmarkResultWriter(jsonAvgOut),
                      new SimpleLineBenchmarkResultWriter(csvOut),
                      new OdsBenchmarkResultWriter(
                          "presto.benchmark." + benchmark.getBenchmarkName(), odsOut))));
        }
      }
    } finally {
      executor.shutdownNow();
    }
  }
Example 12
 public void destroy() {
   try {
     split.close();
   } catch (RuntimeException e) {
     log.error(e, "Error closing split for task %s", taskHandle.getTaskId());
   }
   destroyed.set(true);
 }
Example 13
 private void unexport(ClusterMemoryPool pool) {
   try {
     String objectName =
         ObjectNames.builder(ClusterMemoryPool.class, pool.getId().toString()).build();
     exporter.unexport(objectName);
   } catch (JmxException e) {
     log.error(e, "Failed to unexport pool %s", pool.getId());
   }
 }
Example 14
  public void removeQuery(QueryId queryId) {
    Preconditions.checkNotNull(queryId, "queryId is null");

    log.debug("Remove query %s", queryId);

    QueryExecution query = queries.remove(queryId);
    if (query != null) {
      query.cancel();
    }
  }
Example 15
  public void requestFailed(Throwable reason) throws PrestoException {
    // cancellation is not a failure
    if (reason instanceof CancellationException) {
      return;
    }

    if (reason instanceof RejectedExecutionException) {
      throw new PrestoException(REMOTE_TASK_ERROR, reason);
    }

    // log failure message
    if (isExpectedError(reason)) {
      // don't print a stack trace for known errors
      log.warn("Error " + jobDescription + " %s: %s: %s", taskId, reason.getMessage(), taskUri);
    } else {
      log.warn(reason, "Error " + jobDescription + " %s: %s", taskId, taskUri);
    }

    // remember the first 10 errors
    if (errorsSinceLastSuccess.size() < 10) {
      errorsSinceLastSuccess.add(reason);
    }

    // fail the task, if we have more than X failures in a row and more than Y seconds have passed
    // since the last request
    if (backoff.failure()) {
      // it is weird to mark the task failed locally and then cancel the remote task, but there is
      // no way to tell a remote task that it is failed
      PrestoException exception =
          new PrestoException(
              TOO_MANY_REQUESTS_FAILED,
              format(
                  "%s (%s %s - %s failures, time since last success %s)",
                  WORKER_NODE_ERROR,
                  jobDescription,
                  taskUri,
                  backoff.getFailureCount(),
                  backoff.getTimeSinceLastSuccess().convertTo(SECONDS)));
      errorsSinceLastSuccess.forEach(exception::addSuppressed);
      throw exception;
    }
  }
Example 16
  @Override
  public void cancelQuery(QueryId queryId) {
    checkNotNull(queryId, "queryId is null");

    log.debug("Cancel query %s", queryId);

    QueryExecution query = queries.get(queryId);
    if (query != null) {
      query.cancel();
    }
  }
Example 17
    private void uploadObject() throws IOException {
      try {
        log.debug(
            "Starting upload for host: %s, key: %s, file: %s, size: %s",
            host, key, tempFile, tempFile.length());
        Upload upload = transferManager.upload(host, key, tempFile);

        if (log.isDebugEnabled()) {
          upload.addProgressListener(createProgressListener(upload));
        }

        upload.waitForCompletion();
        log.debug("Completed upload for host: %s, key: %s", host, key);
      } catch (AmazonClientException e) {
        throw new IOException(e);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException();
      }
    }
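The createProgressListener helper is not shown; a plausible sketch using the AWS SDK v1 progress API (the logged fields are an assumption):

      private ProgressListener createProgressListener(Upload upload) {
        return progressEvent -> log.debug(
            "Upload for key %s: %.1f%% transferred (%s bytes in last event)",
            key, upload.getProgress().getPercentTransferred(), progressEvent.getBytesTransferred());
      }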
Example 18
  @Override
  public void cancelStage(StageId stageId) {
    Preconditions.checkNotNull(stageId, "stageId is null");

    log.debug("Cancel stage %s", stageId);

    QueryExecution query = queries.get(stageId.getQueryId());
    if (query != null) {
      query.cancelStage(stageId);
    }
  }
Example 19
  public void installCodeCacheGcTrigger() {
    if (installed.getAndSet(true)) {
      return;
    }

    // Hack to work around bugs in java 8 (8u45+) related to code cache management.
    // See
    // http://openjdk.5641.n7.nabble.com/JIT-stops-compiling-after-a-while-java-8u45-td259603.html
    // for more info.
    MemoryPoolMXBean codeCacheMbean = findCodeCacheMBean();

    Thread gcThread =
        new Thread(
            () -> {
              while (!Thread.currentThread().isInterrupted()) {
                long used = codeCacheMbean.getUsage().getUsed();
                long max = codeCacheMbean.getUsage().getMax();

                if (used > 0.95 * max) {
                  log.error("Code Cache is more than 95% full. JIT may stop working.");
                }
                if (used > (max * collectionThreshold) / 100) {
                  // Due to an obscure bug in HotSpot (Java 8), the JIT stops
                  // compiling once the code cache fills up. Forcing a GC lets
                  // the code cache evictor make room before that happens.
                  log.info("Triggering GC to avoid Code Cache eviction bugs");
                  System.gc();
                }

                try {
                  TimeUnit.MILLISECONDS.sleep(interval.toMillis());
                } catch (InterruptedException e) {
                  Thread.currentThread().interrupt();
                }
              }
            });
    gcThread.setDaemon(true);
    gcThread.setName("Code-Cache-GC-Trigger");
    gcThread.start();
  }
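The findCodeCacheMBean helper is not shown; a plausible implementation using java.lang.management ("Code Cache" is the pool name on Java 8 without a segmented code cache; this sketch is an assumption, not the original):

  private static MemoryPoolMXBean findCodeCacheMBean() {
    for (MemoryPoolMXBean bean : ManagementFactory.getMemoryPoolMXBeans()) {
      if (bean.getName().equals("Code Cache")) {
        return bean;
      }
    }
    throw new IllegalStateException("Code Cache memory pool not found");
  }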
Example 20
public class JmxAgent {
  private final int registryPort;
  private final int serverPort;

  private static final Logger log = Logger.get(JmxAgent.class);
  private final JMXServiceURL url;

  @Inject
  public JmxAgent(JmxConfig config) throws IOException {
    if (config.getRmiRegistryPort() == null) {
      registryPort = NetUtils.findUnusedPort();
    } else {
      registryPort = config.getRmiRegistryPort();
    }

    if (config.getRmiServerPort() == null) {
      serverPort = NetUtils.findUnusedPort();
    } else {
      serverPort = config.getRmiServerPort();
    }

    try {
      // This is how the JDK JMX agent constructs its URL
      url = new JMXServiceURL("rmi", null, registryPort);
    } catch (MalformedURLException e) {
      // should not happen...
      throw new AssertionError(e);
    }
  }

  public JMXServiceURL getURL() {
    return url;
  }

  @PostConstruct
  public void start() throws IOException {
    // This is somewhat of a hack, but the jmx agent in Oracle/OpenJDK doesn't
    // have a programmatic API for starting it and controlling its parameters
    System.setProperty("com.sun.management.jmxremote", "true");
    System.setProperty("com.sun.management.jmxremote.port", Integer.toString(registryPort));
    System.setProperty("com.sun.management.jmxremote.rmi.port", Integer.toString(serverPort));
    System.setProperty("com.sun.management.jmxremote.authenticate", "false");
    System.setProperty("com.sun.management.jmxremote.ssl", "false");

    try {
      Agent.startAgent();
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }

    log.info("JMX Agent listening on %s:%s", url.getHost(), url.getPort());
  }
}
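A usage sketch: start the agent, then point a JMX client at its URL (assumes JmxConfig has usable defaults; JMXConnectorFactory and MBeanServerConnection are standard JMX API):

 JmxAgent agent = new JmxAgent(new JmxConfig());
 agent.start();
 try (JMXConnector connector = JMXConnectorFactory.connect(agent.getURL())) {
   MBeanServerConnection connection = connector.getMBeanServerConnection();
   System.out.println("MBeans registered: " + connection.getMBeanCount());
 }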
Example 21
 @Test
 public void testViewCreation() {
   try {
     verifyViewCreation();
   } finally {
     try {
       metadata.dropView(SESSION, temporaryCreateView);
     } catch (RuntimeException e) {
       Logger.get(getClass()).warn(e, "Failed to drop view: %s", temporaryCreateView);
     }
   }
 }
Example 22
  // TODO: get the right partitions right here
  @Override
  public ConnectorPartitionResult getPartitions(
      ConnectorTableHandle tableHandle, TupleDomain<ColumnHandle> tupleDomain) {
    checkArgument(
        tableHandle instanceof RiakTableHandle,
        "tableHandle is not an instance of RiakTableHandle");
    RiakTableHandle riakTableHandle = (RiakTableHandle) tableHandle;

    log.info("==========================tupleDomain=============================");
    log.info(tupleDomain.toString());

    try {
      String parentTable = PRSubTable.parentTableName(riakTableHandle.getTableName());
      SchemaTableName parentSchemaTable =
          new SchemaTableName(riakTableHandle.getSchemaName(), parentTable);
      PRTable table = riakClient.getTable(parentSchemaTable);
      List<String> indexedColumns = new LinkedList<>();
      for (RiakColumn riakColumn : table.getColumns()) {
        if (riakColumn.getIndex()) {
          indexedColumns.add(riakColumn.getName());
        }
      }

      // Riak connector has only one partition
      List<ConnectorPartition> partitions =
          ImmutableList.<ConnectorPartition>of(
              new RiakPartition(
                  riakTableHandle.getSchemaName(),
                  riakTableHandle.getTableName(),
                  tupleDomain,
                  indexedColumns));

      // Riak connector does not do any additional processing/filtering with the TupleDomain, so
      // just return the whole TupleDomain
      return new ConnectorPartitionResult(partitions, tupleDomain);
    } catch (Exception e) {
      log.error("interrupted: %s", e.toString());
      throw new TableNotFoundException(riakTableHandle.toSchemaTableName());
    }
  }
Example 23
 private Throwable addSuppressedException(
     Throwable inFlightException, Throwable newException, String message, Object... args) {
   if (newException instanceof Error) {
     if (inFlightException == null) {
       inFlightException = newException;
     } else {
       inFlightException.addSuppressed(newException);
     }
   } else {
     // log normal exceptions instead of rethrowing them
     log.error(newException, message, args);
   }
   return inFlightException;
 }
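A sketch of typical usage: close several resources, letting addSuppressedException log ordinary exceptions and accumulate only Errors for rethrow (closeAll and resources are hypothetical):

 private void closeAll(List<AutoCloseable> resources) {
   Throwable inFlightException = null;
   for (AutoCloseable resource : resources) {
     try {
       resource.close();
     } catch (Throwable t) {
       inFlightException =
           addSuppressedException(inFlightException, t, "Error closing resource %s", resource);
     }
   }
   if (inFlightException != null) {
     throw Throwables.propagate(inFlightException);
   }
 }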
Example 24
  private List<ServiceDescriptor> getServiceInventory(SlotStatus slotStatus) {
    Assignment assignment = slotStatus.getAssignment();
    if (assignment == null) {
      return null;
    }

    String config = assignment.getConfig();

    File cacheFile = getCacheFile(config);
    if (cacheFile.canRead()) {
      try {
        String json = CharStreams.toString(Files.newReaderSupplier(cacheFile, Charsets.UTF_8));
        List<ServiceDescriptor> descriptors = descriptorsJsonCodec.fromJson(json);
        invalidServiceInventory.remove(config);
        return descriptors;
      } catch (Exception ignored) {
        // delete the bad cache file
        cacheFile.delete();
      }
    }

    InputSupplier<? extends InputStream> configFile =
        ConfigUtils.newConfigEntrySupplier(repository, config, "airship-service-inventory.json");
    if (configFile == null) {
      return null;
    }

    try {
      String json;
      try {
        json = CharStreams.toString(CharStreams.newReaderSupplier(configFile, Charsets.UTF_8));
      } catch (FileNotFoundException e) {
        // no service inventory in the config, so replace with json null so caching works
        json = "null";
      }
      invalidServiceInventory.remove(config);

      // cache json
      cacheFile.getParentFile().mkdirs();
      Files.write(json, cacheFile, Charsets.UTF_8);

      List<ServiceDescriptor> descriptors = descriptorsJsonCodec.fromJson(json);
      return descriptors;
    } catch (Exception e) {
      if (invalidServiceInventory.add(config)) {
        log.error(e, "Unable to read service inventory for %s" + config);
      }
    }
    return null;
  }
Example 25
 public static void createTablesWithRetry(AliasDao dao) throws InterruptedException {
   Duration delay = new Duration(10, TimeUnit.SECONDS);
   while (true) {
     try {
       createTables(dao);
       return;
     } catch (UnableToObtainConnectionException e) {
       log.warn(
           "Failed to connect to database. Will retry again in %s. Exception: %s",
           delay, e.getMessage());
       Thread.sleep(delay.toMillis());
     }
   }
 }
Example 26
  private synchronized void updatePools(Map<MemoryPoolId, Integer> queryCounts) {
    // Update view of cluster memory and pools
    List<MemoryInfo> nodeMemoryInfos =
        nodes
            .values()
            .stream()
            .map(RemoteNodeMemory::getInfo)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .collect(toImmutableList());

    long totalClusterMemory =
        nodeMemoryInfos
            .stream()
            .map(MemoryInfo::getTotalNodeMemory)
            .mapToLong(DataSize::toBytes)
            .sum();
    clusterMemoryBytes.set(totalClusterMemory);

    Set<MemoryPoolId> activePoolIds =
        nodeMemoryInfos
            .stream()
            .flatMap(info -> info.getPools().keySet().stream())
            .collect(toImmutableSet());

    // Make a copy to materialize the set difference
    Set<MemoryPoolId> removedPools = ImmutableSet.copyOf(difference(pools.keySet(), activePoolIds));
    for (MemoryPoolId removed : removedPools) {
      unexport(pools.get(removed));
      pools.remove(removed);
    }
    for (MemoryPoolId id : activePoolIds) {
      ClusterMemoryPool pool =
          pools.computeIfAbsent(
              id,
              poolId -> {
                ClusterMemoryPool newPool = new ClusterMemoryPool(poolId);
                String objectName =
                    ObjectNames.builder(ClusterMemoryPool.class, newPool.getId().toString())
                        .build();
                try {
                  exporter.export(objectName, newPool);
                } catch (JmxException e) {
                  log.error(e, "Error exporting memory pool %s", poolId);
                }
                return newPool;
              });
      pool.update(nodeMemoryInfos, queryCounts.getOrDefault(pool.getId(), 0));
    }
  }
Example 27
  public void failAbandonedQueries() {
    for (QueryExecution queryExecution : queries.values()) {
      try {
        QueryInfo queryInfo = queryExecution.getQueryInfo();
        if (queryInfo.getState().isDone()) {
          continue;
        }

        if (isAbandoned(queryExecution)) {
          log.info("Failing abandoned query %s", queryExecution.getQueryInfo().getQueryId());
          queryExecution.fail(
              new AbandonedException(
                  "Query " + queryInfo.getQueryId(),
                  queryInfo.getQueryStats().getLastHeartbeat(),
                  DateTime.now()));
        }
      } catch (RuntimeException e) {
        log.warn(
            e,
            "Error while inspecting age of query %s",
            queryExecution.getQueryInfo().getQueryId());
      }
    }
  }
Example 28
    public PrestoS3OutputStream(
        AmazonS3 s3, TransferManagerConfiguration config, String host, String key, File tempFile)
        throws IOException {
      super(
          new BufferedOutputStream(
              new FileOutputStream(checkNotNull(tempFile, "tempFile is null"))));

      transferManager = new TransferManager(checkNotNull(s3, "s3 is null"));
      transferManager.setConfiguration(checkNotNull(config, "config is null"));

      this.host = checkNotNull(host, "host is null");
      this.key = checkNotNull(key, "key is null");
      this.tempFile = tempFile;

      log.debug("OutputStream for key '%s' using file: %s", key, tempFile);
    }
Example 29
  public void printInitialStatusUpdates() {
    long lastPrint = System.nanoTime();
    try {
      while (client.isValid()) {
        try {
          // exit the status loop if there is pending output
          if (client.current().getData() != null) {
            return;
          }

          // check if time to update screen
          boolean update = nanosSince(lastPrint).getValue(SECONDS) >= 0.5;

          // check for keyboard input
          int key = readKey();
          if (key == CTRL_P) {
            partialCancel();
          } else if (key == CTRL_C) {
            updateScreen();
            update = false;
            client.close();
          } else if (toUpperCase(key) == 'D') {
            debug = !debug;
            console.resetScreen();
            update = true;
          }

          // update screen
          if (update) {
            updateScreen();
            lastPrint = System.nanoTime();
          }

          // fetch next results (server will wait for a while if no data)
          client.advance();
        } catch (RuntimeException e) {
          log.debug(e, "error printing status");
          if (debug) {
            e.printStackTrace(out);
          }
        }
      }
    } finally {
      console.resetScreen();
    }
  }