Example #1
  public void runAllBenchmarks() throws IOException {
    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("test"));
    try {
      List<AbstractBenchmark> benchmarks = createBenchmarks(executor);

      LOGGER.info("=== Pre-running all benchmarks for JVM warmup ===");
      for (AbstractBenchmark benchmark : benchmarks) {
        benchmark.runBenchmark();
      }

      LOGGER.info("=== Actually running benchmarks for metrics ===");
      for (AbstractBenchmark benchmark : benchmarks) {
        try (OutputStream jsonOut =
                new FileOutputStream(
                    createOutputFile(
                        String.format(
                            "%s/json/%s.json", outputDirectory, benchmark.getBenchmarkName())));
            OutputStream jsonAvgOut =
                new FileOutputStream(
                    createOutputFile(
                        String.format(
                            "%s/json-avg/%s.json",
                            outputDirectory, benchmark.getBenchmarkName())));
            OutputStream csvOut =
                new FileOutputStream(
                    createOutputFile(
                        String.format(
                            "%s/csv/%s.csv", outputDirectory, benchmark.getBenchmarkName())));
            OutputStream odsOut =
                new FileOutputStream(
                    createOutputFile(
                        String.format(
                            "%s/ods/%s.json", outputDirectory, benchmark.getBenchmarkName())))) {
          benchmark.runBenchmark(
              new ForwardingBenchmarkResultWriter(
                  ImmutableList.of(
                      new JsonBenchmarkResultWriter(jsonOut),
                      new JsonAvgBenchmarkResultWriter(jsonAvgOut),
                      new SimpleLineBenchmarkResultWriter(csvOut),
                      new OdsBenchmarkResultWriter(
                          "presto.benchmark." + benchmark.getBenchmarkName(), odsOut))));
        }
      }
    } finally {
      executor.shutdownNow();
    }
  }
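The createOutputFile helper is not shown above; a minimal sketch of what it likely does, assuming Guava's com.google.common.io.Files is available (the implementation is an assumption, not the original code):

  // Hypothetical helper: create any missing parent directories so the
  // FileOutputStream calls above do not fail on the first run.
  private static File createOutputFile(String fileName) throws IOException {
    File outputFile = new File(fileName);
    Files.createParentDirs(outputFile);
    return outputFile;
  }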
Example #2
 // TODO: make the wait time configurable?
 public static void shutdownExecutor(ExecutorService executor, final String name) {
   executor.shutdown();
   try {
     log.info("Waiting for %s to shutdown", name);
     if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
       log.warn("%s did not shutdown properly", name);
     }
   } catch (InterruptedException e) {
     log.warn("Interrupted while waiting for %s to shutdown", name);
     Thread.currentThread().interrupt();
   }
 }
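A short usage sketch (the pool and its name are illustrative, not part of the original snippet):

 // Hypothetical usage: drain a worker pool during service shutdown.
 ExecutorService workers = Executors.newFixedThreadPool(4);
 // ... submit tasks ...
 shutdownExecutor(workers, "worker pool");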
Example #3
  // TODO: get the right partitions here
  @Override
  public ConnectorPartitionResult getPartitions(
      ConnectorTableHandle tableHandle, TupleDomain<ColumnHandle> tupleDomain) {
    checkArgument(
        tableHandle instanceof RiakTableHandle,
        "tableHandle is not an instance of RiakTableHandle");
    RiakTableHandle riakTableHandle = (RiakTableHandle) tableHandle;

    log.info("==========================tupleDomain=============================");
    log.info(tupleDomain.toString());

    try {
      String parentTable = PRSubTable.parentTableName(riakTableHandle.getTableName());
      SchemaTableName parentSchemaTable =
          new SchemaTableName(riakTableHandle.getSchemaName(), parentTable);
      PRTable table = riakClient.getTable(parentSchemaTable);
      List<String> indexedColumns = new ArrayList<>();
      for (RiakColumn riakColumn : table.getColumns()) {
        if (riakColumn.getIndex()) {
          indexedColumns.add(riakColumn.getName());
        }
      }

      // Riak connector has only one partition
      List<ConnectorPartition> partitions =
          ImmutableList.<ConnectorPartition>of(
              new RiakPartition(
                  riakTableHandle.getSchemaName(),
                  riakTableHandle.getTableName(),
                  tupleDomain,
                  indexedColumns));

      // Riak connector does not do any additional processing/filtering with the TupleDomain, so
      // just return the whole TupleDomain
      return new ConnectorPartitionResult(partitions, tupleDomain);
    } catch (Exception e) {
      log.error(e, "Failed to get partitions for table %s", riakTableHandle.getTableName());
      throw new TableNotFoundException(riakTableHandle.toSchemaTableName());
    }
  }
Example #4
 public static void closeChannels(ChannelGroup allChannels) {
   if (allChannels.size() > 0) {
     // TODO: allow an option here to control whether we drain connections and wait instead of
     // killing them all
     try {
       log.info("Closing %s open client connections", allChannels.size());
       if (!allChannels.close().await(5, TimeUnit.SECONDS)) {
         log.warn("Failed to close all open client connections");
       }
     } catch (InterruptedException e) {
       log.warn("Interrupted while closing client connections");
       Thread.currentThread().interrupt();
     }
   }
 }
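closeChannels expects a Netty 3.x ChannelGroup; a minimal usage sketch (the group name is illustrative):

 // Hypothetical usage: track accepted channels in a group, then close them
 // all when the server stops.
 ChannelGroup allChannels = new DefaultChannelGroup("http-server");
 // ... add each accepted Channel to allChannels ...
 closeChannels(allChannels);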
Example #5
  @PostConstruct
  public void start() throws IOException {
    // This is somewhat of a hack, but the jmx agent in Oracle/OpenJDK doesn't
    // have a programmatic API for starting it and controlling its parameters
    System.setProperty("com.sun.management.jmxremote", "true");
    System.setProperty("com.sun.management.jmxremote.port", Integer.toString(registryPort));
    System.setProperty("com.sun.management.jmxremote.rmi.port", Integer.toString(serverPort));
    System.setProperty("com.sun.management.jmxremote.authenticate", "false");
    System.setProperty("com.sun.management.jmxremote.ssl", "false");

    try {
      Agent.startAgent();
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }

    log.info("JMX Agent listening on %s:%s", url.getHost(), url.getPort());
  }
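To verify the agent actually came up, one could connect through the standard javax.management.remote API; a sketch, assuming the agent runs on localhost and registryPort holds the value set above:

  // Hypothetical smoke test: connect to the RMI registry started by the agent.
  JMXServiceURL jmxUrl =
      new JMXServiceURL(
          String.format("service:jmx:rmi:///jndi/rmi://localhost:%s/jmxrmi", registryPort));
  JMXConnector connector = JMXConnectorFactory.connect(jmxUrl);
  try {
    MBeanServerConnection connection = connector.getMBeanServerConnection();
    log.info("Connected; %s MBeans registered", connection.getMBeanCount());
  } finally {
    connector.close();
  }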
Example #6
  public void installCodeCacheGcTrigger() {
    if (installed.getAndSet(true)) {
      return;
    }

    // Hack to work around bugs in java 8 (8u45+) related to code cache management.
    // See
    // http://openjdk.5641.n7.nabble.com/JIT-stops-compiling-after-a-while-java-8u45-td259603.html
    // for more info.
    MemoryPoolMXBean codeCacheMbean = findCodeCacheMBean();

    Thread gcThread =
        new Thread(
            () -> {
              while (!Thread.currentThread().isInterrupted()) {
                long used = codeCacheMbean.getUsage().getUsed();
                long max = codeCacheMbean.getUsage().getMax();

                if (used > 0.95 * max) {
                  log.error("Code Cache is more than 95% full. JIT may stop working.");
                }
                if (used > (max * collectionThreshold) / 100) {
                  // Due to an obscure HotSpot bug (Java 8), the JIT stops compiling once the
                  // code cache fills up. Forcing a GC lets the code cache evictor make room
                  // before that happens.
                  log.info("Triggering GC to avoid Code Cache eviction bugs");
                  System.gc();
                }

                try {
                  TimeUnit.MILLISECONDS.sleep(interval.toMillis());
                } catch (InterruptedException e) {
                  Thread.currentThread().interrupt();
                }
              }
            });
    gcThread.setDaemon(true);
    gcThread.setName("Code-Cache-GC-Trigger");
    gcThread.start();
  }
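findCodeCacheMBean is defined elsewhere; a plausible reconstruction using the standard java.lang.management API (the exact pool-name match is an assumption; on Java 8 the JIT pool is named "Code Cache"):

  // Hypothetical reconstruction: locate the memory pool backing the JIT code cache.
  private static MemoryPoolMXBean findCodeCacheMBean() {
    for (MemoryPoolMXBean bean : ManagementFactory.getMemoryPoolMXBeans()) {
      if (bean.getName().equals("Code Cache")) {
        return bean;
      }
    }
    throw new IllegalStateException("Could not find the code cache memory pool");
  }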
Example #7
  public void failAbandonedQueries() {
    for (QueryExecution queryExecution : queries.values()) {
      try {
        QueryInfo queryInfo = queryExecution.getQueryInfo();
        if (queryInfo.getState().isDone()) {
          continue;
        }

        if (isAbandoned(queryExecution)) {
          log.info("Failing abandoned query %s", queryExecution.getQueryInfo().getQueryId());
          queryExecution.fail(
              new AbandonedException(
                  "Query " + queryInfo.getQueryId(),
                  queryInfo.getQueryStats().getLastHeartbeat(),
                  DateTime.now()));
        }
      } catch (RuntimeException e) {
        log.warn(
            e,
            "Error while inspecting age of query %s",
            queryExecution.getQueryInfo().getQueryId());
      }
    }
  }
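isAbandoned is defined elsewhere; a minimal sketch, assuming a configured clientTimeout duration (the field name is hypothetical):

  // Hypothetical sketch: a query is abandoned when its client has not sent a
  // heartbeat within the configured timeout.
  private boolean isAbandoned(QueryExecution queryExecution) {
    DateTime oldestAllowedHeartbeat = DateTime.now().minus(clientTimeout.toMillis());
    DateTime lastHeartbeat = queryExecution.getQueryInfo().getQueryStats().getLastHeartbeat();
    return lastHeartbeat != null && lastHeartbeat.isBefore(oldestAllowedHeartbeat);
  }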
Example #8
  // TODO: return correct splits from partitions
  @Override
  public ConnectorSplitSource getPartitionSplits(
      ConnectorTableHandle tableHandle, List<ConnectorPartition> partitions) {
    checkNotNull(partitions, "partitions is null");
    checkArgument(partitions.size() == 1, "Expected one partition but got %s", partitions.size());
    ConnectorPartition partition = partitions.get(0);

    checkArgument(
        partition instanceof RiakPartition, "partition is not an instance of RiakPartition");

    checkArgument(
        tableHandle instanceof RiakTableHandle,
        "tableHandle is not an instance of RiakTableHandle");
    RiakTableHandle riakTableHandle = (RiakTableHandle) tableHandle;

    try {
      String parentTable = PRSubTable.parentTableName(riakTableHandle.getTableName());
      SchemaTableName parentSchemaTable =
          new SchemaTableName(riakTableHandle.getSchemaName(), parentTable);
      PRTable table = riakClient.getTable(parentSchemaTable);

      log.debug("> %s", table.getColumns().toString());
      // add all nodes at the cluster here
      List<ConnectorSplit> splits = Lists.newArrayList();
      String hosts = riakClient.getHosts();
      log.debug("%s", hosts);

      if (riakConfig.getLocalNode() != null) {
        // TODO: make coverageSplits here

        DirectConnection conn = directConnection;
        Coverage coverage = new Coverage(conn);
        coverage.plan();
        List<SplitTask> splitTasks = coverage.getSplits();

        log.debug("print coverage plan==============");
        log.debug(coverage.toString());

        for (SplitTask split : splitTasks) {
          log.info("============printing split data at " + split.getHost() + "===============");
          // log.debug(((OtpErlangObject)split.getTask()).toString());
          log.info(split.toString());

          CoverageSplit coverageSplit =
              new CoverageSplit(
                  riakTableHandle, // maybe toplevel or subtable
                  table, // toplevel PRTable
                  split.getHost(),
                  split.toString(),
                  partition.getTupleDomain());

          splits.add(coverageSplit);
        }
      } else {
        // TODO: does the Riak connector need only a single access point per Presto worker?
        log.error("localNode must be set and working");
        log.debug("%s", hosts);
      }
      log.debug(
          "table %s.%s has %d splits.",
          riakTableHandle.getSchemaName(), riakTableHandle.getTableName(), splits.size());

      Collections.shuffle(splits);
      return new FixedSplitSource(connectorId, splits);

    } catch (Exception e) {
      // this can happen if the table is removed during a query
      throw new TableNotFoundException(riakTableHandle.toSchemaTableName());
    }
  }
Example #9
  private void logQueryTimeline(QueryInfo queryInfo) {
    try {
      QueryStats queryStats = queryInfo.getQueryStats();
      DateTime queryStartTime = queryStats.getCreateTime();
      DateTime queryEndTime = queryStats.getEndTime();

      // query didn't finish cleanly
      if (queryStartTime == null || queryEndTime == null) {
        return;
      }

      // planning duration -- start to end of planning
      Duration planning = queryStats.getTotalPlanningTime();
      if (planning == null) {
        planning = new Duration(0, MILLISECONDS);
      }

      List<StageInfo> stages = StageInfo.getAllStages(queryInfo.getOutputStage());
      long firstTaskStartTime = queryEndTime.getMillis();
      long lastTaskStartTime = queryStartTime.getMillis() + planning.toMillis();
      long lastTaskEndTime = queryStartTime.getMillis() + planning.toMillis();
      for (StageInfo stage : stages) {
        // only consider leaf stages
        if (!stage.getSubStages().isEmpty()) {
          continue;
        }

        for (TaskInfo taskInfo : stage.getTasks()) {
          TaskStats taskStats = taskInfo.getStats();

          DateTime firstStartTime = taskStats.getFirstStartTime();
          if (firstStartTime != null) {
            firstTaskStartTime = Math.min(firstStartTime.getMillis(), firstTaskStartTime);
          }

          DateTime lastStartTime = taskStats.getLastStartTime();
          if (lastStartTime != null) {
            lastTaskStartTime = Math.max(lastStartTime.getMillis(), lastTaskStartTime);
          }

          DateTime endTime = taskStats.getEndTime();
          if (endTime != null) {
            lastTaskEndTime = Math.max(endTime.getMillis(), lastTaskEndTime);
          }
        }
      }

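      // Break elapsed wall-clock time into planning, scheduling, running, and finishing.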
      Duration elapsed = millis(queryEndTime.getMillis() - queryStartTime.getMillis());

      Duration scheduling =
          millis(firstTaskStartTime - queryStartTime.getMillis() - planning.toMillis());

      Duration running = millis(lastTaskEndTime - firstTaskStartTime);

      Duration finishing = millis(queryEndTime.getMillis() - lastTaskEndTime);

      log.info(
          "TIMELINE: Query %s :: elapsed %s :: planning %s :: scheduling %s :: running %s :: finishing %s :: begin %s :: end %s",
          queryInfo.getQueryId(),
          elapsed,
          planning,
          scheduling,
          running,
          finishing,
          queryStartTime,
          queryEndTime);
    } catch (Exception e) {
      log.error(e, "Error logging query timeline");
    }
  }
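The millis helper is not shown; a minimal sketch, assuming io.airlift.units.Duration (clamping at zero guards against small clock skew between task timestamps):

  // Hypothetical helper: wrap a millisecond delta as a Duration, never negative.
  private static Duration millis(long millis) {
    return new Duration(Math.max(millis, 0), MILLISECONDS);
  }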