Example #1
 public void start() {
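   // if the watch is already running, reset it so timing starts from zero; otherwise just start it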
   if (watch.isRunning()) {
     watch.reset().start();
   } else {
     watch.start();
   }
 }
Example #2
  @Override
  public void execute() throws IOException, RecommenderBuildException {
    LenskitConfiguration dataConfig = input.getConfiguration();
    LenskitRecommenderEngineBuilder builder = LenskitRecommenderEngine.newBuilder();
    for (LenskitConfiguration config : environment.loadConfigurations(getConfigFiles())) {
      builder.addConfiguration(config);
    }
    builder.addConfiguration(dataConfig, ModelDisposition.EXCLUDED);

    Stopwatch timer = Stopwatch.createStarted();
    LenskitRecommenderEngine engine = builder.build();
    timer.stop();
    logger.info("built model in {}", timer);
    File output = getOutputFile();
    CompressionMode comp = CompressionMode.autodetect(output);
    logger.info("writing model to {}", output);
    Closer closer = Closer.create();
    try {
      OutputStream stream = closer.register(new FileOutputStream(output));
      stream = closer.register(comp.wrapOutput(stream));
      engine.write(stream);
    } catch (Throwable th) { // NOSONAR using a closer
      throw closer.rethrow(th);
    } finally {
      closer.close();
    }
  }
Example #3
  public AggregateNumericMetric getSummaryAggregate(
      List<Integer> scheduleIds, long beginTime, long endTime) {
    Stopwatch stopwatch = new Stopwatch().start();
    try {
      DateTime begin = new DateTime(beginTime);

      if (dateTimeService.isInRawDataRange(begin)) {
        Iterable<RawNumericMetric> metrics = dao.findRawMetrics(scheduleIds, beginTime, endTime);
        return calculateAggregatedRaw(metrics, beginTime);
      }
      Bucket bucket = getBucket(begin);
      List<AggregateNumericMetric> metrics = loadMetrics(scheduleIds, beginTime, endTime, bucket);

      return calculateAggregate(metrics, beginTime, bucket);
    } finally {
      stopwatch.stop();
      if (log.isDebugEnabled()) {
        log.debug(
            "Finished calculating group summary aggregate for [scheduleIds: "
                + scheduleIds
                + ", beginTime: "
                + beginTime
                + ", endTime: "
                + endTime
                + "] in "
                + stopwatch.elapsed(TimeUnit.MILLISECONDS)
                + " ms");
      }
    }
  }
Example #4
  @Test
  public void ensureRecordsTest() {
    int empId = 11303;
    List<PayPeriod> payPeriods =
        periodService.getOpenPayPeriods(PayPeriodType.AF, empId, SortOrder.ASC);
    // Print existing records
    Set<TimeRecord> existingRecords =
        timeRecordService
            .getTimeRecords(Collections.singleton(empId), payPeriods, TimeRecordStatus.getAll())
            .stream()
            .map(TimeRecord::new)
            .collect(Collectors.toSet());
    logger.info("-------- EXISTING RECORDS --------");
    printRecords(existingRecords);

    Stopwatch sw = Stopwatch.createStarted();
    // Generate records
    manager.ensureRecords(empId);
    logger.info("generation took {} ms", sw.stop().elapsed(TimeUnit.MILLISECONDS));

    // Print difference
    Set<TimeRecord> newRecords =
        new TreeSet<>(
            timeRecordService.getTimeRecords(
                Collections.singleton(empId), payPeriods, TimeRecordStatus.getAll()));
    logger.info("-------- NEW RECORDS --------");
    printRecords(Sets.difference(newRecords, existingRecords));
  }
Example #5
  public Iterable<MeasurementDataNumericHighLowComposite> findDataForResource(
      int scheduleId, long beginTime, long endTime, int numberOfBuckets) {
    Stopwatch stopwatch = new Stopwatch().start();
    try {
      DateTime begin = new DateTime(beginTime);

      if (dateTimeService.isInRawDataRange(begin)) {
        Iterable<RawNumericMetric> metrics = dao.findRawMetrics(scheduleId, beginTime, endTime);
        return createRawComposites(metrics, beginTime, endTime, numberOfBuckets);
      }

      List<AggregateNumericMetric> metrics = null;
      if (dateTimeService.isIn1HourDataRange(begin)) {
        metrics = dao.findAggregateMetrics(scheduleId, Bucket.ONE_HOUR, beginTime, endTime);
        return createComposites(metrics, beginTime, endTime, numberOfBuckets);
      } else if (dateTimeService.isIn6HourDataRange(begin)) {
        metrics = dao.findAggregateMetrics(scheduleId, Bucket.SIX_HOUR, beginTime, endTime);
        return createComposites(metrics, beginTime, endTime, numberOfBuckets);
      } else if (dateTimeService.isIn24HourDataRange(begin)) {
        metrics = dao.findAggregateMetrics(scheduleId, Bucket.TWENTY_FOUR_HOUR, beginTime, endTime);
        return createComposites(metrics, beginTime, endTime, numberOfBuckets);
      } else {
        throw new IllegalArgumentException(
            "beginTime[" + beginTime + "] is outside the accepted range.");
      }
    } finally {
      stopwatch.stop();
      if (log.isDebugEnabled()) {
        log.debug(
            "Finished calculating resource summary aggregate in "
                + stopwatch.elapsed(TimeUnit.MILLISECONDS)
                + " ms");
      }
    }
  }
Example #6
 /**
  * DO NOT RUN!!!!
  *
  * @author Joshua Barlin (propoke24)
  * @version 1
  * @return Time between execution and interruption
  * @deprecated Test Code
  */
 @Deprecated
 public static long timer() {
   final Stopwatch stopwatch = Stopwatch.createUnstarted();
   stopwatch.start();
   stopwatch.stop();
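   // the watch is stopped immediately after being started, so the elapsed time is effectively zero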
   return stopwatch.elapsed(TimeUnit.SECONDS);
 }
Example #7
  public static <T extends CompleteWork> List<EndpointAffinity> getAffinityMap(List<T> work) {
    // start the watch so the elapsed-time log below reports a real duration
    Stopwatch watch = new Stopwatch().start();

    long totalBytes = 0;
    for (CompleteWork entry : work) {
      totalBytes += entry.getTotalBytes();
    }

    ObjectFloatOpenHashMap<DrillbitEndpoint> affinities =
        new ObjectFloatOpenHashMap<DrillbitEndpoint>();
    for (CompleteWork entry : work) {
      for (ObjectLongCursor<DrillbitEndpoint> cursor : entry.getByteMap()) {
        long bytes = cursor.value;
        float affinity = (float) bytes / (float) totalBytes;
        logger.debug("Work: {} Endpoint: {} Bytes: {}", work, cursor.key.getAddress(), bytes);
        affinities.putOrAdd(cursor.key, affinity, affinity);
      }
    }

    List<EndpointAffinity> affinityList = Lists.newLinkedList();
    for (ObjectFloatCursor<DrillbitEndpoint> d : affinities) {
      logger.debug("Endpoint {} has affinity {}", d.key.getAddress(), d.value);
      affinityList.add(new EndpointAffinity(d.key, d.value));
    }

    logger.debug("Took {} ms to get operator affinity", watch.elapsed(TimeUnit.MILLISECONDS));
    return affinityList;
  }
Example #8
 @Override
 public final D scan(I container, String path, Scope scope, Scanner scanner) throws IOException {
   ScannerContext context = scanner.getContext();
   D containerDescriptor = getContainerDescriptor(container, context);
   String containerPath = getContainerPath(container, path);
   containerDescriptor.setFileName(containerPath);
   LOGGER.info("Entering {}", containerPath);
   ContainerFileResolver fileResolverStrategy = new ContainerFileResolver(containerDescriptor);
   context.push(FileResolver.class, fileResolverStrategy);
   enterContainer(container, containerDescriptor, scanner.getContext());
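   // time how long it takes to scan all entries in this container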
   Stopwatch stopwatch = Stopwatch.createStarted();
   try {
     Iterable<? extends E> entries = getEntries(container);
     for (E entry : entries) {
       String relativePath = getRelativePath(container, entry);
       try (Resource resource = getEntry(container, entry)) {
         LOGGER.debug("Scanning {}", relativePath);
         FileDescriptor descriptor = scanner.scan(resource, relativePath, scope);
         fileResolverStrategy.put(relativePath, descriptor);
       }
     }
   } finally {
     leaveContainer(container, containerDescriptor, scanner.getContext());
     context.pop(FileResolver.class);
   }
   fileResolverStrategy.flush();
   LOGGER.info(
       "Leaving {} ({} entries, {} ms)",
       containerPath,
       fileResolverStrategy.size(),
       stopwatch.elapsed(MILLISECONDS));
   return containerDescriptor;
 }
Example #9
  public HashMap<String, byte[]> digestDependencies(List<File> dependencies) throws IOException {
    Stopwatch stopwatch = Stopwatch.createStarted();

    HashMap<String, byte[]> digest = new HashMap<String, byte[]>();

    // scan dependencies backwards to properly deal with duplicate type definitions
    for (int i = dependencies.size() - 1; i >= 0; i--) {
      File file = dependencies.get(i);
      if (file.isFile()) {
        digest.putAll(digestJar(file));
      } else if (file.isDirectory()) {
        digest.putAll(digestDirectory(file));
      } else {
        // happens with reactor dependencies with empty source folders
        continue;
      }
    }

    log.debug(
        "Analyzed {} classpath dependencies ({} ms)",
        dependencies.size(),
        stopwatch.elapsed(TimeUnit.MILLISECONDS));

    return digest;
  }
Example #10
  @Override
  public SOid fromString(final String from) throws Exception {
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug(String.format("fromString(from=%s)", from));
    }
    final SOid oid;
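    // time the conversion; the elapsed time is logged at TRACE level below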
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
      final String[] split = from.split(":");
      Assert.isTrue(
          split.length == 3,
          String.format("OID[%s] is invalid, it should be in format A:B:C", from));

      final String oidPrefix = split[TYPE_PREFIX_INDEX];
      final Class<?> oidClass = Class.forName(split[CLASS_NAME_INDEX]);
      final String oidId = split[ID_INDEX];

      oid = this.getOidObject(oidPrefix, oidClass, oidId);
    } catch (Exception exp) {
      LOGGER.error(
          String.format("fromString(from=%s) failed...", from), Throwables.getRootCause(exp));
      throw exp;
    }

    stopwatch.stop();

    if (LOGGER.isTraceEnabled()) {
      final long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
      LOGGER.trace(
          String.format("fromString(from=%s) to SOid(oid=%s) took %d ms", from, oid, elapsed));
    }

    return oid;
  }
Example #11
  protected void testAction(JdbcAction action, long rowcount) throws Exception {
    int rows = 0;
    Stopwatch watch = Stopwatch.createStarted();
    ResultSet r = action.getResult(connection);
    boolean first = true;
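    // 'first' is used to print the column header row once, before the first data row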
    while (r.next()) {
      rows++;
      ResultSetMetaData md = r.getMetaData();
      if (first) {
        for (int i = 1; i <= md.getColumnCount(); i++) {
          System.out.print(md.getColumnName(i));
          System.out.print('\t');
        }
        System.out.println();
        first = false;
      }

      for (int i = 1; i <= md.getColumnCount(); i++) {
        System.out.print(r.getObject(i));
        System.out.print('\t');
      }
      System.out.println();
    }

    System.out.println(
        String.format("Query completed in %d millis.", watch.elapsed(TimeUnit.MILLISECONDS)));

    if (rowcount != -1) {
      Assert.assertEquals((long) rowcount, (long) rows);
    }

    System.out.println("\n\n\n");
  }
Example #12
  @Override
  protected void shutDown() throws Exception {
    LOG.debug("Stopping InputSetupService");
    eventBus.unregister(this);

    for (InputState state : inputRegistry.getRunningInputs()) {
      MessageInput input = state.getMessageInput();

      LOG.info(
          "Attempting to close input <{}> [{}].", input.getUniqueReadableId(), input.getName());

      Stopwatch s = Stopwatch.createStarted();
      try {
        input.stop();

        LOG.info(
            "Input <{}> closed. Took [{}ms]",
            input.getUniqueReadableId(),
            s.elapsed(TimeUnit.MILLISECONDS));
      } catch (Exception e) {
        LOG.error(
            "Unable to stop input <{}> [{}]: " + e.getMessage(),
            input.getUniqueReadableId(),
            input.getName());
      } finally {
        s.stop();
      }
    }
    LOG.debug("Stopped InputSetupService");
  }
Example #13
 @GET
 @Path("jobFlow/{cluster}/{jobId}")
 @Produces(MediaType.APPLICATION_JSON)
 public Flow getJobFlowById(@PathParam("cluster") String cluster, @PathParam("jobId") String jobId)
     throws IOException {
   LOG.info(String.format("Fetching Flow for cluster=%s, jobId=%s", cluster, jobId));
   Stopwatch timer = new Stopwatch().start();
   serializationContext.set(new SerializationContext(SerializationContext.DetailLevel.EVERYTHING));
   Flow flow = getJobHistoryService().getFlowByJobID(cluster, jobId, false);
   timer.stop();
   if (flow != null) {
     LOG.info(
         "For jobFlow/{cluster}/{jobId} with input query: "
             + "jobFlow/"
             + cluster
             + SLASH
             + jobId
             + " fetched flow "
             + flow.getFlowName()
             + " with #jobs "
             + flow.getJobCount()
             + " in "
             + timer);
   } else {
     LOG.info(
         "For jobFlow/{cluster}/{jobId} with input query: "
             + "jobFlow/"
             + cluster
             + SLASH
             + jobId
             + " No flow found, spent "
             + timer);
   }
   return flow;
 }
Example #14
  @Override
  public List<EndpointAffinity> getOperatorAffinity() {
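    // reset and restart the reused stopwatch to time the affinity calculation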
    watch.reset();
    watch.start();
    Map<String, DrillbitEndpoint> endpointMap = new HashMap<String, DrillbitEndpoint>();
    for (DrillbitEndpoint ep : storagePlugin.getContext().getBits()) {
      endpointMap.put(ep.getAddress(), ep);
    }

    Map<DrillbitEndpoint, EndpointAffinity> affinityMap =
        new HashMap<DrillbitEndpoint, EndpointAffinity>();
    for (ServerName sn : regionsToScan.values()) {
      DrillbitEndpoint ep = endpointMap.get(sn.getHostname());
      if (ep != null) {
        EndpointAffinity affinity = affinityMap.get(ep);
        if (affinity == null) {
          affinityMap.put(ep, new EndpointAffinity(ep, 1));
        } else {
          affinity.addAffinity(1);
        }
      }
    }
    logger.debug("Took {} µs to get operator affinity", watch.elapsed(TimeUnit.NANOSECONDS) / 1000);
    return Lists.newArrayList(affinityMap.values());
  }
Example #15
 private synchronized Duration elapsedErrorDuration() {
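   // stop the watch (if running) so the elapsed reading below is stable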
   if (errorStopwatch.isRunning()) {
     errorStopwatch.stop();
   }
   long nanos = errorStopwatch.elapsed(TimeUnit.NANOSECONDS);
   return new Duration(nanos, TimeUnit.NANOSECONDS).convertTo(TimeUnit.SECONDS);
 }
Example #16
  @SuppressWarnings("unchecked")
  public static void register() throws IOException {
    Stopwatch watch = Stopwatch.createStarted();

    InputStream in = PoreEventWrapper.class.getResourceAsStream("events.txt");
    if (in == null) {
      Pore.getLogger().warn("No registered events found, Bukkit events will not be called.");
      return;
    }

    try (BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
      String line;
      while ((line = reader.readLine()) != null) {
        line = line.trim();
        if (line.isEmpty() || line.charAt(0) == '#') {
          continue;
        }

        try {
          register((Class<? extends Event>) Class.forName(line));
        } catch (ClassNotFoundException e) {
          Pore.getLogger().warn("Failed to register class {} as an event", line, e);
        }
      }
    }

    Pore.getLogger().debug("Registered events in {}", watch.stop());
  }
Example #17
 private void doParseSentencesInCorpus(File ntvmsnbcCorpus) throws IOException {
   /* SentenceMorphParse parse = parser.parse("Turgut Özal'ın ölüm raporu ile ilgili flaş bir gelişme.");
   parse.dump();
   System.out.println("After disambiguation:");
   parser.disambiguate(parse);
   parse.dump();
   for (SentenceMorphParse.Entry entry : parse) {
       System.out.println(entry.input + "=" + entry.parses.get(0));
   }
   for (SentenceMorphParse.Entry entry : parse) {
       System.out.println(entry.input + " kök=" + entry.parses.get(0).stem);
   }*/
   List<String> sentences = SimpleTextReader.trimmingUTF8Reader(ntvmsnbcCorpus).asStringList();
   Stopwatch sw = new Stopwatch().start();
   int wc = 0;
   for (String sentence : sentences) {
     SentenceMorphParse parse = parser.parse(sentence);
     wc += parse.size();
     parser.disambiguate(parse);
     // System.out.println(sentence);
     // parse.dump();
   }
   System.out.println(wc);
   System.out.println(sw.elapsed(TimeUnit.MILLISECONDS));
 }
Example #18
 public static void endTimer(String name) {
   Stopwatch stopwatch = timers.get(name + Thread.currentThread().getId());
   if (stopwatch != null) {
     stopwatch.stop();
     addMeasurementToTimer(stopwatch.elapsedTime(TimeUnit.NANOSECONDS), name);
   }
 }
Example #19
  private void testFull(
      QueryType type,
      String planText,
      String filename,
      int numberOfTimesRead /* specified in json plan */,
      int numberOfRowGroups,
      int recordsPerRowGroup,
      boolean testValues)
      throws Exception {

    //    RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());
    HashMap<String, FieldInfo> fields = new HashMap<>();
    ParquetTestProperties props =
        new ParquetTestProperties(
            numberOfRowGroups, recordsPerRowGroup, DEFAULT_BYTES_PER_PAGE, fields);
    TestFileGenerator.populateFieldInfoMap(props);
    ParquetResultListener resultListener =
        new ParquetResultListener(getAllocator(), props, numberOfTimesRead, testValues);
    Stopwatch watch = new Stopwatch().start();
    testWithListener(type, planText, resultListener);
    resultListener.getResults();
    //    batchLoader.clear();
    System.out.println(
        String.format("Took %d ms to run query", watch.elapsed(TimeUnit.MILLISECONDS)));
  }
Example #20
  @Override
  public void write(OntologyVersion o, Collection<Statement> statements)
      throws IOException, ParseException {
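    // the whole export is timed; the total duration is logged in seconds at the end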
    LOG.debug(
        "Exporting to Meta Snomed Model in TriG format. \nGraph name is <" + GRAPH_NAME + ">");
    Stopwatch stopwatch = new Stopwatch().start();

    int counter = 1;
    for (Concept c : o.getConcepts()) {
      parse(c);
      counter++;
      if (counter % 10000 == 0) {
        LOG.info("Processed {} concepts", counter);
      }
    }

    counter = 1;
    for (Statement s : statements) {
      write(s);
      counter++;
      if (counter % 10000 == 0) {
        LOG.info("Processed {} statements", counter);
      }
    }
    footer();

    stopwatch.stop();
    LOG.info("Completed Meta Snomed export in " + stopwatch.elapsed(TimeUnit.SECONDS) + " seconds");
  }
Example #21
 @GET
 @Path("job/{cluster}/{jobId}")
 @Produces(MediaType.APPLICATION_JSON)
 public JobDetails getJobById(
     @PathParam("cluster") String cluster, @PathParam("jobId") String jobId) throws IOException {
   LOG.info("Fetching JobDetails for jobId=" + jobId);
   Stopwatch timer = new Stopwatch().start();
   serializationContext.set(new SerializationContext(SerializationContext.DetailLevel.EVERYTHING));
   JobDetails jobDetails = getJobHistoryService().getJobByJobID(cluster, jobId);
   timer.stop();
   if (jobDetails != null) {
     LOG.info(
         "For job/{cluster}/{jobId} with input query:"
             + " job/"
             + cluster
             + SLASH
             + jobId
             + " fetched jobDetails for "
             + jobDetails.getJobName()
             + " in "
             + timer);
   } else {
     LOG.info(
         "For job/{cluster}/{jobId} with input query:"
             + " job/"
             + cluster
             + SLASH
             + jobId
             + " No jobDetails found, but spent "
             + timer);
   }
   return jobDetails;
 }
Example #22
  public static void main(String[] args) {
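    // time ten consecutive runs of the Cartesian product test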
    Stopwatch sw = Stopwatch.createStarted();
    for (int y = 0; y < 10; ++y) {
      new CartesianIteratorTest().testCartesianProduct();

      // System.out.println("done[" + y + "]: " + i);
    }
    System.out.println(sw.elapsed(TimeUnit.MILLISECONDS));
  }
Example #23
 @Override
 public void failed(Throwable t) {
   if (stopwatch.isRunning()) {
     long latencyNanos = stopwatch.elapsed(NANOSECONDS);
     recorder.recordFailure(t, latencyNanos);
   } else {
     recorder.recordSkip(t);
   }
 }
Example #24
 @Test
 public void testMillionsExeWithAnnotation() {
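   // time one million calls to getUserMock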
   Stopwatch stopwatch = Stopwatch.createStarted();
   int size = 1000 * 1000;
   for (int i = 0; i < size; i++) {
     cacheDemo.getUserMock(i);
   }
   stopwatch.stop();
   System.out.println(stopwatch.elapsed(TimeUnit.MILLISECONDS));
 }
Example #25
  @Mod.EventHandler
  public void postInit(FMLPostInitializationEvent event) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    logHelper.info("Post Initialization (Started)");

    IntegrationsManager.instance().postInit();

    logHelper.info(
        "Post Initialization (Ended after " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + "ms)");
  }
Example #26
  @Override
  public void record() {
    super.record();

    getStatistics("keySearch", count).addValue(keySearch.elapsedTime(TimeUnit.MICROSECONDS));
    getStatistics("hashGet", count).addValue(hashGet.elapsedTime(TimeUnit.MICROSECONDS));

    keySearch.reset();
    hashGet.reset();
  }
Example #27
  protected long measure(String prefix, int numIterations, Runnable r) {
    final int logInterval = 5 * 1000;
    long nextLogTime = logInterval;

    // Give it some warm-up cycles
    Stopwatch warmupWatch = Stopwatch.createStarted();
    for (int i = 0; i < (numIterations / 10); i++) {
      if (warmupWatch.elapsed(TimeUnit.MILLISECONDS) >= nextLogTime) {
        LOG.info(
            "Warm-up "
                + prefix
                + " iteration="
                + i
                + " at "
                + warmupWatch.elapsed(TimeUnit.MILLISECONDS)
                + "ms");
        nextLogTime += logInterval;
      }
      r.run();
    }

    Stopwatch stopwatch = Stopwatch.createStarted();
    nextLogTime = 0;
    for (int i = 0; i < numIterations; i++) {
      if (stopwatch.elapsed(TimeUnit.MILLISECONDS) >= nextLogTime) {
        LOG.info(
            prefix + " iteration=" + i + " at " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + "ms");
        nextLogTime += logInterval;
      }
      r.run();
    }
    return stopwatch.elapsed(TimeUnit.MILLISECONDS);
  }
Example #28
  private void sendPackedObjects(
      final List<ObjectId> toSend,
      final Set<ObjectId> roots,
      Deduplicator deduplicator,
      final ProgressListener progress) {
    Set<ObjectId> sent = new HashSet<ObjectId>();
    while (!toSend.isEmpty()) {
      try {
        BinaryPackedObjects.Callback callback =
            new BinaryPackedObjects.Callback() {
              @Override
              public void callback(Supplier<RevObject> supplier) {
                RevObject object = supplier.get();
                progress.setProgress(progress.getProgress() + 1);
                if (object instanceof RevCommit) {
                  RevCommit commit = (RevCommit) object;
                  toSend.remove(commit.getId());
                  roots.removeAll(commit.getParentIds());
                  roots.add(commit.getId());
                }
              }
            };
        ObjectDatabase database = localRepository.objectDatabase();
        BinaryPackedObjects packer = new BinaryPackedObjects(database);

        ImmutableList<ObjectId> have = ImmutableList.copyOf(roots);
        final boolean traverseCommits = false;

        Stopwatch sw = Stopwatch.createStarted();
        ObjectSerializingFactory serializer = DataStreamSerializationFactoryV1.INSTANCE;
        SendObjectsConnectionFactory outFactory;
        ObjectFunnel objectFunnel;

        outFactory = new SendObjectsConnectionFactory(repositoryURL);
        int pushBytesLimit = parsePushLimit();
        objectFunnel = ObjectFunnels.newFunnel(outFactory, serializer, pushBytesLimit);
        final long writtenObjectsCount =
            packer.write(objectFunnel, toSend, have, sent, callback, traverseCommits, deduplicator);
        objectFunnel.close();
        sw.stop();

        long compressedSize = outFactory.compressedSize;
        long uncompressedSize = outFactory.uncompressedSize;
        LOGGER.info(
            String.format(
                "HttpRemoteRepo: Written %,d objects."
                    + " Time to process: %s."
                    + " Compressed size: %,d bytes. Uncompressed size: %,d bytes.",
                writtenObjectsCount, sw, compressedSize, uncompressedSize));
      } catch (IOException e) {
        Throwables.propagate(e);
      }
    }
  }
Example #29
 /**
  * Wait up to DEFAULT_SLEEP for an expected count of TS to connect to the master
  *
  * @param expected How many TS are expected
  * @return true if there are at least as many TS as expected, otherwise false
  */
 static boolean waitForTabletServers(int expected) throws Exception {
   int count = 0;
   Stopwatch stopwatch = new Stopwatch().start();
   while (count < expected && stopwatch.elapsedMillis() < DEFAULT_SLEEP) {
     Thread.sleep(200);
     Deferred<ListTabletServersResponse> d = client.listTabletServers();
     d.addErrback(defaultErrorCB);
     count = d.join(DEFAULT_SLEEP).getTabletServersCount();
   }
   return count >= expected;
 }
Example #30
  @GET
  @Path("hdfs/{cluster}/")
  @Produces(MediaType.APPLICATION_JSON)
  public List<HdfsStats> getHdfsStats(
      @PathParam("cluster") String cluster,
      // run Id is timestamp in seconds
      @QueryParam("timestamp") long runid,
      @QueryParam("path") String pathPrefix,
      @QueryParam("limit") int limit)
      throws IOException {
    if (limit == 0) {
      limit = HdfsConstants.RECORDS_RETURNED_LIMIT;
    }

    boolean noRunId = false;
    if (runid == 0L) {
      // default it to 2 hours back
      long lastHour = System.currentTimeMillis() - 2 * 3600000L;
      // convert milliseconds to seconds
      runid = lastHour / 1000L;
      noRunId = true;
    }

    LOG.info(
        String.format(
            "Fetching hdfs stats for cluster=%s, path=%s limit=%d, runId=%d",
            cluster, pathPrefix, limit, runid));
    Stopwatch timer = new Stopwatch().start();
    serializationContext.set(new SerializationContext(SerializationContext.DetailLevel.EVERYTHING));
    List<HdfsStats> hdfsStats = getHdfsStatsService().getAllDirs(cluster, pathPrefix, limit, runid);
    timer.stop();
    /**
     * If we find no hdfs stats for the default timestamp, consider the case where no runId was
     * passed in: the user expects a default response, so we set the default runId to 2 hours back
     * as above. But what if there was an error in collection at that time? Hence we try to look
     * back for some older runIds.
     */
    if (hdfsStats == null || hdfsStats.size() == 0L) {
      if (noRunId) {
        // consider reading the daily aggregation table instead of hourly
        // or consider reading older data since runId was a default timestamp
        int retryCount = 0;
        while (retryCount < HdfsConstants.ageMult.length) {
          runid = HdfsStatsService.getOlderRunId(retryCount, runid);
          hdfsStats = getHdfsStatsService().getAllDirs(cluster, pathPrefix, limit, runid);
          if ((hdfsStats != null) && (hdfsStats.size() != 0L)) {
            break;
          }
          retryCount++;
        }
      }
    }
    return hdfsStats;
  }