Example #1
    public ListenableFuture<?> process() throws Exception {
      try {
        long start = ticker.read();
        ListenableFuture<?> blocked = split.processFor(SPLIT_RUN_QUANTA);
        long endTime = ticker.read();

        // update priority level based on total thread usage of the task
        long durationNanos = endTime - start;
        long threadUsageNanos = taskHandle.addThreadUsageNanos(durationNanos);
        this.threadUsageNanos.set(threadUsageNanos);
        priorityLevel.set(calculatePriorityLevel(threadUsageNanos));

        // record last run for prioritization within a level
        lastRun.set(endTime);

        return blocked;
      } catch (Throwable e) {
        finishedFuture.setException(e);
        throw e;
      }
    }
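
This example reads the ticker twice and subtracts to time processFor. Guava's Stopwatch packages the same pattern; a minimal sketch of the equivalent measurement, using only API that Guava ships (Stopwatch.createStarted(Ticker)) — the helper name is illustrative:

  // Sketch: time an operation with an injected Ticker via Stopwatch.
  // Injecting the ticker keeps the measurement deterministic in tests.
  static long measureNanos(Ticker ticker, Runnable operation) {
    Stopwatch stopwatch = Stopwatch.createStarted(ticker);
    operation.run();
    return stopwatch.elapsed(TimeUnit.NANOSECONDS);
  }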
Example #2
  @VisibleForTesting
  QuantileDigest(double maxError, double alpha, Ticker ticker, boolean compressAutomatically) {
    checkArgument(maxError >= 0 && maxError <= 1, "maxError must be in range [0, 1]");
    checkArgument(alpha >= 0 && alpha < 1, "alpha must be in range [0, 1)");

    this.maxError = maxError;
    this.alpha = alpha;
    this.ticker = ticker;
    this.compressAutomatically = compressAutomatically;

    landmarkInSeconds = TimeUnit.NANOSECONDS.toSeconds(ticker.read());
  }
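
Taking a Ticker in the constructor is what makes the decay logic testable: a fake ticker lets a test advance time deterministically instead of sleeping. A minimal sketch of such a fake (Guava's testlib ships a ready-made com.google.common.testing.FakeTicker with the same idea; the class below is illustrative):

  class FakeTicker extends Ticker {
    private final AtomicLong nanos = new AtomicLong();

    @Override
    public long read() {
      return nanos.get();
    }

    // advance time by the given amount; only the test moves the clock
    void advance(long duration, TimeUnit unit) {
      nanos.addAndGet(unit.toNanos(duration));
    }
  }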
Example #3
  /*
   * Get the exponentially-decayed approximate counts of values in multiple buckets. The elements in
   * the provided list denote the upper bound of each bucket and must be sorted in ascending
   * order.
   *
   * The approximate count in each bucket is guaranteed to be within 2 * totalCount * maxError of
   * the real count.
   */
  public List<Bucket> getHistogram(List<Long> bucketUpperBounds) {
    checkArgument(
        Ordering.natural().isOrdered(bucketUpperBounds),
        "buckets must be sorted in increasing order");

    final ImmutableList.Builder<Bucket> builder = ImmutableList.builder();
    final PeekingIterator<Long> iterator = Iterators.peekingIterator(bucketUpperBounds.iterator());

    final AtomicDouble sum = new AtomicDouble();
    final AtomicDouble lastSum = new AtomicDouble();

    // for computing the weighted average of values in each bucket
    final AtomicDouble bucketWeightedSum = new AtomicDouble();

    final double normalizationFactor = weight(TimeUnit.NANOSECONDS.toSeconds(ticker.read()));

    postOrderTraversal(
        root,
        new Callback() {
          @Override
          public boolean process(Node node) {

            while (iterator.hasNext() && iterator.peek() <= node.getUpperBound()) {
              double bucketCount = sum.get() - lastSum.get();

              Bucket bucket =
                  new Bucket(
                      bucketCount / normalizationFactor, bucketWeightedSum.get() / bucketCount);

              builder.add(bucket);
              lastSum.set(sum.get());
              bucketWeightedSum.set(0);
              iterator.next();
            }

            bucketWeightedSum.addAndGet(node.getMiddle() * node.weightedCount);
            sum.addAndGet(node.weightedCount);
            return iterator.hasNext();
          }
        });

    while (iterator.hasNext()) {
      double bucketCount = sum.get() - lastSum.get();
      Bucket bucket =
          new Bucket(bucketCount / normalizationFactor, bucketWeightedSum.get() / bucketCount);

      builder.add(bucket);

      iterator.next();
    }

    return builder.build();
  }
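
A hedged usage sketch for the method above (the digest variable and bucket bounds are made up for illustration; Long.MAX_VALUE serves as the catch-all upper bound of the last bucket):

  // Hypothetical usage: approximate counts in (-inf, 10], (10, 100], (100, Long.MAX_VALUE]
  List<Bucket> histogram = digest.getHistogram(ImmutableList.of(10L, 100L, Long.MAX_VALUE));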
Example #4
 // no need to throttle partial trace storage since throttling is handled upstream by using a
 // single-thread executor in PartialTraceStorageWatcher
 public void storePartialTrace(Transaction transaction) {
   try {
     Trace trace =
         TraceCreator.createPartialTrace(transaction, clock.currentTimeMillis(), ticker.read());
     transaction.setPartiallyStored();
      // one last check whether the transaction has completed
     if (!transaction.isCompleted()) {
       collector.collectTrace(trace);
     }
   } catch (Exception e) {
     logger.error(e.getMessage(), e);
   }
 }
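
Note the two time sources above: clock.currentTimeMillis() stamps when the trace was captured, while ticker.read() supplies a monotonic nanosecond reading for duration math. A minimal sketch of the distinction (variable names are illustrative):

  // Wall-clock time can jump (NTP adjustments); use it only for "when did this happen"
  long capturedAtMillis = clock.currentTimeMillis();
  // A ticker is monotonic with an arbitrary origin; use it only for elapsed time
  long elapsedNanos = ticker.read() - startTick;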
Example #5
  /** Adds a value to this digest. The value must be {@code >= 0} */
  public void add(long value, long count) {
    checkArgument(count > 0, "count must be > 0");

    long nowInSeconds = TimeUnit.NANOSECONDS.toSeconds(ticker.read());

    int maxExpectedNodeCount = 3 * calculateCompressionFactor();
    if (nowInSeconds - landmarkInSeconds >= RESCALE_THRESHOLD_SECONDS) {
      rescale(nowInSeconds);
      compress(); // need to compress to get rid of nodes that may have decayed to ~ 0
    } else if (nonZeroNodeCount > MAX_SIZE_FACTOR * maxExpectedNodeCount && compressAutomatically) {
      // The size (number of non-zero nodes) of the digest is at most 3 * compression factor
      // If we're over MAX_SIZE_FACTOR of the expected size, compress
      // Note: we don't compress as soon as we go over expectedNodeCount to avoid unnecessarily
      // running a compression for every newly added element when we're close to the boundary
      compress();
    }

    double weight = weight(TimeUnit.NANOSECONDS.toSeconds(ticker.read())) * count;

    max = Math.max(max, value);
    min = Math.min(min, value);

    insert(longToBits(value), weight);
  }
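
The decay itself lives in the weight(...) helper, which this snippet does not reproduce. Given the landmark/rescale machinery above, the standard forward-decay form would look roughly like this (a sketch, not the verbatim source):

  // Newer timestamps get exponentially larger weights relative to the landmark,
  // so older contributions shrink in relative terms as time advances.
  private double weight(long timestampInSeconds) {
    return Math.exp(alpha * (timestampInSeconds - landmarkInSeconds));
  }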
Example #6
  private void rescaleToCommonLandmark(QuantileDigest one, QuantileDigest two) {
    long nowInSeconds = TimeUnit.NANOSECONDS.toSeconds(ticker.read());

    // 1. rescale this and other to common landmark
    long targetLandmark = Math.max(one.landmarkInSeconds, two.landmarkInSeconds);

    if (nowInSeconds - targetLandmark >= RESCALE_THRESHOLD_SECONDS) {
      targetLandmark = nowInSeconds;
    }

    if (targetLandmark != one.landmarkInSeconds) {
      one.rescale(targetLandmark);
    }

    if (targetLandmark != two.landmarkInSeconds) {
      two.rescale(targetLandmark);
    }
  }
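
Both digests must agree on a landmark before merging, because every stored weight is relative to it. Assuming the forward-decay weight sketched under Example #5, rescale would scale weights by a factor like this (again a sketch, not the verbatim source):

  // Moving the landmark forward by d seconds multiplies each stored weight
  // by exp(-alpha * d), keeping decayed counts consistent across the merge.
  private static double rescaleFactor(double alpha, long oldLandmark, long newLandmark) {
    return Math.exp(alpha * (oldLandmark - newLandmark));
  }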
Example #7
  /**
   * Returns a {@link RetryAfterException} if the {@code retryAfter} parameter corresponds to a
   * known format.
   *
   * @see <a href="https://tools.ietf.org/html/rfc2616#section-14.37">Retry-After format</a>
   */
  public Optional<RetryAfterException> tryCreateRetryAfterException(
      Throwable in, String retryAfter) {
    checkNotNull(in, "throwable");
    checkNotNull(retryAfter, "retryAfter");

    if (retryAfter.matches("^[0-9]+$")) {
      return Optional.of(new RetryAfterException(in, Integer.parseInt(retryAfter)));
    }
    try {
      long retryTimeMillis = dateCodec.toDate(retryAfter).getTime();
      long currentTimeMillis = NANOSECONDS.toMillis(ticker.read());
      return Optional.of(
          new RetryAfterException(
              in, (int) MILLISECONDS.toSeconds(retryTimeMillis - currentTimeMillis)));
    } catch (IllegalArgumentException e) {
      // ignored
    }
    return Optional.absent();
  }
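
RFC 2616 allows Retry-After to be either delta-seconds or an HTTP-date, which is why the method tries a numeric match first and falls back to the date codec. A hedged usage sketch (cause and the header values are illustrative):

  // delta-seconds form: retry after 120 seconds
  Optional<RetryAfterException> bySeconds = tryCreateRetryAfterException(cause, "120");

  // HTTP-date form: an absolute time, converted to a relative delay via the ticker
  Optional<RetryAfterException> byDate =
      tryCreateRetryAfterException(cause, "Fri, 31 Dec 1999 23:59:59 GMT");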
Example #8
  @Override
  public Double doSample() {
    T newSample = inputAccessor.get();
    long newTimestamp = ticker.read();

    double rate = 0;
    if (!samples.isEmpty()) {
      Pair<Long, Double> oldestSample = samples.peekLast();

      double dy = newSample.doubleValue() - oldestSample.getSecond();
      double dt = newTimestamp - oldestSample.getFirst();
      rate = dt == 0 ? 0 : (NANOS_PER_SEC * scaleFactor * dy) / dt;
    }

    if (samples.remainingCapacity() == 0) {
      samples.removeLast();
    }
    samples.addFirst(Pair.of(newTimestamp, newSample.doubleValue()));

    return rate;
  }
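
Since both timestamps come from ticker.read(), dt is in nanoseconds; multiplying by NANOS_PER_SEC converts the slope dy/dt into a per-second rate. A worked sketch with assumed values:

  // Assume scaleFactor = 1, oldest sample (0 ns, 0.0), new sample (2e9 ns, 10.0):
  double dy = 10.0;                 // the counter grew by 10
  double dt = 2_000_000_000.0;      // 2 seconds, expressed in nanoseconds
  double rate = (1_000_000_000.0 * dy) / dt;  // = 5.0 units per second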
Example #9
 /** Returns the (decayed) number of elements added to this quantile digest. */
 public double getCount() {
   return weightedCount / weight(TimeUnit.NANOSECONDS.toSeconds(ticker.read()));
 }
Example #10
 private long tickerCurrentMillis() {
   // Ticker.read() returns nanoseconds; convert so the value matches the method name
   return TimeUnit.NANOSECONDS.toMillis(ticker.read());
 }
Example #11
 private long getTickInSeconds() {
   return TimeUnit.NANOSECONDS.toSeconds(ticker.read());
 }