Example #1
  public void merge(QuantileDigest other) {
    // 1. rescale both digests to a common decay landmark so their weights are comparable
    rescaleToCommonLandmark(this, other);

    // 2. merge other into this (don't modify other)
    root = merge(root, other.root);

    max = Math.max(max, other.max);
    min = Math.min(min, other.min);

    // 3. compress to remove unnecessary nodes
    compress();
  }
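A minimal usage sketch, assuming this is io.airlift.stats.QuantileDigest (whose constructor takes a maximum rank error); merge folds the other digest's samples into this one without mutating the argument:

  import io.airlift.stats.QuantileDigest;

  QuantileDigest left = new QuantileDigest(0.01);   // 1% max rank error
  QuantileDigest right = new QuantileDigest(0.01);
  for (long i = 0; i < 1000; i++) {
    left.add(i);
    right.add(i + 1000);
  }
  left.merge(right);                     // left now covers both streams; right is unchanged
  long median = left.getQuantile(0.5);   // ~1000 for the merged range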
Example #2
  // purchase a product and return relevant product recommendations
  public List<Product> purchase(Customer customer, Product product) {
    // record the purchase for this customer
    purchasesCache.put(customer.id, product.id);

    // products frequently bought together live in a per-category association graph
    ProductAssociativityGraph graph = productAssociativityGraphMap.get(product.category);
    Vertex v = Vertex.create(product.id);
    List<Vertex> associations = graph.getProductAssociations(v);

    // cap the number of recommendations returned
    int recommendSize = Math.min(associations.size(), maxNumRecommendations);
    return associations
        .stream()
        .map(vertex -> productCache.get(vertex.productId))
        .limit(recommendSize)
        .collect(Collectors.toList());
  }
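A hypothetical call site; Customer, Product, and the enclosing recommendationService are illustrative stand-ins for surrounding classes the excerpt does not show:

  Customer customer = new Customer("c-42");
  Product headphones = new Product("p-7", "electronics");
  List<Product> recommendations = recommendationService.purchase(customer, headphones);
  recommendations.forEach(p -> System.out.println(p.id));   // at most maxNumRecommendations items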
Example #3
  public long getMax() {
    final AtomicLong chosen = new AtomicLong(max);
    // scan nodes from the largest values down; stop at the first node whose
    // decayed weight is still significant
    postOrderTraversal(
        root,
        new Callback() {
          @Override
          public boolean process(Node node) {
            if (node.weightedCount >= ZERO_WEIGHT_THRESHOLD) {
              chosen.set(node.getUpperBound());
              return false;
            }
            return true;
          }
        },
        TraversalOrder.REVERSE);

    // never report more than the largest value actually observed
    return Math.min(max, chosen.get());
  }
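A sketch of the observable behavior, again assuming io.airlift.stats.QuantileDigest: the reverse traversal finds the highest bucket whose decayed weight is still significant, clamped by the true observed maximum:

  QuantileDigest digest = new QuantileDigest(0.01);
  digest.add(5);
  digest.add(42);
  long observedMax = digest.getMax();   // 42, until that sample's weight decays to ~0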
Example #4
  /** Adds {@code count} occurrences of a value to this digest. The value must be {@code >= 0} */
  public void add(long value, long count) {
    checkArgument(count > 0, "count must be > 0");

    long nowInSeconds = TimeUnit.NANOSECONDS.toSeconds(ticker.read());

    int maxExpectedNodeCount = 3 * calculateCompressionFactor();
    if (nowInSeconds - landmarkInSeconds >= RESCALE_THRESHOLD_SECONDS) {
      rescale(nowInSeconds);
      compress(); // need to compress to get rid of nodes that may have decayed to ~ 0
    } else if (nonZeroNodeCount > MAX_SIZE_FACTOR * maxExpectedNodeCount && compressAutomatically) {
      // The size (number of non-zero nodes) of the digest is at most 3 * compression factor
      // If we're over MAX_SIZE_FACTOR of the expected size, compress
      // Note: we don't compress as soon as we go over maxExpectedNodeCount to avoid unnecessarily
      // running a compression for every newly added element when we're close to the boundary
      compress();
    }

    double weight = weight(TimeUnit.NANOSECONDS.toSeconds(ticker.read())) * count;

    max = Math.max(max, value);
    min = Math.min(min, value);

    insert(longToBits(value), weight);
  }
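A weighted-insert sketch (same airlift assumption as above): count multiplies the decayed weight, so one call stands in for count identical observations:

  QuantileDigest digest = new QuantileDigest(0.01);
  digest.add(100, 3);                // three observations of the value 100
  double total = digest.getCount();  // ~3.0 while little decay has occurred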
Example #5
 /**
  * @param bucket list of sstables, ordered from newest to oldest by getMinTimestamp().
  * @param maxThreshold maximum number of sstables in a single compaction task.
  * @return A bucket trimmed to the <code>maxThreshold</code> newest sstables.
  */
 @VisibleForTesting
 static List<SSTableReader> trimToThreshold(List<SSTableReader> bucket, int maxThreshold) {
   // Trim the oldest sstables off the end to meet the maxThreshold
   return bucket.subList(0, Math.min(bucket.size(), maxThreshold));
 }
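SSTableReader is hard to construct outside Cassandra, so here is the same bounded-prefix idiom on plain strings: the subList view keeps the first maxThreshold (newest) elements and tolerates buckets smaller than the threshold:

  List<String> bucket = List.of("newest", "newer", "older", "oldest");
  List<String> trimmed = bucket.subList(0, Math.min(bucket.size(), 2));
  System.out.println(trimmed);   // [newest, newer]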