  @Override
  public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
    assert owningBucketOrdinal == 0;
    List<InternalHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
    for (long i = 0; i < bucketOrds.size(); i++) {
      buckets.add(
          histogramFactory.createBucket(
              rounding.valueForKey(bucketOrds.get(i)),
              bucketDocCount(i),
              bucketAggregations(i),
              keyed,
              formatter));
    }

    // the contract of the histogram aggregation is that shards must return buckets ordered by key
    // in ascending order
    CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator());

    // value source will be null for unmapped fields
    InternalHistogram.EmptyBucketInfo emptyBucketInfo =
        minDocCount == 0
            ? new InternalHistogram.EmptyBucketInfo(
                rounding, buildEmptySubAggregations(), extendedBounds)
            : null;
    return histogramFactory.create(
        name,
        buckets,
        order,
        minDocCount,
        emptyBucketInfo,
        formatter,
        keyed,
        pipelineAggregators(),
        metaData());
  }
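
  /*
   * Illustrative sketch, not part of the aggregator: one plausible way an
   * interval-based Rounding could map values to bucket keys and back. The
   * floor-division math below is an assumption for illustration only; it is
   * meant to show why buckets sorted by key ascending are also ordered by the
   * values they represent, which is the contract relied on above.
   */
  static long roundKeySketch(long value, long interval) {
    return Math.floorDiv(value, interval); // key shared by all values in the same bucket
  }

  static long valueForKeySketch(long key, long interval) {
    return key * interval; // lower bound of the bucket, monotonic in the key
  }
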
  /** Replay the wrapped collector, but only on a selection of buckets. */
  @Override
  public void prepareSelectedBuckets(long... selectedBuckets) throws IOException {
    if (!finished) {
      throw new IllegalStateException(
          "Cannot replay yet, collection is not finished: postCollect() has not been called");
    }
    if (this.selectedBuckets != null) {
      throw new IllegalStateException("Already been replayed");
    }

    final LongHash hash = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE);
    for (long bucket : selectedBuckets) {
      hash.add(bucket);
    }
    this.selectedBuckets = hash;

    collector.preCollection();
    boolean needsScores = collector.needsScores();
    Weight weight = null;
    if (needsScores) {
      weight =
          aggContext
              .searchContext()
              .searcher()
              .createNormalizedWeight(aggContext.searchContext().query(), true);
    }
    for (Entry entry : entries) {
      final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context);
      DocIdSetIterator docIt = null;
      if (needsScores && entry.docDeltas.size() > 0) {
        Scorer scorer = weight.scorer(entry.context);
        // We don't need to check if the scorer is null
        // since we are sure that there are documents to replay (entry.docDeltas is not empty).
        docIt = scorer.iterator();
        leafCollector.setScorer(scorer);
      }
      final PackedLongValues.Iterator docDeltaIterator = entry.docDeltas.iterator();
      final PackedLongValues.Iterator buckets = entry.buckets.iterator();
      int doc = 0;
      for (long i = 0, end = entry.docDeltas.size(); i < end; ++i) {
        doc += docDeltaIterator.next();
        final long bucket = buckets.next();
        final long rebasedBucket = hash.find(bucket);
        if (rebasedBucket != -1) {
          if (needsScores) {
            if (docIt.docID() < doc) {
              docIt.advance(doc);
            }
            // aggregations should only be replayed on matching documents
            assert docIt.docID() == doc;
          }
          leafCollector.collect(doc, rebasedBucket);
        }
      }
    }

    collector.postCollection();
  }
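
  /*
   * Minimal sketch of the replay bookkeeping above, under simplifying assumptions:
   * plain arrays stand in for PackedLongValues and a HashMap stands in for LongHash.
   * It shows how delta-encoded doc ids are decoded back into absolute ids and how
   * bucket ordinals are rebased onto dense ordinals of the selected buckets.
   */
  static void replaySketch(long[] docDeltas, long[] bucketOrds, long[] selectedBuckets) {
    java.util.Map<Long, Long> rebased = new java.util.HashMap<>();
    for (long bucket : selectedBuckets) {
      rebased.putIfAbsent(bucket, (long) rebased.size()); // dense ordinals 0..n-1, like LongHash
    }
    int doc = 0;
    for (int i = 0; i < docDeltas.length; i++) {
      doc += docDeltas[i]; // deltas accumulate back into absolute doc ids
      Long rebasedBucket = rebased.get(bucketOrds[i]);
      if (rebasedBucket != null) { // only documents in selected buckets are replayed
        System.out.println("collect doc " + doc + " into bucket " + rebasedBucket);
      }
    }
  }
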
  @Override
  public void collect(int doc, long owningBucketOrdinal) throws IOException {
    final int numOrds = globalOrdinals.setDocument(doc);
    for (int i = 0; i < numOrds; i++) {
      final long globalOrd = globalOrdinals.nextOrd();
      long bucketOrd = bucketOrds.add(globalOrd);
      if (bucketOrd < 0) { // already seen: add() encodes the existing ordinal as -1 - ordinal
        bucketOrd = -1 - bucketOrd;
        collectExistingBucket(doc, bucketOrd);
      } else {
        collectBucket(doc, bucketOrd);
      }
    }
  }
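
  /*
   * Sketch of the add() contract assumed above (emulated with a HashMap; the real
   * structure is a LongHash): a first-time key gets a fresh dense ordinal >= 0, a
   * repeated key gets -1 - existingOrdinal, so the caller can distinguish a new
   * bucket from an existing one without a second lookup.
   */
  static long addSketch(java.util.Map<Long, Long> ords, long key) {
    Long existing = ords.get(key);
    if (existing != null) {
      return -1 - existing; // already present: encode the existing ordinal
    }
    long ord = ords.size(); // next dense ordinal
    ords.put(key, ord);
    return ord;
  }
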
  @Override
  protected long getBucketOrd(long termOrd) {
    return bucketOrds.find(termOrd);
  }
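
  /*
   * Companion sketch for getBucketOrd, under the same HashMap-for-LongHash assumption:
   * find() returns the ordinal previously assigned by add(), or -1 if the term was
   * never collected on this shard.
   */
  static long findSketch(java.util.Map<Long, Long> ords, long key) {
    Long ord = ords.get(key);
    return ord == null ? -1 : ord;
  }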