@Override
      public int advance(int target) throws IOException {
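        // Every collected parent has already been emitted; signal exhaustion.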
        if (remaining == 0) {
          return currentDocId = DocIdSetIterator.NO_MORE_DOCS;
        }

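        // Jump the parent iterator to the first parent doc at or beyond the target.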
        currentDocId = parentsIterator.advance(target);
        if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) {
          return currentDocId;
        }

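        // Look up this parent's id among the ids collected from the child query;
        // a miss means none of its children matched, so scan forward instead.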
        bytesValues.setDocument(currentDocId);
        long index = parentIds.find(bytesValues.nextValue(), bytesValues.currentValueHash());
        if (index != -1) {
          currentScore = scores.get(index);
          remaining--;
          return currentDocId;
        } else {
          return nextDoc();
        }
      }
 @Override
 public void collect(int doc) throws IOException {
   if (values != null) {
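     // Resolve the ordinal of this child doc's parent id.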
     long ord = ordinals.getOrd(doc);
     long parentIdx = parentIdsIndex.get(ord);
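     // A negative index means this ordinal has not been encountered yet.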
     if (parentIdx < 0) {
       final BytesRef bytes = values.getValueByOrd(ord);
       final int hash = values.currentValueHash();
       parentIdx = parentIds.add(bytes, hash);
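       // BytesRefHash.add returns -(id + 1) when the key is already present.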
       if (parentIdx < 0) {
         parentIdx = -parentIdx - 1;
         doScore(parentIdx);
       } else {
         scores = bigArrays.grow(scores, parentIdx + 1);
         scores.set(parentIdx, scorer.score());
       }
       parentIdsIndex.set(ord, parentIdx);
     } else {
       doScore(parentIdx);
     }
   }
 }
  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
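    // Run the child query eagerly to collect the matching parent ids and their scores.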
    SearchContext searchContext = SearchContext.current();
    assert rewrittenChildQuery != null;
    assert rewriteIndexReader == searcher.getIndexReader()
        : "not equal, rewriteIndexReader="
            + rewriteIndexReader
            + " searcher.getIndexReader()="
            + searcher.getIndexReader();
    final Query childQuery = rewrittenChildQuery;

    IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
    indexSearcher.setSimilarity(searcher.getSimilarity());

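    // One collector per score mode; only AVG needs per-parent occurrence counts.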
    final BytesRefHash parentIds;
    final FloatArray scores;
    final IntArray occurrences;
    switch (scoreType) {
      case MAX:
        MaxCollector maxCollector =
            new MaxCollector(parentChildIndexFieldData, parentType, searchContext);
        try {
          indexSearcher.search(childQuery, maxCollector);
          parentIds = maxCollector.parentIds;
          scores = maxCollector.scores;
          occurrences = null;
        } finally {
          Releasables.release(maxCollector.parentIdsIndex);
        }
        break;
      case SUM:
        SumCollector sumCollector =
            new SumCollector(parentChildIndexFieldData, parentType, searchContext);
        try {
          indexSearcher.search(childQuery, sumCollector);
          parentIds = sumCollector.parentIds;
          scores = sumCollector.scores;
          occurrences = null;
        } finally {
          Releasables.release(sumCollector.parentIdsIndex);
        }
        break;
      case AVG:
        AvgCollector avgCollector =
            new AvgCollector(parentChildIndexFieldData, parentType, searchContext);
        try {
          indexSearcher.search(childQuery, avgCollector);
          parentIds = avgCollector.parentIds;
          scores = avgCollector.scores;
          occurrences = avgCollector.occurrences;
        } finally {
          Releasables.release(avgCollector.parentIdsIndex);
        }
        break;
      default:
        throw new IllegalStateException("unsupported score type [" + scoreType + "]");
    }

    int size = (int) parentIds.size();
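    // No parent ids were collected: release the buffers and match nothing.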
    if (size == 0) {
      Releasables.release(parentIds, scores, occurrences);
      return Queries.newMatchNoDocsQuery().createWeight(searcher);
    }

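    // Choose the cheapest parent filter for the number of matched parents:
    // a single term filter, an explicit id set, or the generic parent filter.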
    final Filter parentFilter;
    if (size == 1) {
      BytesRef id = parentIds.get(0, new BytesRef());
      if (nonNestedDocsFilter != null) {
        List<Filter> filters =
            Arrays.asList(
                new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))),
                nonNestedDocsFilter);
        parentFilter = new AndFilter(filters);
      } else {
        parentFilter =
            new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));
      }
    } else if (size <= shortCircuitParentDocSet) {
      parentFilter = new ParentIdsFilter(parentType, nonNestedDocsFilter, parentIds);
    } else {
      parentFilter = new ApplyAcceptedDocsFilter(this.parentFilter);
    }
    ParentWeight parentWeight =
        new ParentWeight(
            rewrittenChildQuery.createWeight(searcher),
            parentFilter,
            size,
            parentIds,
            scores,
            occurrences);
    searchContext.addReleasable(parentWeight);
    return parentWeight;
  }
  @Override
  public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
    assert owningBucketOrdinal == 0;

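    // With min_doc_count == 0, terms without matching docs still need buckets,
    // unless sorting by descending count has already produced enough buckets.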
    if (bucketCountThresholds.getMinDocCount() == 0
        && (order != InternalOrder.COUNT_DESC
            || bucketOrds.size() < bucketCountThresholds.getRequiredSize())) {
      // we need to fill-in the blanks
      for (LeafReaderContext ctx : context.searcher().getTopReaderContext().leaves()) {
        final SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
        // brute force
        for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
          values.setDocument(docId);
          final int valueCount = values.count();
          for (int i = 0; i < valueCount; ++i) {
            final BytesRef term = values.valueAt(i);
            if (includeExclude == null || includeExclude.accept(term)) {
              bucketOrds.add(term);
            }
          }
        }
      }
    }

    final int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());

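    // Track the total doc count so the share of non-surviving buckets can be
    // reported; the counts of the top buckets are subtracted back out below.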
    long otherDocCount = 0;
    BucketPriorityQueue<StringTerms.Bucket> ordered =
        new BucketPriorityQueue<>(size, order.comparator(this));
    StringTerms.Bucket spare = null;
    for (int i = 0; i < bucketOrds.size(); i++) {
      if (spare == null) {
        spare = new StringTerms.Bucket(new BytesRef(), 0, null, showTermDocCountError, 0, format);
      }
      bucketOrds.get(i, spare.termBytes);
      spare.docCount = bucketDocCount(i);
      otherDocCount += spare.docCount;
      spare.bucketOrd = i;
      if (bucketCountThresholds.getShardMinDocCount() <= spare.docCount) {
        spare = ordered.insertWithOverflow(spare);
      }
    }

    // Get the top buckets
    final StringTerms.Bucket[] list = new StringTerms.Bucket[ordered.size()];
    final long[] survivingBucketOrds = new long[ordered.size()];
    for (int i = ordered.size() - 1; i >= 0; --i) {
      final StringTerms.Bucket bucket = ordered.pop();
      survivingBucketOrds[i] = bucket.bucketOrd;
      list[i] = bucket;
      otherDocCount -= bucket.docCount;
    }
    // replay any deferred collections
    runDeferredCollections(survivingBucketOrds);

    // Now build the aggs
    for (int i = 0; i < list.length; i++) {
      final StringTerms.Bucket bucket = list[i];
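      // termBytes still points into the shared bucketOrds storage, so copy it.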
      bucket.termBytes = BytesRef.deepCopyOf(bucket.termBytes);
      bucket.aggregations = bucketAggregations(bucket.bucketOrd);
      bucket.docCountError = 0;
    }

    return new StringTerms(
        name,
        order,
        bucketCountThresholds.getRequiredSize(),
        bucketCountThresholds.getMinDocCount(),
        pipelineAggregators(),
        metaData(),
        format,
        bucketCountThresholds.getShardSize(),
        showTermDocCountError,
        otherDocCount,
        Arrays.asList(list),
        0);
  }