Example No. 1
  @Override
  public InternalTerms reduce(ReduceContext reduceContext) {
    List<InternalAggregation> aggregations = reduceContext.aggregations();
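    // Fast path: only a single shard-level result came back, so there is nothing
    // to merge; just trim any excess buckets and return it as-is.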
    if (aggregations.size() == 1) {
      InternalTerms terms = (InternalTerms) aggregations.get(0);
      terms.trimExcessEntries();
      return terms;
    }
    InternalTerms reduced = null;

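    // Group the buckets coming back from each shard by term so that buckets
    // representing the same term can be merged below.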
    Recycler.V<LongObjectOpenHashMap<List<Bucket>>> buckets = null;
    for (InternalAggregation aggregation : aggregations) {
      InternalTerms terms = (InternalTerms) aggregation;
      if (terms instanceof UnmappedTerms) {
        continue;
      }
      if (reduced == null) {
        reduced = terms;
      }
      if (buckets == null) {
        buckets = reduceContext.cacheRecycler().longObjectMap(terms.buckets.size());
      }
      for (Terms.Bucket bucket : terms.buckets) {
        List<Bucket> existingBuckets = buckets.v().get(((Bucket) bucket).term);
        if (existingBuckets == null) {
          existingBuckets = new ArrayList<Bucket>(aggregations.size());
          buckets.v().put(((Bucket) bucket).term, existingBuckets);
        }
        existingBuckets.add((Bucket) bucket);
      }
    }

    if (reduced == null) {
      // there are only unmapped terms, so we just return the first one (no need to reduce)
      return (UnmappedTerms) aggregations.get(0);
    }

    // TODO: would it be better to sort the backing array buffer of the hppc map directly instead of
    // using a PQ?
    final int size = Math.min(requiredSize, buckets.v().size());
    BucketPriorityQueue ordered = new BucketPriorityQueue(size, order.comparator(null));
    Object[] internalBuckets = buckets.v().values;
    boolean[] states = buckets.v().allocated;
    for (int i = 0; i < states.length; i++) {
      if (states[i]) {
        List<LongTerms.Bucket> sameTermBuckets = (List<LongTerms.Bucket>) internalBuckets[i];
        ordered.insertWithOverflow(
            sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext.cacheRecycler()));
      }
    }
    buckets.release();
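    // The queue pops its least element first, so fill the result array
    // back-to-front to end up with the buckets in the requested order.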
    InternalTerms.Bucket[] list = new InternalTerms.Bucket[ordered.size()];
    for (int i = ordered.size() - 1; i >= 0; i--) {
      list[i] = (Bucket) ordered.pop();
    }
    reduced.buckets = Arrays.asList(list);
    return reduced;
  }

Example No. 2

  @Override
  public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
    assert owningBucketOrdinal == 0;

    if (bucketCountThresholds.getMinDocCount() == 0
        && (order != InternalOrder.COUNT_DESC
            || bucketOrds.size() < bucketCountThresholds.getRequiredSize())) {
      // we need to fill-in the blanks
      for (LeafReaderContext ctx : context.searcher().getTopReaderContext().leaves()) {
        final SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
        // brute force
        for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
          values.setDocument(docId);
          final int valueCount = values.count();
          for (int i = 0; i < valueCount; ++i) {
            final BytesRef term = values.valueAt(i);
            if (includeExclude == null || includeExclude.accept(term)) {
              bucketOrds.add(term);
            }
          }
        }
      }
    }

    final int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());

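    // Select the top buckets with a bounded priority queue; every bucket's doc count
    // is accumulated into otherDocCount first and the winners are subtracted again below.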
    long otherDocCount = 0;
    BucketPriorityQueue<StringTerms.Bucket> ordered =
        new BucketPriorityQueue<>(size, order.comparator(this));
    StringTerms.Bucket spare = null;
    for (int i = 0; i < bucketOrds.size(); i++) {
      if (spare == null) {
        spare = new StringTerms.Bucket(new BytesRef(), 0, null, showTermDocCountError, 0, format);
      }
      bucketOrds.get(i, spare.termBytes);
      spare.docCount = bucketDocCount(i);
      otherDocCount += spare.docCount;
      spare.bucketOrd = i;
      if (bucketCountThresholds.getShardMinDocCount() <= spare.docCount) {
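        // insertWithOverflow returns either the evicted bucket or the rejected
        // spare, which is then reused on the next iteration.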
        spare = ordered.insertWithOverflow(spare);
      }
    }

    // Get the top buckets
    final StringTerms.Bucket[] list = new StringTerms.Bucket[ordered.size()];
    long survivingBucketOrds[] = new long[ordered.size()];
    for (int i = ordered.size() - 1; i >= 0; --i) {
      final StringTerms.Bucket bucket = (StringTerms.Bucket) ordered.pop();
      survivingBucketOrds[i] = bucket.bucketOrd;
      list[i] = bucket;
      otherDocCount -= bucket.docCount;
    }
    // replay any deferred collections
    runDeferredCollections(survivingBucketOrds);

    // Now build the aggs
    for (int i = 0; i < list.length; i++) {
      final StringTerms.Bucket bucket = (StringTerms.Bucket) list[i];
      bucket.termBytes = BytesRef.deepCopyOf(bucket.termBytes);
      bucket.aggregations = bucketAggregations(bucket.bucketOrd);
      bucket.docCountError = 0;
    }

    return new StringTerms(
        name,
        order,
        bucketCountThresholds.getRequiredSize(),
        bucketCountThresholds.getMinDocCount(),
        pipelineAggregators(),
        metaData(),
        format,
        bucketCountThresholds.getShardSize(),
        showTermDocCountError,
        otherDocCount,
        Arrays.asList(list),
        0);
  }

Example No. 3

  @Override
  public InternalAggregation buildAggregation(long owningBucketOrdinal) {
    if (globalOrdinals == null) { // no context in this reader
      return buildEmptyAggregation();
    }

    final int size;
    if (bucketCountThresholds.getMinDocCount() == 0) {
      // if minDocCount == 0 then we can end up with more buckets then maxBucketOrd() returns
      size = (int) Math.min(globalOrdinals.getMaxOrd(), bucketCountThresholds.getShardSize());
    } else {
      size = (int) Math.min(maxBucketOrd(), bucketCountThresholds.getShardSize());
    }
    BucketPriorityQueue ordered = new BucketPriorityQueue(size, order.comparator(this));
    OrdBucket spare = new OrdBucket(-1, 0, null);
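    // Walk every global ordinal, reusing a single spare OrdBucket; the queue keeps
    // only the top buckets according to the requested order.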
    for (long globalTermOrd = Ordinals.MIN_ORDINAL;
        globalTermOrd < globalOrdinals.getMaxOrd();
        ++globalTermOrd) {
      if (includeExclude != null && !acceptedGlobalOrdinals.get(globalTermOrd)) {
        continue;
      }
      final long bucketOrd = getBucketOrd(globalTermOrd);
      final long bucketDocCount = bucketOrd < 0 ? 0 : bucketDocCount(bucketOrd);
      if (bucketCountThresholds.getMinDocCount() > 0 && bucketDocCount == 0) {
        continue;
      }
      spare.globalOrd = globalTermOrd;
      spare.bucketOrd = bucketOrd;
      spare.docCount = bucketDocCount;
      if (bucketCountThresholds.getShardMinDocCount() <= spare.docCount) {
        spare = (OrdBucket) ordered.insertWithOverflow(spare);
        if (spare == null) {
          spare = new OrdBucket(-1, 0, null);
        }
      }
    }

    // Get the top buckets
    final InternalTerms.Bucket[] list = new InternalTerms.Bucket[ordered.size()];
    long survivingBucketOrds[] = new long[ordered.size()];
    for (int i = ordered.size() - 1; i >= 0; --i) {
      final OrdBucket bucket = (OrdBucket) ordered.pop();
      survivingBucketOrds[i] = bucket.bucketOrd;
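      // Resolve the global ordinal back to its term bytes before building the bucket.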
      BytesRef scratch = new BytesRef();
      copy(globalValues.getValueByOrd(bucket.globalOrd), scratch);
      list[i] = new StringTerms.Bucket(scratch, bucket.docCount, null);
      list[i].bucketOrd = bucket.bucketOrd;
    }
    // replay any deferred collections
    runDeferredCollections(survivingBucketOrds);
    // Now build the aggs
    for (int i = 0; i < list.length; i++) {
      Bucket bucket = list[i];
      bucket.aggregations =
          bucket.docCount == 0 ? bucketEmptyAggregations() : bucketAggregations(bucket.bucketOrd);
    }

    return new StringTerms(
        name,
        order,
        bucketCountThresholds.getRequiredSize(),
        bucketCountThresholds.getMinDocCount(),
        Arrays.asList(list));
  }