@Override
  public void preProcess(SearchContext context) {
    if (context.aggregations() != null) {
      AggregationContext aggregationContext = new AggregationContext(context);
      context.aggregations().aggregationContext(aggregationContext);

      List<Aggregator> collectors = new ArrayList<>();
      Aggregator[] aggregators;
      try {
        AggregatorFactories factories = context.aggregations().factories();
        aggregators = factories.createTopLevelAggregators(aggregationContext);
        for (int i = 0; i < aggregators.length; i++) {
          if (aggregators[i] instanceof GlobalAggregator == false) {
            collectors.add(aggregators[i]);
          }
        }
        context.aggregations().aggregators(aggregators);
        if (!collectors.isEmpty()) {
          BucketCollector bucketCollector = BucketCollector.wrap(collectors);
          bucketCollector.preCollection();
          Collector collector = bucketCollector;
          if (context.getProfilers() != null) {
            collector =
                new InternalProfileCollector(
                    collector,
                    CollectorResult.REASON_AGGREGATION,
                    // TODO: report on child aggs as well
                    Collections.emptyList());
          }
          context.queryCollectors().put(AggregationPhase.class, collector);
        }
      } catch (IOException e) {
        throw new AggregationInitializationException("Could not initialize aggregators", e);
      }
    }
  }
  @Override
  public void preProcess(SearchContext context) {
    if (context.aggregations() != null) {
      AggregationContext aggregationContext = new AggregationContext(context);
      context.aggregations().aggregationContext(aggregationContext);

      List<Aggregator> collectors = new ArrayList<>();
      Aggregator[] aggregators;
      try {
        AggregatorFactories factories = context.aggregations().factories();
        aggregators = factories.createTopLevelAggregators(aggregationContext);
        for (int i = 0; i < aggregators.length; i++) {
          if (aggregators[i] instanceof GlobalAggregator == false) {
            collectors.add(aggregators[i]);
          }
        }
        context.aggregations().aggregators(aggregators);
        if (!collectors.isEmpty()) {
          final BucketCollector collector = BucketCollector.wrap(collectors);
          collector.preCollection();
          context.queryCollectors().put(AggregationPhase.class, collector);
        }
      } catch (IOException e) {
        throw new AggregationInitializationException("Could not initialize aggregators", e);
      }
    }
  }
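Both variants wrap the non-global aggregators into a single collector and register it under AggregationPhase.class so the query phase can run aggregations alongside normal hit collection; the first variant additionally wraps the result in an InternalProfileCollector when profiling is enabled. For readers outside the Elasticsearch codebase, here is a minimal sketch of what wrapping several collectors into one amounts to, written against the plain Lucene 6.x Collector API. It illustrates the idea only and is not the actual BucketCollector.wrap implementation, which additionally participates in the preCollection()/postCollection() lifecycle seen above:

import java.io.IOException;
import java.util.List;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorer;

/** Simplified composite collector: forwards every matching doc to each delegate. */
final class CompositeCollector implements Collector {
  private final List<Collector> delegates;

  CompositeCollector(List<Collector> delegates) {
    this.delegates = delegates;
  }

  @Override
  public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
    // Open one leaf collector per delegate for this segment.
    final LeafCollector[] leaves = new LeafCollector[delegates.size()];
    for (int i = 0; i < leaves.length; i++) {
      leaves[i] = delegates.get(i).getLeafCollector(context);
    }
    return new LeafCollector() {
      @Override
      public void setScorer(Scorer scorer) throws IOException {
        for (LeafCollector leaf : leaves) {
          leaf.setScorer(scorer);
        }
      }

      @Override
      public void collect(int doc) throws IOException {
        for (LeafCollector leaf : leaves) {
          leaf.collect(doc);
        }
      }
    };
  }

  @Override
  public boolean needsScores() {
    // Scores are needed if any delegate needs them.
    for (Collector delegate : delegates) {
      if (delegate.needsScores()) {
        return true;
      }
    }
    return false;
  }
}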
 /**
  * Constructs a new Aggregator.
  *
  * @param name The name of the aggregation
  * @param bucketAggregationMode The nature of execution as a sub-aggregator (see {@link
  *     BucketAggregationMode})
  * @param factories The factories for all the sub-aggregators under this aggregator
  * @param estimatedBucketsCount When this aggregator serves as a sub-aggregator, an estimate of
  *     how many buckets the parent aggregator will generate.
  * @param context The aggregation context
  * @param parent The parent aggregator (may be {@code null} for top level aggregators)
  */
 protected Aggregator(
     String name,
     BucketAggregationMode bucketAggregationMode,
     AggregatorFactories factories,
     long estimatedBucketsCount,
     AggregationContext context,
     Aggregator parent) {
   this.name = name;
   this.parent = parent;
   this.estimatedBucketCount = estimatedBucketsCount;
   this.context = context;
   this.bigArrays = context.bigArrays();
   this.depth = parent == null ? 0 : 1 + parent.depth();
   this.bucketAggregationMode = bucketAggregationMode;
    assert factories != null
        : "sub-factories provided to BucketAggregator must not be null, use AggregatorFactories.EMPTY instead";
   this.factories = factories;
   this.subAggregators = factories.createSubAggregators(this, estimatedBucketsCount);
    collectableSubAggregators =
        BucketCollector.wrap(
            Iterables.filter(Arrays.asList(subAggregators), COLLECTABLE_AGGREGATOR));
   context.searchContext().addReleasable(this, Lifetime.PHASE);
 }
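The Guava Iterables.filter call above relies on a COLLECTABLE_AGGREGATOR predicate whose definition is outside this excerpt. A sketch of its assumed shape, keying off this era's Aggregator#shouldCollect() hook (an assumption; the real constant lives elsewhere in Aggregator):

import com.google.common.base.Predicate;

// Assumed shape: keep only sub-aggregators that actually want to see documents.
private static final Predicate<Aggregator> COLLECTABLE_AGGREGATOR =
    new Predicate<Aggregator>() {
      @Override
      public boolean apply(Aggregator aggregator) {
        return aggregator.shouldCollect();
      }
    };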
  /**
   * Generic test that serializes the test pipeline aggregator to XContent, parses a new
   * aggregator back from that content, and asserts that the two instances are equal (including
   * their hash codes) without being the same object.
   */
 public void testFromXContent() throws IOException {
   AF testAgg = createTestAggregatorFactory();
   AggregatorFactories.Builder factoriesBuilder =
       AggregatorFactories.builder().skipResolveOrder().addPipelineAggregator(testAgg);
   logger.info("Content string: {}", factoriesBuilder);
   XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
   if (randomBoolean()) {
     builder.prettyPrint();
   }
   factoriesBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
   XContentBuilder shuffled = shuffleXContent(builder);
   XContentParser parser =
       XContentFactory.xContent(shuffled.bytes()).createParser(shuffled.bytes());
   QueryParseContext parseContext =
       new QueryParseContext(queriesRegistry, parser, parseFieldMatcher);
   String contentString = factoriesBuilder.toString();
   logger.info("Content string: {}", contentString);
   assertSame(XContentParser.Token.START_OBJECT, parser.nextToken());
   assertSame(XContentParser.Token.FIELD_NAME, parser.nextToken());
   assertEquals(testAgg.getName(), parser.currentName());
   assertSame(XContentParser.Token.START_OBJECT, parser.nextToken());
   assertSame(XContentParser.Token.FIELD_NAME, parser.nextToken());
   assertEquals(testAgg.type(), parser.currentName());
   assertSame(XContentParser.Token.START_OBJECT, parser.nextToken());
   PipelineAggregationBuilder newAgg =
       aggParsers
           .pipelineParser(testAgg.getWriteableName(), ParseFieldMatcher.STRICT)
           .parse(testAgg.getName(), parseContext);
   assertSame(XContentParser.Token.END_OBJECT, parser.currentToken());
   assertSame(XContentParser.Token.END_OBJECT, parser.nextToken());
   assertSame(XContentParser.Token.END_OBJECT, parser.nextToken());
   assertNull(parser.nextToken());
   assertNotNull(newAgg);
   assertNotSame(newAgg, testAgg);
   assertEquals(testAgg, newAgg);
   assertEquals(testAgg.hashCode(), newAgg.hashCode());
 }
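Concrete test classes drive this generic test through createTestAggregatorFactory(). A hypothetical override using the avg_bucket pipeline aggregation as an example (the builder class exists in this era of Elasticsearch, but the name and buckets path below are illustrative only):

@Override
protected AvgBucketPipelineAggregationBuilder createTestAggregatorFactory() {
  // Illustrative values; real tests randomize these.
  return new AvgBucketPipelineAggregationBuilder("test_avg_bucket", "histo>the_metric");
}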
  public void testResetRootDocId() throws Exception {
    Directory directory = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, iwc);

    List<Document> documents = new ArrayList<>();

    // 1 segment with 1 root document and 3 nested sub docs
    Document document = new Document();
    document.add(
        new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
    document.add(
        new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    document = new Document();
    document.add(
        new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
    document.add(
        new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    document = new Document();
    document.add(
        new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
    document.add(
        new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    document = new Document();
    document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.FIELD_TYPE));
    document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    indexWriter.addDocuments(documents);
    indexWriter.commit();

    documents.clear();
    // 1 segment with:
    // 1 document with 1 nested subdoc
    document = new Document();
    document.add(
        new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
    document.add(
        new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    document = new Document();
    document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.FIELD_TYPE));
    document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    indexWriter.addDocuments(documents);
    documents.clear();
    // and 1 document with 1 nested subdoc
    document = new Document();
    document.add(
        new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
    document.add(
        new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    document = new Document();
    document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.FIELD_TYPE));
    document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    indexWriter.addDocuments(documents);

    indexWriter.commit();
    indexWriter.close();

    IndexService indexService = createIndex("test");
    DirectoryReader directoryReader = DirectoryReader.open(directory);
    directoryReader =
        ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexService.index(), 0));
    IndexSearcher searcher = new IndexSearcher(directoryReader);

    indexService
        .mapperService()
        .merge(
            "test",
            new CompressedXContent(
                PutMappingRequest.buildFromSimplifiedDef("test", "nested_field", "type=nested")
                    .string()),
            MapperService.MergeReason.MAPPING_UPDATE,
            false);
    SearchContext searchContext = createSearchContext(indexService);
    AggregationContext context = new AggregationContext(searchContext);

    AggregatorFactories.Builder builder = AggregatorFactories.builder();
    NestedAggregatorBuilder factory = new NestedAggregatorBuilder("test", "nested_field");
    builder.addAggregator(factory);
    AggregatorFactories factories = builder.build(context, null);
    searchContext.aggregations(new SearchContextAggregations(factories));
    Aggregator[] aggs = factories.createTopLevelAggregators();
    BucketCollector collector = BucketCollector.wrap(Arrays.asList(aggs));
    collector.preCollection();
    // A regular search always excludes nested docs, so we use Queries.newNonNestedFilter() here
    // (otherwise MatchAllDocsQuery would be sufficient).
    // We exclude the root doc with uid type#2; this triggers the bug if the root doc isn't reset
    // when we process a new segment, because root doc type#3 and root doc type#1 have the same
    // segment docid.
    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(Queries.newNonNestedFilter(), Occur.MUST);
    bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), Occur.MUST_NOT);
    searcher.search(new ConstantScoreQuery(bq.build()), collector);
    collector.postCollection();

    Nested nested = (Nested) aggs[0].buildAggregation(0);
    // The bug manifests as 6 docs being returned: when currentRootDoc isn't reset, the previous
    // child docs from the first segment are emitted as hits.
    assertThat(nested.getDocCount(), equalTo(4L));

    directoryReader.close();
    directory.close();
  }
 /** Validates the state of this factory (makes sure the factory is properly configured) */
 public final void validate() {
   doValidate();
   factories.validate();
 }
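doValidate() is the subclass hook; the base implementation is typically a no-op. A hedged sketch of an override that fails fast on a misconfigured factory (the requiredSize and shardSize fields are hypothetical):

@Override
protected void doValidate() {
  // Hypothetical fields: a shard-level size smaller than the final size
  // could never satisfy the reduce phase, so reject it up front.
  if (shardSize < requiredSize) {
    throw new IllegalArgumentException(
        "[shardSize] must be >= [requiredSize], found [" + shardSize + "] < [" + requiredSize + "]");
  }
}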