/**
   * If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of
   * the provided engine searcher gets wrapped and a new {@link Engine.Searcher} instance is
   * returned; otherwise the provided {@link Engine.Searcher} is returned as-is.
   *
   * <p>This is invoked each time an {@link Engine.Searcher} is requested to perform an operation
   * (for example, a search).
   */
  public final Engine.Searcher wrap(EngineConfig engineConfig, Engine.Searcher engineSearcher)
      throws IOException {
    final ElasticsearchDirectoryReader elasticsearchDirectoryReader =
        ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(
            engineSearcher.getDirectoryReader());
    if (elasticsearchDirectoryReader == null) {
      throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
    }
    NonClosingReaderWrapper nonClosingReaderWrapper =
        new NonClosingReaderWrapper(engineSearcher.getDirectoryReader());
    DirectoryReader reader = wrap(nonClosingReaderWrapper);
    if (reader != nonClosingReaderWrapper) {
      if (reader.getCoreCacheKey() != elasticsearchDirectoryReader.getCoreCacheKey()) {
        throw new IllegalStateException(
            "wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey, wrappers must override this method and delegate"
                + " to the original reader's core cache key. Wrapped readers can't be used as cache keys since they are only used per request, which would lead to subtle bugs");
      }
      if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader)
          != elasticsearchDirectoryReader) {
        // prevent somebody from wrapping with a non-filter reader
        throw new IllegalStateException(
            "wrapped directory reader hides actual ElasticsearchDirectoryReader but shouldn't");
      }
    }

    final IndexSearcher innerIndexSearcher = new IndexSearcher(reader);
    innerIndexSearcher.setQueryCache(engineConfig.getQueryCache());
    innerIndexSearcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
    innerIndexSearcher.setSimilarity(engineConfig.getSimilarity());
    // TODO: Right now IndexSearcher isn't wrapper friendly; when it becomes wrapper friendly we
    // should revise this extension point.
    // For example, if IndexSearcher#rewrite() is overridden, then
    // IndexSearcher#createNormalizedWeight also needs to be overridden.
    // This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped
    // multiple times.
    final IndexSearcher indexSearcher = wrap(engineConfig, innerIndexSearcher);
    if (reader == nonClosingReaderWrapper && indexSearcher == innerIndexSearcher) {
      return engineSearcher;
    } else {
      return new Engine.Searcher(engineSearcher.source(), indexSearcher) {
        @Override
        public void close() throws ElasticsearchException {
          try {
            // we close the reader to make sure wrappers can release resources if needed;
            // our NonClosingReaderWrapper makes sure that the underlying reader is not closed
            reader().close();
          } catch (IOException e) {
            throw new ElasticsearchException("failed to close reader", e);
          } finally {
            engineSearcher.close();
          }
        }
      };
    }
  }
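  // A minimal sketch (hypothetical class, not part of the original source) of a
  // wrapper that satisfies both sanity checks enforced in wrap(...) above: it
  // extends Lucene's FilterDirectoryReader, so the ElasticsearchDirectoryReader
  // stays reachable through unwrapping, and it delegates getCoreCacheKey to the
  // wrapped reader so per-segment caches keep working. Assumes
  // org.apache.lucene.index.FilterDirectoryReader and LeafReader are imported.
  static final class ExampleFilterDirectoryReader extends FilterDirectoryReader {

    ExampleFilterDirectoryReader(DirectoryReader in) throws IOException {
      super(
          in,
          new SubReaderWrapper() {
            @Override
            public LeafReader wrap(LeafReader reader) {
              return reader; // a real wrapper would decorate the leaf reader here
            }
          });
    }

    @Override
    protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
      return new ExampleFilterDirectoryReader(in);
    }

    @Override
    public Object getCoreCacheKey() {
      // delegate, as the IllegalStateException in wrap(...) demands
      return in.getCoreCacheKey();
    }
  }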
  private SearcherManager createSearcherManager() throws EngineException {
    boolean success = false;
    SearcherManager searcherManager = null;
    try {
      try {
        final DirectoryReader directoryReader =
            ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
        searcherManager = new SearcherManager(directoryReader, searcherFactory);
        lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
        success = true;
        return searcherManager;
      } catch (IOException e) {
        maybeFailEngine("start", e);
        try {
          indexWriter.rollback();
        } catch (IOException e1) { // iw is closed below
          e.addSuppressed(e1);
        }
        throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e);
      }
    } finally {
      if (success == false) { // release everything we created on a failure
        IOUtils.closeWhileHandlingException(searcherManager, indexWriter);
      }
    }
  }
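  // Hedged usage sketch (not in the original source): the SearcherManager
  // created above hands out ref-counted searchers, and every acquire() must be
  // paired with a release(). Lucene's SearcherManager and IndexSearcher APIs are
  // used as-is; the method name is illustrative, and MatchAllDocsQuery is
  // assumed to be imported.
  private void exampleAcquireAndSearch(SearcherManager searcherManager) throws IOException {
    IndexSearcher searcher = searcherManager.acquire();
    try {
      // queries run against a point-in-time view of the index
      searcher.search(new MatchAllDocsQuery(), 10);
    } finally {
      // release, never close, a searcher obtained from the manager
      searcherManager.release(searcher);
    }
  }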
  public void testResetRootDocId() throws Exception {
    Directory directory = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, iwc);

    List<Document> documents = new ArrayList<>();

    // 1 segment with 1 root document that has 3 nested sub docs
    Document document;
    for (int i = 0; i < 3; i++) {
      document = new Document();
      document.add(
          new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
      document.add(
          new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
      documents.add(document);
    }
    // the root document of the block comes last
    document = new Document();
    document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.FIELD_TYPE));
    document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    indexWriter.addDocuments(documents);
    indexWriter.commit();

    documents.clear();
    // 1 segment with:
    // 1 document, with 1 nested subdoc
    document = new Document();
    document.add(
        new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
    document.add(
        new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    document = new Document();
    document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.FIELD_TYPE));
    document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    indexWriter.addDocuments(documents);
    documents.clear();
    // and 1 document, with 1 nested subdoc
    document = new Document();
    document.add(
        new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
    document.add(
        new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    document = new Document();
    document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.FIELD_TYPE));
    document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
    documents.add(document);
    indexWriter.addDocuments(documents);

    indexWriter.commit();
    indexWriter.close();

    IndexService indexService = createIndex("test");
    DirectoryReader directoryReader = DirectoryReader.open(directory);
    directoryReader =
        ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexService.index(), 0));
    IndexSearcher searcher = new IndexSearcher(directoryReader);

    indexService
        .mapperService()
        .merge(
            "test",
            new CompressedXContent(
                PutMappingRequest.buildFromSimplifiedDef("test", "nested_field", "type=nested")
                    .string()),
            MapperService.MergeReason.MAPPING_UPDATE,
            false);
    SearchContext searchContext = createSearchContext(indexService);
    AggregationContext context = new AggregationContext(searchContext);

    AggregatorFactories.Builder builder = AggregatorFactories.builder();
    NestedAggregatorBuilder factory = new NestedAggregatorBuilder("test", "nested_field");
    builder.addAggregator(factory);
    AggregatorFactories factories = builder.build(context, null);
    searchContext.aggregations(new SearchContextAggregations(factories));
    Aggregator[] aggs = factories.createTopLevelAggregators();
    BucketCollector collector = BucketCollector.wrap(Arrays.asList(aggs));
    collector.preCollection();
    // A regular search always excludes nested docs, so we use Queries.newNonNestedFilter() here
    // (otherwise MatchAllDocsQuery would be sufficient).
    // We exclude the root doc with uid type#2; this triggers the bug if we don't reset the root
    // doc when we process a new segment, because root doc type#3 and root doc type#1 have the
    // same per-segment docid.
    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(Queries.newNonNestedFilter(), Occur.MUST);
    bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), Occur.MUST_NOT);
    searcher.search(new ConstantScoreQuery(bq.build()), collector);
    collector.postCollection();

    Nested nested = (Nested) aggs[0].buildAggregation(0);
    // The bug manifests as 6 docs being returned: when currentRootDoc isn't reset, the previous
    // child docs from the first segment are also emitted as hits.
    assertThat(nested.getDocCount(), equalTo(4L));

    directoryReader.close();
    directory.close();
  }