  /**
   * Continues a scan: processes the scroll, runs the query and fetch phases, and frees the
   * context once the scan is exhausted. On the first scroll request the context is switched
   * back from QUERY_THEN_FETCH to SCAN; the initial executeScan(ShardSearchRequest) call left
   * it in QUERY_THEN_FETCH so that the query phase would only count matches.
   */
  public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) {
    final SearchContext context = findContext(request.id());
    contextProcessing(context);
    try {
      processScroll(request, context);
      if (context.searchType() == SearchType.QUERY_THEN_FETCH) {
        // first scroll request of the scan: move back to SCAN and reset from to 0
        context.searchType(SearchType.SCAN);
        context.from(0);
      }
      queryPhase.execute(context);
      shortcutDocIdsToLoadForScanning(context);
      fetchPhase.execute(context);
      if (context.scroll() == null
          || context.fetchResult().hits().hits().length < context.size()) {
        // no scroll, or fewer hits than requested: the scan is exhausted, free the context
        freeContext(request.id());
      } else {
        contextProcessedSuccessfully(context);
      }
      return new ScrollQueryFetchSearchResult(
          new QueryFetchSearchResult(context.queryResult(), context.fetchResult()),
          context.shardTarget());
    } catch (Throwable e) {
      logger.trace("Scan phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }
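  /**
   * Runs the initial query phase of a scan request. The context is temporarily switched to
   * QUERY_THEN_FETCH with size 0 so that the query phase only counts matches; the first
   * subsequent scroll request (handled above) switches it back to SCAN.
   */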
  public QuerySearchResult executeScan(ShardSearchRequest request) {
    final SearchContext context = createAndPutContext(request);
    final int originalSize = context.size();
    try {
      if (context.aggregations() != null) {
        throw new IllegalArgumentException("aggregations are not supported with search_type=scan");
      }

      if (context.scroll() == null) {
        throw new ElasticsearchException("Scroll must be provided when scanning...");
      }

      assert context.searchType() == SearchType.SCAN;
      // move to QUERY_THEN_FETCH, and then, when scrolling, move back to SCAN
      context.searchType(SearchType.QUERY_THEN_FETCH);
      context.size(0); // set size to 0 so that we only count matches
      assert context.searchType() == SearchType.QUERY_THEN_FETCH;

      contextProcessing(context);
      queryPhase.execute(context);
      contextProcessedSuccessfully(context);
      return context.queryResult();
    } catch (Throwable e) {
      logger.trace("Scan phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      context.size(originalSize);
      cleanContext(context);
    }
  }
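  /**
   * Wires together the search phases and their supporting services: registers an
   * index-lifecycle listener that frees all pending search contexts when an index is deleted,
   * merges the parse elements of the dfs, query, and fetch phases into a single parser map,
   * schedules the keep-alive Reaper that frees idle contexts, and registers the norms,
   * field-data, and search warmers.
   */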
  @Inject
  public SearchService(
      Settings settings,
      ClusterService clusterService,
      IndicesService indicesService,
      IndicesWarmer indicesWarmer,
      ThreadPool threadPool,
      ScriptService scriptService,
      PageCacheRecycler pageCacheRecycler,
      BigArrays bigArrays,
      DfsPhase dfsPhase,
      QueryPhase queryPhase,
      FetchPhase fetchPhase,
      IndicesQueryCache indicesQueryCache) {
    super(settings);
    this.threadPool = threadPool;
    this.clusterService = clusterService;
    this.indicesService = indicesService;
    indicesService
        .indicesLifecycle()
        .addListener(
            new IndicesLifecycle.Listener() {

              @Override
              public void afterIndexDeleted(Index index, @IndexSettings Settings indexSettings) {
                // once an index is deleted we can clean up all of its pending search context
                // information to release memory and let references to the filesystem go, etc.
                freeAllContextForIndex(index);
              }
            });
    this.indicesWarmer = indicesWarmer;
    this.scriptService = scriptService;
    this.pageCacheRecycler = pageCacheRecycler;
    this.bigArrays = bigArrays;
    this.dfsPhase = dfsPhase;
    this.queryPhase = queryPhase;
    this.fetchPhase = fetchPhase;
    this.indicesQueryCache = indicesQueryCache;

    TimeValue keepAliveInterval = settings.getAsTime(KEEPALIVE_INTERVAL_KEY, timeValueMinutes(1));
    // a default of 5 minutes is safe here, since contexts are also cleaned up on each search
    // request and when a shard or index closes
    this.defaultKeepAlive = settings.getAsTime(DEFAULT_KEEPALIVE_KEY, timeValueMinutes(5)).millis();

    Map<String, SearchParseElement> elementParsers = new HashMap<>();
    elementParsers.putAll(dfsPhase.parseElements());
    elementParsers.putAll(queryPhase.parseElements());
    elementParsers.putAll(fetchPhase.parseElements());
    elementParsers.put("stats", new StatsGroupsParseElement());
    this.elementParsers = ImmutableMap.copyOf(elementParsers);

    this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval);

    this.indicesWarmer.addListener(new NormsWarmer());
    this.indicesWarmer.addListener(new FieldDataWarmer());
    this.indicesWarmer.addListener(new SearchWarmer());
  }
 /**
  * Try to load the query results from the cache or execute the query phase directly if the cache
  * cannot be used.
  */
 private void loadOrExecuteQueryPhase(
     final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase)
     throws Exception {
   final boolean canCache = indicesQueryCache.canCache(request, context);
   if (canCache) {
     indicesQueryCache.loadIntoContext(request, context, queryPhase);
   } else {
     queryPhase.execute(context);
   }
 }
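 /**
  * Executes the query and fetch phases in a single pass, using the aggregated document
  * frequencies (dfs) carried by the request: the dfs result is installed on the searcher as a
  * CachedDfSource before the query phase runs, and shard-level search stats are recorded
  * around each phase. The context is freed once fetching completes, unless a scroll is in
  * progress.
  */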
 public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) {
   final SearchContext context = findContext(request.id());
   contextProcessing(context);
   try {
     final IndexCache indexCache = context.indexShard().indexService().cache();
     context
         .searcher()
         .dfSource(
             new CachedDfSource(
                 context.searcher().getIndexReader(),
                 request.dfs(),
                 context.similarityService().similarity(),
                 indexCache.filter(),
                 indexCache.filterPolicy()));
   } catch (Throwable e) {
     freeContext(context.id());
     cleanContext(context);
     throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
   }
   try {
     ShardSearchStats shardSearchStats = context.indexShard().searchService();
     shardSearchStats.onPreQueryPhase(context);
     long time = System.nanoTime();
     try {
       queryPhase.execute(context);
     } catch (Throwable e) {
       shardSearchStats.onFailedQueryPhase(context);
       throw ExceptionsHelper.convertToRuntime(e);
     }
     long time2 = System.nanoTime();
     shardSearchStats.onQueryPhase(context, time2 - time);
     shardSearchStats.onPreFetchPhase(context);
     try {
       shortcutDocIdsToLoad(context);
       fetchPhase.execute(context);
       if (context.scroll() == null) {
         freeContext(request.id());
       } else {
         contextProcessedSuccessfully(context);
       }
     } catch (Throwable e) {
       shardSearchStats.onFailedFetchPhase(context);
       throw ExceptionsHelper.convertToRuntime(e);
     }
     shardSearchStats.onFetchPhase(context, System.nanoTime() - time2);
     return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
   } catch (Throwable e) {
     logger.trace("Fetch phase failed", e);
     processFailure(context, e);
     throw ExceptionsHelper.convertToRuntime(e);
   } finally {
     cleanContext(context);
   }
 }
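 /**
  * Scroll variant of the combined query-and-fetch execution: processes the scroll parameters
  * before running the query and fetch phases, and frees the context when no scroll is left to
  * continue.
  */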
 public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) {
   final SearchContext context = findContext(request.id());
   contextProcessing(context);
   try {
     ShardSearchStats shardSearchStats = context.indexShard().searchService();
     processScroll(request, context);
     shardSearchStats.onPreQueryPhase(context);
     long time = System.nanoTime();
     try {
       queryPhase.execute(context);
     } catch (Throwable e) {
       shardSearchStats.onFailedQueryPhase(context);
       throw ExceptionsHelper.convertToRuntime(e);
     }
     long time2 = System.nanoTime();
     shardSearchStats.onQueryPhase(context, time2 - time);
     shardSearchStats.onPreFetchPhase(context);
     try {
       shortcutDocIdsToLoad(context);
       fetchPhase.execute(context);
       if (context.scroll() == null) {
         freeContext(request.id());
       } else {
         contextProcessedSuccessfully(context);
       }
     } catch (Throwable e) {
       shardSearchStats.onFailedFetchPhase(context);
       throw ExceptionsHelper.convertToRuntime(e);
     }
     shardSearchStats.onFetchPhase(context, System.nanoTime() - time2);
     return new ScrollQueryFetchSearchResult(
         new QueryFetchSearchResult(context.queryResult(), context.fetchResult()),
         context.shardTarget());
   } catch (Throwable e) {
     logger.trace("Fetch phase failed", e);
     processFailure(context, e);
     throw ExceptionsHelper.convertToRuntime(e);
   } finally {
     cleanContext(context);
   }
 }
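 /**
  * Executes the query phase using the aggregated document frequencies (dfs) carried by the
  * request, as done for DFS_QUERY_THEN_FETCH. If the query matches no documents and no scroll
  * is requested, the context is freed immediately since no fetch phase will follow.
  */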
 public QuerySearchResult executeQueryPhase(QuerySearchRequest request) {
   final SearchContext context = findContext(request.id());
   contextProcessing(context);
   try {
     final IndexCache indexCache = context.indexShard().indexService().cache();
     context
         .searcher()
         .dfSource(
             new CachedDfSource(
                 context.searcher().getIndexReader(),
                 request.dfs(),
                 context.similarityService().similarity(),
                 indexCache.filter(),
                 indexCache.filterPolicy()));
   } catch (Throwable e) {
     processFailure(context, e);
     cleanContext(context);
     throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
   }
   ShardSearchStats shardSearchStats = context.indexShard().searchService();
   try {
     shardSearchStats.onPreQueryPhase(context);
     long time = System.nanoTime();
     queryPhase.execute(context);
     if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scroll() == null) {
       // no hits, we can release the context since there will be no fetch phase
       freeContext(context.id());
     } else {
       contextProcessedSuccessfully(context);
     }
     shardSearchStats.onQueryPhase(context, System.nanoTime() - time);
     return context.queryResult();
   } catch (Throwable e) {
     shardSearchStats.onFailedQueryPhase(context);
     logger.trace("Query phase failed", e);
     processFailure(context, e);
     throw ExceptionsHelper.convertToRuntime(e);
   } finally {
     cleanContext(context);
   }
 }
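 /**
  * Executes the query phase for a scroll continuation: looks up the existing context,
  * processes the scroll parameters, and returns the shard's query result.
  */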
 public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) {
   final SearchContext context = findContext(request.id());
   ShardSearchStats shardSearchStats = context.indexShard().searchService();
   try {
     shardSearchStats.onPreQueryPhase(context);
     long time = System.nanoTime();
     contextProcessing(context);
     processScroll(request, context);
     queryPhase.execute(context);
     contextProcessedSuccessfully(context);
     shardSearchStats.onQueryPhase(context, System.nanoTime() - time);
     return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget());
   } catch (Throwable e) {
     shardSearchStats.onFailedQueryPhase(context);
     logger.trace("Query phase failed", e);
     processFailure(context, e);
     throw ExceptionsHelper.convertToRuntime(e);
   } finally {
     cleanContext(context);
   }
 }
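    /**
     * Executes the query phase and caches the serialized shard result. This is the cache
     * loader used by the request cache; it runs only when no cached entry exists for the key
     * (see loadOrExecuteQueryPhase, which goes through indicesQueryCache.loadIntoContext).
     */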
    @Override
    public Value call() throws Exception {
      queryPhase.execute(context);

      /* BytesStreamOutput allows us to pass the expected size, but by default it uses
       * BigArrays.PAGE_SIZE_IN_BYTES, which is 16k. A common cached result, e.g. a date
       * histogram with 3 buckets, is ~100 bytes, so 16k would be very wasteful, since we
       * don't shrink to the actual size once we are done serializing. By passing 512 as
       * the expected size we grow the byte array in the stream gradually until we hit the
       * page size, and don't waste too much memory for small query results. */
      final int expectedSizeInBytes = 512;
      try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) {
        context.queryResult().writeToNoId(out);
        // for now, keep the paged data structure, which might have unused bytes filling out a
        // page; it is better to keep the memory properly paged than to hold variable-sized
        // byte arrays
        final BytesReference reference = out.bytes();
        loaded = true;
        Value value = new Value(reference, out.ramBytesUsed());
        key.shard.requestCache().onCached(key, value);
        return value;
      }
    }
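  /**
   * Creates and initializes a SearchContext for the request: acquires an engine searcher
   * (unless one is supplied), parses the template and the source, applies the default from and
   * size, rewrites search_type=COUNT to QUERY_THEN_FETCH with size 0, pre-processes the dfs,
   * query, and fetch phases, and computes the keep-alive. The context is closed if
   * initialization fails.
   */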
  final SearchContext createContext(
      ShardSearchRequest request, @Nullable Engine.Searcher searcher) {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(request.shardId());

    SearchShardTarget shardTarget =
        new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());

    Engine.Searcher engineSearcher =
        searcher == null ? indexShard.acquireSearcher("search") : searcher;
    SearchContext context =
        new DefaultSearchContext(
            idGenerator.incrementAndGet(),
            request,
            shardTarget,
            engineSearcher,
            indexService,
            indexShard,
            scriptService,
            pageCacheRecycler,
            bigArrays,
            threadPool.estimatedTimeInMillisCounter());
    SearchContext.setCurrent(context);
    try {
      context.scroll(request.scroll());

      parseTemplate(request);
      parseSource(context, request.source());
      parseSource(context, request.extraSource());

      // if the from and size are still not set, default them
      if (context.from() == -1) {
        context.from(0);
      }
      if (context.searchType() == SearchType.COUNT) {
        // so that the optimizations we apply to size=0 also apply to search_type=COUNT
        // and that we close contexts when done with the query phase
        context.searchType(SearchType.QUERY_THEN_FETCH);
        context.size(0);
      } else if (context.size() == -1) {
        context.size(10);
      }

      // pre process
      dfsPhase.preProcess(context);
      queryPhase.preProcess(context);
      fetchPhase.preProcess(context);

      // compute the context keep alive
      long keepAlive = defaultKeepAlive;
      if (request.scroll() != null && request.scroll().keepAlive() != null) {
        keepAlive = request.scroll().keepAlive().millis();
      }
      context.keepAlive(keepAlive);
    } catch (Throwable e) {
      context.close();
      throw ExceptionsHelper.convertToRuntime(e);
    }

    return context;
  }