  public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) {
    final SearchContext context = findContext(request.id());
    contextProcessing(context);
    try {
      processScroll(request, context);
      if (context.searchType() == SearchType.QUERY_THEN_FETCH) {
        // first scroll request after the initial scan: flip back to SCAN and reset from to 0
        context.searchType(SearchType.SCAN);
        context.from(0);
      }
      queryPhase.execute(context);
      shortcutDocIdsToLoadForScanning(context);
      fetchPhase.execute(context);
      if (context.scroll() == null || context.fetchResult().hits().hits().length < context.size()) {
        freeContext(request.id());
      } else {
        contextProcessedSuccessfully(context);
      }
      return new ScrollQueryFetchSearchResult(
          new QueryFetchSearchResult(context.queryResult(), context.fetchResult()),
          context.shardTarget());
    } catch (Throwable e) {
      logger.trace("Scan phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }
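
  // shortcutDocIdsToLoadForScanning (called above) is not part of this excerpt. A minimal
  // sketch of what it plausibly does, assuming Lucene's TopDocs/ScoreDoc and a
  // SearchContext.docIdsToLoad(int[], int, int) setter: scan ignores scoring, so every doc id
  // collected by the query phase is handed to the fetch phase as-is.
  private void shortcutDocIdsToLoadForScanning(SearchContext context) {
    TopDocs topDocs = context.queryResult().topDocs();
    if (topDocs.scoreDocs.length == 0) {
      // no hits collected: nothing for the fetch phase to load
      context.docIdsToLoad(new int[0], 0, 0);
      return;
    }
    int[] docIdsToLoad = new int[topDocs.scoreDocs.length];
    for (int i = 0; i < docIdsToLoad.length; i++) {
      docIdsToLoad[i] = topDocs.scoreDocs[i].doc;
    }
    context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
  }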
  public QuerySearchResult executeScan(ShardSearchRequest request) {
    final SearchContext context = createAndPutContext(request);
    final int originalSize = context.size();
    try {
      if (context.aggregations() != null) {
        throw new IllegalArgumentException("aggregations are not supported with search_type=scan");
      }

      if (context.scroll() == null) {
        throw new ElasticsearchException("Scroll must be provided when scanning...");
      }

      assert context.searchType() == SearchType.SCAN;
      // move to QUERY_THEN_FETCH, and then, when scrolling, move to SCAN
      context.searchType(SearchType.QUERY_THEN_FETCH);
      context.size(0); // set size to 0 so that we only count matches
      assert context.searchType() == SearchType.QUERY_THEN_FETCH;

      contextProcessing(context);
      queryPhase.execute(context);
      contextProcessedSuccessfully(context);
      return context.queryResult();
    } catch (Throwable e) {
      logger.trace("Scan phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      context.size(originalSize);
      cleanContext(context);
    }
  }
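
  // Taken together, the two executeScan overloads implement the scan lifecycle: the initial
  // request runs the query phase as QUERY_THEN_FETCH with size 0 (a pure match count, no docs
  // fetched) and keeps the scroll context alive, while each subsequent scroll round trip flips
  // the context back to SCAN, fetches up to size docs, and frees the context once a page comes
  // back smaller than size.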
 /**
  * Try to load the query results from the cache or execute the query phase directly if the cache
  * cannot be used.
  */
 private void loadOrExecuteQueryPhase(
     final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase)
     throws Exception {
   final boolean canCache = indicesQueryCache.canCache(request, context);
   if (canCache) {
     indicesQueryCache.loadIntoContext(request, context, queryPhase);
   } else {
     queryPhase.execute(context);
   }
 }
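
  // indicesQueryCache.loadIntoContext is not shown in this excerpt. A hedged sketch of the
  // interplay (method and field names assumed): the cache derives a key from the shard and
  // request and computes the entry through a Loader whose call() method, shown at the end of
  // this section, executes the query phase and serializes the result; on a hit the cached
  // bytes are read straight back into the context instead, along the lines of:
  //
  //   Value value = cache.get(key, loader);
  //   if (!loader.isLoaded()) { // hit: the loader never ran, restore from the cached bytes
  //     context.queryResult().readFromWithId(context.id(), value.reference().streamInput());
  //     context.queryResult().shardTarget(context.shardTarget());
  //   }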
 public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) {
   final SearchContext context = findContext(request.id());
   contextProcessing(context);
   try {
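      // request.dfs() carries the term and document frequencies aggregated across shards in
      // the DFS phase; installing them as the searcher's dfSource makes scoring use global
      // statistics rather than this shard's local ones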
     final IndexCache indexCache = context.indexShard().indexService().cache();
     context
         .searcher()
         .dfSource(
             new CachedDfSource(
                 context.searcher().getIndexReader(),
                 request.dfs(),
                 context.similarityService().similarity(),
                 indexCache.filter(),
                 indexCache.filterPolicy()));
   } catch (Throwable e) {
     freeContext(context.id());
     cleanContext(context);
     throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
   }
   try {
     ShardSearchStats shardSearchStats = context.indexShard().searchService();
     shardSearchStats.onPreQueryPhase(context);
     long time = System.nanoTime();
     try {
       queryPhase.execute(context);
     } catch (Throwable e) {
       shardSearchStats.onFailedQueryPhase(context);
       throw ExceptionsHelper.convertToRuntime(e);
     }
     long time2 = System.nanoTime();
     shardSearchStats.onQueryPhase(context, time2 - time);
     shardSearchStats.onPreFetchPhase(context);
     try {
       shortcutDocIdsToLoad(context);
       fetchPhase.execute(context);
       if (context.scroll() == null) {
         freeContext(request.id());
       } else {
         contextProcessedSuccessfully(context);
       }
     } catch (Throwable e) {
       shardSearchStats.onFailedFetchPhase(context);
       throw ExceptionsHelper.convertToRuntime(e);
     }
     shardSearchStats.onFetchPhase(context, System.nanoTime() - time2);
     return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
   } catch (Throwable e) {
     logger.trace("Fetch phase failed", e);
     processFailure(context, e);
     throw ExceptionsHelper.convertToRuntime(e);
   } finally {
     cleanContext(context);
   }
 }
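
  // shortcutDocIdsToLoad (called above) is likewise not shown here. A minimal sketch of the
  // idea, under the same assumptions as the scanning variant: when query and fetch run back to
  // back on the same shard, the doc ids to fetch can be taken directly from the query phase's
  // top docs, honoring from/size, instead of being shipped back by the coordinating node.
  private void shortcutDocIdsToLoad(SearchContext context) {
    TopDocs topDocs = context.queryResult().topDocs();
    if (topDocs.scoreDocs.length < context.from()) {
      // the requested page starts beyond the collected hits; nothing to fetch
      context.docIdsToLoad(new int[0], 0, 0);
      return;
    }
    int totalSize = Math.min(topDocs.scoreDocs.length, context.from() + context.size());
    int[] docIdsToLoad = new int[totalSize - context.from()];
    for (int i = context.from(); i < totalSize; i++) {
      docIdsToLoad[i - context.from()] = topDocs.scoreDocs[i].doc;
    }
    context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
  }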
 public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) {
   final SearchContext context = findContext(request.id());
   contextProcessing(context);
   try {
     ShardSearchStats shardSearchStats = context.indexShard().searchService();
     processScroll(request, context);
     shardSearchStats.onPreQueryPhase(context);
     long time = System.nanoTime();
     try {
       queryPhase.execute(context);
     } catch (Throwable e) {
       shardSearchStats.onFailedQueryPhase(context);
       throw ExceptionsHelper.convertToRuntime(e);
     }
     long time2 = System.nanoTime();
     shardSearchStats.onQueryPhase(context, time2 - time);
     shardSearchStats.onPreFetchPhase(context);
     try {
       shortcutDocIdsToLoad(context);
       fetchPhase.execute(context);
       if (context.scroll() == null) {
         freeContext(request.id());
       } else {
         contextProcessedSuccessfully(context);
       }
     } catch (Throwable e) {
       shardSearchStats.onFailedFetchPhase(context);
       throw ExceptionsHelper.convertToRuntime(e);
     }
     shardSearchStats.onFetchPhase(context, System.nanoTime() - time2);
     return new ScrollQueryFetchSearchResult(
         new QueryFetchSearchResult(context.queryResult(), context.fetchResult()),
         context.shardTarget());
   } catch (Throwable e) {
     logger.trace("Fetch phase failed", e);
     processFailure(context, e);
     throw ExceptionsHelper.convertToRuntime(e);
   } finally {
     cleanContext(context);
   }
 }
 public QuerySearchResult executeQueryPhase(QuerySearchRequest request) {
   final SearchContext context = findContext(request.id());
   contextProcessing(context);
   try {
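      // same aggregated-df setup as in executeFetchPhase(QuerySearchRequest) above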
     final IndexCache indexCache = context.indexShard().indexService().cache();
     context
         .searcher()
         .dfSource(
             new CachedDfSource(
                 context.searcher().getIndexReader(),
                 request.dfs(),
                 context.similarityService().similarity(),
                 indexCache.filter(),
                 indexCache.filterPolicy()));
   } catch (Throwable e) {
     processFailure(context, e);
     cleanContext(context);
     throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
   }
   ShardSearchStats shardSearchStats = context.indexShard().searchService();
   try {
     shardSearchStats.onPreQueryPhase(context);
     long time = System.nanoTime();
     queryPhase.execute(context);
     if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scroll() == null) {
       // no hits, we can release the context since there will be no fetch phase
       freeContext(context.id());
     } else {
       contextProcessedSuccessfully(context);
     }
     shardSearchStats.onQueryPhase(context, System.nanoTime() - time);
     return context.queryResult();
   } catch (Throwable e) {
     shardSearchStats.onFailedQueryPhase(context);
     logger.trace("Query phase failed", e);
     processFailure(context, e);
     throw ExceptionsHelper.convertToRuntime(e);
   } finally {
     cleanContext(context);
   }
 }
 public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) {
   final SearchContext context = findContext(request.id());
   ShardSearchStats shardSearchStats = context.indexShard().searchService();
   try {
     shardSearchStats.onPreQueryPhase(context);
     long time = System.nanoTime();
     contextProcessing(context);
     processScroll(request, context);
     queryPhase.execute(context);
     contextProcessedSuccessfully(context);
     shardSearchStats.onQueryPhase(context, System.nanoTime() - time);
     return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget());
   } catch (Throwable e) {
     shardSearchStats.onFailedQueryPhase(context);
     logger.trace("Query phase failed", e);
     processFailure(context, e);
     throw ExceptionsHelper.convertToRuntime(e);
   } finally {
     cleanContext(context);
   }
 }
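
  // processScroll (used by the scroll variants above) is not part of this excerpt. A plausible
  // sketch (assumed, not the verbatim implementation): each round trip advances the window by
  // one page and refreshes the keep-alive so the context survives until the next request.
  private void processScroll(InternalScrollSearchRequest request, SearchContext context) {
    // advance the window: the next page starts where the previous one ended
    context.from(context.from() + context.size());
    context.scroll(request.scroll());
    // update the context keep-alive based on the new scroll value
    if (request.scroll() != null && request.scroll().keepAlive() != null) {
      context.keepAlive(request.scroll().keepAlive().millis());
    }
  }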
    @Override
    public Value call() throws Exception {
      queryPhase.execute(context);

       /* BytesStreamOutput allows passing the expected size, but by default it uses
        * BigArrays.PAGE_SIZE_IN_BYTES, which is 16k. A common cached result, e.g.
        * a date histogram with 3 buckets, is ~100 bytes, so 16k would be very wasteful
        * since we don't shrink to the actual size once we are done serializing.
        * By passing 512 as the expected size we will resize the byte array in the stream
        * slowly until we hit the page size and don't waste too much memory for small query
        * results. */
      final int expectedSizeInBytes = 512;
      try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) {
        context.queryResult().writeToNoId(out);
         // for now, keep the paged data structure, which might have unused bytes to fill a
         // page; better to keep the memory properly paged than to hold variable-sized bytes
        final BytesReference reference = out.bytes();
        loaded = true;
        Value value = new Value(reference, out.ramBytesUsed());
        key.shard.requestCache().onCached(key, value);
        return value;
      }
    }
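
     // The second Value argument records the serialized size so that a size-bounded cache can
     // use it as the entry's weight. A hedged sketch of how such a cache might be configured
     // (Guava-style builder and Value.ramBytesUsed() accessor assumed; not from this excerpt):
     //
     //   CacheBuilder.newBuilder()
     //       .maximumWeight(maxWeightInBytes)
     //       .weigher((Key key, Value value) ->
     //           (int) Math.min(value.ramBytesUsed(), Integer.MAX_VALUE))
     //       .build();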