/**
 * Loads the cache result, computing it if needed by executing the query phase and otherwise
 * deserializing the cached value into the {@link SearchContext#queryResult() context's query
 * result}. Combining load and compute into a single operation means that other requests with the
 * same key wait until the value is loaded and then reuse the same cache entry.
 */
 public void loadIntoContext(
     final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase)
     throws Exception {
   assert canCache(request, context);
   Key key = buildKey(request, context);
   Loader loader = new Loader(queryPhase, context, key);
   Value value = cache.get(key, loader);
   if (loader.isLoaded()) {
     key.shard.requestCache().onMiss();
      // check whether this is the first time we have seen this reader, and if so register a
      // cleanup key
     CleanupKey cleanupKey =
         new CleanupKey(
             context.indexShard(),
             ((DirectoryReader) context.searcher().getIndexReader()).getVersion());
     if (!registeredClosedListeners.containsKey(cleanupKey)) {
       Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE);
       if (previous == null) {
         context.searcher().getIndexReader().addReaderClosedListener(cleanupKey);
       }
     }
   } else {
     key.shard.requestCache().onHit();
     // restore the cached query result into the context
     final QuerySearchResult result = context.queryResult();
     result.readFromWithId(context.id(), value.reference.streamInput());
     result.shardTarget(context.shardTarget());
   }
 }
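  // A minimal, self-contained sketch of the "single load, many waiters" behavior described in the
  // Javadoc above, assuming compute-if-absent semantics similar to cache.get(key, loader). The
  // class and member names below are illustrative, not part of the surrounding code; it needs
  // java.util.concurrent.{Callable, ConcurrentHashMap, ConcurrentMap, FutureTask}.
  public static class SingleFlightCache<K, V> {
    private final ConcurrentMap<K, FutureTask<V>> cache = new ConcurrentHashMap<>();

    public V get(K key, Callable<V> loader) throws Exception {
      FutureTask<V> task = cache.get(key);
      if (task == null) {
        FutureTask<V> created = new FutureTask<>(loader);
        task = cache.putIfAbsent(key, created);
        if (task == null) {
          task = created;
          task.run(); // only the thread that wins the putIfAbsent race computes the value
        }
      }
      return task.get(); // other callers with the same key block here and reuse the same result
    }
  }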
 protected ShardFetchSearchRequest createFetchRequest(
     QuerySearchResult queryResult,
     AtomicArray.Entry<IntArrayList> entry,
     ScoreDoc[] lastEmittedDocPerShard) {
   final ScoreDoc lastEmittedDoc =
       (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[entry.index] : null;
   return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc);
 }
  private void executeFetchPhase() {
    sortedShardList = searchPhaseController.sortDocs(queryResults);
    AtomicArray<ExtTIntArrayList> docIdsToLoad =
        new AtomicArray<ExtTIntArrayList>(queryResults.length());
    searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

    if (docIdsToLoad.asList().isEmpty()) {
      finishHim();
      return;
    }

    final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());

    for (final AtomicArray.Entry<ExtTIntArrayList> entry : docIdsToLoad.asList()) {
      ExtTIntArrayList docIds = entry.value;
      final QuerySearchResult querySearchResult = queryResults.get(entry.index);
      FetchSearchRequest fetchSearchRequest =
          new FetchSearchRequest(request, querySearchResult.id(), docIds);
      DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
      searchService.sendExecuteFetch(
          node,
          fetchSearchRequest,
          new SearchServiceListener<FetchSearchResult>() {
            @Override
            public void onResult(FetchSearchResult result) {
              result.shardTarget(querySearchResult.shardTarget());
              fetchResults.set(entry.index, result);
              if (counter.decrementAndGet() == 0) {
                finishHim();
              }
            }

            @Override
            public void onFailure(Throwable t) {
              if (logger.isDebugEnabled()) {
                logger.debug("Failed to execute fetch phase", t);
              }
              successfulOps.decrementAndGet();
              if (counter.decrementAndGet() == 0) {
                finishHim();
              }
            }
          });
    }
  }
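  // A minimal sketch of the countdown pattern used in executeFetchPhase above: fan out one async
  // task per shard and run the completion step exactly once, when the last callback (success or
  // failure) drops the counter to zero. All names are illustrative; it needs
  // java.util.concurrent.CompletableFuture and java.util.concurrent.atomic.AtomicInteger.
  public static void countdownSketch(int shards, Runnable finishHim) {
    final AtomicInteger counter = new AtomicInteger(shards);
    for (int i = 0; i < shards; i++) {
      CompletableFuture.runAsync(() -> { /* per-shard fetch work */ })
          .whenComplete((ignored, failure) -> {
            // decrement on success and failure alike, otherwise the phase would hang
            if (counter.decrementAndGet() == 0) {
              finishHim.run();
            }
          });
    }
  }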
 public PercolateShardResponse(
     BytesRef[] matches,
     List<Map<String, HighlightField>> hls,
     long count,
     float[] scores,
     PercolateContext context,
     ShardId shardId) {
   super(shardId);
   this.matches = matches;
   this.hls = hls;
   this.count = count;
   this.scores = scores;
   this.percolatorTypeId = context.percolatorTypeId;
   this.requestedSize = context.size();
   QuerySearchResult result = context.queryResult();
   if (result != null) {
     if (result.aggregations() != null) {
       this.aggregations = (InternalAggregations) result.aggregations();
     }
     this.pipelineAggregators = result.pipelineAggregators();
   }
 }
  /** Releases search contexts on shards that contributed no docs to {@code docIdsToLoad}. */
 protected void releaseIrrelevantSearchContexts(
     AtomicArray<? extends QuerySearchResultProvider> queryResults,
     AtomicArray<IntArrayList> docIdsToLoad) {
   if (docIdsToLoad == null) {
     return;
   }
    // when not scrolling, release the search contexts on shards that we will not fetch from
    if (request.scroll() == null) {
      for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults.asList()) {
        QuerySearchResult queryResult = entry.value.queryResult();
        // the shard produced hits, but none of them made it into the global top docs
        if (queryResult.hasHits() && docIdsToLoad.get(entry.index) == null) {
         try {
           DiscoveryNode node =
               nodeIdToDiscoveryNode.apply(entry.value.queryResult().shardTarget().nodeId());
           sendReleaseSearchContext(entry.value.queryResult().id(), node);
         } catch (Exception e) {
           logger.trace("failed to release context", e);
         }
       }
     }
   }
 }
 @Override
 protected void processFirstPhaseResult(ShardRouting shard, QuerySearchResult result) {
   queryResults.put(result.shardTarget(), result);
 }
  public InternalSearchResponse merge(
      ScoreDoc[] sortedDocs,
      AtomicArray<? extends QuerySearchResultProvider> queryResultsArr,
      AtomicArray<? extends FetchSearchResultProvider> fetchResultsArr) {

    List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults =
        queryResultsArr.asList();
    List<? extends AtomicArray.Entry<? extends FetchSearchResultProvider>> fetchResults =
        fetchResultsArr.asList();

    if (queryResults.isEmpty()) {
      return InternalSearchResponse.empty();
    }

    QuerySearchResult firstResult = queryResults.get(0).value.queryResult();

    boolean sorted = false;
    int sortScoreIndex = -1;
    if (firstResult.topDocs() instanceof TopFieldDocs) {
      sorted = true;
      TopFieldDocs fieldDocs = (TopFieldDocs) firstResult.topDocs();
      for (int i = 0; i < fieldDocs.fields.length; i++) {
        if (fieldDocs.fields[i].getType() == SortField.Type.SCORE) {
          sortScoreIndex = i;
        }
      }
    }

    // merge facets
    InternalFacets facets = null;
    if (!queryResults.isEmpty()) {
      // we rely on the fact that the order of facets is the same on all query results
      if (firstResult.facets() != null
          && firstResult.facets().facets() != null
          && !firstResult.facets().facets().isEmpty()) {
        List<Facet> aggregatedFacets = Lists.newArrayList();
        List<Facet> namedFacets = Lists.newArrayList();
        for (Facet facet : firstResult.facets()) {
          // collect every shard's facet with this name into one list, then reduce it
          namedFacets.clear();
          for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
            for (Facet facet1 : entry.value.queryResult().facets()) {
              if (facet.getName().equals(facet1.getName())) {
                namedFacets.add(facet1);
              }
            }
          }
          if (!namedFacets.isEmpty()) {
            Facet aggregatedFacet =
                ((InternalFacet) namedFacets.get(0))
                    .reduce(new InternalFacet.ReduceContext(cacheRecycler, namedFacets));
            aggregatedFacets.add(aggregatedFacet);
          }
        }
        facets = new InternalFacets(aggregatedFacets);
      }
    }

    // count the total hits (we use the query result provider here, since we might not get any
    // hits: we could have scrolled past them)
    long totalHits = 0;
    float maxScore = Float.NEGATIVE_INFINITY;
    boolean timedOut = false;
    Boolean terminatedEarly = null;
    for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
      QuerySearchResult result = entry.value.queryResult();
      if (result.searchTimedOut()) {
        timedOut = true;
      }
      if (result.terminatedEarly() != null) {
        if (terminatedEarly == null) {
          terminatedEarly = result.terminatedEarly();
        } else if (result.terminatedEarly()) {
          terminatedEarly = true;
        }
      }
      totalHits += result.topDocs().totalHits;
      if (!Float.isNaN(result.topDocs().getMaxScore())) {
        maxScore = Math.max(maxScore, result.topDocs().getMaxScore());
      }
    }
    if (Float.isInfinite(maxScore)) {
      maxScore = Float.NaN;
    }

    // reset the per-shard fetch counters
    for (AtomicArray.Entry<? extends FetchSearchResultProvider> entry : fetchResults) {
      entry.value.fetchResult().initCounter();
    }

    // merge hits
    List<InternalSearchHit> hits = new ArrayList<>();
    if (!fetchResults.isEmpty()) {
      for (ScoreDoc shardDoc : sortedDocs) {
        FetchSearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
        if (fetchResultProvider == null) {
          continue;
        }
        FetchSearchResult fetchResult = fetchResultProvider.fetchResult();
        int index = fetchResult.counterGetAndIncrement();
        if (index < fetchResult.hits().internalHits().length) {
          InternalSearchHit searchHit = fetchResult.hits().internalHits()[index];
          searchHit.score(shardDoc.score);
          searchHit.shard(fetchResult.shardTarget());

          if (sorted) {
            FieldDoc fieldDoc = (FieldDoc) shardDoc;
            searchHit.sortValues(fieldDoc.fields);
            if (sortScoreIndex != -1) {
              searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
            }
          }

          hits.add(searchHit);
        }
      }
    }

    // merge suggest results
    Suggest suggest = null;
    if (!queryResults.isEmpty()) {
      final Map<String, List<Suggest.Suggestion>> groupedSuggestions = new HashMap<>();
      boolean hasSuggestions = false;
      for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
        Suggest shardResult = entry.value.queryResult().suggest();

        if (shardResult == null) {
          continue;
        }
        hasSuggestions = true;
        Suggest.group(groupedSuggestions, shardResult);
      }

      suggest =
          hasSuggestions
              ? new Suggest(Suggest.Fields.SUGGEST, Suggest.reduce(groupedSuggestions))
              : null;
    }

    // merge aggregations
    InternalAggregations aggregations = null;
    if (!queryResults.isEmpty()) {
      if (firstResult.aggregations() != null && firstResult.aggregations().asList() != null) {
        List<InternalAggregations> aggregationsList = new ArrayList<>(queryResults.size());
        for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
          aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations());
        }
        aggregations =
            InternalAggregations.reduce(
                aggregationsList, new ReduceContext(null, bigArrays, scriptService));
      }
    }

    InternalSearchHits searchHits =
        new InternalSearchHits(
            hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore);

    return new InternalSearchResponse(
        searchHits, facets, aggregations, suggest, timedOut, terminatedEarly);
  }
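  // A small sketch (hypothetical data, plain arrays) of the per-shard cursor trick in the
  // hit-merging loop above: the global sort order is walked once, and counterGetAndIncrement
  // lets each shard's fetched hits be consumed in their local order.
  public static void cursorSketch() {
    int[][] shardHits = {{100, 101}, {200}}; // fetched hit ids, per shard, in local order
    int[] cursors = new int[shardHits.length]; // one counter per shard
    int[] globalOrder = {0, 1, 0}; // shardIndex of each globally sorted ScoreDoc
    for (int shard : globalOrder) {
      int index = cursors[shard]++;
      if (index < shardHits[shard].length) {
        System.out.println("hit " + shardHits[shard][index] + " from shard " + shard);
      }
    }
  }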
  /**
   * @param scrollSort Whether to ignore the {@code from} offset and sort all hits in each shard
   *     result. Only used for scroll searches.
   * @param resultsArr Shard result holder
   */
  public ScoreDoc[] sortDocs(
      boolean scrollSort, AtomicArray<? extends QuerySearchResultProvider> resultsArr)
      throws IOException {
    List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results =
        resultsArr.asList();
    if (results.isEmpty()) {
      return EMPTY_DOCS;
    }

    if (optimizeSingleShard) {
      boolean canOptimize = false;
      QuerySearchResult result = null;
      int shardIndex = -1;
      if (results.size() == 1) {
        canOptimize = true;
        result = results.get(0).value.queryResult();
        shardIndex = results.get(0).index;
      } else {
        // let's see if we only got hits from a single shard; if so, we can optimize...
        for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
          if (entry.value.queryResult().topDocs().scoreDocs.length > 0) {
            if (result != null) { // we already have one, can't really optimize
              canOptimize = false;
              break;
            }
            canOptimize = true;
            result = entry.value.queryResult();
            shardIndex = entry.index;
          }
        }
      }
      if (canOptimize) {
        int offset = result.from();
        if (scrollSort) {
          offset = 0;
        }
        ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
        if (scoreDocs.length == 0 || scoreDocs.length < offset) {
          return EMPTY_DOCS;
        }

        int resultDocsSize = result.size();
        if ((scoreDocs.length - offset) < resultDocsSize) {
          resultDocsSize = scoreDocs.length - offset;
        }
        ScoreDoc[] docs = new ScoreDoc[resultDocsSize];
        for (int i = 0; i < resultDocsSize; i++) {
          ScoreDoc scoreDoc = scoreDocs[offset + i];
          scoreDoc.shardIndex = shardIndex;
          docs[i] = scoreDoc;
        }
        return docs;
      }
    }

    @SuppressWarnings("unchecked")
    AtomicArray.Entry<? extends QuerySearchResultProvider>[] sortedResults =
        results.toArray(new AtomicArray.Entry[results.size()]);
    Arrays.sort(sortedResults, QUERY_RESULT_ORDERING);
    QuerySearchResultProvider firstResult = sortedResults[0].value;

    final Sort sort;
    if (firstResult.queryResult().topDocs() instanceof TopFieldDocs) {
      TopFieldDocs firstTopDocs = (TopFieldDocs) firstResult.queryResult().topDocs();
      sort = new Sort(firstTopDocs.fields);
    } else {
      sort = null;
    }

    int topN = firstResult.queryResult().size();
    // use the length of resultsArr, since the merge slots are keyed by position in resultsArr
    TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()];
    if (firstResult.includeFetch()) {
      // if we did both query and fetch in the same round trip, we have already fetched all the
      // docs from each shard, so use them; this also matters because we shortcut and fetch only
      // docs from "from" up to "size"
      topN *= sortedResults.length;
    }
    for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : sortedResults) {
      TopDocs topDocs = sortedResult.value.queryResult().topDocs();
      // the 'index' field is the position in the resultsArr atomic array
      shardTopDocs[sortedResult.index] = topDocs;
    }
    int from = firstResult.queryResult().from();
    if (scrollSort) {
      from = 0;
    }
    // TopDocs#merge can't deal with null shard TopDocs
    for (int i = 0; i < shardTopDocs.length; i++) {
      if (shardTopDocs[i] == null) {
        shardTopDocs[i] = Lucene.EMPTY_TOP_DOCS;
      }
    }
    TopDocs mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs);
    return mergedTopDocs.scoreDocs;
  }
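  // A minimal usage sketch of the Lucene-level merge that sortDocs ends with, assuming the Lucene
  // API this code was written against (TopDocs.merge(Sort, int, int, TopDocs[]) accepting a null
  // sort for relevance order, plus the three-argument TopDocs constructor). Each per-shard
  // ScoreDoc carries its shardIndex so merged docs can be traced back to their shard.
  public static void mergeSketch() throws IOException {
    TopDocs shard0 =
        new TopDocs(
            2, new ScoreDoc[] {new ScoreDoc(10, 3.0f, 0), new ScoreDoc(11, 1.0f, 0)}, 3.0f);
    TopDocs shard1 = new TopDocs(1, new ScoreDoc[] {new ScoreDoc(7, 2.0f, 1)}, 2.0f);
    // relevance order (null sort), skip 0 docs ("from"), keep the global top 2 ("topN")
    TopDocs merged = TopDocs.merge(null, 0, 2, new TopDocs[] {shard0, shard1});
    for (ScoreDoc doc : merged.scoreDocs) {
      System.out.println("shard=" + doc.shardIndex + " doc=" + doc.doc + " score=" + doc.score);
    }
  }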