@Override
  public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
    final int off = readerContext.docBase;
    final LeafReader r;
    Object o = context.get("searcher");
    if (o instanceof SolrIndexSearcher) {
      SolrIndexSearcher is = (SolrIndexSearcher) o;
      SchemaField sf = is.getSchema().getFieldOrNull(field);
      if (sf != null
          && sf.hasDocValues() == false
          && sf.multiValued() == false
          && sf.getType().getNumericType() != null) {
        // it's a single-valued numeric field: we must currently create insanity :(
        List<LeafReaderContext> leaves = is.getIndexReader().leaves();
        LeafReader insaneLeaves[] = new LeafReader[leaves.size()];
        int upto = 0;
        for (LeafReaderContext raw : leaves) {
          insaneLeaves[upto++] = Insanity.wrapInsanity(raw.reader(), field);
        }
        r = SlowCompositeReaderWrapper.wrap(new MultiReader(insaneLeaves));
      } else {
        // reuse ordinalmap
        r = is.getLeafReader();
      }
    } else {
      IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
      r = SlowCompositeReaderWrapper.wrap(topReader);
    }
    // if it's e.g. tokenized/multivalued, emulate old behavior of single-valued fc
    final SortedDocValues sindex =
        SortedSetSelector.wrap(DocValues.getSortedSet(r, field), SortedSetSelector.Type.MIN);
    final int end = sindex.getValueCount();

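    // 'off' converts the per-segment docid back into a top-level docid (sindex spans the
    // composite reader); the ordinal is then reversed so the highest term sorts first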
    return new IntDocValues(this) {
      @Override
      public int intVal(int doc) {
        return (end - sindex.getOrd(doc + off) - 1);
      }
    };
  }
  @Override
  public void transform(SolrDocument doc, int docid) {
    // This is only good for random-access functions

    try {

      // TODO: calculate this stuff just once across diff functions
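      // binary-search the leaf that contains this top-level docid, then lazily create and cache
      // the per-leaf FunctionValues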
      int idx = ReaderUtil.subIndex(docid, readerContexts);
      LeafReaderContext rcontext = readerContexts.get(idx);
      FunctionValues values = docValuesArr[idx];
      if (values == null) {
        docValuesArr[idx] = values = valueSource.getValues(fcontext, rcontext);
      }

      int localId = docid - rcontext.docBase;
      setValue(doc, values.objectVal(localId));
    } catch (IOException e) {
      throw new SolrException(
          SolrException.ErrorCode.SERVER_ERROR,
          "exception at docid " + docid + " for valuesource " + valueSource,
          e);
    }
  }
  @Override
  public void execute(SearchContext context) {
    FieldsVisitor fieldsVisitor;
    Set<String> fieldNames = null;
    List<String> extractFieldNames = null;

    boolean loadAllStored = false;
    if (!context.hasFieldNames()) {
      // no fields specified, default to return source if no explicit indication
      if (!context.hasScriptFields() && !context.hasFetchSourceContext()) {
        context.fetchSourceContext(new FetchSourceContext(true));
      }
      fieldsVisitor = new FieldsVisitor(context.sourceRequested());
    } else if (context.fieldNames().isEmpty()) {
      fieldsVisitor = new FieldsVisitor(context.sourceRequested());
    } else {
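      // split requested fields into stored fields (loaded via the visitor) and fields that must
      // be extracted from _source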
      for (String fieldName : context.fieldNames()) {
        if (fieldName.equals("*")) {
          loadAllStored = true;
          continue;
        }
        if (fieldName.equals(SourceFieldMapper.NAME)) {
          if (context.hasFetchSourceContext()) {
            context.fetchSourceContext().fetchSource(true);
          } else {
            context.fetchSourceContext(new FetchSourceContext(true));
          }
          continue;
        }
        MappedFieldType fieldType = context.smartNameFieldType(fieldName);
        if (fieldType == null) {
          // Only fail if we know it is an object field; missing paths / fields shouldn't fail.
          if (context.getObjectMapper(fieldName) != null) {
            throw new IllegalArgumentException("field [" + fieldName + "] isn't a leaf field");
          }
        } else if (fieldType.stored()) {
          if (fieldNames == null) {
            fieldNames = new HashSet<>();
          }
          fieldNames.add(fieldType.names().indexName());
        } else {
          if (extractFieldNames == null) {
            extractFieldNames = newArrayList();
          }
          extractFieldNames.add(fieldName);
        }
      }
      if (loadAllStored) {
        fieldsVisitor = new AllFieldsVisitor(); // load everything, including _source
      } else if (fieldNames != null) {
        boolean loadSource = extractFieldNames != null || context.sourceRequested();
        fieldsVisitor = new CustomFieldsVisitor(fieldNames, loadSource);
      } else {
        fieldsVisitor = new FieldsVisitor(extractFieldNames != null || context.sourceRequested());
      }
    }

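    // resolve every top-level docId to its leaf reader, build the hit (nested or top-level),
    // then run the per-hit fetch sub phases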
    InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()];
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    for (int index = 0; index < context.docIdsToLoadSize(); index++) {
      int docId = context.docIdsToLoad()[context.docIdsToLoadFrom() + index];
      int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
      LeafReaderContext subReaderContext =
          context.searcher().getIndexReader().leaves().get(readerIndex);
      int subDocId = docId - subReaderContext.docBase;

      final InternalSearchHit searchHit;
      try {
        int rootDocId = findRootDocumentIfNested(context, subReaderContext, subDocId);
        if (rootDocId != -1) {
          searchHit =
              createNestedSearchHit(
                  context,
                  docId,
                  subDocId,
                  rootDocId,
                  extractFieldNames,
                  loadAllStored,
                  fieldNames,
                  subReaderContext);
        } else {
          searchHit =
              createSearchHit(
                  context, fieldsVisitor, docId, subDocId, extractFieldNames, subReaderContext);
        }
      } catch (IOException e) {
        throw ExceptionsHelper.convertToElastic(e);
      }

      hits[index] = searchHit;
      hitContext.reset(searchHit, subReaderContext, subDocId, context.searcher());
      for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
        if (fetchSubPhase.hitExecutionNeeded(context)) {
          fetchSubPhase.hitExecute(context, hitContext);
        }
      }
    }

    for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
      if (fetchSubPhase.hitsExecutionNeeded(context)) {
        fetchSubPhase.hitsExecute(context, hits);
      }
    }

    context
        .fetchResult()
        .hits(
            new InternalSearchHits(
                hits,
                context.queryResult().topDocs().totalHits,
                context.queryResult().topDocs().getMaxScore()));
  }
        @Override
        public PercolateShardResponse doPercolate(
            PercolateShardRequest request, PercolateContext context) {
          Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
          try {
            MatchAndSort matchAndSort = QueryCollector.matchAndSort(logger, context);
            queryBasedPercolating(percolatorSearcher, context, matchAndSort);
            TopDocs topDocs = matchAndSort.topDocs();
            long count = topDocs.totalHits;
            List<BytesRef> matches = new ArrayList<BytesRef>(topDocs.scoreDocs.length);
            float[] scores = new float[topDocs.scoreDocs.length];
            List<Map<String, HighlightField>> hls = null;
            if (context.highlight() != null) {
              hls = new ArrayList<Map<String, HighlightField>>(topDocs.scoreDocs.length);
            }

            final FieldMapper<?> idMapper =
                context.mapperService().smartNameFieldMapper(IdFieldMapper.NAME);
            final IndexFieldData<?> idFieldData = context.fieldData().getForField(idMapper);
            int i = 0;
            final HashedBytesRef spare = new HashedBytesRef(new BytesRef());
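            // for each matching percolator doc, locate its segment, convert to a segment-local
            // docid, and read the single-valued _id from field data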
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
              int segmentIdx =
                  ReaderUtil.subIndex(scoreDoc.doc, percolatorSearcher.reader().leaves());
              AtomicReaderContext atomicReaderContext =
                  percolatorSearcher.reader().leaves().get(segmentIdx);
              BytesValues values = idFieldData.load(atomicReaderContext).getBytesValues(true);
              final int localDocId = scoreDoc.doc - atomicReaderContext.docBase;
              final int numValues = values.setDocument(localDocId);
              assert numValues == 1;
              spare.bytes = values.nextValue();
              spare.hash = values.currentValueHash();
              matches.add(values.copyShared());
              if (hls != null) {
                Query query = context.percolateQueries().get(spare);
                context.parsedQuery(new ParsedQuery(query, ImmutableMap.<String, Filter>of()));
                context.hitContext().cache().clear();
                highlightPhase.hitExecute(context, context.hitContext());
                hls.add(i, context.hitContext().hit().getHighlightFields());
              }
              scores[i++] = scoreDoc.score;
            }
            if (hls != null) {
              return new PercolateShardResponse(
                  matches.toArray(new BytesRef[matches.size()]),
                  hls,
                  count,
                  scores,
                  context,
                  request.index(),
                  request.shardId());
            } else {
              return new PercolateShardResponse(
                  matches.toArray(new BytesRef[matches.size()]),
                  count,
                  scores,
                  context,
                  request.index(),
                  request.shardId());
            }
          } catch (Throwable e) {
            logger.debug("failed to execute", e);
            throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
          } finally {
            percolatorSearcher.release();
          }
        }
  private Map<Integer, Object> highlightField(
      String field,
      String contents[],
      BreakIterator bi,
      BytesRef terms[],
      int[] docids,
      List<AtomicReaderContext> leaves,
      int maxPassages,
      Query query)
      throws IOException {
    Map<Integer, Object> highlights = new HashMap<>();

    PassageFormatter fieldFormatter = getFormatter(field);
    if (fieldFormatter == null) {
      throw new NullPointerException("PassageFormatter cannot be null");
    }

    // check if we should do any multiterm processing
    Analyzer analyzer = getIndexAnalyzer(field);
    CharacterRunAutomaton automata[] = new CharacterRunAutomaton[0];
    if (analyzer != null) {
      automata = MultiTermHighlighting.extractAutomata(query, field);
    }

    // resize 'terms', where the last term is the multiterm matcher
    if (automata.length > 0) {
      BytesRef newTerms[] = new BytesRef[terms.length + 1];
      System.arraycopy(terms, 0, newTerms, 0, terms.length);
      terms = newTerms;
    }

    // we are processing in increasing docid order, so we only need to reinitialize stuff on
    // segment changes; otherwise, we will just advance() existing enums to the new document in
    // the same segment.
    DocsAndPositionsEnum postings[] = null;
    TermsEnum termsEnum = null;
    int lastLeaf = -1;

    for (int i = 0; i < docids.length; i++) {
      String content = contents[i];
      if (content.length() == 0) {
        continue; // nothing to do
      }
      bi.setText(content);
      int doc = docids[i];
      int leaf = ReaderUtil.subIndex(doc, leaves);
      AtomicReaderContext subContext = leaves.get(leaf);
      AtomicReader r = subContext.reader();

      assert leaf >= lastLeaf; // increasing order

      // if the segment has changed, we must initialize new enums.
      if (leaf != lastLeaf) {
        Terms t = r.terms(field);
        if (t != null) {
          termsEnum = t.iterator(null);
          postings = new DocsAndPositionsEnum[terms.length];
        }
      }
      if (termsEnum == null) {
        continue; // no terms for this field, nothing to do
      }

      // if there are multi-term matches, we have to initialize the "fake" enum for each document
      if (automata.length > 0) {
        DocsAndPositionsEnum dp =
            MultiTermHighlighting.getDocsEnum(analyzer.tokenStream(field, content), automata);
        dp.advance(doc - subContext.docBase);
        postings[terms.length - 1] = dp; // last term is the multiterm matcher
      }

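      // highlight with segment-local doc ids (top-level docid minus the leaf's docBase)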
      Passage passages[] =
          highlightDoc(
              field,
              terms,
              content.length(),
              bi,
              doc - subContext.docBase,
              termsEnum,
              postings,
              maxPassages);

      if (passages.length == 0) {
        // no passages were returned, so ask for a default summary
        passages = getEmptyHighlight(field, bi, maxPassages);
      }

      if (passages.length > 0) {
        highlights.put(doc, fieldFormatter.format(passages, content));
      }

      lastLeaf = leaf;
    }

    return highlights;
  }
  int resolveParentDocuments(
      TopDocs topDocs,
      SearchContext context,
      Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs) {
    int parentHitsResolved = 0;
    Recycler.V<ObjectObjectOpenHashMap<Object, Recycler.V<IntObjectOpenHashMap<ParentDoc>>>>
        parentDocsPerReader =
            cacheRecycler.hashMap(context.searcher().getIndexReader().leaves().size());
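    // for each child hit: find its segment, read the parent id, then scan every segment for a
    // live parent doc with that id and aggregate per-parent scores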
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
      int readerIndex =
          ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves());
      AtomicReaderContext subContext =
          context.searcher().getIndexReader().leaves().get(readerIndex);
      int subDoc = scoreDoc.doc - subContext.docBase;

      // find the parent id
      HashedBytesArray parentId =
          context.idCache().reader(subContext.reader()).parentIdByDoc(parentType, subDoc);
      if (parentId == null) {
        // no parent found
        continue;
      }
      // now go over and find the parent doc Id and reader tuple
      for (AtomicReaderContext atomicReaderContext : context.searcher().getIndexReader().leaves()) {
        AtomicReader indexReader = atomicReaderContext.reader();
        int parentDocId = context.idCache().reader(indexReader).docById(parentType, parentId);
        Bits liveDocs = indexReader.getLiveDocs();
        if (parentDocId != -1 && (liveDocs == null || liveDocs.get(parentDocId))) {
          // we found a match, add it and break

          Recycler.V<IntObjectOpenHashMap<ParentDoc>> readerParentDocs =
              parentDocsPerReader.v().get(indexReader.getCoreCacheKey());
          if (readerParentDocs == null) {
            readerParentDocs = cacheRecycler.intObjectMap(indexReader.maxDoc());
            parentDocsPerReader.v().put(indexReader.getCoreCacheKey(), readerParentDocs);
          }

          ParentDoc parentDoc = readerParentDocs.v().get(parentDocId);
          if (parentDoc == null) {
            parentHitsResolved++; // we have a hit on a parent
            parentDoc = new ParentDoc();
            parentDoc.docId = parentDocId;
            parentDoc.count = 1;
            parentDoc.maxScore = scoreDoc.score;
            parentDoc.sumScores = scoreDoc.score;
            readerParentDocs.v().put(parentDocId, parentDoc);
          } else {
            parentDoc.count++;
            parentDoc.sumScores += scoreDoc.score;
            if (scoreDoc.score > parentDoc.maxScore) {
              parentDoc.maxScore = scoreDoc.score;
            }
          }
        }
      }
    }
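    // copy the per-reader maps into the result: sort each reader's parent docs and release the
    // recycled per-reader maps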
    boolean[] states = parentDocsPerReader.v().allocated;
    Object[] keys = parentDocsPerReader.v().keys;
    Object[] values = parentDocsPerReader.v().values;
    for (int i = 0; i < states.length; i++) {
      if (states[i]) {
        Recycler.V<IntObjectOpenHashMap<ParentDoc>> value =
            (Recycler.V<IntObjectOpenHashMap<ParentDoc>>) values[i];
        ParentDoc[] _parentDocs = value.v().values().toArray(ParentDoc.class);
        Arrays.sort(_parentDocs, PARENT_DOC_COMP);
        parentDocs.v().put(keys[i], _parentDocs);
        Releasables.release(value);
      }
    }
    Releasables.release(parentDocsPerReader);
    return parentHitsResolved;
  }
    public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher)
        throws IOException {
      SolrQueryRequest req = rb.req;
      SolrQueryResponse rsp = rb.rsp;
      // The query cache doesn't currently store sort field values, and SolrIndexSearcher doesn't
      // currently have an option to return sort field values.  Because of this, we
      // take the documents given and re-derive the sort values.
      //
      // TODO: See SOLR-5595
      boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES, false);
      if (fsv) {
        NamedList<Object[]> sortVals = new NamedList<>(); // order is important for the sort fields
        IndexReaderContext topReaderContext = searcher.getTopReaderContext();
        List<LeafReaderContext> leaves = topReaderContext.leaves();
        LeafReaderContext currentLeaf = null;
        if (leaves.size() == 1) {
          // if there is a single segment, use that subReader and avoid looking up each time
          currentLeaf = leaves.get(0);
          leaves = null;
        }

        DocList docList = rb.getResults().docList;

        // sort ids from lowest to highest so we can access them in order
        int nDocs = docList.size();
        final long[] sortedIds = new long[nDocs];
        final float[] scores = new float[nDocs]; // doc scores, parallel to sortedIds
        DocIterator it = docList.iterator();
        for (int i = 0; i < nDocs; i++) {
          sortedIds[i] = (((long) it.nextDoc()) << 32) | i;
          scores[i] = docList.hasScores() ? it.score() : Float.NaN;
        }

        // sort ids and scores together
        new InPlaceMergeSorter() {
          @Override
          protected void swap(int i, int j) {
            long tmpId = sortedIds[i];
            float tmpScore = scores[i];
            sortedIds[i] = sortedIds[j];
            scores[i] = scores[j];
            sortedIds[j] = tmpId;
            scores[j] = tmpScore;
          }

          @Override
          protected int compare(int i, int j) {
            return Long.compare(sortedIds[i], sortedIds[j]);
          }
        }.sort(0, sortedIds.length);

        SortSpec sortSpec = rb.getSortSpec();
        Sort sort = searcher.weightSort(sortSpec.getSort());
        SortField[] sortFields =
            sort == null ? new SortField[] {SortField.FIELD_SCORE} : sort.getSort();
        List<SchemaField> schemaFields = sortSpec.getSchemaFields();

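        // for each sort field (skipping score/doc), recompute every document's sort value by
        // copying it through the field's leaf comparator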
        for (int fld = 0; fld < schemaFields.size(); fld++) {
          SchemaField schemaField = schemaFields.get(fld);
          FieldType ft = null == schemaField ? null : schemaField.getType();
          SortField sortField = sortFields[fld];

          SortField.Type type = sortField.getType();
          // :TODO: would be simpler to always serialize every position of SortField[]
          if (type == SortField.Type.SCORE || type == SortField.Type.DOC) continue;

          FieldComparator<?> comparator = null;
          LeafFieldComparator leafComparator = null;
          Object[] vals = new Object[nDocs];

          int lastIdx = -1;
          int idx = 0;

          for (int i = 0; i < sortedIds.length; ++i) {
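            // sortedIds packs the top-level lucene docid in the upper 32 bits and the hit's
            // original position in the DocList in the lower 32 bits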
            long idAndPos = sortedIds[i];
            float score = scores[i];
            int doc = (int) (idAndPos >>> 32);
            int position = (int) idAndPos;

            if (leaves != null) {
              idx = ReaderUtil.subIndex(doc, leaves);
              currentLeaf = leaves.get(idx);
              if (idx != lastIdx) {
                // we switched segments.  invalidate comparator.
                comparator = null;
              }
            }

            if (comparator == null) {
              comparator = sortField.getComparator(1, 0);
              leafComparator = comparator.getLeafComparator(currentLeaf);
            }

            doc -= currentLeaf.docBase; // adjust for what segment this is in
            leafComparator.setScorer(new FakeScorer(doc, score));
            leafComparator.copy(0, doc);
            Object val = comparator.value(0);
            if (null != ft) val = ft.marshalSortValue(val);
            vals[position] = val;
          }

          sortVals.add(sortField.getField(), vals);
        }

        rsp.add("merge_values", sortVals);
      }
    }
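  // only cache if the worst-case entry size (bounded by the top-level reader's maxDoc) is
  // reasonable and the caching policy accepts the wrapped query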
  private boolean shouldCache(LeafReaderContext context) throws IOException {
    return cacheEntryHasReasonableWorstCaseSize(
            ReaderUtil.getTopLevelContext(context).reader().maxDoc())
        && policy.shouldCache(in.getQuery(), context);
  }