  /**
   * Executes the percolate query against the registered percolator queries (restricted to
   * documents of the percolator type) and runs the post-collection phase for any facet and
   * aggregation collectors.
   */
  private void queryBasedPercolating(
      Engine.Searcher percolatorSearcher,
      PercolateContext context,
      QueryCollector percolateCollector)
      throws IOException {
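    // Restrict the search to percolator documents only, using a cached type filter.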
    Filter percolatorTypeFilter =
        context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter();
    percolatorTypeFilter = context.indexService().cache().filter().cache(percolatorTypeFilter);
    FilteredQuery query = new FilteredQuery(context.percolateQuery(), percolatorTypeFilter);
    percolatorSearcher.searcher().search(query, percolateCollector);

    // Facet and aggregation collectors that implement XCollector need a post-collection phase to
    // finalize their results.
    for (Collector collector : percolateCollector.facetAndAggregatorCollector) {
      if (collector instanceof XCollector) {
        ((XCollector) collector).postCollection();
      }
    }
    if (context.facets() != null) {
      facetPhase.execute(context);
    }
    if (context.aggregations() != null) {
      aggregationPhase.execute(context);
    }
  }
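
  /**
   * Parses the percolate request body into the given {@link PercolateContext} (query or filter,
   * sort, size, track_scores, highlighting, facets and aggregations). Returns the parsed in-memory
   * document if the body contains a "doc" element, or {@code null} if the request has no body.
   */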
  private ParsedDocument parseRequest(
      IndexService documentIndexService, PercolateShardRequest request, PercolateContext context)
      throws ElasticsearchException {
    BytesReference source = request.source();
    if (source == null || source.length() == 0) {
      return null;
    }

    // TODO: combine all feature parse elements into one map
    Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements();
    Map<String, ? extends SearchParseElement> facetElements = facetPhase.parseElements();
    Map<String, ? extends SearchParseElement> aggregationElements =
        aggregationPhase.parseElements();

    ParsedDocument doc = null;
    XContentParser parser = null;

    // Some queries (e.g. the function_score query with decay functions) rely on a SearchContext
    // being set. We switch types because this context needs to be in the context of the percolate
    // queries in the shard, not the in-memory percolate doc.
    String[] previousTypes = context.types();
    context.types(new String[] {TYPE_NAME});
    SearchContext.setCurrent(context);
    try {
      parser = XContentFactory.xContent(source).createParser(source);
      String currentFieldName = null;
      XContentParser.Token token;
      while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
          currentFieldName = parser.currentName();
          // we need to check for "doc" here, so that the next token will be START_OBJECT, which
          // is the start of the actual document
          if ("doc".equals(currentFieldName)) {
            if (doc != null) {
              throw new ElasticsearchParseException("Either specify doc or get, not both");
            }

            MapperService mapperService = documentIndexService.mapperService();
            DocumentMapper docMapper =
                mapperService.documentMapperWithAutoCreate(request.documentType());
            doc = docMapper.parse(source(parser).type(request.documentType()).flyweight(true));
            // the document parsing exits the "doc" object, so we need to re-read the current
            // field name.
            currentFieldName = parser.currentName();
          }
        } else if (token == XContentParser.Token.START_OBJECT) {
          SearchParseElement element = hlElements.get(currentFieldName);
          if (element == null) {
            element = facetElements.get(currentFieldName);
            if (element == null) {
              element = aggregationElements.get(currentFieldName);
            }
          }

          if ("query".equals(currentFieldName)) {
            if (context.percolateQuery() != null) {
              throw new ElasticsearchParseException("Either specify query or filter, not both");
            }
            context.percolateQuery(documentIndexService.queryParserService().parse(parser).query());
          } else if ("filter".equals(currentFieldName)) {
            if (context.percolateQuery() != null) {
              throw new ElasticsearchParseException("Either specify query or filter, not both");
            }
            Filter filter =
                documentIndexService.queryParserService().parseInnerFilter(parser).filter();
            context.percolateQuery(new XConstantScoreQuery(filter));
          } else if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          } else if (element != null) {
            element.parse(parser, context);
          }
        } else if (token == XContentParser.Token.START_ARRAY) {
          if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          }
        } else if (token == null) {
          break;
        } else if (token.isValue()) {
          if ("size".equals(currentFieldName)) {
            context.size(parser.intValue());
            if (context.size() < 0) {
              throw new ElasticsearchParseException(
                  "size is set to ["
                      + context.size()
                      + "] and is expected to be greater than or equal to 0");
            }
          } else if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          } else if ("track_scores".equals(currentFieldName)
              || "trackScores".equals(currentFieldName)) {
            context.trackScores(parser.booleanValue());
          }
        }
      }

      // We need the actual source from the request body for highlighting, so parse the request
      // body again and extract only the doc source.
      if (context.highlight() != null) {
        parser.close();
        currentFieldName = null;
        parser = XContentFactory.xContent(source).createParser(source);
        token = parser.nextToken();
        assert token == XContentParser.Token.START_OBJECT;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
          if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
          } else if (token == XContentParser.Token.START_OBJECT) {
            if ("doc".equals(currentFieldName)) {
              BytesStreamOutput bStream = new BytesStreamOutput();
              XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream);
              builder.copyCurrentStructure(parser);
              builder.close();
              doc.setSource(bStream.bytes());
              break;
            } else {
              parser.skipChildren();
            }
          } else if (token == null) {
            break;
          }
        }
      }

    } catch (Throwable e) {
      throw new ElasticsearchParseException("failed to parse request", e);
    } finally {
      context.types(previousTypes);
      SearchContext.removeCurrent();
      if (parser != null) {
        parser.close();
      }
    }

    return doc;
  }
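
  /**
   * Entry point for a shard-level percolate request: parses the request into a {@link
   * PercolateContext}, chooses the percolator strategy, indexes the document to percolate into an
   * in-memory index and executes the matching phase.
   */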
  public PercolateShardResponse percolate(PercolateShardRequest request) {
    IndexService percolateIndexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = percolateIndexService.shardSafe(request.shardId());

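    // Track percolation stats: mark the start here and record the took-time in the finally block.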
    ShardPercolateService shardPercolateService = indexShard.shardPercolateService();
    shardPercolateService.prePercolate();
    long startTime = System.nanoTime();

    SearchShardTarget searchShardTarget =
        new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
    final PercolateContext context =
        new PercolateContext(
            request,
            searchShardTarget,
            indexShard,
            percolateIndexService,
            cacheRecycler,
            pageCacheRecycler,
            bigArrays,
            scriptService);
    try {
      ParsedDocument parsedDocument = parseRequest(percolateIndexService, request, context);
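      // Shortcut: this shard holds no registered percolator queries, so nothing can match.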
      if (context.percolateQueries().isEmpty()) {
        return new PercolateShardResponse(context, request.index(), request.shardId());
      }

      if (request.docSource() != null && request.docSource().length() != 0) {
        parsedDocument =
            parseFetchedDoc(
                context, request.docSource(), percolateIndexService, request.documentType());
      } else if (parsedDocument == null) {
        throw new ElasticsearchIllegalArgumentException("Nothing to percolate");
      }

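      // Sorting, scoring, facets and aggregations all need a query to execute against the
      // registered percolator queries; fall back to match_all when none was specified.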
      if (context.percolateQuery() == null
          && (context.trackScores()
              || context.doSort
              || context.facets() != null
              || context.aggregations() != null)) {
        context.percolateQuery(new MatchAllDocsQuery());
      }

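      // Sorting and highlighting both require an explicit size to be set on the request.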
      if (context.doSort && !context.limit) {
        throw new ElasticsearchIllegalArgumentException("Can't sort if size isn't specified");
      }

      if (context.highlight() != null && !context.limit) {
        throw new ElasticsearchIllegalArgumentException("Can't highlight if size isn't specified");
      }

      if (context.size() < 0) {
        context.size(0);
      }

      // Parse the source into a single MemoryIndex if it is one document, or index multiple
      // in-memory docs if the document type has nested objects.
      PercolatorIndex percolatorIndex;
      if (indexShard.mapperService().documentMapper(request.documentType()).hasNestedObjects()) {
        percolatorIndex = multi;
      } else {
        percolatorIndex = single;
      }

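      // Pick the percolator strategy based on the request: count-only, sorted top matches, scored
      // matches, or plain matching.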
      PercolatorType action;
      if (request.onlyCount()) {
        action = context.percolateQuery() != null ? queryCountPercolator : countPercolator;
      } else {
        if (context.doSort) {
          action = topMatchingPercolator;
        } else if (context.percolateQuery() != null) {
          action = context.trackScores() ? scoringPercolator : queryPercolator;
        } else {
          action = matchPercolator;
        }
      }
      context.percolatorTypeId = action.id();

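      // Index the parsed document into the in-memory percolator index.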
      percolatorIndex.prepare(context, parsedDocument);

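      // Ensure the shard is in a state that allows reads before executing the percolate action.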
      indexShard.readAllowed();
      return action.doPercolate(request, context);
    } finally {
      context.release();
      shardPercolateService.postPercolate(System.nanoTime() - startTime);
    }
  }