private void queryBasedPercolating(Engine.Searcher percolatorSearcher, PercolateContext context,
                                   QueryCollector percolateCollector) throws IOException {
    Filter percolatorTypeFilter = context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter();
    percolatorTypeFilter = context.indexService().cache().filter().cache(percolatorTypeFilter);
    FilteredQuery query = new FilteredQuery(context.percolateQuery(), percolatorTypeFilter);
    percolatorSearcher.searcher().search(query, percolateCollector);

    for (Collector queryCollector : percolateCollector.facetAndAggregatorCollector) {
        if (queryCollector instanceof XCollector) {
            ((XCollector) queryCollector).postCollection();
        }
    }
    if (context.facets() != null) {
        facetPhase.execute(context);
    }
    if (context.aggregations() != null) {
        aggregationPhase.execute(context);
    }
}
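// A minimal, self-contained sketch of the pattern used above: wrap an arbitrary
// percolate query in a FilteredQuery so that only documents of the percolator
// type are considered, then drive a Collector over the matches. This uses the
// raw Lucene 4.x-era API directly; the "_type" field name and the typeName
// argument are illustrative assumptions, not values taken from this class,
// and the production code additionally caches the type filter, which this
// sketch omits.

import org.apache.lucene.index.Term;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;

final class TypeRestrictedSearchSketch {
    // Runs percolateQuery, but only over documents whose "_type" equals typeName,
    // mirroring how queryBasedPercolating narrows the search to percolator docs.
    static void search(IndexSearcher searcher, Query percolateQuery, String typeName,
                       Collector collector) throws java.io.IOException {
        Filter typeFilter = new QueryWrapperFilter(new TermQuery(new Term("_type", typeName)));
        searcher.search(new FilteredQuery(percolateQuery, typeFilter), collector);
    }
}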
private ParsedDocument parseRequest(IndexService documentIndexService, PercolateShardRequest request,
                                    PercolateContext context) throws ElasticsearchException {
    BytesReference source = request.source();
    if (source == null || source.length() == 0) {
        return null;
    }

    // TODO: combine all feature parse elements into one map
    Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements();
    Map<String, ? extends SearchParseElement> facetElements = facetPhase.parseElements();
    Map<String, ? extends SearchParseElement> aggregationElements = aggregationPhase.parseElements();

    ParsedDocument doc = null;
    XContentParser parser = null;

    // Some queries (e.g. the function_score query with decay functions) rely on a SearchContext being set.
    // We switch types because this context needs to be in the context of the percolate queries in the shard,
    // not the in-memory percolate doc.
    String[] previousTypes = context.types();
    context.types(new String[]{TYPE_NAME});
    SearchContext.setCurrent(context);
    try {
        parser = XContentFactory.xContent(source).createParser(source);
        String currentFieldName = null;
        XContentParser.Token token;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
                // check for "doc" here: the next token will be START_OBJECT, which is
                // the start of the actual document
                if ("doc".equals(currentFieldName)) {
                    if (doc != null) {
                        throw new ElasticsearchParseException("Either specify doc or get, not both");
                    }
                    MapperService mapperService = documentIndexService.mapperService();
                    DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(request.documentType());
                    doc = docMapper.parse(source(parser).type(request.documentType()).flyweight(true));
                    // the document parsing exits the "doc" object, so we need to set the new current field.
                    currentFieldName = parser.currentName();
                }
            } else if (token == XContentParser.Token.START_OBJECT) {
                SearchParseElement element = hlElements.get(currentFieldName);
                if (element == null) {
                    element = facetElements.get(currentFieldName);
                    if (element == null) {
                        element = aggregationElements.get(currentFieldName);
                    }
                }
                if ("query".equals(currentFieldName)) {
                    if (context.percolateQuery() != null) {
                        throw new ElasticsearchParseException("Either specify query or filter, not both");
                    }
                    context.percolateQuery(documentIndexService.queryParserService().parse(parser).query());
                } else if ("filter".equals(currentFieldName)) {
                    if (context.percolateQuery() != null) {
                        throw new ElasticsearchParseException("Either specify query or filter, not both");
                    }
                    Filter filter = documentIndexService.queryParserService().parseInnerFilter(parser).filter();
                    context.percolateQuery(new XConstantScoreQuery(filter));
                } else if ("sort".equals(currentFieldName)) {
                    parseSort(parser, context);
                } else if (element != null) {
                    element.parse(parser, context);
                }
            } else if (token == XContentParser.Token.START_ARRAY) {
                if ("sort".equals(currentFieldName)) {
                    parseSort(parser, context);
                }
            } else if (token == null) {
                break;
            } else if (token.isValue()) {
                if ("size".equals(currentFieldName)) {
                    context.size(parser.intValue());
                    if (context.size() < 0) {
                        throw new ElasticsearchParseException("size is set to [" + context.size() + "] and is expected to be higher or equal to 0");
                    }
                } else if ("sort".equals(currentFieldName)) {
                    parseSort(parser, context);
                } else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) {
                    context.trackScores(parser.booleanValue());
                }
            }
        }

        // We need to get the actual source from the request body for highlighting, so parse the
        // request body again and only get the doc source.
        if (context.highlight() != null) {
            parser.close();
            currentFieldName = null;
            parser = XContentFactory.xContent(source).createParser(source);
            token = parser.nextToken();
            assert token == XContentParser.Token.START_OBJECT;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else if (token == XContentParser.Token.START_OBJECT) {
                    if ("doc".equals(currentFieldName)) {
                        BytesStreamOutput bStream = new BytesStreamOutput();
                        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream);
                        builder.copyCurrentStructure(parser);
                        builder.close();
                        doc.setSource(bStream.bytes());
                        break;
                    } else {
                        parser.skipChildren();
                    }
                } else if (token == null) {
                    break;
                }
            }
        }
    } catch (Throwable e) {
        throw new ElasticsearchParseException("failed to parse request", e);
    } finally {
        context.types(previousTypes);
        SearchContext.removeCurrent();
        if (parser != null) {
            parser.close();
        }
    }

    return doc;
}
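// For reference, the request body parsed above interleaves the in-memory document
// with search options ("doc", "query"/"filter", "sort", "size", "track_scores",
// plus highlight/facet/aggregation elements). Below is a minimal, self-contained
// sketch of the same token-walking technique, using Jackson's streaming JsonParser
// for illustration; the method above uses Elasticsearch's XContentParser
// abstraction, not Jackson directly, and the field handling here is a simplified
// stand-in, not the production logic.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

public final class PullParseSketch {
    public static void main(String[] args) throws Exception {
        String body = "{\"doc\":{\"message\":\"hello\"},\"size\":10,\"track_scores\":true}";
        JsonParser parser = new JsonFactory().createParser(body);
        String currentFieldName = null;
        JsonToken token;
        parser.nextToken(); // consume the outer START_OBJECT
        while ((token = parser.nextToken()) != JsonToken.END_OBJECT) {
            if (token == JsonToken.FIELD_NAME) {
                // remember the field name; the next token carries its value
                currentFieldName = parser.getCurrentName();
            } else if (token == JsonToken.START_OBJECT) {
                if ("doc".equals(currentFieldName)) {
                    // in the real code this sub-object is handed to the document
                    // mapper; here we simply skip past it to its matching END_OBJECT
                    parser.skipChildren();
                }
            } else if (token.isScalarValue()) {
                if ("size".equals(currentFieldName)) {
                    System.out.println("size = " + parser.getIntValue());
                } else if ("track_scores".equals(currentFieldName)) {
                    System.out.println("track_scores = " + parser.getBooleanValue());
                }
            }
        }
        parser.close();
    }
}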