Example #1
        @Override
        public PercolateShardResponse doPercolate(
            PercolateShardRequest request, PercolateContext context) {
          long count = 0;
          List<BytesRef> matches = new ArrayList<>();
          List<Map<String, HighlightField>> hls = new ArrayList<>();
          Lucene.ExistsCollector collector = new Lucene.ExistsCollector();

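          // Run every registered percolator query against the single in-memory document;
          // ExistsCollector only records whether the query matched at all.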
          for (Map.Entry<HashedBytesRef, Query> entry : context.percolateQueries().entrySet()) {
            collector.reset();
            if (context.highlight() != null) {
              context.parsedQuery(
                  new ParsedQuery(entry.getValue(), ImmutableMap.<String, Filter>of()));
              context.hitContext().cache().clear();
            }
            try {
              context.docSearcher().search(entry.getValue(), collector);
            } catch (Throwable e) {
              logger.debug("[" + entry.getKey() + "] failed to execute query", e);
              throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
            }

            if (collector.exists()) {
              if (!context.limit || count < context.size()) {
                matches.add(entry.getKey().bytes);
                if (context.highlight() != null) {
                  highlightPhase.hitExecute(context, context.hitContext());
                  hls.add(context.hitContext().hit().getHighlightFields());
                }
              }
              count++;
            }
          }

          BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
          return new PercolateShardResponse(
              finalMatches, hls, count, context, request.index(), request.shardId());
        }
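
The loop above is the core of match percolation: every registered query is executed against a single in-memory document, and the document "matches" a query when that query finds it. A minimal standalone sketch of the same idea, using Lucene's MemoryIndex directly (the query set, field name, and text below are illustrative, not Elasticsearch internals):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.memory.MemoryIndex;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    public class PercolateSketch {
      public static void main(String[] args) {
        // Registered "percolator" queries keyed by id (stands in for context.percolateQueries()).
        Map<String, Query> queries = new HashMap<>();
        queries.put("q1", new TermQuery(new Term("body", "error")));
        queries.put("q2", new TermQuery(new Term("body", "timeout")));

        // Index the single incoming document in memory only.
        MemoryIndex memoryIndex = new MemoryIndex();
        // StandardAnalyzer(): no-arg constructor in Lucene 5+; Lucene 4.x takes a Version.
        memoryIndex.addField("body", "connection error on node 3", new StandardAnalyzer());

        // Run each query against the one-document index; a score > 0 means it matched.
        List<String> matches = new ArrayList<>();
        for (Map.Entry<String, Query> entry : queries.entrySet()) {
          if (memoryIndex.search(entry.getValue()) > 0.0f) {
            matches.add(entry.getKey());
          }
        }
        System.out.println(matches); // prints [q1]
      }
    }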
Example #2
        @Override
        public PercolateShardResponse doPercolate(
            PercolateShardRequest request, PercolateContext context) {
          Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
          try {
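            // Execute query-based percolation, collecting the matching percolator
            // queries as score-sorted top docs.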
            MatchAndSort matchAndSort = QueryCollector.matchAndSort(logger, context);
            queryBasedPercolating(percolatorSearcher, context, matchAndSort);
            TopDocs topDocs = matchAndSort.topDocs();
            long count = topDocs.totalHits;
            List<BytesRef> matches = new ArrayList<>(topDocs.scoreDocs.length);
            float[] scores = new float[topDocs.scoreDocs.length];
            List<Map<String, HighlightField>> hls = null;
            if (context.highlight() != null) {
              hls = new ArrayList<>(topDocs.scoreDocs.length);
            }

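            // Resolve each matching Lucene doc back to its percolator query id via the
            // _id field data, mapping top-level doc ids onto their owning segments.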
            final FieldMapper<?> idMapper =
                context.mapperService().smartNameFieldMapper(IdFieldMapper.NAME);
            final IndexFieldData<?> idFieldData = context.fieldData().getForField(idMapper);
            int i = 0;
            final HashedBytesRef spare = new HashedBytesRef(new BytesRef());
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
              int segmentIdx =
                  ReaderUtil.subIndex(scoreDoc.doc, percolatorSearcher.reader().leaves());
              AtomicReaderContext atomicReaderContext =
                  percolatorSearcher.reader().leaves().get(segmentIdx);
              BytesValues values = idFieldData.load(atomicReaderContext).getBytesValues(true);
              final int localDocId = scoreDoc.doc - atomicReaderContext.docBase;
              final int numValues = values.setDocument(localDocId);
              assert numValues == 1;
              spare.bytes = values.nextValue();
              spare.hash = values.currentValueHash();
              matches.add(values.copyShared());
              if (hls != null) {
                Query query = context.percolateQueries().get(spare);
                context.parsedQuery(new ParsedQuery(query, ImmutableMap.<String, Filter>of()));
                context.hitContext().cache().clear();
                highlightPhase.hitExecute(context, context.hitContext());
                hls.add(i, context.hitContext().hit().getHighlightFields());
              }
              scores[i++] = scoreDoc.score;
            }
            if (hls != null) {
              return new PercolateShardResponse(
                  matches.toArray(new BytesRef[matches.size()]),
                  hls,
                  count,
                  scores,
                  context,
                  request.index(),
                  request.shardId());
            } else {
              return new PercolateShardResponse(
                  matches.toArray(new BytesRef[matches.size()]),
                  count,
                  scores,
                  context,
                  request.index(),
                  request.shardId());
            }
          } catch (Throwable e) {
            logger.debug("failed to execute", e);
            throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
          } finally {
            percolatorSearcher.release();
          }
        }
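
The per-segment arithmetic above (ReaderUtil.subIndex followed by subtracting docBase) is the standard Lucene pattern for mapping a top-level doc id from TopDocs onto the leaf segment that holds it. Isolated as a small helper, written against the Lucene 5+ LeafReaderContext API (the Lucene 4.x code above uses AtomicReaderContext, whose fields are the same):

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.ReaderUtil;

    final class SegmentLocal {
      /** Returns {segment index, segment-local doc id} for a top-level doc id. */
      static int[] toLeafLocal(DirectoryReader reader, int topLevelDocId) {
        // Locate the leaf whose doc id range contains topLevelDocId.
        int segmentIdx = ReaderUtil.subIndex(topLevelDocId, reader.leaves());
        LeafReaderContext leaf = reader.leaves().get(segmentIdx);
        // Subtracting the leaf's base offset yields the doc id within that segment.
        return new int[] {segmentIdx, topLevelDocId - leaf.docBase};
      }
    }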
Example #3
  private ParsedDocument parseRequest(
      IndexService documentIndexService, PercolateShardRequest request, PercolateContext context)
      throws ElasticsearchException {
    BytesReference source = request.source();
    if (source == null || source.length() == 0) {
      return null;
    }

    // TODO: combine all feature parse elements into one map
    Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements();
    Map<String, ? extends SearchParseElement> facetElements = facetPhase.parseElements();
    Map<String, ? extends SearchParseElement> aggregationElements =
        aggregationPhase.parseElements();

    ParsedDocument doc = null;
    XContentParser parser = null;

    // Some queries (e.g. the function_score query with decay functions) rely on a SearchContext
    // being set. We switch types here because this context needs to operate on the percolator
    // queries stored in the shard, not on the in-memory percolate document.
    String[] previousTypes = context.types();
    context.types(new String[] {TYPE_NAME});
    SearchContext.setCurrent(context);
    try {
      parser = XContentFactory.xContent(source).createParser(source);
      String currentFieldName = null;
      XContentParser.Token token;
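      // Walk the top-level tokens of the request body and dispatch on the current field name.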
      while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
          currentFieldName = parser.currentName();
          // We check for "doc" here so that the next token is the START_OBJECT that actually
          // starts the document.
          if ("doc".equals(currentFieldName)) {
            if (doc != null) {
              throw new ElasticsearchParseException("Either specify doc or get, not both");
            }

            MapperService mapperService = documentIndexService.mapperService();
            DocumentMapper docMapper =
                mapperService.documentMapperWithAutoCreate(request.documentType());
            doc = docMapper.parse(source(parser).type(request.documentType()).flyweight(true));
            // Parsing the document exits the "doc" object, so we need to re-read the current
            // field name.
            currentFieldName = parser.currentName();
          }
        } else if (token == XContentParser.Token.START_OBJECT) {
          SearchParseElement element = hlElements.get(currentFieldName);
          if (element == null) {
            element = facetElements.get(currentFieldName);
            if (element == null) {
              element = aggregationElements.get(currentFieldName);
            }
          }

          if ("query".equals(currentFieldName)) {
            if (context.percolateQuery() != null) {
              throw new ElasticsearchParseException("Either specify query or filter, not both");
            }
            context.percolateQuery(documentIndexService.queryParserService().parse(parser).query());
          } else if ("filter".equals(currentFieldName)) {
            if (context.percolateQuery() != null) {
              throw new ElasticsearchParseException("Either specify query or filter, not both");
            }
            Filter filter =
                documentIndexService.queryParserService().parseInnerFilter(parser).filter();
            context.percolateQuery(new XConstantScoreQuery(filter));
          } else if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          } else if (element != null) {
            element.parse(parser, context);
          }
        } else if (token == XContentParser.Token.START_ARRAY) {
          if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          }
        } else if (token == null) {
          break;
        } else if (token.isValue()) {
          if ("size".equals(currentFieldName)) {
            context.size(parser.intValue());
            if (context.size() < 0) {
              throw new ElasticsearchParseException(
                  "size is set to ["
                      + context.size()
                      + "] and is expected to be higher or equal to 0");
            }
          } else if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          } else if ("track_scores".equals(currentFieldName)
              || "trackScores".equals(currentFieldName)) {
            context.trackScores(parser.booleanValue());
          }
        }
      }

      // For highlighting we need the actual document source from the request body, so we parse
      // the request body again and extract only the doc source.
      if (context.highlight() != null) {
        parser.close();
        currentFieldName = null;
        parser = XContentFactory.xContent(source).createParser(source);
        token = parser.nextToken();
        assert token == XContentParser.Token.START_OBJECT;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
          if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
          } else if (token == XContentParser.Token.START_OBJECT) {
            if ("doc".equals(currentFieldName)) {
              BytesStreamOutput bStream = new BytesStreamOutput();
              XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream);
              builder.copyCurrentStructure(parser);
              builder.close();
              doc.setSource(bStream.bytes());
              break;
            } else {
              parser.skipChildren();
            }
          } else if (token == null) {
            break;
          }
        }
      }

    } catch (Throwable e) {
      throw new ElasticsearchParseException("failed to parse request", e);
    } finally {
      context.types(previousTypes);
      SearchContext.removeCurrent();
      if (parser != null) {
        parser.close();
      }
    }

    return doc;
  }
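
Putting the parser's branches together: a request body accepted by this method can carry a "doc" plus an optional "query" (or "filter", but not both), "size", "sort", and "track_scores". A representative body, with illustrative field names and values:

    {
      "doc": { "message": "A new bonsai tree in the office" },
      "query": { "term": { "message": "bonsai" } },
      "size": 10,
      "track_scores": true,
      "sort": "_score"
    }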