@Override
 public PhraseTooLargeActionModule parse(QueryParseContext parseContext) throws IOException {
   PhraseTooLargeActionModule module = new PhraseTooLargeActionModule();
   XContentParser parser = parseContext.parser();
   String currentFieldName = null;
   XContentParser.Token token;
   while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
     if (token == XContentParser.Token.FIELD_NAME) {
       currentFieldName = parser.currentName();
     } else if (token.isValue()) {
       switch (currentFieldName) {
         case "max_terms_per_query":
         case "maxTermsPerQuery":
           module.maxTermsPerQuery(parser.intValue());
           break;
         case "max_terms_in_all_queries":
         case "maxTermsInAllQueries":
           module.maxTermsInAllQueries(parser.intValue());
           break;
         case "phrase_too_large_action":
         case "phraseTooLargeAction":
           module.phraseTooLargeAction(PhraseTooLargeAction.parse(parser.text()));
           break;
         default:
           throw new QueryParsingException(
               parseContext.index(),
               "[safer][phrase] query does not support the field [" + currentFieldName + "]");
       }
     } else {
       throw new QueryParsingException(
           parseContext.index(), "[safer][phrase] only supports values, not objects.");
     }
   }
   return module;
 }
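
For reference, a minimal sketch of the body this parser accepts, using the snake_case field names from the switch above; the values are hypothetical, and the action string must be one that PhraseTooLargeAction.parse understands:

  {
    "max_terms_per_query": 64,
    "max_terms_in_all_queries": 512,
    "phrase_too_large_action": "error"
  }
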
 private void parseCandidateGenerator(
     XContentParser parser,
     SearchContext context,
     String fieldName,
     PhraseSuggestionContext.DirectCandidateGenerator generator)
     throws IOException {
   if (!SuggestUtils.parseDirectSpellcheckerSettings(parser, fieldName, generator)) {
     if ("field".equals(fieldName)) {
       generator.setField(parser.text());
     } else if ("size".equals(fieldName)) {
       generator.size(parser.intValue());
     } else if ("pre_filter".equals(fieldName) || "preFilter".equals(fieldName)) {
       String analyzerName = parser.text();
       Analyzer analyzer = context.mapperService().analysisService().analyzer(analyzerName);
       if (analyzer == null) {
         throw new ElasticSearchIllegalArgumentException(
             "Analyzer [" + analyzerName + "] doesn't exists");
       }
       generator.preFilter(analyzer);
     } else if ("post_filter".equals(fieldName) || "postFilter".equals(fieldName)) {
       String analyzerName = parser.text();
       Analyzer analyzer = context.mapperService().analysisService().analyzer(analyzerName);
       if (analyzer == null) {
         throw new ElasticSearchIllegalArgumentException(
             "Analyzer [" + analyzerName + "] doesn't exists");
       }
       generator.postFilter(analyzer);
     } else {
       throw new ElasticSearchIllegalArgumentException(
           "CandidateGenerator doesn't support [" + fieldName + "]");
     }
   }
 }
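
A sketch of the generator options this method handles directly (anything else must be consumed by SuggestUtils.parseDirectSpellcheckerSettings first); the field and analyzer names are hypothetical:

  {
    "field": "title",
    "size": 10,
    "pre_filter": "reverse",
    "post_filter": "standard"
  }
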
 private static Object readValue(
     XContentParser parser, MapFactory mapFactory, XContentParser.Token t) throws IOException {
   if (t == XContentParser.Token.VALUE_NULL) {
     return null;
   } else if (t == XContentParser.Token.VALUE_STRING) {
     return parser.text();
   } else if (t == XContentParser.Token.VALUE_NUMBER) {
     XContentParser.NumberType numberType = parser.numberType();
     if (numberType == XContentParser.NumberType.INT) {
       return parser.intValue();
     } else if (numberType == XContentParser.NumberType.LONG) {
       return parser.longValue();
     } else if (numberType == XContentParser.NumberType.FLOAT) {
       return parser.floatValue();
     } else if (numberType == XContentParser.NumberType.DOUBLE) {
       return parser.doubleValue();
     }
   } else if (t == XContentParser.Token.VALUE_BOOLEAN) {
     return parser.booleanValue();
   } else if (t == XContentParser.Token.START_OBJECT) {
     return readMap(parser, mapFactory);
   } else if (t == XContentParser.Token.START_ARRAY) {
     return readList(parser, mapFactory, t);
   }
   return null;
 }
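
A worked summary of the token-to-type mapping above (inputs hypothetical):

  // VALUE_STRING  "foo"           -> String        (parser.text())
  // VALUE_NUMBER  1 / 1L / 1.5    -> Integer / Long / Float or Double, per numberType()
  // VALUE_BOOLEAN true            -> Boolean       (parser.booleanValue())
  // START_OBJECT  { ... }         -> Map           (readMap)
  // START_ARRAY   [ ... ]         -> List          (readList)
  // VALUE_NULL and anything else  -> null
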
Example #4
 public static IncludeExclude parseInclude(XContentParser parser, QueryParseContext context)
     throws IOException {
   XContentParser.Token token = parser.currentToken();
   if (token == XContentParser.Token.VALUE_STRING) {
     return new IncludeExclude(parser.text(), null);
   } else if (token == XContentParser.Token.START_ARRAY) {
     return new IncludeExclude(new TreeSet<>(parseArrayToSet(parser)), null);
   } else if (token == XContentParser.Token.START_OBJECT) {
     ParseFieldMatcher parseFieldMatcher = context.getParseFieldMatcher();
     String currentFieldName = null;
     Integer partition = null, numPartitions = null;
     while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
       if (token == XContentParser.Token.FIELD_NAME) {
         currentFieldName = parser.currentName();
        } else if (parseFieldMatcher.match(currentFieldName, PATTERN_FIELD)) {
          // This "include":{"pattern":"foo.*"} syntax is undocumented since 2.0;
          // regexes should be "include":"foo.*"
          return new IncludeExclude(parser.text(), null);
       } else if (parseFieldMatcher.match(currentFieldName, NUM_PARTITIONS_FIELD)) {
         numPartitions = parser.intValue();
       } else if (parseFieldMatcher.match(currentFieldName, PARTITION_FIELD)) {
         partition = parser.intValue();
       } else {
         throw new ElasticsearchParseException(
             "Unknown parameter in Include/Exclude clause: " + currentFieldName);
       }
     }
     if (partition == null) {
       throw new IllegalArgumentException(
           "Missing ["
               + PARTITION_FIELD.getPreferredName()
               + "] parameter for partition-based include");
     }
     if (numPartitions == null) {
       throw new IllegalArgumentException(
           "Missing ["
               + NUM_PARTITIONS_FIELD.getPreferredName()
               + "] parameter for partition-based include");
     }
     return new IncludeExclude(partition, numPartitions);
   } else {
     throw new IllegalArgumentException("Unrecognized token for an include [" + token + "]");
   }
 }
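
The three accepted shapes for "include", per the branches above (patterns and counts hypothetical, and assuming the obvious names behind PARTITION_FIELD and NUM_PARTITIONS_FIELD):

  "include": "foo.*"                                   // regex as a plain string
  "include": ["foo", "bar"]                            // explicit set of terms
  "include": { "partition": 0, "num_partitions": 10 }  // partition-based
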
    public static IndexTemplateMetaData fromXContent(XContentParser parser) throws IOException {
      Builder builder = new Builder(parser.currentName());

      String currentFieldName = null;
      XContentParser.Token token = parser.nextToken();
      while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
          currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_OBJECT) {
          if ("settings".equals(currentFieldName)) {
            ImmutableSettings.Builder settingsBuilder = ImmutableSettings.settingsBuilder();
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
              String key = parser.currentName();
              token = parser.nextToken();
              String value = parser.text();
              settingsBuilder.put(key, value);
            }
            builder.settings(settingsBuilder.build());
          } else if ("mappings".equals(currentFieldName)) {
            while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
              Map<String, Object> mapping = parser.map();
              if (mapping.size() == 1) {
                String mappingType = mapping.keySet().iterator().next();
                String mappingSource = XContentFactory.jsonBuilder().map(mapping).string();

                if (mappingSource == null) {
                  // no mapping source extracted; should this warn?
                } else {
                  builder.putMapping(mappingType, mappingSource);
                }
              }
            }
          }
        } else if (token.isValue()) {
          if ("template".equals(currentFieldName)) {
            builder.template(parser.text());
          } else if ("order".equals(currentFieldName)) {
            builder.order(parser.intValue());
          }
        }
      }
      return builder.build();
    }
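
A minimal template body this method parses; names and values are hypothetical, and each mappings entry is expected to be a single-type object:

  {
    "template": "logs-*",
    "order": 1,
    "settings": { "index.number_of_shards": "1" },
    "mappings": [ { "log": { "properties": { } } } ]
  }
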
    @Override
    public CancelAllocationCommand fromXContent(XContentParser parser) throws IOException {
      String index = null;
      int shardId = -1;
      String nodeId = null;
      boolean allowPrimary = false;

      String currentFieldName = null;
      XContentParser.Token token;
      while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
          currentFieldName = parser.currentName();
        } else if (token.isValue()) {
          if ("index".equals(currentFieldName)) {
            index = parser.text();
          } else if ("shard".equals(currentFieldName)) {
            shardId = parser.intValue();
          } else if ("node".equals(currentFieldName)) {
            nodeId = parser.text();
          } else if ("allow_primary".equals(currentFieldName)
              || "allowPrimary".equals(currentFieldName)) {
            allowPrimary = parser.booleanValue();
          } else {
            throw new ElasticsearchParseException(
                "[{}] command does not support field [{}]", NAME, currentFieldName);
          }
        } else {
          throw new ElasticsearchParseException(
              "[{}] command does not support complex json tokens [{}]", NAME, token);
        }
      }
      if (index == null) {
        throw new ElasticsearchParseException("[{}] command missing the index parameter", NAME);
      }
      if (shardId == -1) {
        throw new ElasticsearchParseException("[{}] command missing the shard parameter", NAME);
      }
      if (nodeId == null) {
        throw new ElasticsearchParseException("[{}] command missing the node parameter", NAME);
      }
      return new CancelAllocationCommand(new ShardId(index, shardId), nodeId, allowPrimary);
    }
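
For reference, a command body matching the fields checked above (the index and node values are hypothetical):

  { "index": "my-index", "shard": 0, "node": "node-1", "allow_primary": false }
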
Example #7
  private ParsedDocument parseRequest(
      IndexService documentIndexService, PercolateShardRequest request, PercolateContext context)
      throws ElasticsearchException {
    BytesReference source = request.source();
    if (source == null || source.length() == 0) {
      return null;
    }

    // TODO: combine all feature parse elements into one map
    Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements();
    Map<String, ? extends SearchParseElement> facetElements = facetPhase.parseElements();
    Map<String, ? extends SearchParseElement> aggregationElements =
        aggregationPhase.parseElements();

    ParsedDocument doc = null;
    XContentParser parser = null;

    // Some queries (e.g. the function_score query with decay functions) rely on a SearchContext
    // being set. We switch types because this context needs to be in the context of the percolate
    // queries in the shard, not the in-memory percolate doc.
    String[] previousTypes = context.types();
    context.types(new String[] {TYPE_NAME});
    SearchContext.setCurrent(context);
    try {
      parser = XContentFactory.xContent(source).createParser(source);
      String currentFieldName = null;
      XContentParser.Token token;
      while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
          currentFieldName = parser.currentName();
          // we need to check the "doc" here, so the next token will be START_OBJECT which is
          // the actual document starting
          if ("doc".equals(currentFieldName)) {
            if (doc != null) {
              throw new ElasticsearchParseException("Either specify doc or get, not both");
            }

            MapperService mapperService = documentIndexService.mapperService();
            DocumentMapper docMapper =
                mapperService.documentMapperWithAutoCreate(request.documentType());
            doc = docMapper.parse(source(parser).type(request.documentType()).flyweight(true));
            // the document parsing exits the "doc" object, so we need to set the new current
            // field.
            currentFieldName = parser.currentName();
          }
        } else if (token == XContentParser.Token.START_OBJECT) {
          SearchParseElement element = hlElements.get(currentFieldName);
          if (element == null) {
            element = facetElements.get(currentFieldName);
            if (element == null) {
              element = aggregationElements.get(currentFieldName);
            }
          }

          if ("query".equals(currentFieldName)) {
            if (context.percolateQuery() != null) {
              throw new ElasticsearchParseException("Either specify query or filter, not both");
            }
            context.percolateQuery(documentIndexService.queryParserService().parse(parser).query());
          } else if ("filter".equals(currentFieldName)) {
            if (context.percolateQuery() != null) {
              throw new ElasticsearchParseException("Either specify query or filter, not both");
            }
            Filter filter =
                documentIndexService.queryParserService().parseInnerFilter(parser).filter();
            context.percolateQuery(new XConstantScoreQuery(filter));
          } else if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          } else if (element != null) {
            element.parse(parser, context);
          }
        } else if (token == XContentParser.Token.START_ARRAY) {
          if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          }
        } else if (token == null) {
          break;
        } else if (token.isValue()) {
          if ("size".equals(currentFieldName)) {
            context.size(parser.intValue());
            if (context.size() < 0) {
              throw new ElasticsearchParseException(
                  "size is set to ["
                      + context.size()
                      + "] and is expected to be higher or equal to 0");
            }
          } else if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          } else if ("track_scores".equals(currentFieldName)
              || "trackScores".equals(currentFieldName)) {
            context.trackScores(parser.booleanValue());
          }
        }
      }

      // We need to get the actual source from the request body for highlighting, so parse the
      // request body again and only get the doc source.
      if (context.highlight() != null) {
        parser.close();
        currentFieldName = null;
        parser = XContentFactory.xContent(source).createParser(source);
        token = parser.nextToken();
        assert token == XContentParser.Token.START_OBJECT;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
          if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
          } else if (token == XContentParser.Token.START_OBJECT) {
            if ("doc".equals(currentFieldName)) {
              BytesStreamOutput bStream = new BytesStreamOutput();
              XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream);
              builder.copyCurrentStructure(parser);
              builder.close();
              doc.setSource(bStream.bytes());
              break;
            } else {
              parser.skipChildren();
            }
          } else if (token == null) {
            break;
          }
        }
      }

    } catch (Throwable e) {
      throw new ElasticsearchParseException("failed to parse request", e);
    } finally {
      context.types(previousTypes);
      SearchContext.removeCurrent();
      if (parser != null) {
        parser.close();
      }
    }

    return doc;
  }
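
A sketch of a percolate request body exercising the branches above; the doc payload and sizes are hypothetical, and query/filter are mutually exclusive:

  {
    "doc":   { "message": "hello" },
    "query": { "match_all": { } },
    "size":  10,
    "track_scores": true
  }
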
  public static SpanNearQueryBuilder fromXContent(QueryParseContext parseContext)
      throws IOException {
    XContentParser parser = parseContext.parser();

    float boost = AbstractQueryBuilder.DEFAULT_BOOST;
    Integer slop = null;
    boolean inOrder = SpanNearQueryBuilder.DEFAULT_IN_ORDER;
    String queryName = null;

    List<SpanQueryBuilder> clauses = new ArrayList<>();

    String currentFieldName = null;
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
      } else if (token == XContentParser.Token.START_ARRAY) {
        if (parseContext.getParseFieldMatcher().match(currentFieldName, CLAUSES_FIELD)) {
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            QueryBuilder query = parseContext.parseInnerQueryBuilder();
            if (!(query instanceof SpanQueryBuilder)) {
              throw new ParsingException(
                  parser.getTokenLocation(), "spanNear [clauses] must be of type span query");
            }
            clauses.add((SpanQueryBuilder) query);
          }
        } else {
          throw new ParsingException(
              parser.getTokenLocation(),
              "[span_near] query does not support [" + currentFieldName + "]");
        }
      } else if (token.isValue()) {
        if (parseContext.getParseFieldMatcher().match(currentFieldName, IN_ORDER_FIELD)) {
          inOrder = parser.booleanValue();
        } else if (parseContext
            .getParseFieldMatcher()
            .match(currentFieldName, COLLECT_PAYLOADS_FIELD)) {
          // Deprecated in 3.0.0
        } else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) {
          slop = parser.intValue();
        } else if (parseContext
            .getParseFieldMatcher()
            .match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
          boost = parser.floatValue();
        } else if (parseContext
            .getParseFieldMatcher()
            .match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
          queryName = parser.text();
        } else {
          throw new ParsingException(
              parser.getTokenLocation(),
              "[span_near] query does not support [" + currentFieldName + "]");
        }
      } else {
        throw new ParsingException(
            parser.getTokenLocation(),
            "[span_near] query does not support [" + currentFieldName + "]");
      }
    }

    if (clauses.isEmpty()) {
      throw new ParsingException(parser.getTokenLocation(), "span_near must include [clauses]");
    }

    if (slop == null) {
      throw new ParsingException(parser.getTokenLocation(), "span_near must include [slop]");
    }

    SpanNearQueryBuilder queryBuilder = new SpanNearQueryBuilder(clauses.get(0), slop);
    for (int i = 1; i < clauses.size(); i++) {
      queryBuilder.clause(clauses.get(i));
    }
    queryBuilder.inOrder(inOrder);
    queryBuilder.boost(boost);
    queryBuilder.queryName(queryName);
    return queryBuilder;
  }
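
The body shape this parser expects, assuming the conventional names behind CLAUSES_FIELD, SLOP_FIELD, IN_ORDER_FIELD, BOOST_FIELD and NAME_FIELD; field names and term values are hypothetical:

  {
    "clauses": [
      { "span_term": { "field1": "foo" } },
      { "span_term": { "field1": "bar" } }
    ],
    "slop": 2,
    "in_order": false,
    "boost": 2.0,
    "_name": "my_span_near"
  }
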
  @Override
  public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
    XContentParser parser = parseContext.parser();

    MoreLikeThisQuery mltQuery = new MoreLikeThisQuery();
    mltQuery.setSimilarity(parseContext.searchSimilarity());
    Analyzer analyzer = null;
    List<String> moreLikeFields = null;
    boolean failOnUnsupportedField = true;
    String queryName = null;
    boolean include = false;

    XContentParser.Token token;
    String currentFieldName = null;

    List<String> likeTexts = new ArrayList<>();
    MultiTermVectorsRequest likeItems = new MultiTermVectorsRequest();

    List<String> unlikeTexts = new ArrayList<>();
    MultiTermVectorsRequest unlikeItems = new MultiTermVectorsRequest();

    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
      } else if (token.isValue()) {
        if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.LIKE_TEXT)) {
          likeTexts.add(parser.text());
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.LIKE)) {
          parseLikeField(parser, likeTexts, likeItems);
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.UNLIKE)) {
          parseLikeField(parser, unlikeTexts, unlikeItems);
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.MIN_TERM_FREQ)) {
          mltQuery.setMinTermFrequency(parser.intValue());
        } else if (parseContext
            .parseFieldMatcher()
            .match(currentFieldName, Fields.MAX_QUERY_TERMS)) {
          mltQuery.setMaxQueryTerms(parser.intValue());
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.MIN_DOC_FREQ)) {
          mltQuery.setMinDocFreq(parser.intValue());
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.MAX_DOC_FREQ)) {
          mltQuery.setMaxDocFreq(parser.intValue());
        } else if (parseContext
            .parseFieldMatcher()
            .match(currentFieldName, Fields.MIN_WORD_LENGTH)) {
          mltQuery.setMinWordLen(parser.intValue());
        } else if (parseContext
            .parseFieldMatcher()
            .match(currentFieldName, Fields.MAX_WORD_LENGTH)) {
          mltQuery.setMaxWordLen(parser.intValue());
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.BOOST_TERMS)) {
          float boostFactor = parser.floatValue();
          if (boostFactor != 0) {
            mltQuery.setBoostTerms(true);
            mltQuery.setBoostTermsFactor(boostFactor);
          }
        } else if (parseContext
            .parseFieldMatcher()
            .match(currentFieldName, Fields.MINIMUM_SHOULD_MATCH)) {
          mltQuery.setMinimumShouldMatch(parser.text());
        } else if ("analyzer".equals(currentFieldName)) {
          analyzer = parseContext.analysisService().analyzer(parser.text());
        } else if ("boost".equals(currentFieldName)) {
          mltQuery.setBoost(parser.floatValue());
        } else if (parseContext
            .parseFieldMatcher()
            .match(currentFieldName, Fields.FAIL_ON_UNSUPPORTED_FIELD)) {
          failOnUnsupportedField = parser.booleanValue();
        } else if ("_name".equals(currentFieldName)) {
          queryName = parser.text();
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.INCLUDE)) {
          include = parser.booleanValue();
        } else {
          throw new QueryParsingException(
              parseContext, "[mlt] query does not support [" + currentFieldName + "]");
        }
      } else if (token == XContentParser.Token.START_ARRAY) {
        if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.STOP_WORDS)) {
          Set<String> stopWords = Sets.newHashSet();
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            stopWords.add(parser.text());
          }
          mltQuery.setStopWords(stopWords);
        } else if ("fields".equals(currentFieldName)) {
          moreLikeFields = new LinkedList<>();
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            String field = parser.text();
            MappedFieldType fieldType = parseContext.fieldMapper(field);
            moreLikeFields.add(fieldType == null ? field : fieldType.names().indexName());
          }
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.DOCUMENT_IDS)) {
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            if (!token.isValue()) {
              throw new IllegalArgumentException("ids array element should only contain ids");
            }
            likeItems.add(newTermVectorsRequest().id(parser.text()));
          }
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.DOCUMENTS)) {
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            if (token != XContentParser.Token.START_OBJECT) {
              throw new IllegalArgumentException("docs array element should include an object");
            }
            likeItems.add(parseDocument(parser));
          }
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.LIKE)) {
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            parseLikeField(parser, likeTexts, likeItems);
          }
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.UNLIKE)) {
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            parseLikeField(parser, unlikeTexts, unlikeItems);
          }
        } else {
          throw new QueryParsingException(
              parseContext, "[mlt] query does not support [" + currentFieldName + "]");
        }
      } else if (token == XContentParser.Token.START_OBJECT) {
        if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.LIKE)) {
          parseLikeField(parser, likeTexts, likeItems);
        } else if (parseContext.parseFieldMatcher().match(currentFieldName, Fields.UNLIKE)) {
          parseLikeField(parser, unlikeTexts, unlikeItems);
        } else {
          throw new QueryParsingException(
              parseContext, "[mlt] query does not support [" + currentFieldName + "]");
        }
      }
    }

    if (likeTexts.isEmpty() && likeItems.isEmpty()) {
      throw new QueryParsingException(
          parseContext, "more_like_this requires 'like' to be specified");
    }
    if (moreLikeFields != null && moreLikeFields.isEmpty()) {
      throw new QueryParsingException(
          parseContext, "more_like_this requires 'fields' to be non-empty");
    }

    // set analyzer
    if (analyzer == null) {
      analyzer = parseContext.mapperService().searchAnalyzer();
    }
    mltQuery.setAnalyzer(analyzer);

    // set like text fields
    boolean useDefaultField = (moreLikeFields == null);
    if (useDefaultField) {
      moreLikeFields = Collections.singletonList(parseContext.defaultField());
    }
    // possibly remove unsupported fields
    removeUnsupportedFields(moreLikeFields, analyzer, failOnUnsupportedField);
    if (moreLikeFields.isEmpty()) {
      return null;
    }
    mltQuery.setMoreLikeFields(moreLikeFields.toArray(Strings.EMPTY_ARRAY));

    // support for named query
    if (queryName != null) {
      parseContext.addNamedQuery(queryName, mltQuery);
    }

    // handle like texts
    if (!likeTexts.isEmpty()) {
      mltQuery.setLikeText(likeTexts);
    }
    if (!unlikeTexts.isEmpty()) {
      mltQuery.setIgnoreText(unlikeTexts);
    }

    // handle items
    if (!likeItems.isEmpty()) {
      // set default index, type and fields if not specified
      MultiTermVectorsRequest items = likeItems;
      for (TermVectorsRequest item : unlikeItems) {
        items.add(item);
      }

      for (TermVectorsRequest item : items) {
        if (item.index() == null) {
          item.index(parseContext.index().name());
        }
        if (item.type() == null) {
          if (parseContext.queryTypes().size() > 1) {
            throw new QueryParsingException(
                parseContext,
                "ambiguous type for item with id: " + item.id() + " and index: " + item.index());
          } else {
            item.type(parseContext.queryTypes().iterator().next());
          }
        }
        // default fields if not present but don't override for artificial docs
        if (item.selectedFields() == null && item.doc() == null) {
          if (useDefaultField) {
            item.selectedFields("*");
          } else {
            item.selectedFields(moreLikeFields.toArray(new String[moreLikeFields.size()]));
          }
        }
      }
      // fetching the items with multi-termvectors API
      items.copyContextAndHeadersFrom(SearchContext.current());
      MultiTermVectorsResponse responses = fetchService.fetchResponse(items);

      // getting the Fields for liked items
      mltQuery.setLikeText(MoreLikeThisFetchService.getFields(responses, likeItems));

      // getting the Fields for ignored items
      if (!unlikeItems.isEmpty()) {
        org.apache.lucene.index.Fields[] ignoreFields =
            MoreLikeThisFetchService.getFields(responses, unlikeItems);
        if (ignoreFields.length > 0) {
          mltQuery.setUnlikeText(ignoreFields);
        }
      }

      BooleanQuery.Builder boolQuery = new BooleanQuery.Builder();
      boolQuery.add(mltQuery, BooleanClause.Occur.SHOULD);

      // exclude the items from the search
      if (!include) {
        handleExclude(boolQuery, likeItems);
      }
      return boolQuery.build();
    }

    return mltQuery;
  }
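
A sketch of an mlt body covering the main branches above, assuming the conventional names behind the Fields constants (like, unlike, min_term_freq, max_query_terms, ids); all values are hypothetical:

  {
    "fields": ["title", "body"],
    "like": "some free text",
    "unlike": "but not this",
    "min_term_freq": 1,
    "max_query_terms": 12,
    "ids": ["1", "2"]
  }
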
Example #10
  @Override
  public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context)
      throws IOException {
    String field = null;
    int size = 10;
    int shardSize = -1;

    String[] fieldsNames = null;
    ImmutableSet<BytesRef> excluded = ImmutableSet.of();
    String regex = null;
    String regexFlags = null;
    TermsFacet.ComparatorType comparatorType = TermsFacet.ComparatorType.COUNT;
    String scriptLang = null;
    String script = null;
    ScriptService.ScriptType scriptType = null;
    Map<String, Object> params = null;
    boolean allTerms = false;
    String executionHint = null;

    String currentFieldName = null;
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
      } else if (token == XContentParser.Token.START_OBJECT) {
        if ("params".equals(currentFieldName)) {
          params = parser.map();
        } else {
          throw new ElasticsearchParseException(
              "unknown parameter ["
                  + currentFieldName
                  + "] while parsing terms facet ["
                  + facetName
                  + "]");
        }
      } else if (token == XContentParser.Token.START_ARRAY) {
        if ("exclude".equals(currentFieldName)) {
          ImmutableSet.Builder<BytesRef> builder = ImmutableSet.builder();
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            builder.add(parser.bytes());
          }
          excluded = builder.build();
        } else if ("fields".equals(currentFieldName)) {
          List<String> fields = Lists.newArrayListWithCapacity(4);
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            fields.add(parser.text());
          }
          fieldsNames = fields.toArray(new String[fields.size()]);
        } else {
          throw new ElasticsearchParseException(
              "unknown parameter ["
                  + currentFieldName
                  + "] while parsing terms facet ["
                  + facetName
                  + "]");
        }
      } else if (token.isValue()) {
        if ("field".equals(currentFieldName)) {
          field = parser.text();
        } else if (ScriptService.SCRIPT_INLINE.match(currentFieldName)) {
          script = parser.text();
          scriptType = ScriptService.ScriptType.INLINE;
        } else if (ScriptService.SCRIPT_ID.match(currentFieldName)) {
          script = parser.text();
          scriptType = ScriptService.ScriptType.INDEXED;
        } else if (ScriptService.SCRIPT_FILE.match(currentFieldName)) {
          script = parser.text();
          scriptType = ScriptService.ScriptType.FILE;
        } else if (ScriptService.SCRIPT_LANG.match(currentFieldName)) {
          scriptLang = parser.text();
        } else if ("size".equals(currentFieldName)) {
          size = parser.intValue();
        } else if ("shard_size".equals(currentFieldName) || "shardSize".equals(currentFieldName)) {
          shardSize = parser.intValue();
        } else if ("all_terms".equals(currentFieldName) || "allTerms".equals(currentFieldName)) {
          allTerms = parser.booleanValue();
        } else if ("regex".equals(currentFieldName)) {
          regex = parser.text();
        } else if ("regex_flags".equals(currentFieldName)
            || "regexFlags".equals(currentFieldName)) {
          regexFlags = parser.text();
        } else if ("order".equals(currentFieldName) || "comparator".equals(currentFieldName)) {
          comparatorType = TermsFacet.ComparatorType.fromString(parser.text());
        } else if ("execution_hint".equals(currentFieldName)
            || "executionHint".equals(currentFieldName)) {
          executionHint = parser.textOrNull();
        } else {
          throw new ElasticsearchParseException(
              "unknown parameter ["
                  + currentFieldName
                  + "] while parsing terms facet ["
                  + facetName
                  + "]");
        }
      }
    }

    if (fieldsNames != null && fieldsNames.length == 1) {
      field = fieldsNames[0];
      fieldsNames = null;
    }

    Pattern pattern = null;
    if (regex != null) {
      pattern = Regex.compile(regex, regexFlags);
    }

    SearchScript searchScript = null;
    if (script != null) {
      searchScript =
          context.scriptService().search(context.lookup(), scriptLang, script, scriptType, params);
    }

    // shard_size cannot be smaller than size, as we need to fetch at least <size> entries from
    // every shard in order to return <size>
    if (shardSize < size) {
      shardSize = size;
    }

    if (fieldsNames != null) {

      // in case of multi fields, we only collect the fields that are mapped and facet on them.
      ArrayList<FieldMapper> mappers = new ArrayList<>(fieldsNames.length);
      for (int i = 0; i < fieldsNames.length; i++) {
        FieldMapper mapper = context.smartNameFieldMapper(fieldsNames[i]);
        if (mapper != null) {
          mappers.add(mapper);
        }
      }
      if (mappers.isEmpty()) {
        // none of the fields is mapped
        return new UnmappedFieldExecutor(size, comparatorType);
      }
      return new FieldsTermsStringFacetExecutor(
          mappers.toArray(new FieldMapper[mappers.size()]),
          size,
          shardSize,
          comparatorType,
          allTerms,
          context,
          excluded,
          pattern,
          searchScript);
    }
    if (field == null && script != null) {
      return new ScriptTermsStringFieldFacetExecutor(
          size,
          shardSize,
          comparatorType,
          context,
          excluded,
          pattern,
          scriptLang,
          script,
          scriptType,
          params,
          context.cacheRecycler());
    }

    if (field == null) {
      throw new ElasticsearchParseException(
          "terms facet [" + facetName + "] must have a field, fields or script parameter");
    }

    FieldMapper fieldMapper = context.smartNameFieldMapper(field);
    if (fieldMapper == null) {
      return new UnmappedFieldExecutor(size, comparatorType);
    }

    IndexFieldData indexFieldData = context.fieldData().getForField(fieldMapper);
    if (indexFieldData instanceof IndexNumericFieldData) {
      IndexNumericFieldData indexNumericFieldData = (IndexNumericFieldData) indexFieldData;
      if (indexNumericFieldData.getNumericType().isFloatingPoint()) {
        return new TermsDoubleFacetExecutor(
            indexNumericFieldData,
            size,
            shardSize,
            comparatorType,
            allTerms,
            context,
            excluded,
            searchScript,
            context.cacheRecycler());
      } else {
        return new TermsLongFacetExecutor(
            indexNumericFieldData,
            size,
            shardSize,
            comparatorType,
            allTerms,
            context,
            excluded,
            searchScript,
            context.cacheRecycler());
      }
    } else {
      if (script != null || "map".equals(executionHint)) {
        return new TermsStringFacetExecutor(
            indexFieldData,
            size,
            shardSize,
            comparatorType,
            allTerms,
            context,
            excluded,
            pattern,
            searchScript);
      } else if (indexFieldData instanceof IndexOrdinalsFieldData) {
        return new TermsStringOrdinalsFacetExecutor(
            (IndexOrdinalsFieldData) indexFieldData,
            size,
            shardSize,
            comparatorType,
            allTerms,
            context,
            excluded,
            pattern,
            ordinalsCacheAbove);
      } else {
        return new TermsStringFacetExecutor(
            indexFieldData,
            size,
            shardSize,
            comparatorType,
            allTerms,
            context,
            excluded,
            pattern,
            searchScript);
      }
    }
  }
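
For reference, a terms facet body exercising the value branches above (field, sizes and patterns hypothetical; "count" assumes the usual ComparatorType spelling):

  {
    "field": "tags",
    "size": 10,
    "shard_size": 50,
    "order": "count",
    "exclude": ["misc"],
    "regex": "t.*"
  }
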
 @Override
 protected void innerParseCreateField(ParseContext context, List<Field> fields)
     throws IOException {
   int value;
   float boost = this.boost;
   if (context.externalValueSet()) {
     Object externalValue = context.externalValue();
     if (externalValue == null) {
       if (nullValue == null) {
         return;
       }
       value = nullValue;
     } else if (externalValue instanceof String) {
       String sExternalValue = (String) externalValue;
       if (sExternalValue.length() == 0) {
         if (nullValue == null) {
           return;
         }
         value = nullValue;
       } else {
         value = Integer.parseInt(sExternalValue);
       }
     } else {
       value = ((Number) externalValue).intValue();
     }
     if (context.includeInAll(includeInAll, this)) {
       context.allEntries().addText(names.fullName(), Integer.toString(value), boost);
     }
   } else {
     XContentParser parser = context.parser();
     if (parser.currentToken() == XContentParser.Token.VALUE_NULL
         || (parser.currentToken() == XContentParser.Token.VALUE_STRING
             && parser.textLength() == 0)) {
       if (nullValue == null) {
         return;
       }
       value = nullValue;
       if (nullValueAsString != null && (context.includeInAll(includeInAll, this))) {
         context.allEntries().addText(names.fullName(), nullValueAsString, boost);
       }
     } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
       XContentParser.Token token;
       String currentFieldName = null;
       Integer objValue = nullValue;
       while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
         if (token == XContentParser.Token.FIELD_NAME) {
           currentFieldName = parser.currentName();
         } else {
           if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
             if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
               objValue = parser.intValue(coerce.value());
             }
           } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
             boost = parser.floatValue();
           } else {
             throw new ElasticsearchIllegalArgumentException(
                 "unknown property [" + currentFieldName + "]");
           }
         }
       }
       if (objValue == null) {
         // no value
         return;
       }
       value = objValue;
     } else {
       value = parser.intValue(coerce.value());
       if (context.includeInAll(includeInAll, this)) {
         context.allEntries().addText(names.fullName(), parser.text(), boost);
       }
     }
   }
   addIntegerFields(context, fields, value, boost);
 }
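
The object form this mapper accepts for a single field value, per the inner loop above (numbers hypothetical; "_value" and "_boost" are accepted aliases):

  { "value": 5, "boost": 2.0 }
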
  @Override
  public AggregatorFactory parse(
      String aggregationName, XContentParser parser, SearchContext context) throws IOException {

    ValuesSourceParser vsParser =
        ValuesSourceParser.any(aggregationName, SignificantStringTerms.TYPE, context)
            .scriptable(false)
            .formattable(true)
            .requiresSortedValues(true)
            .requiresUniqueValues(true)
            .build();

    IncludeExclude.Parser incExcParser =
        new IncludeExclude.Parser(aggregationName, SignificantStringTerms.TYPE, context);

    Filter filter = null;
    int requiredSize = DEFAULT_REQUIRED_SIZE;
    int shardSize = DEFAULT_SHARD_SIZE;
    long minDocCount = DEFAULT_MIN_DOC_COUNT;
    String executionHint = null;

    XContentParser.Token token;
    String currentFieldName = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
      } else if (vsParser.token(currentFieldName, token, parser)) {
        continue;
      } else if (incExcParser.token(currentFieldName, token, parser)) {
        continue;
      } else if (token == XContentParser.Token.VALUE_STRING) {
        if ("execution_hint".equals(currentFieldName) || "executionHint".equals(currentFieldName)) {
          executionHint = parser.text();
        } else {
          throw new SearchParseException(
              context,
              "Unknown key for a "
                  + token
                  + " in ["
                  + aggregationName
                  + "]: ["
                  + currentFieldName
                  + "].");
        }
      } else if (token == XContentParser.Token.VALUE_NUMBER) {
        if ("size".equals(currentFieldName)) {
          requiredSize = parser.intValue();
        } else if ("shard_size".equals(currentFieldName) || "shardSize".equals(currentFieldName)) {
          shardSize = parser.intValue();
        } else if ("min_doc_count".equals(currentFieldName)
            || "minDocCount".equals(currentFieldName)) {
          minDocCount = parser.intValue();
        } else {
          throw new SearchParseException(
              context,
              "Unknown key for a "
                  + token
                  + " in ["
                  + aggregationName
                  + "]: ["
                  + currentFieldName
                  + "].");
        }
      } else if (token == XContentParser.Token.START_OBJECT) {
        // TODO not sure if code below is the best means to declare a filter for
        // defining an alternative background stats context.
        // In trial runs it becomes obvious that the choice of background does have to
        // be a strict superset of the foreground subset otherwise the significant terms algo
        // immediately singles out the odd terms that are in the foreground but not represented
        // in the background. So a better approach may be to use a designated parent agg as the
        // background because parent aggs are always guaranteed to be a superset whereas arbitrary
        // filters defined by end users and parsed below are not.
        //                if ("background_context".equals(currentFieldName)) {
        //                    filter =
        // context.queryParserService().parseInnerFilter(parser).filter();
        //                }

      } else {
        throw new SearchParseException(
            context, "Unexpected token " + token + " in [" + aggregationName + "].");
      }
    }

    if (shardSize == DEFAULT_SHARD_SIZE) {
      // The user has not made a shardSize selection.
      // Use default heuristic to avoid any wrong-ranking caused by distributed counting
      // but request double the usual amount.
      // We typically need more than the number of "top" terms requested by other aggregations
      // as the significance algorithm is in less of a position to down-select at shard-level -
      // some of the things we want to find have only one occurrence on each shard and as
      // such are impossible to differentiate from non-significant terms at that early stage.
      shardSize = 2 * BucketUtils.suggestShardSideQueueSize(requiredSize, context.numberOfShards());
    }

    // shard_size cannot be smaller than size, as we need to fetch at least <size> entries from
    // every shard in order to return <size>
    if (shardSize < requiredSize) {
      shardSize = requiredSize;
    }

    IncludeExclude includeExclude = incExcParser.includeExclude();
    return new SignificantTermsAggregatorFactory(
        aggregationName,
        vsParser.config(),
        requiredSize,
        shardSize,
        minDocCount,
        includeExclude,
        executionHint,
        filter);
  }
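
A sketch of a significant_terms body for this parser; "field" is assumed to be consumed by the ValuesSourceParser, and the execution hint value is hypothetical:

  {
    "field": "crime_type",
    "size": 10,
    "shard_size": 200,
    "min_doc_count": 3,
    "execution_hint": "map"
  }
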
Example #13
  @Override
  public AggregatorFactory parse(
      String aggregationName, XContentParser parser, SearchContext context) throws IOException {

    String field = null;
    String script = null;
    String scriptLang = null;
    Map<String, Object> scriptParams = null;
    Terms.ValueType valueType = null;
    int requiredSize = 10;
    String orderKey = "_count";
    boolean orderAsc = false;
    String format = null;
    boolean assumeUnique = false;

    XContentParser.Token token;
    String currentFieldName = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
      } else if (token == XContentParser.Token.VALUE_STRING) {
        if ("field".equals(currentFieldName)) {
          field = parser.text();
        } else if ("script".equals(currentFieldName)) {
          script = parser.text();
        } else if ("script_lang".equals(currentFieldName)
            || "scriptLang".equals(currentFieldName)) {
          scriptLang = parser.text();
        } else if ("value_type".equals(currentFieldName) || "valueType".equals(currentFieldName)) {
          valueType = Terms.ValueType.resolveType(parser.text());
        } else if ("format".equals(currentFieldName)) {
          format = parser.text();
        }
      } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
        if ("script_values_unique".equals(currentFieldName)) {
          assumeUnique = parser.booleanValue();
        }
      } else if (token == XContentParser.Token.VALUE_NUMBER) {
        if ("size".equals(currentFieldName)) {
          requiredSize = parser.intValue();
        }
      } else if (token == XContentParser.Token.START_OBJECT) {
        if ("params".equals(currentFieldName)) {
          scriptParams = parser.map();
        } else if ("order".equals(currentFieldName)) {
          while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
              orderKey = parser.currentName();
            } else if (token == XContentParser.Token.VALUE_STRING) {
              String dir = parser.text();
              orderAsc = "asc".equalsIgnoreCase(dir);
              // TODO: do we want to throw a parse error if the alternative is not "desc"???
            }
          }
        }
      }
    }

    InternalOrder order = resolveOrder(orderKey, orderAsc);
    SearchScript searchScript = null;
    if (script != null) {
      searchScript =
          context.scriptService().search(context.lookup(), scriptLang, script, scriptParams);
    }

    if (field == null) {

      Class<? extends ValuesSource> valueSourceType =
          script == null
              ? ValuesSource.class // unknown, will inherit whatever is in the context
              : valueType != null
                  ? valueType.scriptValueType.getValuesSourceType() // the user explicitly defined a value type
                  : BytesValuesSource.class; // defaulting to bytes

      ValuesSourceConfig<?> config = new ValuesSourceConfig(valueSourceType);
      if (valueType != null) {
        config.scriptValueType(valueType.scriptValueType);
      }
      config.script(searchScript);
      if (!assumeUnique) {
        config.ensureUnique(true);
      }
      return new TermsAggregatorFactory(aggregationName, config, order, requiredSize);
    }

    FieldMapper<?> mapper = context.smartNameFieldMapper(field);
    if (mapper == null) {
      ValuesSourceConfig<?> config =
          new ValuesSourceConfig<BytesValuesSource>(BytesValuesSource.class);
      config.unmapped(true);
      return new TermsAggregatorFactory(aggregationName, config, order, requiredSize);
    }
    IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);

    ValuesSourceConfig<?> config;

    if (mapper instanceof DateFieldMapper) {
      DateFieldMapper dateMapper = (DateFieldMapper) mapper;
      ValueFormatter formatter =
          format == null
              ? new ValueFormatter.DateTime(dateMapper.dateTimeFormatter())
              : new ValueFormatter.DateTime(format);
      config =
          new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class)
              .formatter(formatter)
              .parser(new ValueParser.DateMath(dateMapper.dateMathParser()));

    } else if (mapper instanceof IpFieldMapper) {
      config =
          new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class)
              .formatter(ValueFormatter.IPv4)
              .parser(ValueParser.IPv4);

    } else if (indexFieldData instanceof IndexNumericFieldData) {
      config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
      if (format != null) {
        config.formatter(new ValueFormatter.Number.Pattern(format));
      }

    } else {
      config = new ValuesSourceConfig<BytesValuesSource>(BytesValuesSource.class);
      // TODO: it will make sense to set false instead here if the aggregator factory uses
      // ordinals instead of hash tables
      config.needsHashes(true);
    }

    config.script(searchScript);

    config.fieldContext(new FieldContext(field, indexFieldData));

    // We need values to be unique to be able to run terms aggs efficiently
    if (!assumeUnique) {
      config.ensureUnique(true);
    }

    return new TermsAggregatorFactory(aggregationName, config, order, requiredSize);
  }
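
A minimal terms aggregation body per the branches above (field and size hypothetical; "_term" assumes the usual order key alongside the default "_count"):

  {
    "field": "category",
    "size": 20,
    "order": { "_term": "asc" }
  }
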
  static TermSuggestionBuilder innerFromXContent(QueryParseContext parseContext)
      throws IOException {
    XContentParser parser = parseContext.parser();
    TermSuggestionBuilder tmpSuggestion = new TermSuggestionBuilder("_na_");
    ParseFieldMatcher parseFieldMatcher = parseContext.getParseFieldMatcher();
    XContentParser.Token token;
    String currentFieldName = null;
    String fieldname = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
      } else if (token.isValue()) {
        if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.ANALYZER_FIELD)) {
          tmpSuggestion.analyzer(parser.text());
        } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.FIELDNAME_FIELD)) {
          fieldname = parser.text();
        } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.SIZE_FIELD)) {
          tmpSuggestion.size(parser.intValue());
        } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.SHARDSIZE_FIELD)) {
          tmpSuggestion.shardSize(parser.intValue());
        } else if (parseFieldMatcher.match(currentFieldName, SUGGESTMODE_FIELD)) {
          tmpSuggestion.suggestMode(SuggestMode.resolve(parser.text()));
        } else if (parseFieldMatcher.match(currentFieldName, ACCURACY_FIELD)) {
          tmpSuggestion.accuracy(parser.floatValue());
        } else if (parseFieldMatcher.match(currentFieldName, SORT_FIELD)) {
          tmpSuggestion.sort(SortBy.resolve(parser.text()));
        } else if (parseFieldMatcher.match(currentFieldName, STRING_DISTANCE_FIELD)) {
          tmpSuggestion.stringDistance(StringDistanceImpl.resolve(parser.text()));
        } else if (parseFieldMatcher.match(currentFieldName, MAX_EDITS_FIELD)) {
          tmpSuggestion.maxEdits(parser.intValue());
        } else if (parseFieldMatcher.match(currentFieldName, MAX_INSPECTIONS_FIELD)) {
          tmpSuggestion.maxInspections(parser.intValue());
        } else if (parseFieldMatcher.match(currentFieldName, MAX_TERM_FREQ_FIELD)) {
          tmpSuggestion.maxTermFreq(parser.floatValue());
        } else if (parseFieldMatcher.match(currentFieldName, PREFIX_LENGTH_FIELD)) {
          tmpSuggestion.prefixLength(parser.intValue());
        } else if (parseFieldMatcher.match(currentFieldName, MIN_WORD_LENGTH_FIELD)) {
          tmpSuggestion.minWordLength(parser.intValue());
        } else if (parseFieldMatcher.match(currentFieldName, MIN_DOC_FREQ_FIELD)) {
          tmpSuggestion.minDocFreq(parser.floatValue());
        } else if (parseFieldMatcher.match(currentFieldName, EXACT_MATCH_FIELD)) {
          tmpSuggestion.exactMatch(parser.booleanValue());
        } else {
          throw new ParsingException(
              parser.getTokenLocation(),
              "suggester[term] doesn't support field [" + currentFieldName + "]");
        }
      } else {
        throw new ParsingException(
            parser.getTokenLocation(),
            "suggester[term] parsing failed on [" + currentFieldName + "]");
      }
    }

    // now we should have the field name; check it and copy the fields over to the suggestion builder we return
    if (fieldname == null) {
      throw new ElasticsearchParseException(
          "the required field option [" + FIELDNAME_FIELD.getPreferredName() + "] is missing");
    }
    return new TermSuggestionBuilder(fieldname, tmpSuggestion);
  }
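
For reference, a term-suggester options body, assuming the conventional names behind the ParseField constants; values are hypothetical:

  {
    "field": "body",
    "size": 5,
    "suggest_mode": "missing",
    "max_edits": 2,
    "min_word_length": 4
  }
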
Example #15
 public static Snapshot fromXContent(XContentParser parser) throws IOException {
   String name = null;
   Version version = Version.CURRENT;
   SnapshotState state = SnapshotState.IN_PROGRESS;
   String reason = null;
   List<String> indices = Collections.emptyList();
   long startTime = 0;
   long endTime = 0;
   int totalShard = 0;
   int successfulShards = 0;
   List<SnapshotShardFailure> shardFailures = NO_FAILURES;
   if (parser.currentToken() == null) { // fresh parser? move to the first token
     parser.nextToken();
   }
   if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
     // on a start object, move to the next token
     parser.nextToken();
   }
   XContentParser.Token token;
   if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
     String currentFieldName = parser.currentName();
     if ("snapshot".equals(currentFieldName)) {
       while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
         if (token == XContentParser.Token.FIELD_NAME) {
           currentFieldName = parser.currentName();
           token = parser.nextToken();
           if (token.isValue()) {
             if ("name".equals(currentFieldName)) {
               name = parser.text();
             } else if ("state".equals(currentFieldName)) {
               state = SnapshotState.valueOf(parser.text());
             } else if ("reason".equals(currentFieldName)) {
               reason = parser.text();
             } else if ("start_time".equals(currentFieldName)) {
               startTime = parser.longValue();
             } else if ("end_time".equals(currentFieldName)) {
               endTime = parser.longValue();
             } else if ("total_shards".equals(currentFieldName)) {
               totalShard = parser.intValue();
             } else if ("successful_shards".equals(currentFieldName)) {
               successfulShards = parser.intValue();
             } else if ("version_id".equals(currentFieldName)) {
               version = Version.fromId(parser.intValue());
             }
           } else if (token == XContentParser.Token.START_ARRAY) {
             if ("indices".equals(currentFieldName)) {
               ArrayList<String> indicesArray = new ArrayList<>();
               while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                 indicesArray.add(parser.text());
               }
               indices = Collections.unmodifiableList(indicesArray);
             } else if ("failures".equals(currentFieldName)) {
               ArrayList<SnapshotShardFailure> shardFailureArrayList = new ArrayList<>();
               while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                 shardFailureArrayList.add(SnapshotShardFailure.fromXContent(parser));
               }
               shardFailures = Collections.unmodifiableList(shardFailureArrayList);
             } else {
               // It was probably created by newer version - ignoring
               parser.skipChildren();
             }
           } else if (token == XContentParser.Token.START_OBJECT) {
             // It was probably created by newer version - ignoring
             parser.skipChildren();
           }
         }
       }
     }
   } else {
     throw new ElasticsearchParseException("unexpected token  [" + token + "]");
   }
   return new Snapshot(
       name,
       indices,
       state,
       reason,
       version,
       startTime,
       endTime,
       totalShard,
       successfulShards,
       shardFailures);
 }
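
For reference, a minimal sketch of the document this method accepts; the field names come straight from the parsing branches above, while the concrete values are illustrative only:

  // Hypothetical example input for Snapshot.fromXContent. The wrapping
  // "snapshot" object is required; unknown arrays/objects are skipped.
  static final String EXAMPLE_SNAPSHOT_JSON =
      "{\"snapshot\":{"
          + "\"name\":\"snap_1\","
          + "\"version_id\":1070099," // passed to Version.fromId; the id is illustrative
          + "\"state\":\"SUCCESS\","  // any SnapshotState constant
          + "\"start_time\":1423000000000,"
          + "\"end_time\":1423000123456,"
          + "\"total_shards\":5,"
          + "\"successful_shards\":5,"
          + "\"indices\":[\"index_1\",\"index_2\"],"
          + "\"failures\":[]"
          + "}}";
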
  @Override
  public void parse(XContentParser parser, SearchContext context) throws Exception {
    XContentParser.Token token;
    String topLevelFieldName = null;
    List<SearchContextHighlight.Field> fields = newArrayList();

    String[] globalPreTags = DEFAULT_PRE_TAGS;
    String[] globalPostTags = DEFAULT_POST_TAGS;
    boolean globalScoreOrdered = false;
    boolean globalHighlightFilter = true;
    int globalFragmentSize = 100;
    int globalNumOfFragments = 5;

    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        topLevelFieldName = parser.currentName();
      } else if (token == XContentParser.Token.START_ARRAY) {
        if ("pre_tags".equals(topLevelFieldName) || "preTags".equals(topLevelFieldName)) {
          List<String> preTagsList = Lists.newArrayList();
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            preTagsList.add(parser.text());
          }
          globalPreTags = preTagsList.toArray(new String[preTagsList.size()]);
        } else if ("post_tags".equals(topLevelFieldName) || "postTags".equals(topLevelFieldName)) {
          List<String> postTagsList = Lists.newArrayList();
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            postTagsList.add(parser.text());
          }
          globalPostTags = postTagsList.toArray(new String[postTagsList.size()]);
        }
      } else if (token.isValue()) {
        if ("order".equals(topLevelFieldName)) {
          globalScoreOrdered = "score".equals(parser.text());
        } else if ("tags_schema".equals(topLevelFieldName)
            || "tagsSchema".equals(topLevelFieldName)) {
          String schema = parser.text();
          if ("styled".equals(schema)) {
            globalPreTags = STYLED_PRE_TAG;
            globalPostTags = STYLED_POST_TAGS;
          }
        } else if ("highlight_filter".equals(topLevelFieldName)
            || "highlightFilter".equals(topLevelFieldName)) {
          globalHighlightFilter = parser.booleanValue();
        } else if ("fragment_size".equals(topLevelFieldName)
            || "fragmentSize".equals(topLevelFieldName)) {
          globalFragmentSize = parser.intValue();
        } else if ("number_of_fragments".equals(topLevelFieldName)
            || "numberOfFragments".equals(topLevelFieldName)) {
          globalNumOfFragments = parser.intValue();
        }
      } else if (token == XContentParser.Token.START_OBJECT) {
        if ("fields".equals(topLevelFieldName)) {
          String highlightFieldName = null;
          while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
              highlightFieldName = parser.currentName();
            } else if (token == XContentParser.Token.START_OBJECT) {
              SearchContextHighlight.Field field =
                  new SearchContextHighlight.Field(highlightFieldName);
              String fieldName = null;
              while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                  fieldName = parser.currentName();
                } else if (token == XContentParser.Token.START_ARRAY) {
                  if ("pre_tags".equals(fieldName) || "preTags".equals(fieldName)) {
                    List<String> preTagsList = Lists.newArrayList();
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                      preTagsList.add(parser.text());
                    }
                    field.preTags(preTagsList.toArray(new String[preTagsList.size()]));
                  } else if ("post_tags".equals(fieldName) || "postTags".equals(fieldName)) {
                    List<String> postTagsList = Lists.newArrayList();
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                      postTagsList.add(parser.text());
                    }
                    field.postTags(postTagsList.toArray(new String[postTagsList.size()]));
                  }
                } else if (token.isValue()) {
                  if ("fragment_size".equals(fieldName) || "fragmentSize".equals(fieldName)) {
                    field.fragmentCharSize(parser.intValue());
                  } else if ("number_of_fragments".equals(fieldName)
                      || "numberOfFragments".equals(fieldName)) {
                    field.numberOfFragments(parser.intValue());
                  } else if ("fragment_offset".equals(fieldName)
                      || "fragmentOffset".equals(fieldName)) {
                    field.fragmentOffset(parser.intValue());
                  } else if ("highlight_filter".equals(fieldName)
                      || "highlightFilter".equals(fieldName)) {
                    field.highlightFilter(parser.booleanValue());
                  } else if ("order".equals(fieldName)) {
                    field.scoreOrdered("score".equals(parser.text()));
                  }
                }
              }
              fields.add(field);
            }
          }
        }
      }
    }
    if (globalPreTags != null && globalPostTags == null) {
      throw new SearchParseException(
          context, "Highlighter global preTags are set, but global postTags are not set");
    }

    // now, go over and fill all fields with default values from the global state
    for (SearchContextHighlight.Field field : fields) {
      if (field.preTags() == null) {
        field.preTags(globalPreTags);
      }
      if (field.postTags() == null) {
        field.postTags(globalPostTags);
      }
      if (field.highlightFilter() == null) {
        field.highlightFilter(globalHighlightFilter);
      }
      if (field.scoreOrdered() == null) {
        field.scoreOrdered(globalScoreOrdered);
      }
      if (field.fragmentCharSize() == -1) {
        field.fragmentCharSize(globalFragmentSize);
      }
      if (field.numberOfFragments() == -1) {
        field.numberOfFragments(globalNumOfFragments);
      }
    }

    context.highlight(new SearchContextHighlight(fields));
  }
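
A sketch of the highlight section this parser consumes, pairing global options with a per-field override; the keys are taken from the branches above and the field names are illustrative:

  // Hypothetical example input for the highlighter parse(...) above.
  static final String EXAMPLE_HIGHLIGHT_JSON =
      "{"
          + "\"pre_tags\":[\"<em>\"],"
          + "\"post_tags\":[\"</em>\"],"
          + "\"order\":\"score\","
          + "\"fragment_size\":150,"
          + "\"number_of_fragments\":3,"
          + "\"fields\":{"
          + "\"title\":{\"number_of_fragments\":0}," // 0 is kept; only -1 falls back to the global default
          + "\"body\":{\"fragment_size\":100,\"highlight_filter\":true}"
          + "}"
          + "}";
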
Example #17
  @Override
  public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
    ensureNotDeleteByQuery(NAME, parseContext);
    XContentParser parser = parseContext.parser();

    boolean queryFound = false;
    float boost = 1.0f;
    String childType = null;
    ScoreType scoreType = ScoreType.NONE;
    int minChildren = 0;
    int maxChildren = 0;
    int shortCircuitParentDocSet = 8192;
    String queryName = null;
    Tuple<String, SubSearchContext> innerHits = null;

    String currentFieldName = null;
    XContentParser.Token token;
    XContentStructure.InnerQuery iq = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
      } else if (token == XContentParser.Token.START_OBJECT) {
        // Usually, the query would be parsed here, but the child
        // type may not have been extracted yet, so use the
        // XContentStructure.<type> facade to parse if available,
        // or delay parsing if not.
        if ("query".equals(currentFieldName)) {
          iq =
              new XContentStructure.InnerQuery(
                  parseContext, childType == null ? null : new String[] {childType});
          queryFound = true;
        } else if ("inner_hits".equals(currentFieldName)) {
          innerHits = innerHitsQueryParserHelper.parse(parseContext);
        } else {
          throw new QueryParsingException(
              parseContext.index(),
              "[has_child] query does not support [" + currentFieldName + "]");
        }
      } else if (token.isValue()) {
        if ("type".equals(currentFieldName)
            || "child_type".equals(currentFieldName)
            || "childType".equals(currentFieldName)) {
          childType = parser.text();
        } else if ("score_type".equals(currentFieldName) || "scoreType".equals(currentFieldName)) {
          scoreType = ScoreType.fromString(parser.text());
        } else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) {
          scoreType = ScoreType.fromString(parser.text());
        } else if ("boost".equals(currentFieldName)) {
          boost = parser.floatValue();
        } else if ("min_children".equals(currentFieldName)
            || "minChildren".equals(currentFieldName)) {
          minChildren = parser.intValue(true);
        } else if ("max_children".equals(currentFieldName)
            || "maxChildren".equals(currentFieldName)) {
          maxChildren = parser.intValue(true);
        } else if ("short_circuit_cutoff".equals(currentFieldName)) {
          shortCircuitParentDocSet = parser.intValue();
        } else if ("_name".equals(currentFieldName)) {
          queryName = parser.text();
        } else {
          throw new QueryParsingException(
              parseContext.index(),
              "[has_child] query does not support [" + currentFieldName + "]");
        }
      }
    }
    if (!queryFound) {
      throw new QueryParsingException(parseContext.index(), "[has_child] requires 'query' field");
    }
    if (childType == null) {
      throw new QueryParsingException(parseContext.index(), "[has_child] requires 'type' field");
    }

    Query innerQuery = iq.asQuery(childType);

    if (innerQuery == null) {
      return null;
    }
    innerQuery.setBoost(boost);

    DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
    if (childDocMapper == null) {
      throw new QueryParsingException(
          parseContext.index(), "[has_child] No mapping for for type [" + childType + "]");
    }
    if (!childDocMapper.parentFieldMapper().active()) {
      throw new QueryParsingException(
          parseContext.index(),
          "[has_child]  Type [" + childType + "] does not have parent mapping");
    }

    if (innerHits != null) {
      InnerHitsContext.ParentChildInnerHits parentChildInnerHits =
          new InnerHitsContext.ParentChildInnerHits(
              innerHits.v2(), innerQuery, null, childDocMapper);
      String name = innerHits.v1() != null ? innerHits.v1() : childType;
      parseContext.addInnerHits(name, parentChildInnerHits);
    }

    ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
    if (!parentFieldMapper.active()) {
      throw new QueryParsingException(
          parseContext.index(), "[has_child] _parent field not configured");
    }

    String parentType = parentFieldMapper.type();
    DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
    if (parentDocMapper == null) {
      throw new QueryParsingException(
          parseContext.index(),
          "[has_child]  Type ["
              + childType
              + "] points to a non existent parent type ["
              + parentType
              + "]");
    }

    if (maxChildren > 0 && maxChildren < minChildren) {
      throw new QueryParsingException(
          parseContext.index(), "[has_child] 'max_children' is less than 'min_children'");
    }

    BitDocIdSetFilter nonNestedDocsFilter = null;
    if (parentDocMapper.hasNestedObjects()) {
      nonNestedDocsFilter = parseContext.bitsetFilter(NonNestedDocsFilter.INSTANCE);
    }

    // wrap the query with type query
    innerQuery =
        new FilteredQuery(
            innerQuery,
            parseContext.cacheFilter(
                childDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy()));

    Query query;
    Filter parentFilter =
        parseContext.cacheFilter(
            parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy());
    ParentChildIndexFieldData parentChildIndexFieldData =
        parseContext.getForField(parentFieldMapper);
    if (minChildren > 1 || maxChildren > 0 || scoreType != ScoreType.NONE) {
      query =
          new ChildrenQuery(
              parentChildIndexFieldData,
              parentType,
              childType,
              parentFilter,
              innerQuery,
              scoreType,
              minChildren,
              maxChildren,
              shortCircuitParentDocSet,
              nonNestedDocsFilter);
    } else {
      query =
          new ChildrenConstantScoreQuery(
              parentChildIndexFieldData,
              innerQuery,
              parentType,
              childType,
              parentFilter,
              shortCircuitParentDocSet,
              nonNestedDocsFilter);
    }
    if (queryName != null) {
      parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query));
    }
    query.setBoost(boost);
    return query;
  }
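
The shape this parser expects, with keys drawn from the branches above; the outer "has_child" wrapper is assumed to be consumed by the query dispatcher before this parser runs, and the inner "match" query is an illustrative placeholder:

  // Hypothetical example input for the has_child parser above.
  static final String EXAMPLE_HAS_CHILD_JSON =
      "{\"has_child\":{"
          + "\"type\":\"comment\","   // aliases: child_type, childType
          + "\"score_mode\":\"max\"," // alias of score_type; parsed via ScoreType.fromString
          + "\"min_children\":1,"
          + "\"max_children\":10,"
          + "\"query\":{\"match\":{\"text\":\"elasticsearch\"}}"
          + "}}";
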
Example #18
  public SuggestionSearchContext.SuggestionContext parse(
      XContentParser parser, SearchContext context) throws IOException {
    PhraseSuggestionContext suggestion = new PhraseSuggestionContext(suggester);
    XContentParser.Token token;
    String fieldName = null;
    boolean gramSizeSet = false;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        fieldName = parser.currentName();
      } else if (token.isValue()) {
        if (!SuggestUtils.parseSuggestContext(parser, context, fieldName, suggestion)) {
          if ("real_word_error_likelihood".equals(fieldName)) {
            suggestion.setRealWordErrorLikelihood(parser.floatValue());
            if (suggestion.realworldErrorLikelyhood() <= 0.0) {
              throw new ElasticSearchIllegalArgumentException(
                  "real_word_error_likelihood must be > 0.0");
            }
          } else if ("confidence".equals(fieldName)) {
            suggestion.setConfidence(parser.floatValue());
            if (suggestion.confidence() < 0.0) {
              throw new ElasticSearchIllegalArgumentException("confidence must be >= 0.0");
            }
          } else if ("separator".equals(fieldName)) {
            suggestion.setSeparator(new BytesRef(parser.text()));
          } else if ("max_errors".equals(fieldName)) {
            suggestion.setMaxErrors(parser.floatValue());
            if (suggestion.maxErrors() <= 0.0) {
              throw new ElasticSearchIllegalArgumentException("max_errors must be > 0.0");
            }
          } else if ("gram_size".equals(fieldName)) {
            suggestion.setGramSize(parser.intValue());
            if (suggestion.gramSize() < 1) {
              throw new ElasticSearchIllegalArgumentException("gram_size must be >= 1");
            }
            gramSizeSet = true;
          } else if ("force_unigrams".equals(fieldName)) {
            suggestion.setRequireUnigram(parser.booleanValue());
          }
        }
      } else if (token == Token.START_ARRAY) {
        if ("direct_generator".equals(fieldName)) {
          // for now we only have a single type of generators
          while ((token = parser.nextToken()) == Token.START_OBJECT) {
            PhraseSuggestionContext.DirectCandidateGenerator generator =
                new PhraseSuggestionContext.DirectCandidateGenerator();
            while ((token = parser.nextToken()) != Token.END_OBJECT) {
              if (token == XContentParser.Token.FIELD_NAME) {
                fieldName = parser.currentName();
              }
              if (token.isValue()) {
                parseCandidateGenerator(parser, context, fieldName, generator);
              }
            }
            verifyGenerator(context, generator);
            suggestion.addGenerator(generator);
          }
        } else {
          throw new ElasticSearchIllegalArgumentException(
              "suggester[phrase]  doesn't support array field [" + fieldName + "]");
        }
      } else if (token == Token.START_OBJECT) {
        if ("linear".equals(fieldName)) {
          ensureNoSmoothing(suggestion);
          final double[] lambdas = new double[3];
          while ((token = parser.nextToken()) != Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
              fieldName = parser.currentName();
            }
            if (token.isValue()) {
              if ("trigram_lambda".equals(fieldName)) {
                lambdas[0] = parser.doubleValue();
                if (lambdas[0] < 0) {
                  throw new ElasticSearchIllegalArgumentException(
                      "trigram_lambda must be positive");
                }
              }
              if ("bigram_lambda".equals(fieldName)) {
                lambdas[1] = parser.doubleValue();
                if (lambdas[1] < 0) {
                  throw new ElasticSearchIllegalArgumentException("bigram_lambda must be positive");
                }
              }
              if ("unigram_lambda".equals(fieldName)) {
                lambdas[2] = parser.doubleValue();
                if (lambdas[2] < 0) {
                  throw new ElasticSearchIllegalArgumentException(
                      "unigram_lambda must be positive");
                }
              }
            }
          }
          double sum = 0.0d;
          for (int i = 0; i < lambdas.length; i++) {
            sum += lambdas[i];
          }
          if (Math.abs(sum - 1.0) > 0.001) {
            throw new ElasticSearchIllegalArgumentException(
                "linear smoothing lambdas must sum to 1");
          }
          suggestion.setModel(
              new WordScorer.WordScorerFactory() {
                @Override
                public WordScorer newScorer(
                    IndexReader reader, String field, double realWordLikelyhood, BytesRef separator)
                    throws IOException {
                  return new LinearInterpoatingScorer(
                      reader,
                      field,
                      realWordLikelyhood,
                      separator,
                      lambdas[0],
                      lambdas[1],
                      lambdas[2]);
                }
              });
        } else if ("laplace".equals(fieldName)) {
          ensureNoSmoothing(suggestion);
          double theAlpha = 0.5;

          while ((token = parser.nextToken()) != Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
              fieldName = parser.currentName();
            }
            if (token.isValue()) {
              if ("alpha".equals(fieldName)) {
                theAlpha = parser.doubleValue();
              }
            }
          }
          final double alpha = theAlpha;
          suggestion.setModel(
              new WordScorer.WordScorerFactory() {
                @Override
                public WordScorer newScorer(
                    IndexReader reader, String field, double realWordLikelyhood, BytesRef separator)
                    throws IOException {
                  return new LaplaceScorer(reader, field, realWordLikelyhood, separator, alpha);
                }
              });

        } else if ("stupid_backoff".equals(fieldName)) {
          ensureNoSmoothing(suggestion);
          double theDiscount = 0.4;
          while ((token = parser.nextToken()) != Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
              fieldName = parser.currentName();
            }
            if (token.isValue()) {
              if ("discount".equals(fieldName)) {
                theDiscount = parser.doubleValue();
              }
            }
          }
          final double discount = theDiscount;
          suggestion.setModel(
              new WordScorer.WordScorerFactory() {
                @Override
                public WordScorer newScorer(
                    IndexReader reader, String field, double realWordLikelyhood, BytesRef separator)
                    throws IOException {
                  return new StupidBackoffScorer(
                      reader, field, realWordLikelyhood, separator, discount);
                }
              });

        } else {
          throw new ElasticSearchIllegalArgumentException(
              "suggester[phrase] doesn't support object field [" + fieldName + "]");
        }

      } else {
        throw new ElasticSearchIllegalArgumentException(
            "suggester[phrase] doesn't support field [" + fieldName + "]");
      }
    }

    if (suggestion.getField() == null) {
      throw new ElasticSearchIllegalArgumentException("The required field option is missing");
    }

    if (suggestion.model() == null) {
      suggestion.setModel(LaplaceScorer.FACTORY);
    }

    if (!gramSizeSet || suggestion.generators().isEmpty()) {
      final ShingleTokenFilterFactory shingleFilterFactory =
          SuggestUtils.getShingleFilterFactory(
              suggestion.getAnalyzer() == null
                  ? context.mapperService().fieldSearchAnalyzer(suggestion.getField())
                  : suggestion.getAnalyzer());
      if (!gramSizeSet) {
        // try to detect the shingle size
        if (shingleFilterFactory != null) {
          suggestion.setGramSize(shingleFilterFactory.getMaxShingleSize());
          if (suggestion.getAnalyzer() == null
              && shingleFilterFactory.getMinShingleSize() > 1
              && !shingleFilterFactory.getOutputUnigrams()) {
            throw new ElasticSearchIllegalArgumentException(
                "The default analyzer for field: ["
                    + suggestion.getField()
                    + "] doesn't emit unigrams. If this is intentional try to set the analyzer explicitly");
          }
        }
      }
      if (suggestion.generators().isEmpty()) {
        if (shingleFilterFactory != null
            && shingleFilterFactory.getMinShingleSize() > 1
            && !shingleFilterFactory.getOutputUnigrams()
            && suggestion.getRequireUnigram()) {
          throw new ElasticSearchIllegalArgumentException(
              "The default candidate generator for phrase suggest can't operate on field: ["
                  + suggestion.getField()
                  + "] since it doesn't emit unigrams. If this is intentional try to set the candidate generator field explicitly");
        }
        // use a default generator on the same field
        DirectCandidateGenerator generator = new DirectCandidateGenerator();
        generator.setField(suggestion.getField());
        suggestion.addGenerator(generator);
      }
    }

    return suggestion;
  }
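
A sketch of a phrase-suggester definition exercising the options parsed above. "field" and the other common options are assumed to be handled by SuggestUtils.parseSuggestContext, the field names are illustrative, and the linear lambdas are chosen to sum to 1 so the validation above passes:

  // Hypothetical example input for the phrase suggester parser above.
  static final String EXAMPLE_PHRASE_SUGGEST_JSON =
      "{"
          + "\"field\":\"title.trigram\","
          + "\"gram_size\":3,"
          + "\"real_word_error_likelihood\":0.95,"
          + "\"confidence\":1.0,"
          + "\"max_errors\":0.5,"
          + "\"force_unigrams\":false,"
          + "\"linear\":{\"trigram_lambda\":0.5,\"bigram_lambda\":0.3,\"unigram_lambda\":0.2},"
          + "\"direct_generator\":[{\"field\":\"title.trigram\",\"size\":10}]"
          + "}";
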
Example #19
  @Override
  public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
    XContentParser parser = parseContext.parser();

    boolean disableCoord = false;
    float boost = 1.0f;
    int minimumNumberShouldMatch = -1;

    List<BooleanClause> clauses = newArrayList();

    String currentFieldName = null;
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
      } else if (token == XContentParser.Token.START_OBJECT) {
        if ("must".equals(currentFieldName)) {
          clauses.add(new BooleanClause(parseContext.parseInnerQuery(), BooleanClause.Occur.MUST));
        } else if ("must_not".equals(currentFieldName) || "mustNot".equals(currentFieldName)) {
          clauses.add(
              new BooleanClause(parseContext.parseInnerQuery(), BooleanClause.Occur.MUST_NOT));
        } else if ("should".equals(currentFieldName)) {
          clauses.add(
              new BooleanClause(parseContext.parseInnerQuery(), BooleanClause.Occur.SHOULD));
        }
      } else if (token == XContentParser.Token.START_ARRAY) {
        if ("must".equals(currentFieldName)) {
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            clauses.add(
                new BooleanClause(parseContext.parseInnerQuery(), BooleanClause.Occur.MUST));
          }
        } else if ("must_not".equals(currentFieldName) || "mustNot".equals(currentFieldName)) {
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            clauses.add(
                new BooleanClause(parseContext.parseInnerQuery(), BooleanClause.Occur.MUST_NOT));
          }
        } else if ("should".equals(currentFieldName)) {
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            clauses.add(
                new BooleanClause(parseContext.parseInnerQuery(), BooleanClause.Occur.SHOULD));
          }
        }
      } else if (token.isValue()) {
        if ("disable_coord".equals(currentFieldName) || "disableCoord".equals(currentFieldName)) {
          disableCoord = parser.booleanValue();
        } else if ("minimum_number_should_match".equals(currentFieldName)
            || "minimumNumberShouldMatch".equals(currentFieldName)) {
          minimumNumberShouldMatch = parser.intValue();
        } else if ("boost".equals(currentFieldName)) {
          boost = parser.floatValue();
        }
      }
    }

    BooleanQuery query = new BooleanQuery(disableCoord);
    for (BooleanClause clause : clauses) {
      query.add(clause);
    }
    query.setBoost(boost);
    if (minimumNumberShouldMatch != -1) {
      query.setMinimumNumberShouldMatch(minimumNumberShouldMatch);
    }
    return optimizeQuery(fixNegativeQueryIfNeeded(query));
  }
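
The corresponding query body, with clause keys from the branches above; the outer "bool" wrapper is assumed to be consumed by the query dispatcher, and the inner term/range queries are illustrative placeholders:

  // Hypothetical example input for the bool query parser above.
  static final String EXAMPLE_BOOL_JSON =
      "{\"bool\":{"
          + "\"must\":{\"term\":{\"user\":\"kimchy\"}},"
          + "\"must_not\":{\"range\":{\"age\":{\"gte\":10,\"lt\":20}}},"
          + "\"should\":[{\"term\":{\"tag\":\"wow\"}},{\"term\":{\"tag\":\"search\"}}],"
          + "\"minimum_number_should_match\":1,"
          + "\"disable_coord\":true,"
          + "\"boost\":1.0"
          + "}}";
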
  public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context)
      throws IOException {
    SearchSourceBuilder builder = new SearchSourceBuilder();
    XContentParser.Token token = parser.currentToken();
    String currentFieldName = null;
    if (token != XContentParser.Token.START_OBJECT
        && (token = parser.nextToken()) != XContentParser.Token.START_OBJECT) {
      throw new ParsingException(
          parser.getTokenLocation(),
          "Expected [" + XContentParser.Token.START_OBJECT + "] but found [" + token + "]",
          parser.getTokenLocation());
    }
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
      if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
      } else if (token.isValue()) {
        if (context.parseFieldMatcher().match(currentFieldName, FROM_FIELD)) {
          builder.from = parser.intValue();
        } else if (context.parseFieldMatcher().match(currentFieldName, SIZE_FIELD)) {
          builder.size = parser.intValue();
        } else if (context.parseFieldMatcher().match(currentFieldName, TIMEOUT_FIELD)) {
          builder.timeoutInMillis = parser.longValue();
        } else if (context.parseFieldMatcher().match(currentFieldName, TERMINATE_AFTER_FIELD)) {
          builder.terminateAfter = parser.intValue();
        } else if (context.parseFieldMatcher().match(currentFieldName, MIN_SCORE_FIELD)) {
          builder.minScore = parser.floatValue();
        } else if (context.parseFieldMatcher().match(currentFieldName, VERSION_FIELD)) {
          builder.version = parser.booleanValue();
        } else if (context.parseFieldMatcher().match(currentFieldName, EXPLAIN_FIELD)) {
          builder.explain = parser.booleanValue();
        } else if (context.parseFieldMatcher().match(currentFieldName, TRACK_SCORES_FIELD)) {
          builder.trackScores = parser.booleanValue();
        } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
          builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
        } else if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
          List<String> fieldNames = new ArrayList<>();
          fieldNames.add(parser.text());
          builder.fieldNames = fieldNames;
        } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
          builder.sort(parser.text());
        } else {
          throw new ParsingException(
              parser.getTokenLocation(),
              "Unknown key for a " + token + " in [" + currentFieldName + "].",
              parser.getTokenLocation());
        }
      } else if (token == XContentParser.Token.START_OBJECT) {
        if (context.parseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
          builder.queryBuilder = context.parseInnerQueryBuilder();
        } else if (context.parseFieldMatcher().match(currentFieldName, POST_FILTER_FIELD)) {
          builder.postQueryBuilder = context.parseInnerQueryBuilder();
        } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
          builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
        } else if (context.parseFieldMatcher().match(currentFieldName, SCRIPT_FIELDS_FIELD)) {
          List<ScriptField> scriptFields = new ArrayList<>();
          while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            String scriptFieldName = parser.currentName();
            token = parser.nextToken();
            if (token == XContentParser.Token.START_OBJECT) {
              Script script = null;
              boolean ignoreFailure = false;
              while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                  currentFieldName = parser.currentName();
                } else if (token.isValue()) {
                  if (context.parseFieldMatcher().match(currentFieldName, SCRIPT_FIELD)) {
                    script = Script.parse(parser, context.parseFieldMatcher());
                  } else if (context
                      .parseFieldMatcher()
                      .match(currentFieldName, IGNORE_FAILURE_FIELD)) {
                    ignoreFailure = parser.booleanValue();
                  } else {
                    throw new ParsingException(
                        parser.getTokenLocation(),
                        "Unknown key for a " + token + " in [" + currentFieldName + "].",
                        parser.getTokenLocation());
                  }
                } else if (token == XContentParser.Token.START_OBJECT) {
                  if (context.parseFieldMatcher().match(currentFieldName, SCRIPT_FIELD)) {
                    script = Script.parse(parser, context.parseFieldMatcher());
                  } else {
                    throw new ParsingException(
                        parser.getTokenLocation(),
                        "Unknown key for a " + token + " in [" + currentFieldName + "].",
                        parser.getTokenLocation());
                  }
                } else {
                  throw new ParsingException(
                      parser.getTokenLocation(),
                      "Unknown key for a " + token + " in [" + currentFieldName + "].",
                      parser.getTokenLocation());
                }
              }
              scriptFields.add(new ScriptField(scriptFieldName, script, ignoreFailure));
            } else {
              throw new ParsingException(
                  parser.getTokenLocation(),
                  "Expected ["
                      + XContentParser.Token.START_OBJECT
                      + "] in ["
                      + currentFieldName
                      + "] but found ["
                      + token
                      + "]",
                  parser.getTokenLocation());
            }
          }
          builder.scriptFields = scriptFields;
        } else if (context.parseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) {
          ObjectFloatHashMap<String> indexBoost = new ObjectFloatHashMap<String>();
          while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
              currentFieldName = parser.currentName();
            } else if (token.isValue()) {
              indexBoost.put(currentFieldName, parser.floatValue());
            } else {
              throw new ParsingException(
                  parser.getTokenLocation(),
                  "Unknown key for a " + token + " in [" + currentFieldName + "].",
                  parser.getTokenLocation());
            }
          }
          builder.indexBoost = indexBoost;
        } else if (context.parseFieldMatcher().match(currentFieldName, AGGREGATIONS_FIELD)) {
          List<BytesReference> aggregations = new ArrayList<>();
          while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            currentFieldName = parser.currentName();
            token = parser.nextToken();
            if (token == XContentParser.Token.START_OBJECT) {
              XContentBuilder xContentBuilder =
                  XContentFactory.contentBuilder(parser.contentType());
              xContentBuilder.startObject();
              xContentBuilder.field(currentFieldName);
              xContentBuilder.copyCurrentStructure(parser);
              xContentBuilder.endObject();
              aggregations.add(xContentBuilder.bytes());
            } else {
              throw new ParsingException(
                  parser.getTokenLocation(),
                  "Unknown key for a " + token + " in [" + currentFieldName + "].",
                  parser.getTokenLocation());
            }
          }
          builder.aggregations = aggregations;
        } else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
          XContentBuilder xContentBuilder =
              XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
          builder.highlightBuilder = xContentBuilder.bytes();
        } else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
          XContentBuilder xContentBuilder =
              XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
          builder.innerHitsBuilder = xContentBuilder.bytes();
        } else if (context.parseFieldMatcher().match(currentFieldName, SUGGEST_FIELD)) {
          XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType());
          xContentBuilder.copyCurrentStructure(parser);
          builder.suggestBuilder = xContentBuilder.bytes();
        } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
          List<BytesReference> sorts = new ArrayList<>();
          XContentBuilder xContentBuilder =
              XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
          sorts.add(xContentBuilder.bytes());
          builder.sorts = sorts;
        } else if (context.parseFieldMatcher().match(currentFieldName, EXT_FIELD)) {
          XContentBuilder xContentBuilder =
              XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
          builder.ext = xContentBuilder.bytes();
        } else {
          throw new ParsingException(
              parser.getTokenLocation(),
              "Unknown key for a " + token + " in [" + currentFieldName + "].",
              parser.getTokenLocation());
        }
      } else if (token == XContentParser.Token.START_ARRAY) {

        if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
          List<String> fieldNames = new ArrayList<>();
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            if (token == XContentParser.Token.VALUE_STRING) {
              fieldNames.add(parser.text());
            } else {
              throw new ParsingException(
                  parser.getTokenLocation(),
                  "Expected ["
                      + XContentParser.Token.VALUE_STRING
                      + "] in ["
                      + currentFieldName
                      + "] but found ["
                      + token
                      + "]",
                  parser.getTokenLocation());
            }
          }
          builder.fieldNames = fieldNames;
        } else if (context.parseFieldMatcher().match(currentFieldName, FIELDDATA_FIELDS_FIELD)) {
          List<String> fieldDataFields = new ArrayList<>();
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            if (token == XContentParser.Token.VALUE_STRING) {
              fieldDataFields.add(parser.text());
            } else {
              throw new ParsingException(
                  parser.getTokenLocation(),
                  "Expected ["
                      + XContentParser.Token.VALUE_STRING
                      + "] in ["
                      + currentFieldName
                      + "] but found ["
                      + token
                      + "]",
                  parser.getTokenLocation());
            }
          }
          builder.fieldDataFields = fieldDataFields;
        } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
          List<BytesReference> sorts = new ArrayList<>();
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            XContentBuilder xContentBuilder =
                XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
            sorts.add(xContentBuilder.bytes());
          }
          builder.sorts = sorts;
        } else if (context.parseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) {
          List<BytesReference> rescoreBuilders = new ArrayList<>();
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            XContentBuilder xContentBuilder =
                XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
            rescoreBuilders.add(xContentBuilder.bytes());
          }
          builder.rescoreBuilders = rescoreBuilders;
        } else if (context.parseFieldMatcher().match(currentFieldName, STATS_FIELD)) {
          List<String> stats = new ArrayList<>();
          while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            if (token == XContentParser.Token.VALUE_STRING) {
              stats.add(parser.text());
            } else {
              throw new ParsingException(
                  parser.getTokenLocation(),
                  "Expected ["
                      + XContentParser.Token.VALUE_STRING
                      + "] in ["
                      + currentFieldName
                      + "] but found ["
                      + token
                      + "]",
                  parser.getTokenLocation());
            }
          }
          builder.stats = stats;
        } else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
          builder.fetchSourceContext = FetchSourceContext.parse(parser, context);
        } else {
          throw new ParsingException(
              parser.getTokenLocation(),
              "Unknown key for a " + token + " in [" + currentFieldName + "].",
              parser.getTokenLocation());
        }
      } else {
        throw new ParsingException(
            parser.getTokenLocation(),
            "Unknown key for a " + token + " in [" + currentFieldName + "].",
            parser.getTokenLocation());
      }
    }
    return builder;
  }
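
A compact request body exercising several of the branches above; the inner query, the sort clause, and the index/field names are illustrative placeholders:

  // Hypothetical example input for SearchSourceBuilder.fromXContent above.
  static final String EXAMPLE_SEARCH_SOURCE_JSON =
      "{"
          + "\"from\":0,"
          + "\"size\":10,"
          + "\"query\":{\"match_all\":{}},"
          + "\"fields\":[\"title\",\"date\"]," // array form; a single string is also accepted
          + "\"sort\":[{\"date\":\"desc\"}],"
          + "\"indices_boost\":{\"index_1\":1.4},"
          + "\"stats\":[\"group_a\"]"
          + "}";
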
    @Override
    public Builder fromXContent(QueryParseContext parseContext) throws IOException {
      XContentParser parser = parseContext.parser();

      String fieldName = null;
      String geohash = null;
      Integer levels = null;
      Boolean neighbors = null;
      String queryName = null;
      Float boost = null;

      XContentParser.Token token;
      if ((token = parser.currentToken()) != Token.START_OBJECT) {
        throw new ElasticsearchParseException(
            "failed to parse [{}] query. expected an object but found [{}] instead", NAME, token);
      }

      while ((token = parser.nextToken()) != Token.END_OBJECT) {
        if (token == Token.FIELD_NAME) {
          String field = parser.currentName();

          if (parseContext.isDeprecatedSetting(field)) {
            // skip
          } else if (parseContext.parseFieldMatcher().match(field, PRECISION_FIELD)) {
            token = parser.nextToken();
            if (token == Token.VALUE_NUMBER) {
              levels = parser.intValue();
            } else if (token == Token.VALUE_STRING) {
              double meters =
                  DistanceUnit.parse(parser.text(), DistanceUnit.DEFAULT, DistanceUnit.METERS);
              levels = GeoUtils.geoHashLevelsForPrecision(meters);
            }
          } else if (parseContext.parseFieldMatcher().match(field, NEIGHBORS_FIELD)) {
            parser.nextToken();
            neighbors = parser.booleanValue();
          } else if (parseContext
              .parseFieldMatcher()
              .match(field, AbstractQueryBuilder.NAME_FIELD)) {
            parser.nextToken();
            queryName = parser.text();
          } else if (parseContext
              .parseFieldMatcher()
              .match(field, AbstractQueryBuilder.BOOST_FIELD)) {
            parser.nextToken();
            boost = parser.floatValue();
          } else {
            if (fieldName == null) {
              fieldName = field;
              token = parser.nextToken();
              if (token == Token.VALUE_STRING) {
                // A string indicates either a geohash or a lat/lon string
                String location = parser.text();
                if (location.indexOf(",") > 0) {
                  geohash = GeoUtils.parseGeoPoint(parser).geohash();
                } else {
                  geohash = location;
                }
              } else {
                geohash = GeoUtils.parseGeoPoint(parser).geohash();
              }
            } else {
              throw new ParsingException(
                  parser.getTokenLocation(),
                  "["
                      + NAME
                      + "] field name already set to ["
                      + fieldName
                      + "] but found ["
                      + field
                      + "]");
            }
          }
        } else {
          throw new ElasticsearchParseException(
              "failed to parse [{}] query. unexpected token [{}]", NAME, token);
        }
      }
      Builder builder = new Builder(fieldName, geohash);
      if (levels != null) {
        builder.precision(levels);
      }
      if (neighbors != null) {
        builder.neighbors(neighbors);
      }
      if (queryName != null) {
        builder.queryName(queryName);
      }
      if (boost != null) {
        builder.boost(boost);
      }
      return builder;
    }
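
A sketch of the query body consumed above; the outer "geohash_cell" wrapper (the registered NAME) is assumed to be stripped by the surrounding query parsing, and "pin" is an illustrative field name:

    // Hypothetical example input for the geohash-cell fromXContent above.
    static final String EXAMPLE_GEOHASH_CELL_JSON =
        "{\"geohash_cell\":{"
            + "\"pin\":\"u30\","  // a geohash, or a "lat,lon" string
            + "\"precision\":3,"  // or a distance string such as "50m"
            + "\"neighbors\":true"
            + "}}";
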
Example #22
  @Override
  public Mapper parse(ParseContext context) throws IOException {
    byte[] content = null;
    String contentType = null;
    int indexedChars = defaultIndexedChars;
    boolean langDetect = defaultLangDetect;
    String name = null;
    String language = null;

    XContentParser parser = context.parser();
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.VALUE_STRING) {
      content = parser.binaryValue();
    } else {
      String currentFieldName = null;
      while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
          currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.VALUE_STRING) {
          if ("_content".equals(currentFieldName)) {
            content = parser.binaryValue();
          } else if ("_content_type".equals(currentFieldName)) {
            contentType = parser.text();
          } else if ("_name".equals(currentFieldName)) {
            name = parser.text();
          } else if ("_language".equals(currentFieldName)) {
            language = parser.text();
          }
        } else if (token == XContentParser.Token.VALUE_NUMBER) {
          if ("_indexed_chars".equals(currentFieldName)
              || "_indexedChars".equals(currentFieldName)) {
            indexedChars = parser.intValue();
          }
        } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
          if ("_detect_language".equals(currentFieldName)
              || "_detectLanguage".equals(currentFieldName)) {
            langDetect = parser.booleanValue();
          }
        }
      }
    }

    // Throw a clean exception when no content is provided (fix #23)
    if (content == null) {
      throw new MapperParsingException("No content is provided.");
    }

    Metadata metadata = new Metadata();
    if (contentType != null) {
      metadata.add(Metadata.CONTENT_TYPE, contentType);
    }
    if (name != null) {
      metadata.add(Metadata.RESOURCE_NAME_KEY, name);
    }

    String parsedContent;
    try {
      parsedContent = TikaImpl.parse(content, metadata, indexedChars);
    } catch (Throwable e) {
      // #18: optionally ignore errors when Tika fails to parse the data
      if (!ignoreErrors) {
        logger.trace("exception caught", e);
        throw new MapperParsingException(
            "Failed to extract ["
                + indexedChars
                + "] characters of text for ["
                + name
                + "] : "
                + e.getMessage(),
            e);
      } else {
        logger.debug(
            "Failed to extract [{}] characters of text for [{}]: [{}]",
            indexedChars,
            name,
            e.getMessage());
        logger.trace("exception caught", e);
      }
      return null;
    }

    context = context.createExternalValueContext(parsedContent);
    contentMapper.parse(context);

    if (langDetect) {
      try {
        if (language != null) {
          metadata.add(Metadata.CONTENT_LANGUAGE, language);
        } else {
          LanguageIdentifier identifier = new LanguageIdentifier(parsedContent);
          language = identifier.getLanguage();
        }
        context = context.createExternalValueContext(language);
        languageMapper.parse(context);
      } catch (Throwable t) {
        logger.debug("Cannot detect language: [{}]", t.getMessage());
      }
    }

    if (name != null) {
      try {
        context = context.createExternalValueContext(name);
        nameMapper.parse(context);
      } catch (MapperParsingException e) {
        if (!ignoreErrors) throw e;
        if (logger.isDebugEnabled())
          logger.debug(
              "Ignoring MapperParsingException catch while parsing name: [{}]", e.getMessage());
      }
    }

    if (metadata.get(Metadata.DATE) != null) {
      try {
        context = context.createExternalValueContext(metadata.get(Metadata.DATE));
        dateMapper.parse(context);
      } catch (MapperParsingException e) {
        if (!ignoreErrors) throw e;
        if (logger.isDebugEnabled())
          logger.debug(
              "Ignoring MapperParsingException catch while parsing date: [{}]: [{}]",
              e.getMessage(),
              context.externalValue());
      }
    }

    if (metadata.get(Metadata.TITLE) != null) {
      try {
        context = context.createExternalValueContext(metadata.get(Metadata.TITLE));
        titleMapper.parse(context);
      } catch (MapperParsingException e) {
        if (!ignoreErrors) throw e;
        if (logger.isDebugEnabled())
          logger.debug(
              "Ignoring MapperParsingException catch while parsing title: [{}]: [{}]",
              e.getMessage(),
              context.externalValue());
      }
    }

    if (metadata.get(Metadata.AUTHOR) != null) {
      try {
        context = context.createExternalValueContext(metadata.get(Metadata.AUTHOR));
        authorMapper.parse(context);
      } catch (MapperParsingException e) {
        if (!ignoreErrors) throw e;
        if (logger.isDebugEnabled())
          logger.debug(
              "Ignoring MapperParsingException catch while parsing author: [{}]: [{}]",
              e.getMessage(),
              context.externalValue());
      }
    }

    if (metadata.get(Metadata.KEYWORDS) != null) {
      try {
        context = context.createExternalValueContext(metadata.get(Metadata.KEYWORDS));
        keywordsMapper.parse(context);
      } catch (MapperParsingException e) {
        if (!ignoreErrors) throw e;
        if (logger.isDebugEnabled())
          logger.debug(
              "Ignoring MapperParsingException catch while parsing keywords: [{}]: [{}]",
              e.getMessage(),
              context.externalValue());
      }
    }

    if (contentType == null) {
      contentType = metadata.get(Metadata.CONTENT_TYPE);
    }
    if (contentType != null) {
      try {
        context = context.createExternalValueContext(contentType);
        contentTypeMapper.parse(context);
      } catch (MapperParsingException e) {
        if (!ignoreErrors) throw e;
        if (logger.isDebugEnabled())
          logger.debug(
              "Ignoring MapperParsingException catch while parsing content_type: [{}]: [{}]",
              e.getMessage(),
              context.externalValue());
      }
    }

    int length = content.length;
    // If we have CONTENT_LENGTH from Tika we use it
    if (metadata.get(Metadata.CONTENT_LENGTH) != null) {
      length = Integer.parseInt(metadata.get(Metadata.CONTENT_LENGTH));
    }

    try {
      context = context.createExternalValueContext(length);
      contentLengthMapper.parse(context);
    } catch (MapperParsingException e) {
      if (!ignoreErrors) throw e;
      if (logger.isDebugEnabled())
        logger.debug(
            "Ignoring MapperParsingException catch while parsing content_length: [{}]: [{}]",
            e.getMessage(),
            context.externalValue());
    }

    //        multiFields.parse(this, context);

    return null;
  }
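
A sketch of a document field this mapper accepts: either a bare base64 string, or an object with the underscore-prefixed keys parsed above. The values are illustrative, and the base64 payload is elided:

  // Hypothetical example input for the attachment Mapper.parse above.
  static final String EXAMPLE_ATTACHMENT_JSON =
      "{"
          + "\"_content\":\"<base64-encoded bytes>\"," // placeholder, not valid base64
          + "\"_content_type\":\"application/pdf\","
          + "\"_name\":\"resume.pdf\","
          + "\"_language\":\"en\","
          + "\"_indexed_chars\":100000,"
          + "\"_detect_language\":false"
          + "}";
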