Example #1
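 // Shard-level suggest: parses the suggest source (if any), runs the suggest phase against the
 // shard's searcher, and releases the searcher and parser in all cases.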
 @Override
 protected ShardSuggestResponse shardOperation(ShardSuggestRequest request)
     throws ElasticSearchException {
   IndexService indexService = indicesService.indexServiceSafe(request.index());
   IndexShard indexShard = indexService.shardSafe(request.shardId());
   final Engine.Searcher searcher = indexShard.searcher();
   XContentParser parser = null;
   try {
     BytesReference suggest = request.suggest();
     if (suggest != null && suggest.length() > 0) {
       parser = XContentFactory.xContent(suggest).createParser(suggest);
       if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
         throw new ElasticSearchIllegalArgumentException("suggest content missing");
       }
       final SuggestionSearchContext context =
           suggestPhase
               .parseElement()
               .parseInternal(
                   parser, indexService.mapperService(), request.index(), request.shardId());
       final Suggest result = suggestPhase.execute(context, searcher.reader());
       return new ShardSuggestResponse(request.index(), request.shardId(), result);
     }
     return new ShardSuggestResponse(request.index(), request.shardId(), new Suggest());
   } catch (Throwable ex) {
     throw new ElasticSearchException("failed to execute suggest", ex);
   } finally {
     searcher.release();
     if (parser != null) {
       parser.close();
     }
   }
 }
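   // Shard-level explain: fetches the doc by uid, builds a temporary SearchContext around the
   // engine's searcher, explains the (optionally rescored) query, and loads stored fields if asked.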
  protected ExplainResponse shardOperation(ExplainRequest request, int shardId)
      throws ElasticSearchException {
    IndexService indexService = indicesService.indexService(request.index());
    IndexShard indexShard = indexService.shardSafe(shardId);
    Term uidTerm =
        new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.getType(), request.getId()));
    Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
    if (!result.exists()) {
      return new ExplainResponse(false);
    }

    SearchContext context =
        new SearchContext(
            0,
            new ShardSearchRequest()
                .types(new String[] {request.getType()})
                .filteringAliases(request.getFilteringAlias()),
            null,
            result.searcher(),
            indexService,
            indexShard,
            scriptService);
    SearchContext.setCurrent(context);

    try {
      context.parsedQuery(parseQuery(request, indexService));
      context.preProcess();
      int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().reader.docBase;
      Explanation explanation;
      if (context.rescore() != null) {
        RescoreSearchContext ctx = context.rescore();
        Rescorer rescorer = ctx.rescorer();
        explanation = rescorer.explain(topLevelDocId, context, ctx);
      } else {
        explanation = context.searcher().explain(context.query(), topLevelDocId);
      }
      if (request.getFields() != null) {
        if (request.getFields().length == 1 && "_source".equals(request.getFields()[0])) {
          request.setFields(null); // Load the _source field
        }
        // The advantage is that we're not opening a second searcher to retrieve the _source.
        // Also, because we are working with the same searcher as engineGetResult, we can be sure
        // that a doc isn't deleted between the initial get and this call.
        GetResult getResult =
            indexShard
                .getService()
                .get(result, request.getId(), request.getType(), request.getFields());
        return new ExplainResponse(true, explanation, getResult);
      } else {
        return new ExplainResponse(true, explanation);
      }
    } catch (IOException e) {
      throw new ElasticSearchException("Could not explain", e);
    } finally {
      context.release();
      SearchContext.removeCurrent();
    }
  }
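  // Shard-level multi-get: optionally refreshes first, then fetches each requested item,
  // recording per-item failures instead of failing the whole request.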
  @Override
  protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, int shardId)
      throws ElasticSearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(shardId);

    if (request.refresh() && !request.realtime()) {
      indexShard.refresh(
          new Engine.Refresh("refresh_flag_mget").force(TransportGetAction.REFRESH_FORCE));
    }

    MultiGetShardResponse response = new MultiGetShardResponse();
    for (int i = 0; i < request.locations.size(); i++) {
      String type = request.types.get(i);
      String id = request.ids.get(i);
      String[] fields = request.fields.get(i);

      long version = request.versions.get(i);
      VersionType versionType = request.versionTypes.get(i);
      if (versionType == null) {
        versionType = VersionType.INTERNAL;
      }

      FetchSourceContext fetchSourceContext = request.fetchSourceContexts.get(i);
      try {
        GetResult getResult =
            indexShard
                .getService()
                .get(
                    type, id, fields, request.realtime(), version, versionType, fetchSourceContext);
        response.add(request.locations.get(i), new GetResponse(getResult));
      } catch (Throwable t) {
        if (TransportActions.isShardNotAvailableException(t)) {
          throw (ElasticSearchException) t;
        } else {
          logger.debug(
              "[{}][{}] failed to execute multi_get for [{}]/[{}]",
              t,
              request.index(),
              shardId,
              type,
              id);
          response.add(
              request.locations.get(i),
              new MultiGetResponse.Failure(
                  request.index(), type, id, ExceptionsHelper.detailedMessage(t)));
        }
      }
    }

    return response;
  }
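  // Parses a raw document source into a ParsedDocument; if parsing introduced new mappings,
  // pushes the mapping update to the master.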
  private ParsedDocument parseDocument(String index, String type, BytesReference doc) {
    MapperService mapperService = indexShard.mapperService();
    IndexService indexService = indexShard.indexService();

    // TODO: make parsing not dynamically create fields not in the original mapping
    Tuple<DocumentMapper, Boolean> docMapper = mapperService.documentMapperWithAutoCreate(type);
    ParsedDocument parsedDocument =
        docMapper.v1().parse(source(doc).type(type).flyweight(true)).setMappingsModified(docMapper);
    if (parsedDocument.mappingsModified()) {
      mappingUpdatedAction.updateMappingOnMaster(index, docMapper.v1(), indexService.indexUUID());
    }
    return parsedDocument;
  }
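 // Clears the requested shard-level caches; the query parser cache is always cleared, and if no
 // specific cache flag is set, everything is cleared.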
 @Override
 protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request)
     throws ElasticSearchException {
   IndexService service = indicesService.indexService(request.index());
   if (service != null) {
     // we always clear the query cache
     service.cache().queryParserCache().clear();
     boolean clearedAtLeastOne = false;
     if (request.filterCache()) {
       clearedAtLeastOne = true;
       service.cache().filter().clear();
     }
     if (request.fieldDataCache()) {
       clearedAtLeastOne = true;
       service.cache().fieldData().clear();
     }
     if (request.idCache()) {
       clearedAtLeastOne = true;
       service.cache().idCache().clear();
     }
     if (request.bloomCache()) {
       clearedAtLeastOne = true;
       service.cache().bloomCache().clear();
     }
     if (!clearedAtLeastOne) {
       service.cache().clear();
     }
     service.cache().invalidateCache();
   }
   return new ShardClearIndicesCacheResponse(request.index(), request.shardId());
 }
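 // Test fixture: creates an index with field data caching disabled and an in-memory IndexWriter
 // whose merge policy preserves doc ID order.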
 @Before
 public void setup() throws Exception {
   Settings settings = ImmutableSettings.builder().put("index.fielddata.cache", "none").build();
   indexService = createIndex("test", settings);
   mapperService = indexService.mapperService();
   indicesFieldDataCache = indexService.injector().getInstance(IndicesFieldDataCache.class);
   ifdService = indexService.fieldData();
    // LogByteSizeMergePolicy to preserve doc ID order
   writer =
       new IndexWriter(
           new RAMDirectory(),
           new IndexWriterConfig(new StandardAnalyzer())
               .setMergePolicy(new LogByteSizeMergePolicy()));
 }
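  // Wires up the per-shard state (searcher, result holders, recyclers) needed to execute one
  // search request, and resolves the filtering-alias filter up front.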
  public DefaultSearchContext(
      long id,
      ShardSearchRequest request,
      SearchShardTarget shardTarget,
      Engine.Searcher engineSearcher,
      IndexService indexService,
      IndexShard indexShard,
      ScriptService scriptService,
      CacheRecycler cacheRecycler,
      PageCacheRecycler pageCacheRecycler,
      BigArrays bigArrays) {
    this.id = id;
    this.request = request;
    this.searchType = request.searchType();
    this.shardTarget = shardTarget;
    this.engineSearcher = engineSearcher;
    this.scriptService = scriptService;
    this.cacheRecycler = cacheRecycler;
    this.pageCacheRecycler = pageCacheRecycler;
    this.bigArrays = bigArrays;
    this.dfsResult = new DfsSearchResult(id, shardTarget);
    this.queryResult = new QuerySearchResult(id, shardTarget);
    this.fetchResult = new FetchSearchResult(id, shardTarget);
    this.indexShard = indexShard;
    this.indexService = indexService;

    this.searcher = new ContextIndexSearcher(this, engineSearcher);

    // initialize the filtering alias based on the provided filters
    aliasFilter = indexService.aliasesService().aliasFilter(request.filteringAliases());
  }
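  // Parses a previously fetched document for percolation, keeping the raw source around when
  // highlighting will need it.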
  private ParsedDocument parseFetchedDoc(
      PercolateContext context,
      BytesReference fetchedDoc,
      IndexService documentIndexService,
      String type) {
    ParsedDocument doc = null;
    XContentParser parser = null;
    try {
      parser = XContentFactory.xContent(fetchedDoc).createParser(fetchedDoc);
      MapperService mapperService = documentIndexService.mapperService();
      DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(type);
      doc = docMapper.parse(source(parser).type(type).flyweight(true));

      if (context.highlight() != null) {
        doc.setSource(fetchedDoc);
      }
    } catch (Throwable e) {
      throw new ElasticsearchParseException("failed to parse request", e);
    } finally {
      if (parser != null) {
        parser.close();
      }
    }

    if (doc == null) {
      throw new ElasticsearchParseException("No doc to percolate in the request");
    }

    return doc;
  }
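 // Shard lifecycle hook: once the percolator index (or the first local shard of an index) has
 // started, load the registered percolator queries exactly once, guarded by the mutex.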
 @Override
 public void afterIndexShardStarted(IndexShard indexShard) {
   if (indexShard.shardId().index().name().equals(INDEX_NAME)) {
     // percolator index has started, fetch what we can from it and initialize the indices
     // we have
     synchronized (mutex) {
       if (initialQueriesFetchDone) {
         return;
       }
       // we load the queries for all existing indices
       for (IndexService indexService : indicesService) {
         // only load queries for "this" index percolator service
         if (indexService.index().equals(index())) {
           logger.debug(
               "loading percolator queries for index [{}]...", indexService.index().name());
           loadQueries(indexService.index().name());
           logger.trace(
               "done loading percolator queries for index [{}]", indexService.index().name());
         }
       }
       initialQueriesFetchDone = true;
     }
   }
   if (!indexShard.shardId().index().equals(index())) {
     // not our index, bail
     return;
   }
   if (!percolatorAllocated()) {
     return;
   }
    // we are only interested in when the first shard on this node has been created for an index;
    // when it has, fetch the relevant queries if they haven't been fetched already
   IndexService indexService = indicesService.indexService(indexShard.shardId().index().name());
   if (indexService == null) {
     return;
   }
   if (indexService.numberOfShards() != 1) {
     return;
   }
   synchronized (mutex) {
     if (initialQueriesFetchDone) {
       return;
     }
     // we load queries for this index
     logger.debug("loading percolator queries for index [{}]...", indexService.index().name());
     loadQueries(index.name());
     logger.trace("done loading percolator queries for index [{}]", indexService.index().name());
     initialQueriesFetchDone = true;
   }
 }
Example #10
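 // Loads all queries registered under the given index name (the "type" in the percolator index)
 // from shard 0 and registers them with the in-memory percolator.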
 private void loadQueries(String indexName) {
   IndexService indexService = percolatorIndexService();
   IndexShard shard = indexService.shard(0);
   Engine.Searcher searcher = shard.searcher();
   try {
      // create a query to fetch all queries that are registered under the index name
      // (which is the type in the percolator)
     Query query = new DeletionAwareConstantScoreQuery(indexQueriesFilter(indexName));
     QueriesLoaderCollector queries = new QueriesLoaderCollector();
     searcher.searcher().search(query, queries);
     percolator.addQueries(queries.queries());
   } catch (IOException e) {
      throw new PercolatorException(index, "failed to load queries from percolator index", e);
   } finally {
     searcher.release();
   }
 }
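  // Resolves the requested types against the shard's mappings (with wildcard support) and
  // collects the matching field mappings per type.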
  @Override
  protected GetFieldMappingsResponse shardOperation(
      final GetFieldMappingsIndexRequest request, ShardId shardId) throws ElasticsearchException {
    assert shardId != null;
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    Collection<String> typeIntersection;
    if (request.types().length == 0) {
      typeIntersection = indexService.mapperService().types();

    } else {
      typeIntersection =
          Collections2.filter(
              indexService.mapperService().types(),
              new Predicate<String>() {

                @Override
                public boolean apply(String type) {
                  return Regex.simpleMatch(request.types(), type);
                }
              });
      if (typeIntersection.isEmpty()) {
        throw new TypeMissingException(shardId.index(), request.types());
      }
    }

    MapBuilder<String, ImmutableMap<String, FieldMappingMetaData>> typeMappings =
        new MapBuilder<>();
    for (String type : typeIntersection) {
      DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
      ImmutableMap<String, FieldMappingMetaData> fieldMapping =
          findFieldMappingsByType(documentMapper, request);
      if (!fieldMapping.isEmpty()) {
        typeMappings.put(type, fieldMapping);
      }
    }

    return new GetFieldMappingsResponse(
        ImmutableMap.of(shardId.getIndex(), typeMappings.immutableMap()));
  }
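  // Shard-level multi term vectors: computes term vectors per request item, recording per-item
  // failures instead of failing the whole batch.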
  @Override
  protected MultiTermVectorsShardResponse shardOperation(
      MultiTermVectorsShardRequest request, int shardId) throws ElasticsearchException {

    MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
    for (int i = 0; i < request.locations.size(); i++) {
      TermVectorRequest termVectorRequest = request.requests.get(i);

      try {
        IndexService indexService = indicesService.indexServiceSafe(request.index());
        IndexShard indexShard = indexService.shardSafe(shardId);
        TermVectorResponse termVectorResponse =
            indexShard.termVectorService().getTermVector(termVectorRequest);
        response.add(request.locations.get(i), termVectorResponse);
      } catch (Throwable t) {
        if (TransportActions.isShardNotAvailableException(t)) {
          throw (ElasticsearchException) t;
        } else {
          logger.debug(
              "[{}][{}] failed to execute multi term vectors for [{}]/[{}]",
              t,
              request.index(),
              shardId,
              termVectorRequest.type(),
              termVectorRequest.id());
          response.add(
              request.locations.get(i),
              new MultiTermVectorsResponse.Failure(
                  request.index(),
                  termVectorRequest.type(),
                  termVectorRequest.id(),
                  ExceptionsHelper.detailedMessage(t)));
        }
      }
    }

    return response;
  }
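  // Shard-level get: optionally refreshes for non-realtime requests, then delegates to the
  // shard's get service.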
  @Override
  protected GetResponse shardOperation(GetRequest request, int shardId)
      throws ElasticsearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(shardId);

    if (request.refresh() && !request.realtime()) {
      indexShard.refresh(new Engine.Refresh("refresh_flag_get").force(REFRESH_FORCE));
    }

    GetResult result =
        indexShard
            .getService()
            .get(
                request.type(),
                request.id(),
                request.fields(),
                request.realtime(),
                request.version(),
                request.versionType(),
                request.fetchSourceContext());
    return new GetResponse(result);
  }
Example #14
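 // Test helper: builds a minimal FieldMapper for the requested field data type and asks the
 // field data service for the matching IndexFieldData instance.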
 public <IFD extends IndexFieldData<?>> IFD getForField(FieldDataType type, String fieldName) {
   final FieldMapper<?> mapper;
   final BuilderContext context =
       new BuilderContext(indexService.settingsService().getSettings(), new ContentPath(1));
   if (type.getType().equals("string")) {
     mapper =
         MapperBuilders.stringField(fieldName)
             .tokenized(false)
             .fieldDataSettings(type.getSettings())
             .build(context);
   } else if (type.getType().equals("float")) {
     mapper =
         MapperBuilders.floatField(fieldName).fieldDataSettings(type.getSettings()).build(context);
   } else if (type.getType().equals("double")) {
     mapper =
         MapperBuilders.doubleField(fieldName)
             .fieldDataSettings(type.getSettings())
             .build(context);
   } else if (type.getType().equals("long")) {
     mapper =
         MapperBuilders.longField(fieldName).fieldDataSettings(type.getSettings()).build(context);
   } else if (type.getType().equals("int")) {
     mapper =
         MapperBuilders.integerField(fieldName)
             .fieldDataSettings(type.getSettings())
             .build(context);
   } else if (type.getType().equals("short")) {
     mapper =
         MapperBuilders.shortField(fieldName).fieldDataSettings(type.getSettings()).build(context);
   } else if (type.getType().equals("byte")) {
     mapper =
         MapperBuilders.byteField(fieldName).fieldDataSettings(type.getSettings()).build(context);
   } else if (type.getType().equals("geo_point")) {
     mapper =
         MapperBuilders.geoPointField(fieldName)
             .fieldDataSettings(type.getSettings())
             .build(context);
   } else if (type.getType().equals("_parent")) {
     mapper = MapperBuilders.parent().type(fieldName).build(context);
   } else if (type.getType().equals("binary")) {
     mapper =
         MapperBuilders.binaryField(fieldName)
             .fieldDataSettings(type.getSettings())
             .build(context);
   } else {
     throw new UnsupportedOperationException(type.getType());
   }
   return ifdService.getForField(mapper);
 }
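  // Extracts the query from the explain request source, supporting both an inline "query"
  // element and a binary "query_binary" element.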
  private ParsedQuery parseQuery(ExplainRequest request, IndexService indexService) {
    try {
      XContentParser parser = XContentHelper.createParser(request.getSource());
      for (XContentParser.Token token = parser.nextToken();
          token != XContentParser.Token.END_OBJECT;
          token = parser.nextToken()) {
        if (token == XContentParser.Token.FIELD_NAME) {
          String fieldName = parser.currentName();
          if ("query".equals(fieldName)) {
            return indexService.queryParserService().parse(parser);
          } else if ("query_binary".equals(fieldName)) {
            byte[] querySource = parser.binaryValue();
            XContentParser qSourceParser =
                XContentFactory.xContent(querySource).createParser(querySource);
            return indexService.queryParserService().parse(qSourceParser);
          }
        }
      }
    } catch (Exception e) {
      throw new ElasticSearchException("Couldn't parse query from source.", e);
    }

    throw new ElasticSearchException("No query specified");
  }
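  // Legacy shard-level get: resolves the current doc id/version via the bloom cache, loads
  // stored fields through a FieldSelector, and evaluates script fields (mvel) when requested.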
  @Override
  protected GetResponse shardOperation(GetRequest request, int shardId)
      throws ElasticSearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    BloomCache bloomCache = indexService.cache().bloomCache();
    IndexShard indexShard = indexService.shardSafe(shardId);

    DocumentMapper docMapper = indexService.mapperService().documentMapper(request.type());
    if (docMapper == null) {
      throw new TypeMissingException(new Index(request.index()), request.type());
    }

    if (request.refresh()) {
      indexShard.refresh(new Engine.Refresh(false));
    }

    Engine.Searcher searcher = indexShard.searcher();
    boolean exists = false;
    byte[] source = null;
    Map<String, GetField> fields = null;
    long version = -1;
    try {
      UidField.DocIdAndVersion docIdAndVersion =
          loadCurrentVersionFromIndex(
              bloomCache, searcher, docMapper.uidMapper().term(request.type(), request.id()));
      if (docIdAndVersion != null && docIdAndVersion.docId != Lucene.NO_DOC) {
        if (docIdAndVersion.version > 0) {
          version = docIdAndVersion.version;
        }
        exists = true;
        FieldSelector fieldSelector = buildFieldSelectors(docMapper, request.fields());
        if (fieldSelector != null) {
          Document doc = docIdAndVersion.reader.document(docIdAndVersion.docId, fieldSelector);
          source = extractSource(doc, docMapper);

          for (Object oField : doc.getFields()) {
            Fieldable field = (Fieldable) oField;
            String name = field.name();
            Object value = null;
            FieldMappers fieldMappers = docMapper.mappers().indexName(field.name());
            if (fieldMappers != null) {
              FieldMapper mapper = fieldMappers.mapper();
              if (mapper != null) {
                name = mapper.names().fullName();
                value = mapper.valueForSearch(field);
              }
            }
            if (value == null) {
              if (field.isBinary()) {
                value = field.getBinaryValue();
              } else {
                value = field.stringValue();
              }
            }

            if (fields == null) {
              fields = newHashMapWithExpectedSize(2);
            }

            GetField getField = fields.get(name);
            if (getField == null) {
              getField = new GetField(name, new ArrayList<Object>(2));
              fields.put(name, getField);
            }
            getField.values().add(value);
          }
        }

        // now, run the script fields if needed
        if (request.fields() != null && request.fields().length > 0) {
          SearchLookup searchLookup = null;
          for (String field : request.fields()) {
            String script = null;
            if (field.contains("_source.") || field.contains("doc[")) {
              script = field;
            } else {
              FieldMappers x = docMapper.mappers().smartName(field);
              if (x != null && !x.mapper().stored()) {
                script = "_source." + x.mapper().names().fullName();
              }
            }
            if (script != null) {
              if (searchLookup == null) {
                searchLookup =
                    new SearchLookup(
                        indexService.mapperService(), indexService.cache().fieldData());
              }
              SearchScript searchScript = scriptService.search(searchLookup, "mvel", script, null);
              searchScript.setNextReader(docIdAndVersion.reader);
              searchScript.setNextDocId(docIdAndVersion.docId);

              try {
                Object value = searchScript.run();
                if (fields == null) {
                  fields = newHashMapWithExpectedSize(2);
                }
                GetField getField = fields.get(field);
                if (getField == null) {
                  getField = new GetField(field, new ArrayList<Object>(2));
                  fields.put(field, getField);
                }
                getField.values().add(value);
              } catch (RuntimeException e) {
                if (logger.isTraceEnabled()) {
                  logger.trace("failed to execute get request script field [{}]", e, script);
                }
                // ignore
              }
            }
          }
        }
      }
    } catch (IOException e) {
      throw new ElasticSearchException(
          "Failed to get type [" + request.type() + "] and id [" + request.id() + "]", e);
    } finally {
      searcher.release();
    }
    return new GetResponse(
        request.index(), request.type(), request.id(), version, exists, source, fields);
  }
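 // Thin delegates exposing the underlying IndexService's field data and cache services.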
 public IndexFieldDataService fieldData() {
   return indexService.fieldData();
 }
 public DocSetCache docSetCache() {
   return indexService.cache().docSet();
 }
 public FilterCache filterCache() {
   return indexService.cache().filter();
 }
 public SimilarityService similarityService() {
   return indexService.similarityService();
 }
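  // Newer shard-level explain: like the variant above, but uses DefaultSearchContext, chains
  // every rescorer into the explanation, and honors the fetch-source context.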
  @Override
  protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId)
      throws ElasticsearchException {
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    IndexShard indexShard = indexService.shardSafe(shardId.id());
    Term uidTerm =
        new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
    Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
    if (!result.exists()) {
      return new ExplainResponse(shardId.getIndex(), request.type(), request.id(), false);
    }

    SearchContext context =
        new DefaultSearchContext(
            0,
            new ShardSearchRequest(request)
                .types(new String[] {request.type()})
                .filteringAliases(request.filteringAlias())
                .nowInMillis(request.nowInMillis),
            null,
            result.searcher(),
            indexService,
            indexShard,
            scriptService,
            pageCacheRecycler,
            bigArrays,
            threadPool.estimatedTimeInMillisCounter());
    SearchContext.setCurrent(context);

    try {
      context.parsedQuery(indexService.queryParserService().parseQuery(request.source()));
      context.preProcess();
      int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
      Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
      for (RescoreSearchContext ctx : context.rescore()) {
        Rescorer rescorer = ctx.rescorer();
        explanation = rescorer.explain(topLevelDocId, context, ctx, explanation);
      }
      if (request.fields() != null
          || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
        // The advantage is that we're not opening a second searcher to retrieve the _source.
        // Also, because we are working with the same searcher as engineGetResult, we can be sure
        // that a doc isn't deleted between the initial get and this call.
        GetResult getResult =
            indexShard
                .getService()
                .get(
                    result,
                    request.id(),
                    request.type(),
                    request.fields(),
                    request.fetchSourceContext(),
                    false);
        return new ExplainResponse(
            shardId.getIndex(), request.type(), request.id(), true, explanation, getResult);
      } else {
        return new ExplainResponse(
            shardId.getIndex(), request.type(), request.id(), true, explanation);
      }
    } catch (IOException e) {
      throw new ElasticsearchException("Could not explain", e);
    } finally {
      context.close();
      SearchContext.removeCurrent();
    }
  }
Example #22
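 // Builds a Nested helper from parent/child filters via the shard's bitset filter cache.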
 protected Nested createNested(Filter parentFilter, Filter childFilter) {
   BitsetFilterCache s = indexService.bitsetFilterCache();
   return new Nested(s.getBitDocIdSetFilter(parentFilter), s.getBitDocIdSetFilter(childFilter));
 }
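  // Parses the percolate request body: the "doc" to percolate plus optional query/filter, sort,
  // size, highlighting, facet and aggregation elements; re-parses the body afterwards when
  // highlighting needs the raw doc source.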
  private ParsedDocument parseRequest(
      IndexService documentIndexService, PercolateShardRequest request, PercolateContext context)
      throws ElasticsearchException {
    BytesReference source = request.source();
    if (source == null || source.length() == 0) {
      return null;
    }

    // TODO: combine all feature parse elements into one map
    Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements();
    Map<String, ? extends SearchParseElement> facetElements = facetPhase.parseElements();
    Map<String, ? extends SearchParseElement> aggregationElements =
        aggregationPhase.parseElements();

    ParsedDocument doc = null;
    XContentParser parser = null;

    // Some queries (e.g. the function_score query with decay functions) rely on a SearchContext
    // being set. We switch types because this context needs to be in the context of the
    // percolate queries in the shard, not the in-memory percolate doc.
    String[] previousTypes = context.types();
    context.types(new String[] {TYPE_NAME});
    SearchContext.setCurrent(context);
    try {
      parser = XContentFactory.xContent(source).createParser(source);
      String currentFieldName = null;
      XContentParser.Token token;
      while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
          currentFieldName = parser.currentName();
          // we check for "doc" here, so that the next token is the START_OBJECT that starts
          // the actual document
          if ("doc".equals(currentFieldName)) {
            if (doc != null) {
              throw new ElasticsearchParseException("Either specify doc or get, not both");
            }

            MapperService mapperService = documentIndexService.mapperService();
            DocumentMapper docMapper =
                mapperService.documentMapperWithAutoCreate(request.documentType());
            doc = docMapper.parse(source(parser).type(request.documentType()).flyweight(true));
            // the document parsing exits the "doc" object, so we need to set the new current
            // field
            currentFieldName = parser.currentName();
          }
        } else if (token == XContentParser.Token.START_OBJECT) {
          SearchParseElement element = hlElements.get(currentFieldName);
          if (element == null) {
            element = facetElements.get(currentFieldName);
            if (element == null) {
              element = aggregationElements.get(currentFieldName);
            }
          }

          if ("query".equals(currentFieldName)) {
            if (context.percolateQuery() != null) {
              throw new ElasticsearchParseException("Either specify query or filter, not both");
            }
            context.percolateQuery(documentIndexService.queryParserService().parse(parser).query());
          } else if ("filter".equals(currentFieldName)) {
            if (context.percolateQuery() != null) {
              throw new ElasticsearchParseException("Either specify query or filter, not both");
            }
            Filter filter =
                documentIndexService.queryParserService().parseInnerFilter(parser).filter();
            context.percolateQuery(new XConstantScoreQuery(filter));
          } else if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          } else if (element != null) {
            element.parse(parser, context);
          }
        } else if (token == XContentParser.Token.START_ARRAY) {
          if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          }
        } else if (token == null) {
          break;
        } else if (token.isValue()) {
          if ("size".equals(currentFieldName)) {
            context.size(parser.intValue());
            if (context.size() < 0) {
              throw new ElasticsearchParseException(
                  "size is set to ["
                      + context.size()
                      + "] and is expected to be higher or equal to 0");
            }
          } else if ("sort".equals(currentFieldName)) {
            parseSort(parser, context);
          } else if ("track_scores".equals(currentFieldName)
              || "trackScores".equals(currentFieldName)) {
            context.trackScores(parser.booleanValue());
          }
        }
      }

      // We need to get the actual source from the request body for highlighting, so parse the
      // request body again
      // and only get the doc source.
      if (context.highlight() != null) {
        parser.close();
        currentFieldName = null;
        parser = XContentFactory.xContent(source).createParser(source);
        token = parser.nextToken();
        assert token == XContentParser.Token.START_OBJECT;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
          if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
          } else if (token == XContentParser.Token.START_OBJECT) {
            if ("doc".equals(currentFieldName)) {
              BytesStreamOutput bStream = new BytesStreamOutput();
              XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream);
              builder.copyCurrentStructure(parser);
              builder.close();
              doc.setSource(bStream.bytes());
              break;
            } else {
              parser.skipChildren();
            }
          } else if (token == null) {
            break;
          }
        }
      }

    } catch (Throwable e) {
      throw new ElasticsearchParseException("failed to parse request", e);
    } finally {
      context.types(previousTypes);
      SearchContext.removeCurrent();
      if (parser != null) {
        parser.close();
      }
    }

    return doc;
  }
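  // Entry point for a shard-level percolate: parses the request, validates sort/highlight
  // constraints, picks the percolator index (single vs. nested) and percolator type, and runs it.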
  public PercolateShardResponse percolate(PercolateShardRequest request) {
    IndexService percolateIndexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = percolateIndexService.shardSafe(request.shardId());

    ShardPercolateService shardPercolateService = indexShard.shardPercolateService();
    shardPercolateService.prePercolate();
    long startTime = System.nanoTime();

    SearchShardTarget searchShardTarget =
        new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
    final PercolateContext context =
        new PercolateContext(
            request,
            searchShardTarget,
            indexShard,
            percolateIndexService,
            cacheRecycler,
            pageCacheRecycler,
            bigArrays,
            scriptService);
    try {
      ParsedDocument parsedDocument = parseRequest(percolateIndexService, request, context);
      if (context.percolateQueries().isEmpty()) {
        return new PercolateShardResponse(context, request.index(), request.shardId());
      }

      if (request.docSource() != null && request.docSource().length() != 0) {
        parsedDocument =
            parseFetchedDoc(
                context, request.docSource(), percolateIndexService, request.documentType());
      } else if (parsedDocument == null) {
        throw new ElasticsearchIllegalArgumentException("Nothing to percolate");
      }

      if (context.percolateQuery() == null
          && (context.trackScores()
              || context.doSort
              || context.facets() != null
              || context.aggregations() != null)) {
        context.percolateQuery(new MatchAllDocsQuery());
      }

      if (context.doSort && !context.limit) {
        throw new ElasticsearchIllegalArgumentException("Can't sort if size isn't specified");
      }

      if (context.highlight() != null && !context.limit) {
        throw new ElasticsearchIllegalArgumentException("Can't highlight if size isn't specified");
      }

      if (context.size() < 0) {
        context.size(0);
      }

      // parse the source into a single MemoryIndex if it is one document, or index multiple
      // in-memory docs if the mapping has nested objects
      PercolatorIndex percolatorIndex;
      if (indexShard.mapperService().documentMapper(request.documentType()).hasNestedObjects()) {
        percolatorIndex = multi;
      } else {
        percolatorIndex = single;
      }

      PercolatorType action;
      if (request.onlyCount()) {
        action = context.percolateQuery() != null ? queryCountPercolator : countPercolator;
      } else {
        if (context.doSort) {
          action = topMatchingPercolator;
        } else if (context.percolateQuery() != null) {
          action = context.trackScores() ? scoringPercolator : queryPercolator;
        } else {
          action = matchPercolator;
        }
      }
      context.percolatorTypeId = action.id();

      percolatorIndex.prepare(context, parsedDocument);

      indexShard.readAllowed();
      return action.doPercolate(request, context);
    } finally {
      context.release();
      shardPercolateService.postPercolate(System.nanoTime() - startTime);
    }
  }
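  // Primary-shard indexing: validates required routing, prepares an index or create operation,
  // pushes any mapping updates to the master, and propagates the resulting version to replicas.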
  @Override
  protected PrimaryResponse<IndexResponse, IndexRequest> shardOperationOnPrimary(
      ClusterState clusterState, PrimaryOperationRequest shardRequest) {
    final IndexRequest request = shardRequest.request;

    // if routing is required, validate that the request supplies it
    IndexMetaData indexMetaData = clusterState.metaData().index(shardRequest.shardId.getIndex());
    MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
    if (mappingMd != null && mappingMd.routing().required()) {
      if (request.routing() == null) {
        throw new RoutingMissingException(
            shardRequest.shardId.getIndex(), request.type(), request.id());
      }
    }

    IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
    IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
    SourceToParse sourceToParse =
        SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source())
            .type(request.type())
            .id(request.id())
            .routing(request.routing())
            .parent(request.parent())
            .timestamp(request.timestamp())
            .ttl(request.ttl());
    long version;
    boolean created;
    Engine.IndexingOperation op;
    if (request.opType() == IndexRequest.OpType.INDEX) {
      Engine.Index index =
          indexShard.prepareIndex(
              sourceToParse,
              request.version(),
              request.versionType(),
              Engine.Operation.Origin.PRIMARY,
              request.canHaveDuplicates());
      if (index.parsedDoc().mappingsModified()) {
        mappingUpdatedAction.updateMappingOnMaster(
            shardRequest.shardId.getIndex(), index.docMapper(), indexService.indexUUID());
      }
      indexShard.index(index);
      version = index.version();
      op = index;
      created = index.created();
    } else {
      Engine.Create create =
          indexShard.prepareCreate(
              sourceToParse,
              request.version(),
              request.versionType(),
              Engine.Operation.Origin.PRIMARY,
              request.canHaveDuplicates(),
              request.autoGeneratedId());
      if (create.parsedDoc().mappingsModified()) {
        mappingUpdatedAction.updateMappingOnMaster(
            shardRequest.shardId.getIndex(), create.docMapper(), indexService.indexUUID());
      }
      indexShard.create(create);
      version = create.version();
      op = create;
      created = true;
    }
    if (request.refresh()) {
      try {
        indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
      } catch (Throwable e) {
        // ignore
      }
    }

    // update the version on the request, so it will be used for the replicas
    request.version(version);
    request.versionType(request.versionType().versionTypeForReplicationAndRecovery());

    assert request.versionType().validateVersionForWrites(request.version());

    IndexResponse response =
        new IndexResponse(
            shardRequest.shardId.getIndex(), request.type(), request.id(), version, created);
    return new PrimaryResponse<>(shardRequest.request, response, op);
  }
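 // Thin delegates to the underlying IndexService.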
 public MapperService mapperService() {
   return indexService.mapperService();
 }
 public AnalysisService analysisService() {
   return indexService.analysisService();
 }
 public IndexQueryParserService queryParserService() {
   return indexService.queryParserService();
 }
Example #29
 public IdCache idCache() {
   return indexService.cache().idCache();
 }
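  // Resolves, and lazily builds under double-checked locking, the IndexFieldData for a field:
  // picks a builder by type and format, sets up the per-field cache, and memoizes the result.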
  public <IFD extends IndexFieldData<?>> IFD getForField(FieldMapper<?> mapper) {
    final FieldMapper.Names fieldNames = mapper.names();
    final FieldDataType type = mapper.fieldDataType();
    final boolean docValues = mapper.hasDocValues();
    IndexFieldData<?> fieldData = loadedFieldData.get(fieldNames.indexName());
    if (fieldData == null) {
      synchronized (loadedFieldData) {
        fieldData = loadedFieldData.get(fieldNames.indexName());
        if (fieldData == null) {
          IndexFieldData.Builder builder = null;
          String format = type.getFormat(indexSettings);
          if (format != null
              && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format)
              && !docValues) {
            logger.warn(
                "field ["
                    + fieldNames.fullName()
                    + "] has no doc values, will use default field data format");
            format = null;
          }
          if (format != null) {
            builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format));
            if (builder == null) {
              logger.warn(
                  "failed to find format ["
                      + format
                      + "] for field ["
                      + fieldNames.fullName()
                      + "], will use default");
            }
          }
          if (builder == null && docValues) {
            builder = docValuesBuildersByType.get(type.getType());
          }
          if (builder == null) {
            builder = buildersByType.get(type.getType());
          }
          if (builder == null) {
            throw new ElasticsearchIllegalArgumentException(
                "failed to find field data builder for field "
                    + fieldNames.fullName()
                    + ", and type "
                    + type.getType());
          }

          IndexFieldDataCache cache = fieldDataCaches.get(fieldNames.indexName());
          if (cache == null) {
            // we default to the node-level cache, which in turn defaults to being unbounded;
            // this means changing the node-level settings is simple: just set the bounds there
            String cacheType =
                type.getSettings().get("cache", indexSettings.get("index.fielddata.cache", "node"));
            if ("resident".equals(cacheType)) {
              cache =
                  new IndexFieldDataCache.Resident(
                      indexService, fieldNames, type, indicesFieldDataCacheListener);
            } else if ("soft".equals(cacheType)) {
              cache =
                  new IndexFieldDataCache.Soft(
                      indexService, fieldNames, type, indicesFieldDataCacheListener);
            } else if ("node".equals(cacheType)) {
              cache =
                  indicesFieldDataCache.buildIndexFieldDataCache(
                      indexService, index, fieldNames, type);
            } else {
              throw new ElasticsearchIllegalArgumentException(
                  "cache type not supported ["
                      + cacheType
                      + "] for field ["
                      + fieldNames.fullName()
                      + "]");
            }
            fieldDataCaches.put(fieldNames.indexName(), cache);
          }

          GlobalOrdinalsBuilder globalOrdinalBuilder =
              new InternalGlobalOrdinalsBuilder(index(), indexSettings);
          fieldData =
              builder.build(
                  index,
                  indexSettings,
                  mapper,
                  cache,
                  circuitBreakerService,
                  indexService.mapperService(),
                  globalOrdinalBuilder);
          loadedFieldData.put(fieldNames.indexName(), fieldData);
        }
      }
    }
    return (IFD) fieldData;
  }