Example #1
 @Override
 protected ShardSuggestResponse shardOperation(ShardSuggestRequest request)
     throws ElasticSearchException {
   IndexService indexService = indicesService.indexServiceSafe(request.index());
   IndexShard indexShard = indexService.shardSafe(request.shardId());
   final Engine.Searcher searcher = indexShard.searcher();
   XContentParser parser = null;
   try {
     BytesReference suggest = request.suggest();
     if (suggest != null && suggest.length() > 0) {
       parser = XContentFactory.xContent(suggest).createParser(suggest);
       if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
         throw new ElasticSearchIllegalArgumentException("suggest content missing");
       }
       final SuggestionSearchContext context =
           suggestPhase
               .parseElement()
               .parseInternal(
                   parser, indexService.mapperService(), request.index(), request.shardId());
       final Suggest result = suggestPhase.execute(context, searcher.reader());
       return new ShardSuggestResponse(request.index(), request.shardId(), result);
     }
     return new ShardSuggestResponse(request.index(), request.shardId(), new Suggest());
   } catch (Throwable ex) {
     throw new ElasticSearchException("failed to execute suggest", ex);
   } finally {
     searcher.release();
     if (parser != null) {
       parser.close();
     }
   }
 }
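
Note the release pattern above: the searcher and the parser are freed in the finally block so they are released on every exit path, including the exception paths. Below is a minimal standalone sketch of the same guarantee using Java's try-with-resources (Java 9+); Searcher and Parser here are hypothetical stand-ins, not the Elasticsearch types:

  final class ReleaseOnAllPaths {
    interface Searcher extends AutoCloseable {
      String search(String query) throws Exception;
    }

    interface Parser extends AutoCloseable {
      String nextToken() throws Exception;
    }

    static String run(Searcher searcher, Parser parser) throws Exception {
      // try-with-resources closes parser, then searcher, on every exit path,
      // which is the guarantee the finally block above provides by hand.
      try (searcher; parser) {
        return searcher.search(parser.nextToken());
      }
    }
  }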
Example #2
  @Override
  protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
    ShardDeleteRequest request = shardRequest.request;
    IndexShard indexShard =
        indicesService
            .indexServiceSafe(shardRequest.request.index())
            .shardSafe(shardRequest.shardId);
    Engine.Delete delete =
        indexShard
            .prepareDelete(request.type(), request.id(), request.version())
            .origin(Engine.Operation.Origin.REPLICA);

    // IndexDeleteAction doesn't support version type at the moment. Hard coded for the INTERNAL
    // version
    delete.versionType(VersionType.INTERNAL.versionTypeForReplicationAndRecovery());

    assert delete.versionType().validateVersion(delete.version());

    indexShard.delete(delete);

    if (request.refresh()) {
      try {
        indexShard.refresh(new Engine.Refresh("refresh_flag_delete").force(false));
      } catch (Exception e) {
        // ignore
      }
    }
  }
Example #3
  @Override
  protected PrimaryResponse<ShardDeleteResponse, ShardDeleteRequest> shardOperationOnPrimary(
      ClusterState clusterState, PrimaryOperationRequest shardRequest) {
    ShardDeleteRequest request = shardRequest.request;
    IndexShard indexShard =
        indicesService
            .indexServiceSafe(shardRequest.request.index())
            .shardSafe(shardRequest.shardId);
    Engine.Delete delete =
        indexShard
            .prepareDelete(request.type(), request.id(), request.version())
            .origin(Engine.Operation.Origin.PRIMARY);
    indexShard.delete(delete);
    // update the request version so the replicas apply the same version
    request.version(delete.version());

    if (request.refresh()) {
      try {
        indexShard.refresh(new Engine.Refresh("refresh_flag_delete").force(false));
      } catch (Exception e) {
        // ignore
      }
    }

    ShardDeleteResponse response = new ShardDeleteResponse(delete.version(), delete.found());
    return new PrimaryResponse<ShardDeleteResponse, ShardDeleteRequest>(
        shardRequest.request, response, null);
  }
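
On the primary, the engine resolves the final version and the request is updated so replicas replay the delete with exactly that version. A toy sketch of the handoff under those assumptions; DeleteOp, executeOnPrimary, and executeOnReplica are hypothetical names:

  final class VersionHandoffSketch {
    static final class DeleteOp {
      long version;
    }

    static long executeOnPrimary(DeleteOp op) {
      op.version = 42L; // stand-in for the engine resolving the real version
      return op.version;
    }

    static void executeOnReplica(DeleteOp op, long resolvedVersion) {
      // replay with exactly the version the primary resolved; a replica must
      // never resolve its own version, or the copies diverge
      op.version = resolvedVersion;
    }
  }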
Example #4
  @Inject
  public PercolatorService(
      Index index,
      @IndexSettings Settings indexSettings,
      IndicesService indicesService,
      PercolatorExecutor percolator) {
    super(index, indexSettings);
    this.indicesService = indicesService;
    this.percolator = percolator;
    this.shardLifecycleListener = new ShardLifecycleListener();
    this.indicesService.indicesLifecycle().addListener(shardLifecycleListener);
    this.percolator.setIndicesService(indicesService);

    // if percolator is already allocated, make sure to register real time percolation
    if (percolatorAllocated()) {
      IndexService percolatorIndexService = percolatorIndexService();
      if (percolatorIndexService != null) {
        for (IndexShard indexShard : percolatorIndexService) {
          try {
            indexShard.addListener(realTimePercolatorOperationListener);
          } catch (Exception e) {
            // ignore
          }
        }
      }
    }
  }
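
The constructor registers a lifecycle listener and keeps the reference in a field, which is what allows close() (see Example #16 below) to unregister the same instance later. A minimal sketch of that pairing, with a hypothetical Lifecycle interface:

  final class ListenerRegistration {
    interface Lifecycle {
      void addListener(Runnable listener);

      void removeListener(Runnable listener);
    }

    private final Lifecycle lifecycle;
    private final Runnable listener = () -> {};

    ListenerRegistration(Lifecycle lifecycle) {
      this.lifecycle = lifecycle;
      lifecycle.addListener(listener); // registered once, in the constructor
    }

    void close() {
      lifecycle.removeListener(listener); // the exact same instance is removed
    }
  }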
Example #5
 private void loadQueries(IndexShard shard) {
   try {
     shard.refresh(new Engine.Refresh("percolator_load_queries").force(true));
     // Maybe add a mode load? This isn't really a write. We need write b/c state=post_recovery
     Engine.Searcher searcher =
         shard.acquireSearcher("percolator_load_queries", IndexShard.Mode.WRITE);
     try {
       Query query =
           new XConstantScoreQuery(
               indexCache
                   .filter()
                   .cache(
                       new TermFilter(
                           new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME))));
       QueriesLoaderCollector queryCollector =
           new QueriesLoaderCollector(
               PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
       searcher.searcher().search(query, queryCollector);
       Map<HashedBytesRef, Query> queries = queryCollector.queries();
       for (Map.Entry<HashedBytesRef, Query> entry : queries.entrySet()) {
         Query previousQuery = percolateQueries.put(entry.getKey(), entry.getValue());
         shardPercolateService.addedQuery(entry.getKey(), previousQuery, entry.getValue());
       }
     } finally {
       searcher.release();
     }
   } catch (Exception e) {
     throw new PercolatorException(
         shardId.index(), "failed to load queries from percolator index", e);
   }
 }
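
Each loaded query is put into the registry, and the previous mapping returned by put() is forwarded to the stats service so it can tell a fresh registration from a replacement. A small sketch of that idea with a plain ConcurrentHashMap; the println calls stand in for the real accounting:

  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;

  final class QueryRegistrySketch {
    private final ConcurrentMap<String, String> queries = new ConcurrentHashMap<>();

    void register(String id, String query) {
      // put() returns the previous value (or null), which is how the code
      // above lets shardPercolateService distinguish added from replaced.
      String previous = queries.put(id, query);
      if (previous == null) {
        System.out.println("added query " + id);
      } else {
        System.out.println("replaced query " + id);
      }
    }
  }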
Example #6
  private Fields generateTermVectorsFromDoc(TermVectorRequest request, boolean doAllFields)
      throws IOException {
    // parse the document, at the moment we do update the mapping, just like percolate
    ParsedDocument parsedDocument =
        parseDocument(indexShard.shardId().getIndex(), request.type(), request.doc());

    // select the right fields and generate term vectors
    ParseContext.Document doc = parsedDocument.rootDoc();
    Collection<String> seenFields = new HashSet<>();
    Collection<GetField> getFields = new HashSet<>();
    for (IndexableField field : doc.getFields()) {
      FieldMapper fieldMapper = indexShard.mapperService().smartNameFieldMapper(field.name());
      if (seenFields.contains(field.name())) {
        continue;
      } else {
        seenFields.add(field.name());
      }
      if (!isValidField(fieldMapper)) {
        continue;
      }
      if (request.selectedFields() == null
          && !doAllFields
          && !fieldMapper.fieldType().storeTermVectors()) {
        continue;
      }
      if (request.selectedFields() != null && !request.selectedFields().contains(field.name())) {
        continue;
      }
      String[] values = doc.getValues(field.name());
      getFields.add(new GetField(field.name(), Arrays.asList((Object[]) values)));
    }
    return generateTermVectors(getFields, request.offsets(), request.perFieldAnalyzer());
  }
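
The seenFields guard above uses contains() followed by add(). Since Set.add() already returns false when the element was present, the pair can be collapsed into a single call; a minimal sketch:

  import java.util.HashSet;
  import java.util.Set;

  final class VisitEachFieldOnce {
    static void visit(Iterable<String> fieldNames) {
      Set<String> seen = new HashSet<>();
      for (String name : fieldNames) {
        if (!seen.add(name)) {
          continue; // add() returned false: this field was already handled
        }
        System.out.println("processing " + name);
      }
    }
  }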
Example #7
 @Override
 protected ShardRefreshResponse shardOperation(ShardRefreshRequest request)
     throws ElasticSearchException {
   IndexShard indexShard =
       indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
   indexShard.refresh(new Engine.Refresh(request.waitForOperations()));
   return new ShardRefreshResponse(request.index(), request.shardId());
 }
Example #8
 @Override
 public void afterIndexShardCreated(IndexShard indexShard) {
   // add a listener that will update the relevant indices with loaded queries
   // based on changes done to the _percolate index
   if (indexShard.shardId().index().name().equals(INDEX_NAME)) {
     indexShard.addListener(realTimePercolatorOperationListener);
   }
 }
Example #9
  protected ExplainResponse shardOperation(ExplainRequest request, int shardId)
      throws ElasticSearchException {
    IndexService indexService = indicesService.indexService(request.index());
    IndexShard indexShard = indexService.shardSafe(shardId);
    Term uidTerm =
        new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.getType(), request.getId()));
    Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
    if (!result.exists()) {
      return new ExplainResponse(false);
    }

    SearchContext context =
        new SearchContext(
            0,
            new ShardSearchRequest()
                .types(new String[] {request.getType()})
                .filteringAliases(request.getFilteringAlias()),
            null,
            result.searcher(),
            indexService,
            indexShard,
            scriptService);
    SearchContext.setCurrent(context);

    try {
      context.parsedQuery(parseQuery(request, indexService));
      context.preProcess();
      int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().reader.docBase;
      Explanation explanation;
      if (context.rescore() != null) {
        RescoreSearchContext ctx = context.rescore();
        Rescorer rescorer = ctx.rescorer();
        explanation = rescorer.explain(topLevelDocId, context, ctx);
      } else {
        explanation = context.searcher().explain(context.query(), topLevelDocId);
      }
      if (request.getFields() != null) {
        if (request.getFields().length == 1 && "_source".equals(request.getFields()[0])) {
          request.setFields(null); // Load the _source field
        }
        // Advantage is that we're not opening a second searcher to retrieve the _source. Also
        // because we are working in the same searcher in engineGetResult we can be sure that a
        // doc isn't deleted between the initial get and this call.
        GetResult getResult =
            indexShard
                .getService()
                .get(result, request.getId(), request.getType(), request.getFields());
        return new ExplainResponse(true, explanation, getResult);
      } else {
        return new ExplainResponse(true, explanation);
      }
    } catch (IOException e) {
      throw new ElasticSearchException("Could not explain", e);
    } finally {
      context.release();
      SearchContext.removeCurrent();
    }
  }
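
SearchContext.setCurrent/removeCurrent is a thread-local pattern: the context is published for the duration of the request and must be cleared in finally. A generic sketch of the same discipline, assuming nothing about the Elasticsearch types:

  final class CurrentContextSketch {
    private static final ThreadLocal<Object> CURRENT = new ThreadLocal<>();

    static void runWith(Object context, Runnable body) {
      CURRENT.set(context);
      try {
        body.run();
      } finally {
        // always clear: on a pooled thread a stale value would otherwise be
        // visible to the next request handled by the same thread
        CURRENT.remove();
      }
    }
  }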
Example #10
 @Override
 protected ShardFlushResponse shardOperation(ShardFlushRequest request)
     throws ElasticSearchException {
   IndexShard indexShard =
       indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
   indexShard.flush(
       new Engine.Flush().refresh(request.refresh()).full(request.full()).force(request.force()));
   return new ShardFlushResponse(request.index(), request.shardId());
 }
Example #11
  @Override
  protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, int shardId)
      throws ElasticSearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(shardId);

    if (request.refresh() && !request.realtime()) {
      indexShard.refresh(
          new Engine.Refresh("refresh_flag_mget").force(TransportGetAction.REFRESH_FORCE));
    }

    MultiGetShardResponse response = new MultiGetShardResponse();
    for (int i = 0; i < request.locations.size(); i++) {
      String type = request.types.get(i);
      String id = request.ids.get(i);
      String[] fields = request.fields.get(i);

      long version = request.versions.get(i);
      VersionType versionType = request.versionTypes.get(i);
      if (versionType == null) {
        versionType = VersionType.INTERNAL;
      }

      FetchSourceContext fetchSourceContext = request.fetchSourceContexts.get(i);
      try {
        GetResult getResult =
            indexShard
                .getService()
                .get(
                    type, id, fields, request.realtime(), version, versionType, fetchSourceContext);
        response.add(request.locations.get(i), new GetResponse(getResult));
      } catch (Throwable t) {
        if (TransportActions.isShardNotAvailableException(t)) {
          throw (ElasticSearchException) t;
        } else {
          logger.debug(
              "[{}][{}] failed to execute multi_get for [{}]/[{}]",
              t,
              request.index(),
              shardId,
              type,
              id);
          response.add(
              request.locations.get(i),
              new MultiGetResponse.Failure(
                  request.index(), type, id, ExceptionsHelper.detailedMessage(t)));
        }
      }
    }

    return response;
  }
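
The loop isolates failures per item: a shard-not-available error aborts the whole batch (it is retryable at a higher level), while any other per-document failure is recorded and iteration continues. A minimal sketch of the per-item half of that policy; fetch is a hypothetical stand-in for the real get:

  import java.util.ArrayList;
  import java.util.List;
  import java.util.function.Function;

  final class PerItemFailures {
    // Applies fetch to every id; a failure on one id is recorded in the
    // result and iteration continues, as in the multi-get loop above.
    static List<String> fetchAll(List<String> ids, Function<String, String> fetch) {
      List<String> results = new ArrayList<>();
      for (String id : ids) {
        try {
          results.add(fetch.apply(id));
        } catch (RuntimeException e) {
          results.add("failure[" + id + "]: " + e.getMessage());
        }
      }
      return results;
    }
  }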
Example #12
  private ParsedDocument parseDocument(String index, String type, BytesReference doc) {
    MapperService mapperService = indexShard.mapperService();
    IndexService indexService = indexShard.indexService();

    // TODO: make parsing not dynamically create fields not in the original mapping
    Tuple<DocumentMapper, Boolean> docMapper = mapperService.documentMapperWithAutoCreate(type);
    ParsedDocument parsedDocument =
        docMapper.v1().parse(source(doc).type(type).flyweight(true)).setMappingsModified(docMapper);
    if (parsedDocument.mappingsModified()) {
      mappingUpdatedAction.updateMappingOnMaster(index, docMapper.v1(), indexService.indexUUID());
    }
    return parsedDocument;
  }
Example #13
 @Override
 public void afterIndexShardStarted(IndexShard indexShard) {
   if (indexShard.shardId().index().name().equals(INDEX_NAME)) {
     // percolator index has started, fetch what we can from it and initialize the indices
     // we have
     synchronized (mutex) {
       if (initialQueriesFetchDone) {
         return;
       }
       // we load the queries for all existing indices
       for (IndexService indexService : indicesService) {
         // only load queries for "this" index percolator service
         if (indexService.index().equals(index())) {
           logger.debug(
               "loading percolator queries for index [{}]...", indexService.index().name());
           loadQueries(indexService.index().name());
           logger.trace(
               "done loading percolator queries for index [{}]", indexService.index().name());
         }
       }
       initialQueriesFetchDone = true;
     }
   }
   if (!indexShard.shardId().index().equals(index())) {
     // not our index, bail
     return;
   }
   if (!percolatorAllocated()) {
     return;
   }
   // we are only interested when the first shard on this node has been created for an index
   // when it does, fetch the relevant queries if not fetched already
   IndexService indexService = indicesService.indexService(indexShard.shardId().index().name());
   if (indexService == null) {
     return;
   }
   if (indexService.numberOfShards() != 1) {
     return;
   }
   synchronized (mutex) {
     if (initialQueriesFetchDone) {
       return;
     }
     // we load queries for this index
     logger.debug("loading percolator queries for index [{}]...", indexService.index().name());
     loadQueries(index.name());
     logger.trace("done loading percolator queries for index [{}]", indexService.index().name());
     initialQueriesFetchDone = true;
   }
 }
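
Both branches guard the one-time fetch with the same check-then-set under a mutex: test the done flag, do the work, and only then set the flag, so a failed load can be retried by a later shard event. A stripped-down sketch of the idiom:

  final class OneTimeFetch {
    private final Object mutex = new Object();
    private boolean initialFetchDone;

    void fetchOnce(Runnable load) {
      synchronized (mutex) {
        if (initialFetchDone) {
          return; // an earlier shard event already did the work
        }
        load.run();
        // set only after load succeeds, so a failed load can be retried
        initialFetchDone = true;
      }
    }
  }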
Example #14
 @Override
 protected ShardOptimizeResponse shardOperation(ShardOptimizeRequest request)
     throws ElasticsearchException {
   IndexShard indexShard =
       indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
   indexShard.optimize(
       new Engine.Optimize()
           .waitForMerge(request.waitForMerge())
           .maxNumSegments(request.maxNumSegments())
           .onlyExpungeDeletes(request.onlyExpungeDeletes())
           .flush(request.flush())
           .force(request.force()));
   return new ShardOptimizeResponse(request.index(), request.shardId());
 }
Example #15
  private Fields addGeneratedTermVectors(
      Engine.GetResult get,
      Fields termVectorsByField,
      TermVectorRequest request,
      Set<String> selectedFields)
      throws IOException {
    /* only keep valid fields */
    Set<String> validFields = new HashSet<>();
    for (String field : selectedFields) {
      FieldMapper fieldMapper = indexShard.mapperService().smartNameFieldMapper(field);
      if (!isValidField(fieldMapper)) {
        continue;
      }
      // already retrieved, only if the analyzer hasn't been overridden at the field
      if (fieldMapper.fieldType().storeTermVectors()
          && (request.perFieldAnalyzer() == null
              || !request.perFieldAnalyzer().containsKey(field))) {
        continue;
      }
      validFields.add(field);
    }

    if (validFields.isEmpty()) {
      return termVectorsByField;
    }

    /* generate term vectors from fetched document fields */
    GetResult getResult =
        indexShard
            .getService()
            .get(
                get,
                request.id(),
                request.type(),
                validFields.toArray(Strings.EMPTY_ARRAY),
                null,
                false);
    Fields generatedTermVectors =
        generateTermVectors(
            getResult.getFields().values(), request.offsets(), request.perFieldAnalyzer());

    /* merge with existing Fields */
    if (termVectorsByField == null) {
      return generatedTermVectors;
    } else {
      return mergeFields(termVectorsByField, generatedTermVectors);
    }
  }
Example #16
  public void close() {
    this.indicesService.indicesLifecycle().removeListener(shardLifecycleListener);

    // clean up any index that has registered real-time percolation from the
    // percolator shards allocated on this node
    IndexService percolatorIndexService = percolatorIndexService();
    if (percolatorIndexService != null) {
      for (IndexShard indexShard : percolatorIndexService) {
        try {
          indexShard.removeListener(realTimePercolatorOperationListener);
        } catch (Exception e) {
          // ignore
        }
      }
    }
  }
Example #17
 private void handleFieldWildcards(TermVectorRequest request) {
   Set<String> fieldNames = new HashSet<>();
   for (String pattern : request.selectedFields()) {
     fieldNames.addAll(indexShard.mapperService().simpleMatchToIndexNames(pattern));
   }
   request.selectedFields(fieldNames.toArray(Strings.EMPTY_ARRAY));
 }
Example #18
 private void loadQueries(String indexName) {
   IndexService indexService = percolatorIndexService();
   IndexShard shard = indexService.shard(0);
   Engine.Searcher searcher = shard.searcher();
   try {
     // create a query to fetch all queries that are registered under the index name
     // (which is the type in the percolator)
     Query query = new DeletionAwareConstantScoreQuery(indexQueriesFilter(indexName));
     QueriesLoaderCollector queries = new QueriesLoaderCollector();
     searcher.searcher().search(query, queries);
     percolator.addQueries(queries.queries());
   } catch (IOException e) {
      throw new PercolatorException(index, "failed to load queries from percolator index", e);
   } finally {
     searcher.release();
   }
 }
Example #19
  @Override
  protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
    IndexShard indexShard =
        indicesService
            .indexServiceSafe(shardRequest.request.index())
            .shardSafe(shardRequest.shardId);
    final IngestShardRequest request = shardRequest.request;
    int size = request.items().size();
    for (int i = 0; i < size; i++) {
      IngestItemRequest item = request.items().get(i);
      if (item == null) {
        continue;
      }
      if (item.request() instanceof IndexRequest) {
        IndexRequest indexRequest = (IndexRequest) item.request();
        try {
          SourceToParse sourceToParse =
              SourceToParse.source(SourceToParse.Origin.REPLICA, indexRequest.source())
                  .type(indexRequest.type())
                  .id(indexRequest.id())
                  .routing(indexRequest.routing())
                  .parent(indexRequest.parent())
                  .timestamp(indexRequest.timestamp())
                  .ttl(indexRequest.ttl());

          if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
            Engine.Index index =
                indexShard
                    .prepareIndex(sourceToParse)
                    .version(indexRequest.version())
                    .origin(Engine.Operation.Origin.REPLICA);
            indexShard.index(index);
          } else {
            Engine.Create create =
                indexShard
                    .prepareCreate(sourceToParse)
                    .version(indexRequest.version())
                    .origin(Engine.Operation.Origin.REPLICA);
            indexShard.create(create);
          }
        } catch (Exception e) {
          // ignore, we are on backup
        }
      } else if (item.request() instanceof DeleteRequest) {
        DeleteRequest deleteRequest = (DeleteRequest) item.request();
        try {
          Engine.Delete delete =
              indexShard
                  .prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version())
                  .origin(Engine.Operation.Origin.REPLICA);
          indexShard.delete(delete);
        } catch (Exception e) {
          // ignore, we are on backup
        }
      }
    }
  }
Example #20
  @Override
  protected MultiTermVectorsShardResponse shardOperation(
      MultiTermVectorsShardRequest request, int shardId) throws ElasticsearchException {

    MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
    for (int i = 0; i < request.locations.size(); i++) {
      TermVectorRequest termVectorRequest = request.requests.get(i);

      try {
        IndexService indexService = indicesService.indexServiceSafe(request.index());
        IndexShard indexShard = indexService.shardSafe(shardId);
        TermVectorResponse termVectorResponse =
            indexShard.termVectorService().getTermVector(termVectorRequest);
        response.add(request.locations.get(i), termVectorResponse);
      } catch (Throwable t) {
        if (TransportActions.isShardNotAvailableException(t)) {
          throw (ElasticsearchException) t;
        } else {
          logger.debug(
              "[{}][{}] failed to execute multi term vectors for [{}]/[{}]",
              t,
              request.index(),
              shardId,
              termVectorRequest.type(),
              termVectorRequest.id());
          response.add(
              request.locations.get(i),
              new MultiTermVectorsResponse.Failure(
                  request.index(),
                  termVectorRequest.type(),
                  termVectorRequest.id(),
                  ExceptionsHelper.detailedMessage(t)));
        }
      }
    }

    return response;
  }
Example #21
  @Override
  protected GetResponse shardOperation(GetRequest request, int shardId)
      throws ElasticsearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(shardId);

    if (request.refresh() && !request.realtime()) {
      indexShard.refresh(new Engine.Refresh("refresh_flag_get").force(REFRESH_FORCE));
    }

    GetResult result =
        indexShard
            .getService()
            .get(
                request.type(),
                request.id(),
                request.fields(),
                request.realtime(),
                request.version(),
                request.versionType(),
                request.fetchSourceContext());
    return new GetResponse(result);
  }
Example #22
 private Analyzer getAnalyzerAtField(
     String field, @Nullable Map<String, String> perFieldAnalyzer) {
   MapperService mapperService = indexShard.mapperService();
   Analyzer analyzer;
   if (perFieldAnalyzer != null && perFieldAnalyzer.containsKey(field)) {
     analyzer = mapperService.analysisService().analyzer(perFieldAnalyzer.get(field).toString());
   } else {
     analyzer = mapperService.smartNameFieldMapper(field).indexAnalyzer();
   }
   if (analyzer == null) {
     analyzer = mapperService.analysisService().defaultIndexAnalyzer();
   }
   return analyzer;
 }
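
The resolution order is: per-field analyzer override, then the field mapper's own index analyzer, then the default index analyzer. A minimal sketch of that fallback chain with hypothetical names (the sketch also guards the middle step against a null result, which the code above assumes is non-null):

  import java.util.Map;
  import java.util.function.Function;

  final class AnalyzerFallbackSketch {
    static String resolve(
        String field,
        Map<String, String> perFieldOverride, // may be null
        Function<String, String> fieldAnalyzer, // may return null
        String defaultAnalyzer) {
      if (perFieldOverride != null && perFieldOverride.containsKey(field)) {
        return perFieldOverride.get(field);
      }
      String analyzer = fieldAnalyzer.apply(field);
      // fall back to the index default when nothing more specific is set
      return analyzer != null ? analyzer : defaultAnalyzer;
    }
  }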
Example #23
 @Override
 public void indexShardStateChanged(
     IndexShard indexShard,
     @Nullable IndexShardState previousState,
     IndexShardState newState,
     @Nullable String reason) {
   List<IndexShardState> shardStates =
       this.shardStates.putIfAbsent(
           indexShard.shardId(),
           new CopyOnWriteArrayList<IndexShardState>(new IndexShardState[] {newState}));
   if (shardStates != null) {
     shardStates.add(newState);
   }
 }
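
The putIfAbsent dance above allocates a CopyOnWriteArrayList up front even when an entry already exists, then branches on the return value. On Java 8+ the same logic is usually written with computeIfAbsent, which allocates only on a miss; a sketch:

  import java.util.List;
  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;
  import java.util.concurrent.CopyOnWriteArrayList;

  final class ShardStateLog {
    private final ConcurrentMap<String, List<String>> states = new ConcurrentHashMap<>();

    void record(String shardId, String newState) {
      // computeIfAbsent allocates the list only when the key is missing,
      // avoiding the speculative allocation of the putIfAbsent version
      states.computeIfAbsent(shardId, id -> new CopyOnWriteArrayList<>()).add(newState);
    }
  }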
Example #24
 @Override
 protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
   IndexShard indexShard =
       indicesService
           .indexServiceSafe(shardRequest.shardId.getIndex())
           .shardSafe(shardRequest.shardId.id());
   IndexRequest request = shardRequest.request;
   SourceToParse sourceToParse =
       SourceToParse.source(SourceToParse.Origin.REPLICA, request.source())
           .type(request.type())
           .id(request.id())
           .routing(request.routing())
           .parent(request.parent())
           .timestamp(request.timestamp())
           .ttl(request.ttl());
   if (request.opType() == IndexRequest.OpType.INDEX) {
     Engine.Index index =
         indexShard.prepareIndex(
             sourceToParse,
             request.version(),
             request.versionType(),
             Engine.Operation.Origin.REPLICA,
             request.canHaveDuplicates());
     indexShard.index(index);
   } else {
     Engine.Create create =
         indexShard.prepareCreate(
             sourceToParse,
             request.version(),
             request.versionType(),
             Engine.Operation.Origin.REPLICA,
             request.canHaveDuplicates(),
             request.autoGeneratedId());
     indexShard.create(create);
   }
   if (request.refresh()) {
     try {
       indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
     } catch (Exception e) {
       // ignore
     }
   }
 }
Example #25
  @Override
  protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId)
      throws ElasticsearchException {
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    IndexShard indexShard = indexService.shardSafe(shardId.id());
    Term uidTerm =
        new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
    Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
    if (!result.exists()) {
      return new ExplainResponse(shardId.getIndex(), request.type(), request.id(), false);
    }

    SearchContext context =
        new DefaultSearchContext(
            0,
            new ShardSearchRequest(request)
                .types(new String[] {request.type()})
                .filteringAliases(request.filteringAlias())
                .nowInMillis(request.nowInMillis),
            null,
            result.searcher(),
            indexService,
            indexShard,
            scriptService,
            pageCacheRecycler,
            bigArrays,
            threadPool.estimatedTimeInMillisCounter());
    SearchContext.setCurrent(context);

    try {
      context.parsedQuery(indexService.queryParserService().parseQuery(request.source()));
      context.preProcess();
      int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
      Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
      for (RescoreSearchContext ctx : context.rescore()) {
        Rescorer rescorer = ctx.rescorer();
        explanation = rescorer.explain(topLevelDocId, context, ctx, explanation);
      }
      if (request.fields() != null
          || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
        // Advantage is that we're not opening a second searcher to retrieve the _source. Also
        // because we are working in the same searcher in engineGetResult we can be sure that a
        // doc isn't deleted between the initial get and this call.
        GetResult getResult =
            indexShard
                .getService()
                .get(
                    result,
                    request.id(),
                    request.type(),
                    request.fields(),
                    request.fetchSourceContext(),
                    false);
        return new ExplainResponse(
            shardId.getIndex(), request.type(), request.id(), true, explanation, getResult);
      } else {
        return new ExplainResponse(
            shardId.getIndex(), request.type(), request.id(), true, explanation);
      }
    } catch (IOException e) {
      throw new ElasticsearchException("Could not explain", e);
    } finally {
      context.close();
      SearchContext.removeCurrent();
    }
  }
Example #26
  public TermVectorResponse getTermVector(TermVectorRequest request, String concreteIndex) {
    final Engine.Searcher searcher = indexShard.acquireSearcher("term_vector");
    IndexReader topLevelReader = searcher.reader();
    final TermVectorResponse termVectorResponse =
        new TermVectorResponse(concreteIndex, request.type(), request.id());

    final Term uidTerm =
        new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
    Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), uidTerm));
    boolean docFromTranslog = get.source() != null;
    AggregatedDfs dfs = null;

    /* fetched from translog is treated as an artificial document */
    if (docFromTranslog) {
      request.doc(get.source().source, false);
      termVectorResponse.setDocVersion(get.version());
    }

    /* handle potential wildcards in fields */
    if (request.selectedFields() != null) {
      handleFieldWildcards(request);
    }

    try {
      Fields topLevelFields = MultiFields.getFields(topLevelReader);
      Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
      /* from an artificial document */
      if (request.doc() != null) {
        Fields termVectorsByField = generateTermVectorsFromDoc(request, !docFromTranslog);
        // if no document indexed in shard, take the queried document itself for stats
        if (topLevelFields == null) {
          topLevelFields = termVectorsByField;
        }
        if (termVectorsByField != null && useDfs(request)) {
          dfs = getAggregatedDfs(termVectorsByField, request);
        }
        termVectorResponse.setFields(
            termVectorsByField, request.selectedFields(), request.getFlags(), topLevelFields, dfs);
        termVectorResponse.setExists(true);
        termVectorResponse.setArtificial(!docFromTranslog);
      }
      /* or from an existing document */
      else if (docIdAndVersion != null) {
        // fields with stored term vectors
        Fields termVectorsByField =
            docIdAndVersion.context.reader().getTermVectors(docIdAndVersion.docId);
        Set<String> selectedFields = request.selectedFields();
        // generate tvs for fields where analyzer is overridden
        if (selectedFields == null && request.perFieldAnalyzer() != null) {
          selectedFields = getFieldsToGenerate(request.perFieldAnalyzer(), termVectorsByField);
        }
        // fields without term vectors
        if (selectedFields != null) {
          termVectorsByField =
              addGeneratedTermVectors(get, termVectorsByField, request, selectedFields);
        }
        if (termVectorsByField != null && useDfs(request)) {
          dfs = getAggregatedDfs(termVectorsByField, request);
        }
        termVectorResponse.setFields(
            termVectorsByField, request.selectedFields(), request.getFlags(), topLevelFields, dfs);
        termVectorResponse.setDocVersion(docIdAndVersion.version);
        termVectorResponse.setExists(true);
      } else {
        termVectorResponse.setExists(false);
      }
    } catch (Throwable ex) {
      throw new ElasticsearchException("failed to execute term vector request", ex);
    } finally {
      searcher.close();
      get.release();
    }
    return termVectorResponse;
  }
Example #27
  @Override
  protected PrimaryResponse<IndexResponse, IndexRequest> shardOperationOnPrimary(
      ClusterState clusterState, PrimaryOperationRequest shardRequest) {
    final IndexRequest request = shardRequest.request;

    // validate, if routing is required, that we got routing
    IndexMetaData indexMetaData = clusterState.metaData().index(shardRequest.shardId.getIndex());
    MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
    if (mappingMd != null && mappingMd.routing().required()) {
      if (request.routing() == null) {
        throw new RoutingMissingException(
            shardRequest.shardId.getIndex(), request.type(), request.id());
      }
    }

    IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
    IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
    SourceToParse sourceToParse =
        SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source())
            .type(request.type())
            .id(request.id())
            .routing(request.routing())
            .parent(request.parent())
            .timestamp(request.timestamp())
            .ttl(request.ttl());
    long version;
    boolean created;
    Engine.IndexingOperation op;
    if (request.opType() == IndexRequest.OpType.INDEX) {
      Engine.Index index =
          indexShard.prepareIndex(
              sourceToParse,
              request.version(),
              request.versionType(),
              Engine.Operation.Origin.PRIMARY,
              request.canHaveDuplicates());
      if (index.parsedDoc().mappingsModified()) {
        mappingUpdatedAction.updateMappingOnMaster(
            shardRequest.shardId.getIndex(), index.docMapper(), indexService.indexUUID());
      }
      indexShard.index(index);
      version = index.version();
      op = index;
      created = index.created();
    } else {
      Engine.Create create =
          indexShard.prepareCreate(
              sourceToParse,
              request.version(),
              request.versionType(),
              Engine.Operation.Origin.PRIMARY,
              request.canHaveDuplicates(),
              request.autoGeneratedId());
      if (create.parsedDoc().mappingsModified()) {
        mappingUpdatedAction.updateMappingOnMaster(
            shardRequest.shardId.getIndex(), create.docMapper(), indexService.indexUUID());
      }
      indexShard.create(create);
      version = create.version();
      op = create;
      created = true;
    }
    if (request.refresh()) {
      try {
        indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
      } catch (Throwable e) {
        // ignore
      }
    }

    // update the version on the request, so it will be used for the replicas
    request.version(version);
    request.versionType(request.versionType().versionTypeForReplicationAndRecovery());

    assert request.versionType().validateVersionForWrites(request.version());

    IndexResponse response =
        new IndexResponse(
            shardRequest.shardId.getIndex(), request.type(), request.id(), version, created);
    return new PrimaryResponse<>(shardRequest.request, response, op);
  }
Example #28
  private void deleteShard(
      int shardId, boolean delete, boolean snapshotGateway, boolean deleteGateway)
      throws ElasticSearchException {
    Injector shardInjector;
    IndexShard indexShard;
    synchronized (this) {
      Map<Integer, Injector> tmpShardInjectors = newHashMap(shardsInjectors);
      shardInjector = tmpShardInjectors.remove(shardId);
      if (shardInjector == null) {
        if (!delete) {
          return;
        }
        throw new IndexShardMissingException(new ShardId(index, shardId));
      }
      shardsInjectors = ImmutableMap.copyOf(tmpShardInjectors);
      if (delete) {
        logger.debug("deleting shard_id [{}]", shardId);
      }

      Map<Integer, IndexShard> tmpShardsMap = newHashMap(shards);
      indexShard = tmpShardsMap.remove(shardId);
      shards = ImmutableMap.copyOf(tmpShardsMap);
    }

    ShardId sId = new ShardId(index, shardId);

    indicesLifecycle.beforeIndexShardClosed(sId, indexShard, delete);

    for (Class<? extends CloseableIndexComponent> closeable : pluginsService.shardServices()) {
      try {
        shardInjector.getInstance(closeable).close(delete);
      } catch (Exception e) {
        logger.debug("failed to clean plugin shard service [{}]", e, closeable);
      }
    }

    try {
      // now we can close the translog service; we need to close it before we close the shard
      shardInjector.getInstance(TranslogService.class).close();
    } catch (Exception e) {
      // ignore
    }

    // close shard actions
    if (indexShard != null) {
      shardInjector.getInstance(IndexShardManagement.class).close();
    }

    // this logic is tricky, we want to close the engine so we rollback the changes done to it
    // and close the shard so no operations are allowed to it
    if (indexShard != null) {
      indexShard.close();
    }
    try {
      shardInjector.getInstance(Engine.class).close();
    } catch (Exception e) {
      // ignore
    }

    try {
      // now, we can snapshot to the gateway, it will be only the translog
      if (snapshotGateway) {
        shardInjector.getInstance(IndexShardGatewayService.class).snapshotOnClose();
      }
    } catch (Exception e) {
      // ignore
    }
    try {
      shardInjector.getInstance(IndexShardGatewayService.class).close(deleteGateway);
    } catch (Exception e) {
      // ignore
    }
    try {
      // now we can close the translog
      shardInjector.getInstance(Translog.class).close(delete);
    } catch (Exception e) {
      // ignore
    }

    // call this before we close the store, so we can release resources for it
    indicesLifecycle.afterIndexShardClosed(sId, delete);

    // if we delete or have no gateway or the store is not persistent, clean the store...
    Store store = shardInjector.getInstance(Store.class);
    if (delete || indexGateway.type().equals(NoneGateway.TYPE) || !indexStore.persistent()) {
      try {
        store.fullDelete();
      } catch (IOException e) {
        logger.warn("failed to clean store on shard deletion", e);
      }
    }
    // and close it
    try {
      store.close();
    } catch (IOException e) {
      logger.warn("failed to close store on shard deletion", e);
    }

    Injectors.close(injector);

    // delete the shard location if needed
    if (delete || indexGateway.type().equals(NoneGateway.TYPE)) {
      FileSystemUtils.deleteRecursively(nodeEnv.shardLocation(sId));
    }
  }
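
Every shutdown step in deleteShard is wrapped in its own try/catch so a failure in one component never prevents the later steps (translog, gateway, store) from running. A generic sketch of that close-everything-in-order policy:

  import java.util.List;

  final class OrderedShutdown {
    // Closes each component in order; a failure in one step is logged and
    // must not stop the remaining steps, as in deleteShard above.
    static void closeAll(List<AutoCloseable> components) {
      for (AutoCloseable component : components) {
        try {
          component.close();
        } catch (Exception e) {
          System.err.println("failed to close " + component + ": " + e);
        }
      }
    }
  }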
Example #29
 private boolean hasPercolatorType(IndexShard indexShard) {
   ShardId otherShardId = indexShard.shardId();
   return shardId.equals(otherShardId) && mapperService.hasMapping(PercolatorService.TYPE_NAME);
 }
Example #30
  public PercolateShardResponse percolate(PercolateShardRequest request) {
    IndexService percolateIndexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = percolateIndexService.shardSafe(request.shardId());

    ShardPercolateService shardPercolateService = indexShard.shardPercolateService();
    shardPercolateService.prePercolate();
    long startTime = System.nanoTime();

    SearchShardTarget searchShardTarget =
        new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
    final PercolateContext context =
        new PercolateContext(
            request,
            searchShardTarget,
            indexShard,
            percolateIndexService,
            cacheRecycler,
            pageCacheRecycler,
            bigArrays,
            scriptService);
    try {
      ParsedDocument parsedDocument = parseRequest(percolateIndexService, request, context);
      if (context.percolateQueries().isEmpty()) {
        return new PercolateShardResponse(context, request.index(), request.shardId());
      }

      if (request.docSource() != null && request.docSource().length() != 0) {
        parsedDocument =
            parseFetchedDoc(
                context, request.docSource(), percolateIndexService, request.documentType());
      } else if (parsedDocument == null) {
        throw new ElasticsearchIllegalArgumentException("Nothing to percolate");
      }

      if (context.percolateQuery() == null
          && (context.trackScores()
              || context.doSort
              || context.facets() != null
              || context.aggregations() != null)) {
        context.percolateQuery(new MatchAllDocsQuery());
      }

      if (context.doSort && !context.limit) {
        throw new ElasticsearchIllegalArgumentException("Can't sort if size isn't specified");
      }

      if (context.highlight() != null && !context.limit) {
        throw new ElasticsearchIllegalArgumentException("Can't highlight if size isn't specified");
      }

      if (context.size() < 0) {
        context.size(0);
      }

      // parse the source either into one MemoryIndex, if it is a single document,
      // or into multiple docs if nested
      PercolatorIndex percolatorIndex;
      if (indexShard.mapperService().documentMapper(request.documentType()).hasNestedObjects()) {
        percolatorIndex = multi;
      } else {
        percolatorIndex = single;
      }

      PercolatorType action;
      if (request.onlyCount()) {
        action = context.percolateQuery() != null ? queryCountPercolator : countPercolator;
      } else {
        if (context.doSort) {
          action = topMatchingPercolator;
        } else if (context.percolateQuery() != null) {
          action = context.trackScores() ? scoringPercolator : queryPercolator;
        } else {
          action = matchPercolator;
        }
      }
      context.percolatorTypeId = action.id();

      percolatorIndex.prepare(context, parsedDocument);

      indexShard.readAllowed();
      return action.doPercolate(request, context);
    } finally {
      context.release();
      shardPercolateService.postPercolate(System.nanoTime() - startTime);
    }
  }
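
prePercolate/postPercolate bracket the operation, and the elapsed time is recorded in the finally block so the stats are updated even when percolation throws. A small generic sketch of that timing pattern:

  import java.util.function.LongConsumer;
  import java.util.function.Supplier;

  final class TimedOperation {
    static <T> T time(Supplier<T> operation, LongConsumer recordNanos) {
      long start = System.nanoTime();
      try {
        return operation.get();
      } finally {
        // recorded even when the operation throws, matching postPercolate
        // in the finally block above
        recordNanos.accept(System.nanoTime() - start);
      }
    }
  }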