Example #1
0
 /**
  * Creates the per-shard execution context for a percolate request.
  *
  * <p>Acquires an engine searcher for the lifetime of the context; the caller is responsible for
  * closing this context so that the searcher is released.
  *
  * @param request the shard-level percolate request (supplies document type, shard count, start time)
  * @param searchShardTarget identifies the node/index/shard this context runs against
  * @param aliasFilter filter derived from index aliases, or null — TODO confirm null is allowed here
  */
 public PercolateContext(
     PercolateShardRequest request,
     SearchShardTarget searchShardTarget,
     IndexShard indexShard,
     IndexService indexService,
     PageCacheRecycler pageCacheRecycler,
     BigArrays bigArrays,
     ScriptService scriptService,
     Query aliasFilter,
     ParseFieldMatcher parseFieldMatcher) {
   super(parseFieldMatcher, request);
   this.indexShard = indexShard;
   this.indexService = indexService;
   this.fieldDataService = indexService.fieldData();
   this.searchShardTarget = searchShardTarget;
   this.percolateQueryRegistry = indexShard.percolateRegistry();
   // A percolate request targets exactly one document type.
   this.types = new String[] {request.documentType()};
   this.pageCacheRecycler = pageCacheRecycler;
   // Use the circuit-breaking view so allocations are accounted against the breaker.
   this.bigArrays = bigArrays.withCircuitBreaking();
   this.querySearchResult = new QuerySearchResult(0, searchShardTarget);
   // Acquired here, must be released when this context is closed.
   this.engineSearcher = indexShard.acquireSearcher("percolate");
   // Note: depends on engineSearcher being assigned on the previous line.
   this.searcher = new ContextIndexSearcher(this, engineSearcher);
   this.scriptService = scriptService;
   this.numberOfShards = request.getNumberOfShards();
   this.aliasFilter = aliasFilter;
   this.startTime = request.getStartTime();
 }
  /**
   * Validates (and optionally explains/rewrites) a query on a single shard.
   *
   * <p>Parse/shard-level query errors are reported as an invalid response rather than thrown;
   * the validation error message is carried back in the response.
   *
   * @param request the shard-level validate request
   * @return response carrying validity, optional explanation, and optional error text
   */
  @Override
  protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
    IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    IndexShard indexShard = indexService.getShard(request.shardId().id());
    final QueryShardContext queryShardContext = indexService.newQueryShardContext();
    queryShardContext.setTypes(request.types());

    boolean valid;
    String explanation = null;
    String error = null;
    Engine.Searcher searcher = indexShard.acquireSearcher("validate_query");

    // Guard the context construction: the acquired searcher is only released through
    // searchContext.close(), so a constructor failure would otherwise leak it.
    DefaultSearchContext searchContext;
    try {
      searchContext =
          new DefaultSearchContext(
              0,
              new ShardSearchLocalRequest(
                  request.types(), request.nowInMillis(), request.filteringAliases()),
              null,
              searcher,
              indexService,
              indexShard,
              scriptService,
              pageCacheRecycler,
              bigArrays,
              threadPool.estimatedTimeInMillisCounter(),
              parseFieldMatcher,
              SearchService.NO_TIMEOUT);
    } catch (RuntimeException e) {
      searcher.close();
      throw e;
    }
    SearchContext.setCurrent(searchContext);
    try {
      searchContext.parsedQuery(queryShardContext.toQuery(request.query()));
      searchContext.preProcess();

      valid = true;
      if (request.rewrite()) {
        explanation = getRewrittenQuery(searcher.searcher(), searchContext.query());
      } else if (request.explain()) {
        explanation = searchContext.filteredQuery().query().toString();
      }
    } catch (QueryShardException | ParsingException e) {
      // Query was syntactically/semantically invalid: report, don't propagate.
      valid = false;
      error = e.getDetailedMessage();
    } catch (AssertionError | IOException e) {
      valid = false;
      error = e.getMessage();
    } finally {
      // Closing the context also releases the engine searcher.
      searchContext.close();
      SearchContext.removeCurrent();
    }

    return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
  }
 /**
  * Collects the UIDs of all live (non-deleted) documents currently visible in the given shard.
  *
  * @param shard the shard to inspect; it is refreshed first so recent writes are visible
  * @return the set of UIDs of every live document
  * @throws IOException if reading stored fields fails
  */
 private Set<Uid> getShardDocUIDs(final IndexShard shard) throws IOException {
   // Make recently indexed documents visible to the searcher.
   shard.refresh("get_uids");
   try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
     final Set<Uid> uids = new HashSet<>();
     for (LeafReaderContext leaf : searcher.reader().leaves()) {
       final LeafReader leafReader = leaf.reader();
       final Bits live = leafReader.getLiveDocs();
       final int maxDoc = leafReader.maxDoc();
       for (int docId = 0; docId < maxDoc; docId++) {
         // A null liveDocs bitset means the segment has no deletions.
         if (live != null && !live.get(docId)) {
           continue;
         }
         Document stored = leafReader.document(docId, Collections.singleton(UidFieldMapper.NAME));
         uids.add(Uid.createUid(stored.get(UidFieldMapper.NAME)));
       }
     }
     return uids;
   }
 }
 /**
  * Executes a suggest request on a single shard.
  *
  * <p>An empty or absent suggest source yields an empty {@link Suggest} response. Any failure is
  * wrapped in an {@link ElasticsearchException}; the searcher and parser are always released and
  * suggest timing stats are always recorded.
  *
  * @param request the shard-level suggest request carrying the suggest source bytes
  * @return the shard response with the suggest result
  */
 @Override
 protected ShardSuggestResponse shardOperation(ShardSuggestRequest request)
     throws ElasticsearchException {
   IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
   IndexShard indexShard = indexService.shardSafe(request.shardId().id());
   ShardSuggestService shardSuggestService = indexShard.shardSuggestService();
   shardSuggestService.preSuggest();
   long startTime = System.nanoTime();
   // Acquire the searcher immediately before the try/finally so it cannot leak if any of the
   // preceding calls throw (it is released in the finally block below).
   final Engine.Searcher searcher = indexShard.acquireSearcher("suggest");
   XContentParser parser = null;
   try {
     BytesReference suggest = request.suggest();
     if (suggest != null && suggest.length() > 0) {
       parser = XContentFactory.xContent(suggest).createParser(suggest);
       if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
         throw new ElasticsearchIllegalArgumentException("suggest content missing");
       }
       final SuggestionSearchContext context =
           suggestPhase
               .parseElement()
               .parseInternal(
                   parser,
                   indexService.mapperService(),
                   request.shardId().getIndex(),
                   request.shardId().id());
       final Suggest result = suggestPhase.execute(context, searcher.reader());
       return new ShardSuggestResponse(request.shardId(), result);
     }
     // No suggest source: answer with an empty result rather than failing.
     return new ShardSuggestResponse(request.shardId(), new Suggest());
   } catch (Throwable ex) {
     throw new ElasticsearchException("failed to execute suggest", ex);
   } finally {
     searcher.close();
     if (parser != null) {
       parser.close();
     }
     // Record suggest latency regardless of outcome.
     shardSuggestService.postSuggest(System.nanoTime() - startTime);
   }
 }
  /**
   * Best-effort purge of expired (TTL-elapsed) documents from each of the given shards.
   *
   * <p>For every shard, searches for documents whose TTL value is at or before "now" and issues
   * versioned deletes for them in bulk. Failures are logged and do not abort the remaining shards.
   *
   * @param shardsToPurge the shards to scan for expired documents
   */
  private void purgeShards(List<IndexShard> shardsToPurge) {
    for (IndexShard shard : shardsToPurge) {
      // Match all docs whose TTL field value lies in (-inf, now]: lower bound open, upper closed.
      Query expiredQuery =
          shard
              .mapperService()
              .fullName(TTLFieldMapper.NAME)
              .rangeQuery(null, System.currentTimeMillis(), false, true);
      Engine.Searcher ttlSearcher = shard.acquireSearcher("indices_ttl");
      try {
        logger.debug(
            "[{}][{}] purging shard",
            shard.routingEntry().index(),
            shard.routingEntry().id());
        ExpiredDocsCollector collector = new ExpiredDocsCollector();
        ttlSearcher.searcher().search(expiredQuery, collector);

        BulkRequest bulk = new BulkRequest();
        for (DocToPurge expired : collector.getDocsToPurge()) {

          // Versioned delete so a concurrent reindex of the doc is not lost.
          bulk.add(
              new DeleteRequest()
                  .index(shard.routingEntry().getIndexName())
                  .type(expired.type)
                  .id(expired.id)
                  .version(expired.version)
                  .routing(expired.routing));
          bulk = processBulkIfNeeded(bulk, false);
        }
        // Flush whatever remains in the final partial bulk.
        processBulkIfNeeded(bulk, true);
      } catch (Exception e) {
        // Best-effort: log and continue with the next shard.
        logger.warn("failed to purge", e);
      } finally {
        ttlSearcher.close();
      }
    }
  }
  /**
   * Executes a count request on a single shard, optionally with a minimum score filter and an
   * early-termination document threshold.
   *
   * @param request the shard-level count request
   * @return the shard count plus whether counting terminated early
   */
  @Override
  protected ShardCountResponse shardOperation(ShardCountRequest request)
      throws ElasticsearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    IndexShard indexShard = indexService.shardSafe(request.shardId().id());

    SearchShardTarget shardTarget =
        new SearchShardTarget(
            clusterService.localNode().id(), request.shardId().getIndex(), request.shardId().id());
    // Acquire the searcher into a local and guard the context construction: the searcher is only
    // released via context.close(), so a constructor failure would otherwise leak it.
    Engine.Searcher countSearcher = indexShard.acquireSearcher("count");
    SearchContext context;
    try {
      context =
          new DefaultSearchContext(
              0,
              new ShardSearchLocalRequest(
                  request.types(), request.nowInMillis(), request.filteringAliases()),
              shardTarget,
              countSearcher,
              indexService,
              indexShard,
              scriptService,
              cacheRecycler,
              pageCacheRecycler,
              bigArrays,
              threadPool.estimatedTimeInMillisCounter());
    } catch (RuntimeException e) {
      countSearcher.close();
      throw e;
    }
    SearchContext.setCurrent(context);

    try {
      // TODO: min score should move to be "null" as a value that is not initialized...
      if (request.minScore() != -1) {
        context.minimumScore(request.minScore());
      }
      BytesReference source = request.querySource();
      if (source != null && source.length() > 0) {
        try {
          // Types must be visible to the parser for the duration of the parse only.
          QueryParseContext.setTypes(request.types());
          context.parsedQuery(indexService.queryParserService().parseQuery(source));
        } finally {
          QueryParseContext.removeTypes();
        }
      }
      final boolean hasTerminateAfterCount = request.terminateAfter() != DEFAULT_TERMINATE_AFTER;
      boolean terminatedEarly = false;
      context.preProcess();
      try {
        long count;
        if (hasTerminateAfterCount) {
          // Stop counting once the terminate-after threshold is reached.
          final Lucene.EarlyTerminatingCollector countCollector =
              Lucene.createCountBasedEarlyTerminatingCollector(request.terminateAfter());
          terminatedEarly =
              Lucene.countWithEarlyTermination(context.searcher(), context.query(), countCollector);
          count = countCollector.count();
        } else {
          count = Lucene.count(context.searcher(), context.query());
        }
        return new ShardCountResponse(request.shardId(), count, terminatedEarly);
      } catch (Exception e) {
        throw new QueryPhaseExecutionException(context, "failed to execute count", e);
      }
    } finally {
      // this will also release the index searcher
      context.close();
      SearchContext.removeCurrent();
    }
  }
Example #7
0
  /**
   * Creates and fully initializes a {@link SearchContext} for the given shard search request.
   *
   * <p>If {@code searcher} is null, an engine searcher is acquired here and owned by the returned
   * context (released via {@code context.close()}). On any initialization failure the context — and
   * with it any searcher it owns — is closed before the exception is rethrown.
   *
   * @param request the shard search request
   * @param searcher an already-acquired searcher to reuse, or null to acquire one here
   * @return an initialized search context registered as the thread-current context
   */
  final SearchContext createContext(
      ShardSearchRequest request, @Nullable Engine.Searcher searcher) {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(request.shardId());

    SearchShardTarget shardTarget =
        new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());

    Engine.Searcher engineSearcher =
        searcher == null ? indexShard.acquireSearcher("search") : searcher;
    // Guard the construction: if the constructor throws, a searcher acquired above would
    // otherwise leak. Only release it if we acquired it ourselves; a caller-supplied
    // searcher remains the caller's responsibility until the context takes ownership.
    SearchContext context;
    try {
      context =
          new DefaultSearchContext(
              idGenerator.incrementAndGet(),
              request,
              shardTarget,
              engineSearcher,
              indexService,
              indexShard,
              scriptService,
              pageCacheRecycler,
              bigArrays,
              threadPool.estimatedTimeInMillisCounter());
    } catch (RuntimeException e) {
      if (searcher == null) {
        engineSearcher.close();
      }
      throw e;
    }
    SearchContext.setCurrent(context);
    try {
      context.scroll(request.scroll());

      parseTemplate(request);
      parseSource(context, request.source());
      parseSource(context, request.extraSource());

      // if the from and size are still not set, default them
      if (context.from() == -1) {
        context.from(0);
      }
      if (context.searchType() == SearchType.COUNT) {
        // so that the optimizations we apply to size=0 also apply to search_type=COUNT
        // and that we close contexts when done with the query phase
        context.searchType(SearchType.QUERY_THEN_FETCH);
        context.size(0);
      } else if (context.size() == -1) {
        context.size(10);
      }

      // pre process
      dfsPhase.preProcess(context);
      queryPhase.preProcess(context);
      fetchPhase.preProcess(context);

      // compute the context keep alive
      long keepAlive = defaultKeepAlive;
      if (request.scroll() != null && request.scroll().keepAlive() != null) {
        keepAlive = request.scroll().keepAlive().millis();
      }
      context.keepAlive(keepAlive);
    } catch (Throwable e) {
      // Closing the context releases the searcher it now owns.
      context.close();
      throw ExceptionsHelper.convertToRuntime(e);
    }

    return context;
  }