Example #1
 public static Version parseAnalysisVersion(
     @IndexSettings Settings indexSettings, Settings settings, ESLogger logger) {
   // check for explicit version on the specific analyzer component
   String sVersion = settings.get("version");
   if (sVersion != null) {
     return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger);
   }
   // check for explicit version on the index itself as default for all analysis components
   sVersion = indexSettings.get("index.analysis.version");
   if (sVersion != null) {
     return Lucene.parseVersion(sVersion, Lucene.ANALYZER_VERSION, logger);
   }
   // resolve the analysis version based on the version the index was created with
   return org.elasticsearch.Version.indexCreated(indexSettings).luceneVersion;
 }
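
The comments above describe a three-step fallback: an explicit version on the analysis component wins, then the index-wide index.analysis.version default, and finally the Lucene version implied by the version the index was created with. A minimal sketch of the same precedence using plain maps instead of the Settings API (the helper name and the hard-coded values are illustrative, not Elasticsearch code):

import java.util.HashMap;
import java.util.Map;

public final class VersionFallbackSketch {

  // Return the first value found: component-level "version", then the index-wide default,
  // then the value derived from the index creation version.
  static String resolveVersion(Map<String, String> componentSettings,
                               Map<String, String> indexSettings,
                               String versionFromIndexCreation) {
    String v = componentSettings.get("version");
    if (v == null) {
      v = indexSettings.get("index.analysis.version");
    }
    return v != null ? v : versionFromIndexCreation;
  }

  public static void main(String[] args) {
    Map<String, String> index = new HashMap<>();
    index.put("index.analysis.version", "5.2.1");
    // prints "5.2.1": no component-level override, so the index-wide default wins
    System.out.println(resolveVersion(new HashMap<>(), index, "5.0.0"));
  }
}
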
 private FiltersFunctionFactorScorer functionScorer(LeafReaderContext context)
     throws IOException {
   Scorer subQueryScorer = subQueryWeight.scorer(context);
   if (subQueryScorer == null) {
     return null;
   }
   final LeafScoreFunction[] functions = new LeafScoreFunction[filterFunctions.length];
   final Bits[] docSets = new Bits[filterFunctions.length];
   for (int i = 0; i < filterFunctions.length; i++) {
     FilterFunction filterFunction = filterFunctions[i];
     functions[i] = filterFunction.function.getLeafScoreFunction(context);
     Scorer filterScorer = filterWeights[i].scorer(context);
     docSets[i] = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorer);
   }
   return new FiltersFunctionFactorScorer(
       this,
       subQueryScorer,
       scoreMode,
       filterFunctions,
       maxBoost,
       functions,
       docSets,
       combineFunction,
       needsScores);
 }
  /**
   * Restores the shard from the {@link RestoreSource} associated with this shard in the routing
   * table.
   *
   * @param recoveryState recovery state
   */
 public void restore(final RecoveryState recoveryState) {
   RestoreSource restoreSource = indexShard.routingEntry().restoreSource();
   if (restoreSource == null) {
     throw new IndexShardRestoreFailedException(shardId, "empty restore source");
   }
   if (logger.isTraceEnabled()) {
     logger.trace("[{}] restoring shard  [{}]", restoreSource.snapshotId(), shardId);
   }
   try {
     recoveryState.getTranslog().totalOperations(0);
     recoveryState.getTranslog().totalOperationsOnStart(0);
     indexShard.prepareForIndexRecovery();
     IndexShardRepository indexShardRepository =
         repositoriesService.indexShardRepository(restoreSource.snapshotId().getRepository());
     ShardId snapshotShardId = shardId;
     if (!shardId.getIndex().equals(restoreSource.index())) {
       snapshotShardId = new ShardId(restoreSource.index(), shardId.id());
     }
     indexShardRepository.restore(
         restoreSource.snapshotId(), shardId, snapshotShardId, recoveryState);
     indexShard.prepareForTranslogRecovery();
     indexShard.finalizeRecovery();
     indexShard.postRecovery("restore done");
     restoreService.indexShardRestoreCompleted(restoreSource.snapshotId(), shardId);
   } catch (Throwable t) {
     if (Lucene.isCorruptionException(t)) {
       restoreService.failRestore(restoreSource.snapshotId(), shardId());
     }
     throw new IndexShardRestoreFailedException(shardId, "restore failed", t);
   }
 }
 /** Read from a stream. */
 public StoreFileMetaData(StreamInput in) throws IOException {
   name = in.readString();
   length = in.readVLong();
   checksum = in.readString();
   // TODO Why not Version.parse?
   writtenBy = Lucene.parseVersionLenient(in.readString(), FIRST_LUCENE_CHECKSUM_VERSION);
   hash = in.readBytesRef();
 }
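
For context, this constructor is the read half of Elasticsearch's usual stream-serialization pattern; the write half emits the same fields in the same order. The method below is a hypothetical mirror image for illustration only (the real StoreFileMetaData defines its own writeTo; the only assumption here is the standard StreamOutput counterparts writeString, writeVLong and writeBytesRef):

  // Hypothetical write-side counterpart of the constructor above; field order must match the reads.
  public void writeTo(StreamOutput out) throws IOException {
    out.writeString(name);
    out.writeVLong(length);
    out.writeString(checksum);
    out.writeString(writtenBy.toString()); // written as a string, matching parseVersionLenient on the read side
    out.writeBytesRef(hash);
  }
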
Example #5
 @Override
 public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel)
     throws Exception {
   try (RecoveriesCollection.StatusRef statusRef =
       onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
     final RecoveryStatus recoveryStatus = statusRef.status();
     recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
      // first, we go and move files that were created with the recovery id suffix to the actual
      // names; it's ok if we have a corrupted index here, since we have replicas to recover from
      // in case of a full cluster shutdown just when this code executes...
      // we have to delete the shard state first: even if the rename fails, the shard might be invalid
      recoveryStatus.indexShard().deleteShardState();
     recoveryStatus.renameAllTempFiles();
     final Store store = recoveryStatus.store();
     // now write checksums
     recoveryStatus.legacyChecksums().write(store);
     Store.MetadataSnapshot sourceMetaData = request.sourceMetaSnapshot();
     try {
       store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
     } catch (CorruptIndexException
         | IndexFormatTooNewException
         | IndexFormatTooOldException ex) {
        // this is a fatal exception at this stage: we transferred files from the remote that have
        // not been checksummed and they are broken. We have to clean up this shard entirely,
        // remove all files and bubble it up to the source shard, since the index might be broken
        // there as well. The source can handle this and check its content on disk if possible.
       try {
         try {
           store.removeCorruptionMarker();
         } finally {
           Lucene.cleanLuceneIndex(store.directory()); // clean up and delete all files
         }
       } catch (Throwable e) {
         logger.debug("Failed to clean lucene index", e);
         ex.addSuppressed(e);
       }
       RecoveryFailedException rfe =
           new RecoveryFailedException(
               recoveryStatus.state(), "failed to clean after recovery", ex);
       recoveryStatus.fail(rfe, true);
       throw rfe;
     } catch (Exception ex) {
       RecoveryFailedException rfe =
           new RecoveryFailedException(
               recoveryStatus.state(), "failed to clean after recovery", ex);
       recoveryStatus.fail(rfe, true);
       throw rfe;
     }
     channel.sendResponse(TransportResponse.Empty.INSTANCE);
   }
 }
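
On the corruption path above, Lucene.cleanLuceneIndex is expected to wipe whatever is left of the broken shard. With plain Lucene, one common way to get that effect (shown here as an illustrative sketch, not necessarily how the Elasticsearch helper is implemented) is to open an IndexWriter in CREATE mode and commit, which discards every existing segment:

import java.io.IOException;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;

// Illustrative only: re-create the index in place, dropping all existing segments and files.
static void wipeIndex(Directory directory) throws IOException {
  IndexWriterConfig config =
      new IndexWriterConfig(new KeywordAnalyzer()) // any analyzer works, nothing is indexed
          .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
          .setCommitOnClose(true);
  try (IndexWriter writer = new IndexWriter(directory, config)) {
    writer.commit(); // the CREATE commit replaces whatever was on disk before
  }
}
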
 @Override
 public boolean matchesSafely(Engine.Searcher searcher) {
   try {
     long count = Lucene.count(searcher.searcher(), query);
     return count == totalHits;
   } catch (IOException e) {
     return false;
   }
 }
  @Override
  public void search(Query query, Collector collector) throws IOException {
    // Wrap the caller's collector with various wrappers e.g. those used to siphon
    // matches off for aggregation or to impose a time-limit on collection.
    final boolean timeoutSet = searchContext.timeoutInMillis() != -1;
    final boolean terminateAfterSet =
        searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER;

    if (timeoutSet) {
      // TODO: change to use our own counter that uses the scheduler in ThreadPool
       // throws TimeLimitingCollector.TimeExceededException when the timeout has been reached
      collector =
          Lucene.wrapTimeLimitingCollector(
              collector, searchContext.timeEstimateCounter(), searchContext.timeoutInMillis());
    }
    if (terminateAfterSet) {
       // throws Lucene.EarlyTerminationException when the given count is reached
      collector =
          Lucene.wrapCountBasedEarlyTerminatingCollector(collector, searchContext.terminateAfter());
    }
    if (currentState == Stage.MAIN_QUERY) {
      if (searchContext.parsedPostFilter() != null) {
         // this will only get applied to the actual search collector and not to any scoped
         // collectors; it is applied to the main collector only, since that is the only place
         // the post filter should take effect
        final Weight filterWeight =
            createNormalizedWeight(searchContext.parsedPostFilter().query(), false);
        collector = new FilteredCollector(collector, filterWeight);
      }
      if (queryCollectors != null && !queryCollectors.isEmpty()) {
        ArrayList<Collector> allCollectors = new ArrayList<>(queryCollectors.values());
        allCollectors.add(collector);
        collector = MultiCollector.wrap(allCollectors);
      }

      // apply the minimum score after multi collector so we filter aggs as well
      if (searchContext.minimumScore() != null) {
        collector = new MinimumScoreCollector(collector, searchContext.minimumScore());
      }
    }
    super.search(query, collector);
  }
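
The wrapping order here matters: the timeout and early-termination wrappers sit closest to the caller's collector, any aggregation collectors are merged in via MultiCollector, and the minimum-score filter is applied last so it also gates the aggregations. Just for the timeout step, a small plain-Lucene sketch using TimeLimitingCollector directly (the helper is illustrative; Elasticsearch's wrapTimeLimitingCollector may differ in detail):

import org.apache.lucene.search.Collector;
import org.apache.lucene.search.TimeLimitingCollector;
import org.apache.lucene.util.Counter;

// Wrap any collector so collection aborts with TimeExceededException once the budget is exceeded.
static Collector withTimeout(Collector delegate, long timeoutMillis) {
  Counter clock = TimeLimitingCollector.getGlobalCounter(); // advanced by Lucene's global timer thread
  return new TimeLimitingCollector(delegate, clock, timeoutMillis);
}
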
Example #8
  /*
   * simple test that ensures that we bump the version on upgrade
   */
 @Test
 public void testVersion() {
   ESLogger logger = ESLoggerFactory.getLogger(LuceneTest.class.getName());
   Version[] values = Version.values();
   assertThat(Version.LUCENE_CURRENT, equalTo(values[values.length - 1]));
   assertThat(
       "Latest Lucene Version is not set after upgrade",
       Lucene.VERSION,
       equalTo(values[values.length - 2]));
   assertThat(Lucene.parseVersion(null, Lucene.VERSION, null), equalTo(Lucene.VERSION));
   for (int i = 0; i < values.length - 1; i++) {
     // this should fail if the lucene version is not mapped as a string in Lucene.java
     assertThat(
         Lucene.parseVersion(
             values[i].name().replaceFirst("^LUCENE_(\\d)(\\d)$", "$1.$2"),
             Version.LUCENE_CURRENT,
             logger),
         equalTo(values[i]));
   }
 }
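
The test rewrites constant names such as LUCENE_52 into the dotted form "5.2" before handing them to Lucene.parseVersion. Plain Lucene ships a lenient parser for such strings; a tiny sketch (the input literal is just an example):

import java.text.ParseException;
import org.apache.lucene.util.Version;

public static void main(String[] args) throws ParseException {
  Version parsed = Version.parseLeniently("5.2.1"); // accepts dotted version strings leniently
  System.out.println(parsed + " latest=" + parsed.equals(Version.LATEST));
}
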
    @Override
    public Explanation explain(LeafReaderContext context, int doc) throws IOException {

      Explanation expl = subQueryWeight.explain(context, doc);
      if (!expl.isMatch()) {
        return expl;
      }
      // First: Gather explanations for all filters
      List<Explanation> filterExplanations = new ArrayList<>();
      for (int i = 0; i < filterFunctions.length; ++i) {
        Bits docSet =
            Lucene.asSequentialAccessBits(
                context.reader().maxDoc(), filterWeights[i].scorer(context));
        if (docSet.get(doc)) {
          FilterFunction filterFunction = filterFunctions[i];
          Explanation functionExplanation =
              filterFunction.function.getLeafScoreFunction(context).explainScore(doc, expl);
          double factor = functionExplanation.getValue();
          float sc = CombineFunction.toFloat(factor);
          Explanation filterExplanation =
              Explanation.match(
                  sc,
                  "function score, product of:",
                  Explanation.match(1.0f, "match filter: " + filterFunction.filter.toString()),
                  functionExplanation);
          filterExplanations.add(filterExplanation);
        }
      }
      if (filterExplanations.size() > 0) {
        FiltersFunctionFactorScorer scorer = functionScorer(context);
        int actualDoc = scorer.iterator().advance(doc);
        assert (actualDoc == doc);
        double score = scorer.computeScore(doc, expl.getValue());
        Explanation factorExplanation =
            Explanation.match(
                CombineFunction.toFloat(score),
                "function score, score mode ["
                    + scoreMode.toString().toLowerCase(Locale.ROOT)
                    + "]",
                filterExplanations);
        expl = combineFunction.explain(expl, factorExplanation, maxBoost);
      }
      if (minScore != null && minScore > expl.getValue()) {
        expl =
            Explanation.noMatch(
                "Score value is too low, expected at least "
                    + minScore
                    + " but got "
                    + expl.getValue(),
                expl);
      }
      return expl;
    }
Example #10
 private void processFailure(SearchContext context, Throwable t) {
   freeContext(context.id());
   try {
     if (Lucene.isCorruptionException(t)) {
       context.indexShard().failShard("search execution corruption failure", t);
     }
   } catch (Throwable e) {
     logger.warn(
         "failed to process shard failure to (potentially) send back shard failure on corruption",
         e);
   }
 }
Example #11
 public CommitStats(SegmentInfos segmentInfos) {
   // clone the map to protect against concurrent changes
   userData =
       MapBuilder.<String, String>newMapBuilder()
           .putAll(segmentInfos.getUserData())
           .immutableMap();
    // Lucene calls the current generation the last generation.
   generation = segmentInfos.getLastGeneration();
   if (segmentInfos.getId() != null) { // id is only written starting with Lucene 5.0
     id = Base64.encodeBytes(segmentInfos.getId());
   }
   numDocs = Lucene.getNumDocs(segmentInfos);
 }
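
CommitStats only consumes a SegmentInfos that the engine has already loaded. With plain Lucene the latest commit point can be read straight from the directory; a sketch that pulls out the same kind of information (path handling is illustrative):

import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public static void main(String[] args) throws IOException {
  try (Directory dir = FSDirectory.open(Paths.get(args[0]))) {
    SegmentInfos infos = SegmentInfos.readLatestCommit(dir); // latest commit point on disk
    System.out.println("generation=" + infos.getLastGeneration()
        + " segments=" + infos.size()
        + " userData=" + infos.getUserData());
  }
}
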
 public static void checkIndex(ESLogger logger, Store store, ShardId shardId) {
   if (store.tryIncRef()) {
     logger.info("start check index");
     try {
       Directory dir = store.directory();
       if (!Lucene.indexExists(dir)) {
         return;
       }
       if (IndexWriter.isLocked(dir)) {
         ESTestCase.checkIndexFailed = true;
         throw new IllegalStateException("IndexWriter is still open on shard " + shardId);
       }
       try (CheckIndex checkIndex = new CheckIndex(dir)) {
         BytesStreamOutput os = new BytesStreamOutput();
         PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
         checkIndex.setInfoStream(out);
         out.flush();
         CheckIndex.Status status = checkIndex.checkIndex();
         if (!status.clean) {
           ESTestCase.checkIndexFailed = true;
           logger.warn(
               "check index [failure] index files={}\n{}",
               Arrays.toString(dir.listAll()),
               new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
           throw new IOException("index check failure");
         } else {
           if (logger.isDebugEnabled()) {
             logger.debug(
                 "check index [success]\n{}",
                 new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
           }
         }
       }
     } catch (Exception e) {
       logger.warn("failed to check index", e);
     } finally {
       logger.info("end check index");
       store.decRef();
     }
   }
 }
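
The helper above is essentially a wrapper around Lucene's CheckIndex with shard bookkeeping and logging added. A stripped-down, standalone version of the same check might look like this (paths are illustrative):

import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public static void main(String[] args) throws IOException {
  try (Directory dir = FSDirectory.open(Paths.get(args[0]));
       CheckIndex checkIndex = new CheckIndex(dir)) { // takes the write lock while checking
    CheckIndex.Status status = checkIndex.checkIndex();
    System.out.println(status.clean ? "index is clean" : "index check failed");
  }
}
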
Example #13
 /**
  * Acquires, then releases, all {@code write.lock} files in the given shard paths. The
  * "write.lock" file is assumed to be under the shard path's "index" directory as used by
  * Elasticsearch.
  *
  * @throws LockObtainFailedException if any of the locks could not be acquired
  */
 public static void acquireFSLockForPaths(
     @IndexSettings Settings indexSettings, Path... shardPaths) throws IOException {
   Lock[] locks = new Lock[shardPaths.length];
   Directory[] dirs = new Directory[shardPaths.length];
   try {
     for (int i = 0; i < shardPaths.length; i++) {
       // resolve the directory the shard actually lives in
       Path p = shardPaths[i].resolve("index");
       // open a directory (will be immediately closed) on the shard's location
       dirs[i] = new SimpleFSDirectory(p, FsDirectoryService.buildLockFactory(indexSettings));
       // create a lock for the "write.lock" file
       try {
         locks[i] = Lucene.acquireWriteLock(dirs[i]);
       } catch (IOException ex) {
         throw new LockObtainFailedException(
             "unable to acquire " + IndexWriter.WRITE_LOCK_NAME + " for " + p);
       }
     }
   } finally {
     IOUtils.closeWhileHandlingException(locks);
     IOUtils.closeWhileHandlingException(dirs);
   }
 }
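
Under the newer Lucene locking API (5.3+), obtaining write.lock amounts to asking the Directory for the lock by name; obtainLock either returns an already-acquired lock or throws LockObtainFailedException. A minimal sketch (the helper name is illustrative):

import java.io.IOException;
import java.nio.file.Path;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.SimpleFSDirectory;

// Acquire and immediately release the write lock to verify no IndexWriter is open on this path.
static void assertNotLocked(Path indexPath) throws IOException {
  try (Directory dir = new SimpleFSDirectory(indexPath);
       Lock lock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
    // acquisition succeeded; closing the lock (and then the directory) releases it again
  }
}
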
 @Override
 public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub)
     throws IOException {
   // no need to provide deleted docs to the filter
   final Bits[] bits = new Bits[filters.length];
   for (int i = 0; i < filters.length; ++i) {
     bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx));
   }
   return new LeafBucketCollectorBase(sub, null) {
     @Override
     public void collect(int doc, long bucket) throws IOException {
       boolean matched = false;
       for (int i = 0; i < bits.length; i++) {
         if (bits[i].get(doc)) {
           collectBucket(sub, doc, bucketOrd(bucket, i));
           matched = true;
         }
       }
       if (showOtherBucket && !matched) {
         collectBucket(sub, doc, bucketOrd(bucket, bits.length));
       }
     }
   };
 }
  /*
   * More Ideas:
    *   - add ability to find whitespace problems -> we can build a poor man's decompounder with our index based on an automaton?
   *   - add ability to build different error models maybe based on a confusion matrix?
   *   - try to combine a token with its subsequent token to find / detect word splits (optional)
    *      - for this to work we need some way to define the position length of a candidate
   *   - phonetic filters could be interesting here too for candidate selection
   */
  @Override
  public Suggestion<? extends Entry<? extends Option>> innerExecute(
      String name,
      PhraseSuggestionContext suggestion,
      IndexSearcher searcher,
      CharsRefBuilder spare)
      throws IOException {
    double realWordErrorLikelihood = suggestion.realworldErrorLikelyhood();
    final PhraseSuggestion response = new PhraseSuggestion(name, suggestion.getSize());
    final IndexReader indexReader = searcher.getIndexReader();
    List<PhraseSuggestionContext.DirectCandidateGenerator> generators = suggestion.generators();
    final int numGenerators = generators.size();
    final List<CandidateGenerator> gens = new ArrayList<>(generators.size());
    for (int i = 0; i < numGenerators; i++) {
      PhraseSuggestionContext.DirectCandidateGenerator generator = generators.get(i);
      DirectSpellChecker directSpellChecker = SuggestUtils.getDirectSpellChecker(generator);
      Terms terms = MultiFields.getTerms(indexReader, generator.field());
      if (terms != null) {
        gens.add(
            new DirectCandidateGenerator(
                directSpellChecker,
                generator.field(),
                generator.suggestMode(),
                indexReader,
                realWordErrorLikelihood,
                generator.size(),
                generator.preFilter(),
                generator.postFilter(),
                terms));
      }
    }
    final String suggestField = suggestion.getField();
    final Terms suggestTerms = MultiFields.getTerms(indexReader, suggestField);
    if (gens.size() > 0 && suggestTerms != null) {
      final NoisyChannelSpellChecker checker =
          new NoisyChannelSpellChecker(
              realWordErrorLikelihood, suggestion.getRequireUnigram(), suggestion.getTokenLimit());
      final BytesRef separator = suggestion.separator();
      TokenStream stream =
          checker.tokenStream(
              suggestion.getAnalyzer(), suggestion.getText(), spare, suggestion.getField());

      WordScorer wordScorer =
          suggestion
              .model()
              .newScorer(
                  indexReader, suggestTerms, suggestField, realWordErrorLikelihood, separator);
      Result checkerResult =
          checker.getCorrections(
              stream,
              new MultiCandidateGeneratorWrapper(
                  suggestion.getShardSize(), gens.toArray(new CandidateGenerator[gens.size()])),
              suggestion.maxErrors(),
              suggestion.getShardSize(),
              wordScorer,
              suggestion.confidence(),
              suggestion.gramSize());

      PhraseSuggestion.Entry resultEntry =
          buildResultEntry(suggestion, spare, checkerResult.cutoffScore);
      response.addTerm(resultEntry);

      final BytesRefBuilder byteSpare = new BytesRefBuilder();
      final EarlyTerminatingCollector collector = Lucene.createExistsCollector();
      final CompiledScript collateScript;
      if (suggestion.getCollateQueryScript() != null) {
        collateScript = suggestion.getCollateQueryScript();
      } else if (suggestion.getCollateFilterScript() != null) {
        collateScript = suggestion.getCollateFilterScript();
      } else {
        collateScript = null;
      }
      final boolean collatePrune = (collateScript != null) && suggestion.collatePrune();
      for (int i = 0; i < checkerResult.corrections.length; i++) {
        Correction correction = checkerResult.corrections[i];
        spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, null, null));
        boolean collateMatch = true;
        if (collateScript != null) {
          // Checks if the template query collateScript yields any documents
          // from the index for a correction, collateMatch is updated
          final Map<String, Object> vars = suggestion.getCollateScriptParams();
          vars.put(SUGGESTION_TEMPLATE_VAR_NAME, spare.toString());
          final ExecutableScript executable = scriptService.executable(collateScript, vars);
          final BytesReference querySource = (BytesReference) executable.run();
          final ParsedQuery parsedQuery;
          if (suggestion.getCollateFilterScript() != null) {
            parsedQuery =
                suggestion
                    .getQueryParserService()
                    .parse(
                        QueryBuilders.constantScoreQuery(QueryBuilders.wrapperQuery(querySource)));
          } else {
            parsedQuery = suggestion.getQueryParserService().parse(querySource);
          }
          collateMatch = Lucene.exists(searcher, parsedQuery.query(), collector);
        }
        if (!collateMatch && !collatePrune) {
          continue;
        }
        Text phrase = new StringText(spare.toString());
        Text highlighted = null;
        if (suggestion.getPreTag() != null) {
          spare.copyUTF8Bytes(
              correction.join(
                  SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag()));
          highlighted = new StringText(spare.toString());
        }
        if (collatePrune) {
          resultEntry.addOption(
              new Suggestion.Entry.Option(
                  phrase, highlighted, (float) (correction.score), collateMatch));
        } else {
          resultEntry.addOption(
              new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score)));
        }
      }
    } else {
      response.addTerm(buildResultEntry(suggestion, spare, Double.MIN_VALUE));
    }
    return response;
  }
Example #16
  @Inject
  @SuppressForbidden(reason = "System.out.*")
  public NodeEnvironment(Settings settings, Environment environment) throws IOException {
    super(settings);

    this.addNodeId = settings.getAsBoolean(ADD_NODE_ID_TO_CUSTOM_PATH, true);
    this.customPathsEnabled = settings.getAsBoolean(SETTING_CUSTOM_DATA_PATH_ENABLED, false);

    if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) {
      nodePaths = null;
      locks = null;
      localNodeId = -1;
      return;
    }

    final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
    final Lock[] locks = new Lock[nodePaths.length];

    int localNodeId = -1;
    IOException lastException = null;
    int maxLocalStorageNodes = settings.getAsInt("node.max_local_storage_nodes", 50);
    for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) {
      for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) {
        Path dir =
            environment
                .dataWithClusterFiles()[dirIndex]
                .resolve(NODES_FOLDER)
                .resolve(Integer.toString(possibleLockId));
        Files.createDirectories(dir);

        try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
          logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
          try {
            locks[dirIndex] = Lucene.acquireLock(luceneDir, NODE_LOCK_FILENAME, 0);
            nodePaths[dirIndex] = new NodePath(dir, environment);
            localNodeId = possibleLockId;
          } catch (LockObtainFailedException ex) {
            logger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
            // release all the ones that were obtained up until now
            releaseAndNullLocks(locks);
            break;
          }

        } catch (IOException e) {
          logger.trace("failed to obtain node lock on {}", e, dir.toAbsolutePath());
          lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
          // release all the ones that were obtained up until now
          releaseAndNullLocks(locks);
          break;
        }
      }
      if (locks[0] != null) {
        // we found a lock, break
        break;
      }
    }

    if (locks[0] == null) {
      throw new IllegalStateException(
          "Failed to obtain node lock, is the following location writable?: "
              + Arrays.toString(environment.dataWithClusterFiles()),
          lastException);
    }

    this.localNodeId = localNodeId;
    this.locks = locks;
    this.nodePaths = nodePaths;

    if (logger.isDebugEnabled()) {
      logger.debug("using node location [{}], local_node_id [{}]", nodePaths, localNodeId);
    }

    maybeLogPathDetails();

    if (settings.getAsBoolean(SETTING_ENABLE_LUCENE_SEGMENT_INFOS_TRACE, false)) {
      SegmentInfos.setInfoStream(System.out);
    }
  }
  @Override
  protected ShardCountResponse shardOperation(ShardCountRequest request)
      throws ElasticsearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    IndexShard indexShard = indexService.shardSafe(request.shardId().id());

    SearchShardTarget shardTarget =
        new SearchShardTarget(
            clusterService.localNode().id(), request.shardId().getIndex(), request.shardId().id());
    SearchContext context =
        new DefaultSearchContext(
            0,
            new ShardSearchLocalRequest(
                request.types(), request.nowInMillis(), request.filteringAliases()),
            shardTarget,
            indexShard.acquireSearcher("count"),
            indexService,
            indexShard,
            scriptService,
            cacheRecycler,
            pageCacheRecycler,
            bigArrays,
            threadPool.estimatedTimeInMillisCounter());
    SearchContext.setCurrent(context);

    try {
      // TODO: min score should move to be "null" as a value that is not initialized...
      if (request.minScore() != -1) {
        context.minimumScore(request.minScore());
      }
      BytesReference source = request.querySource();
      if (source != null && source.length() > 0) {
        try {
          QueryParseContext.setTypes(request.types());
          context.parsedQuery(indexService.queryParserService().parseQuery(source));
        } finally {
          QueryParseContext.removeTypes();
        }
      }
      final boolean hasTerminateAfterCount = request.terminateAfter() != DEFAULT_TERMINATE_AFTER;
      boolean terminatedEarly = false;
      context.preProcess();
      try {
        long count;
        if (hasTerminateAfterCount) {
          final Lucene.EarlyTerminatingCollector countCollector =
              Lucene.createCountBasedEarlyTerminatingCollector(request.terminateAfter());
          terminatedEarly =
              Lucene.countWithEarlyTermination(context.searcher(), context.query(), countCollector);
          count = countCollector.count();
        } else {
          count = Lucene.count(context.searcher(), context.query());
        }
        return new ShardCountResponse(request.shardId(), count, terminatedEarly);
      } catch (Exception e) {
        throw new QueryPhaseExecutionException(context, "failed to execute count", e);
      }
    } finally {
      // this will also release the index searcher
      context.close();
      SearchContext.removeCurrent();
    }
  }
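
Both branches above boil down to counting matching documents. In plain Lucene the unbounded case can be written with a TotalHitCountCollector (newer versions also offer IndexSearcher.count); a minimal sketch:

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TotalHitCountCollector;

// Count documents matching the query without materializing any hits.
static long countMatches(IndexReader reader, Query query) throws IOException {
  IndexSearcher searcher = new IndexSearcher(reader);
  TotalHitCountCollector collector = new TotalHitCountCollector();
  searcher.search(query, collector);
  return collector.getTotalHits();
}
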
Example #18
 @Override
 public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
   ensureOpen();
   final byte[] newCommitId;
   /*
     * Unfortunately the lock order is important here. We have to acquire the readlock first; otherwise,
     * if we are flushing at the end of the recovery while holding the write lock, we can deadlock:
    *  Thread 1: flushes via API and gets the flush lock but blocks on the readlock since Thread 2 has the writeLock
    *  Thread 2: flushes at the end of the recovery holding the writeLock and blocks on the flushLock owned by Thread 1
    */
   try (ReleasableLock lock = readLock.acquire()) {
     ensureOpen();
     if (flushLock.tryLock() == false) {
       // if we can't get the lock right away we block if needed otherwise barf
       if (waitIfOngoing) {
         logger.trace("waiting for in-flight flush to finish");
         flushLock.lock();
         logger.trace("acquired flush lock after blocking");
       } else {
         throw new FlushNotAllowedEngineException(shardId, "already flushing...");
       }
     } else {
       logger.trace("acquired flush lock immediately");
     }
     try {
       if (indexWriter.hasUncommittedChanges() || force) {
         ensureCanFlush();
         try {
           translog.prepareCommit();
           logger.trace("starting commit for flush; commitTranslog=true");
           commitIndexWriter(indexWriter, translog, null);
           logger.trace("finished commit for flush");
           // we need to refresh in order to clear older version values
           refresh("version_table_flush");
           // after refresh documents can be retrieved from the index so we can now commit the
           // translog
           translog.commit();
         } catch (Throwable e) {
           throw new FlushFailedEngineException(shardId, e);
         }
       }
       /*
        * we have to inc-ref the store here since if the engine is closed by a tragic event
        * we don't acquire the write lock and wait until we have exclusive access. This might also
        * dec the store reference which can essentially close the store and unless we can inc the reference
        * we can't use it.
        */
       store.incRef();
       try {
         // reread the last committed segment infos
         lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
       } catch (Throwable e) {
         if (isClosed.get() == false) {
           logger.warn("failed to read latest segment infos on flush", e);
           if (Lucene.isCorruptionException(e)) {
             throw new FlushFailedEngineException(shardId, e);
           }
         }
       } finally {
         store.decRef();
       }
       newCommitId = lastCommittedSegmentInfos.getId();
     } catch (FlushFailedEngineException ex) {
       maybeFailEngine("flush", ex);
       throw ex;
     } finally {
       flushLock.unlock();
     }
   }
    // We don't have to do this here; we do it defensively to make sure that even if wall clock
    // time is misbehaving (e.g., moves backwards) we will at least still sometimes prune deleted
    // tombstones:
   if (engineConfig.isEnableGcDeletes()) {
     pruneDeletedTombstones();
   }
   return new CommitId(newCommitId);
 }
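
The lock-ordering comment at the top of this method is the essential detail: the read lock is always taken before the flush lock, and a concurrent flush is either waited for or rejected. A generic sketch of that ordering with java.util.concurrent locks (the field names and the exception are illustrative, not the engine's actual members):

import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final ReentrantReadWriteLock engineLock = new ReentrantReadWriteLock();
final ReentrantLock flushLock = new ReentrantLock();

void flushSketch(boolean waitIfOngoing) {
  // take the read lock before the flush lock, so we never hold the flush lock while waiting for the read lock
  engineLock.readLock().lock();
  try {
    if (flushLock.tryLock() == false) {
      if (waitIfOngoing == false) {
        throw new IllegalStateException("already flushing");
      }
      flushLock.lock(); // block until the in-flight flush finishes
    }
    try {
      // ... commit the IndexWriter and translog here ...
    } finally {
      flushLock.unlock();
    }
  } finally {
    engineLock.readLock().unlock();
  }
}
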
  @Override
  public void search(List<LeafReaderContext> leaves, Weight weight, Collector collector)
      throws IOException {
    final boolean timeoutSet = searchContext.timeoutInMillis() != -1;
    final boolean terminateAfterSet =
        searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER;

    if (timeoutSet) {
      // TODO: change to use our own counter that uses the scheduler in ThreadPool
       // throws TimeLimitingCollector.TimeExceededException when the timeout has been reached
      collector =
          Lucene.wrapTimeLimitingCollector(
              collector, searchContext.timeEstimateCounter(), searchContext.timeoutInMillis());
    }
    if (terminateAfterSet) {
       // throws Lucene.EarlyTerminationException when the given count is reached
      collector =
          Lucene.wrapCountBasedEarlyTerminatingCollector(collector, searchContext.terminateAfter());
    }
    if (currentState == Stage.MAIN_QUERY) {
      if (searchContext.parsedPostFilter() != null) {
         // this will only get applied to the actual search collector and not to any scoped
         // collectors; it is applied to the main collector only, since that is the only place
         // the post filter should take effect
        collector = new FilteredCollector(collector, searchContext.parsedPostFilter().filter());
      }
      if (queryCollectors != null && !queryCollectors.isEmpty()) {
        ArrayList<Collector> allCollectors = new ArrayList<>(queryCollectors.values());
        allCollectors.add(collector);
        collector = MultiCollector.wrap(allCollectors);
      }

      // apply the minimum score after multi collector so we filter aggs as well
      if (searchContext.minimumScore() != null) {
        collector = new MinimumScoreCollector(collector, searchContext.minimumScore());
      }
    }

     // we only compute the doc id set once, since within a context we always execute the same
     // query...
    try {
      if (timeoutSet || terminateAfterSet) {
        try {
          super.search(leaves, weight, collector);
        } catch (TimeLimitingCollector.TimeExceededException e) {
          assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set";
          searchContext.queryResult().searchTimedOut(true);
        } catch (Lucene.EarlyTerminationException e) {
          assert terminateAfterSet
              : "EarlyTerminationException thrown even though terminateAfter wasn't set";
          searchContext.queryResult().terminatedEarly(true);
        }
        if (terminateAfterSet && searchContext.queryResult().terminatedEarly() == null) {
          searchContext.queryResult().terminatedEarly(false);
        }
      } else {
        super.search(leaves, weight, collector);
      }
    } finally {
      searchContext.clearReleasables(Lifetime.COLLECTION);
    }
  }
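
Lucene.EarlyTerminationException and wrapCountBasedEarlyTerminatingCollector are Elasticsearch utilities, but the underlying idea is simply a collector that throws once it has collected the allowed number of documents. A rough stand-alone sketch in the same spirit, against the Lucene 5.x SimpleCollector API used throughout these examples (the exception type is illustrative and, unlike the Elasticsearch wrapper, this version does not forward hits to an inner collector):

import java.io.IOException;
import org.apache.lucene.search.SimpleCollector;

// Illustrative collector that aborts the search after maxCount collected documents.
class CountLimitedCollector extends SimpleCollector {
  static final class LimitReachedException extends RuntimeException {}

  private final int maxCount;
  private int count;

  CountLimitedCollector(int maxCount) {
    this.maxCount = maxCount;
  }

  @Override
  public void collect(int doc) throws IOException {
    if (++count > maxCount) {
      throw new LimitReachedException(); // the caller catches this and marks the result as terminated early
    }
  }

  @Override
  public boolean needsScores() {
    return false;
  }
}
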