private PossiblyLimitedTopDocs getTopDocs(Query query, Sort sort) throws IOException {
  final TopFieldCollector topCollector = TopFieldCollector.create(sort, maxHits, true, false, false, false);
  final Counter clock = Counter.newCounter(true);
  final int waitMillis = 1000;
  // TODO: if we interrupt the whole thread anyway, do we still need the TimeLimitingCollector?
  // note: the time budget is expressed in clock ticks; the counter thread below adds one tick per waitMillis
  final TimeLimitingCollector collector =
      new TimeLimitingCollector(topCollector, clock, maxSearchTimeMillis / waitMillis);
  collector.setBaseline(0);
  final Thread counterThread = new Thread() {
    @Override
    public void run() {
      final long startTime = System.currentTimeMillis();
      while (true) {
        final long runTimeMillis = System.currentTimeMillis() - startTime;
        if (runTimeMillis > maxSearchTimeMillis) {
          // make sure there's no lingering thread for too long
          return;
        }
        clock.addAndGet(1);
        try {
          Thread.sleep(waitMillis);
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      }
    }
  };
  counterThread.setName("LuceneSearchTimeoutThread");
  counterThread.start();
  boolean timeLimitActivated = false;
  try {
    indexSearcher.search(query, collector);
  } catch (TimeLimitingCollector.TimeExceededException e) {
    timeLimitActivated = true;
  }
  return new PossiblyLimitedTopDocs(topCollector.topDocs(), timeLimitActivated);
}
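Lucene also ships a global counter that is ticked by its own shared TimerThread, which avoids the hand-rolled clock thread above; with the global counter, the budget passed to the constructor is effectively milliseconds. A minimal sketch of that variant, assuming a Lucene 4.x IndexSearcher (TimeoutSearch and searchWithTimeout are illustrative names, not taken from the code above):

import java.io.IOException;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TimeLimitingCollector;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;

class TimeoutSearch {
  /** Returns the top hits, or whatever was collected before the time budget ran out. */
  static TopDocs searchWithTimeout(IndexSearcher searcher, Query query, long budgetMillis)
      throws IOException {
    TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, true);
    // The global counter is advanced by Lucene's own TimerThread (one per JVM),
    // so no dedicated clock thread needs to be started or stopped here.
    TimeLimitingCollector collector = new TimeLimitingCollector(
        topDocs, TimeLimitingCollector.getGlobalCounter(), budgetMillis);
    try {
      searcher.search(query, collector);
    } catch (TimeLimitingCollector.TimeExceededException e) {
      // the hits collected before the timeout remain available in topDocs
    }
    return topDocs.topDocs();
  }
}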
@Override
public void search(List<AtomicReaderContext> leaves, Weight weight, Collector collector) throws IOException {
  if (searchContext.timeoutInMillis() != -1) {
    // TODO: change to use our own counter that uses the scheduler in ThreadPool
    collector = new TimeLimitingCollector(
        collector, TimeLimitingCollector.getGlobalCounter(), searchContext.timeoutInMillis());
  }
  if (currentState == Stage.MAIN_QUERY) {
    if (enableMainDocIdSetCollector) {
      // TODO should we create a cache of segment->docIdSets so we won't create one each time?
      collector = this.mainDocIdSetCollector = new DocIdSetCollector(searchContext.docSetCache(), collector);
    }
    if (searchContext.parsedFilter() != null) {
      // this will only get applied to the actual search collector and not
      // to any scoped collectors, also, it will only be applied to the main collector
      // since that is where the filter should only work
      collector = new FilteredCollector(collector, searchContext.parsedFilter().filter());
    }
    if (queryCollectors != null && !queryCollectors.isEmpty()) {
      collector = new MultiCollector(
          collector, queryCollectors.toArray(new Collector[queryCollectors.size()]));
    }
    // apply the minimum score after multi collector so we filter facets as well
    if (searchContext.minimumScore() != null) {
      collector = new MinimumScoreCollector(collector, searchContext.minimumScore());
    }
  }
  // we only compute the doc id set once since within a context, we execute the same query always...
  try {
    if (searchContext.timeoutInMillis() != -1) {
      try {
        super.search(leaves, weight, collector);
      } catch (TimeLimitingCollector.TimeExceededException e) {
        searchContext.queryResult().searchTimedOut(true);
      }
    } else {
      super.search(leaves, weight, collector);
    }
    if (currentState == Stage.MAIN_QUERY) {
      if (enableMainDocIdSetCollector) {
        enableMainDocIdSetCollector = false;
        mainDocIdSetCollector.postCollection();
      }
      if (queryCollectors != null && !queryCollectors.isEmpty()) {
        for (Collector queryCollector : queryCollectors) {
          if (queryCollector instanceof XCollector) {
            ((XCollector) queryCollector).postCollection();
          }
        }
      }
    }
  } finally {
    searchContext.clearReleasables();
  }
}
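Both methods build their collector as a chain of decorators: each wrapper implements Collector and forwards every call to the one beneath it, so the timeout, filter, and score checks compose freely. A minimal sketch of such a wrapper against the Lucene 4.x Collector API (MinScoreCollector is a hypothetical illustration, not Elasticsearch's actual MinimumScoreCollector):

import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;

/** Delegating collector that drops hits scoring below a threshold. */
class MinScoreCollector extends Collector {
  private final Collector delegate;
  private final float minScore;
  private Scorer scorer;

  MinScoreCollector(Collector delegate, float minScore) {
    this.delegate = delegate;
    this.minScore = minScore;
  }

  @Override
  public void setScorer(Scorer scorer) throws IOException {
    this.scorer = scorer;
    delegate.setScorer(scorer);
  }

  @Override
  public void collect(int doc) throws IOException {
    // filter before delegating; everything downstream only sees passing docs
    if (scorer.score() >= minScore) {
      delegate.collect(doc);
    }
  }

  @Override
  public void setNextReader(AtomicReaderContext context) throws IOException {
    delegate.setNextReader(context);
  }

  @Override
  public boolean acceptsDocsOutOfOrder() {
    return delegate.acceptsDocsOutOfOrder();
  }
}

Order in the chain matters: the Elasticsearch method deliberately wraps the minimum-score check around the MultiCollector so that facet collectors also see only the filtered documents, per the comment in the code above.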