public void testLiveMaxMergeCount() throws Exception {
  Directory d = newDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  TieredMergePolicy tmp = new TieredMergePolicy();
  tmp.setSegmentsPerTier(1000);
  tmp.setMaxMergeAtOnce(1000);
  tmp.setMaxMergeAtOnceExplicit(10);
  iwc.setMergePolicy(tmp);
  iwc.setMaxBufferedDocs(2);
  // Disable flush-by-RAM so a new segment is flushed every 2 docs:
  iwc.setRAMBufferSizeMB(-1);

  final AtomicInteger maxRunningMergeCount = new AtomicInteger();

  ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler() {
    final AtomicInteger runningMergeCount = new AtomicInteger();

    @Override
    public void doMerge(MergePolicy.OneMerge merge) throws IOException {
      int count = runningMergeCount.incrementAndGet();
      // Track the peak number of merges running at once (evil?):
      synchronized (this) {
        if (count > maxRunningMergeCount.get()) {
          maxRunningMergeCount.set(count);
        }
      }
      try {
        super.doMerge(merge);
      } finally {
        runningMergeCount.decrementAndGet();
      }
    }
  };
  cms.setMaxMergesAndThreads(5, 3);

  iwc.setMergeScheduler(cms);

  IndexWriter w = new IndexWriter(d, iwc);

  // Makes 100 segments
  for (int i = 0; i < 200; i++) {
    w.addDocument(new Document());
  }

  // No merges should have run so far, because TMP has high segmentsPerTier:
  assertEquals(0, maxRunningMergeCount.get());

  w.forceMerge(1);

  // At most 5 merge threads should have launched at once:
  assertTrue("maxRunningMergeCount=" + maxRunningMergeCount, maxRunningMergeCount.get() <= 5);
  maxRunningMergeCount.set(0);

  // Makes another 100 segments
  for (int i = 0; i < 200; i++) {
    w.addDocument(new Document());
  }

  ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).setMaxMergesAndThreads(1, 1);
  w.forceMerge(1);

  // At most 1 merge thread should have launched at once:
  assertEquals(1, maxRunningMergeCount.get());

  w.close();
  d.close();
}
// LUCENE-4544
public void testMaxMergeCount() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));

  final int maxMergeCount = TestUtil.nextInt(random(), 1, 5);
  final int maxMergeThreads = TestUtil.nextInt(random(), 1, maxMergeCount);
  final CountDownLatch enoughMergesWaiting = new CountDownLatch(maxMergeCount);
  final AtomicInteger runningMergeCount = new AtomicInteger(0);
  final AtomicBoolean failed = new AtomicBoolean();

  if (VERBOSE) {
    System.out.println("TEST: maxMergeCount=" + maxMergeCount + " maxMergeThreads=" + maxMergeThreads);
  }

  ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler() {
    @Override
    protected void doMerge(MergePolicy.OneMerge merge) throws IOException {
      try {
        // Stall all incoming merges until we see
        // maxMergeCount:
        int count = runningMergeCount.incrementAndGet();
        try {
          assertTrue("count=" + count + " vs maxMergeCount=" + maxMergeCount, count <= maxMergeCount);
          enoughMergesWaiting.countDown();

          // Stall this merge until we see exactly
          // maxMergeCount merges waiting
          while (true) {
            if (enoughMergesWaiting.await(10, TimeUnit.MILLISECONDS) || failed.get()) {
              break;
            }
          }

          // Then sleep a bit to give a chance for the bug
          // (too many pending merges) to appear:
          Thread.sleep(20);
          super.doMerge(merge);
        } finally {
          runningMergeCount.decrementAndGet();
        }
      } catch (Throwable t) {
        failed.set(true);
        writer.mergeFinish(merge);
        throw new RuntimeException(t);
      }
    }
  };
  cms.setMaxMergesAndThreads(maxMergeCount, maxMergeThreads);
  iwc.setMergeScheduler(cms);
  iwc.setMaxBufferedDocs(2);

  TieredMergePolicy tmp = new TieredMergePolicy();
  iwc.setMergePolicy(tmp);
  tmp.setMaxMergeAtOnce(2);
  tmp.setSegmentsPerTier(2);

  IndexWriter w = new IndexWriter(dir, iwc);
  Document doc = new Document();
  doc.add(newField("field", "field", TextField.TYPE_NOT_STORED));
  while (enoughMergesWaiting.getCount() != 0 && !failed.get()) {
    for (int i = 0; i < 10; i++) {
      w.addDocument(doc);
    }
  }
  w.close(false);
  dir.close();
}
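// A minimal non-test sketch of the knobs these tests exercise: cap how many merges
// may be queued and how many merge threads may run at once, then install the
// scheduler on the IndexWriterConfig. The method name and parameters below are
// illustrative only (not part of the tests above); it assumes the same Lucene 4.x
// classes/imports used here, plus org.apache.lucene.analysis.Analyzer and
// org.apache.lucene.util.Version. As in the tests, maxMergeThreads should not
// exceed maxMergeCount.
private static IndexWriter openWriterWithBoundedMerges(Directory dir, Analyzer analyzer,
                                                       Version matchVersion,
                                                       int maxMergeCount, int maxMergeThreads)
    throws IOException {
  IndexWriterConfig iwc = new IndexWriterConfig(matchVersion, analyzer);

  // Allow at most maxMergeCount pending merges, executed by at most
  // maxMergeThreads concurrent threads; beyond that, incoming merges stall
  // the thread that requested them instead of piling up without bound.
  ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
  cms.setMaxMergesAndThreads(maxMergeCount, maxMergeThreads);
  iwc.setMergeScheduler(cms);

  return new IndexWriter(dir, iwc);
}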