@Test
public void testFetchProjection() throws Exception {
    setUpCharacters();
    Plan plan = analyzeAndPlan("select id, name, substr(name, 2) from characters order by id");
    assertThat(plan, instanceOf(QueryThenFetch.class));
    QueryThenFetch qtf = (QueryThenFetch) plan;

    assertThat(qtf.collectNode().keepContextForFetcher(), is(true));
    assertThat(((FetchProjection) qtf.mergeNode().projections().get(1)).jobSearchContextIdToNode(), notNullValue());
    assertThat(((FetchProjection) qtf.mergeNode().projections().get(1)).jobSearchContextIdToShard(), notNullValue());

    Job job = executor.newJob(plan);
    ListenableFuture<List<TaskResult>> results = Futures.allAsList(executor.execute(job));

    final List<Object[]> resultingRows = new ArrayList<>();
    final CountDownLatch latch = new CountDownLatch(1);
    Futures.addCallback(results, new FutureCallback<List<TaskResult>>() {
        @Override
        public void onSuccess(List<TaskResult> resultList) {
            for (Row row : resultList.get(0).rows()) {
                resultingRows.add(row.materialize());
            }
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            latch.countDown();
            fail(t.getMessage());
        }
    });

    latch.await();
    assertThat(resultingRows.size(), is(2));
    assertThat(resultingRows.get(0).length, is(3));
    assertThat((Integer) resultingRows.get(0)[0], is(1));
    assertThat((BytesRef) resultingRows.get(0)[1], is(new BytesRef("Arthur")));
    assertThat((BytesRef) resultingRows.get(0)[2], is(new BytesRef("rthur")));
    assertThat((Integer) resultingRows.get(1)[0], is(2));
    assertThat((BytesRef) resultingRows.get(1)[1], is(new BytesRef("Ford")));
    assertThat((BytesRef) resultingRows.get(1)[2], is(new BytesRef("ord")));
}
@Test // Uses of JMockit API: 3
public void returningElementsFromAList() {
    final List<String> list = asList("a", "b", "c");

    new Expectations() {
        {
            mockedList.get(anyInt);
            result = list; // each element is returned on a consecutive invocation
        }
    };

    assertEquals("a", mockedList.get(0));
    assertEquals("b", mockedList.get(1));
    assertEquals("c", mockedList.get(2));
    // once the recorded results are exhausted, the last one keeps being returned
    assertEquals("c", mockedList.get(3));
}
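// A hedged variant of the test above (not part of the original suite): JMockit's
// returns(...) shorthand records the same sequence of consecutive results without
// building a List first. The test name is illustrative.
@Test
public void returningElementsUsingReturnsShorthand() {
    new Expectations() {
        {
            mockedList.get(anyInt);
            returns("a", "b", "c");
        }
    };

    assertEquals("a", mockedList.get(0));
    assertEquals("b", mockedList.get(1));
    assertEquals("c", mockedList.get(2));
}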
@Test // Uses of JMockit API: 8
public void useArgumentMatchers() {
    new Expectations() {
        {
            // Using built-in matchers:
            mockedList.get(anyInt);
            result = "element";

            // Using Hamcrest matchers:
            mockedList.get(withArgThat(is(equalTo(5))));
            result = new IllegalArgumentException();
            minTimes = 0;

            mockedList.contains(withArgThat(hasProperty("bytes")));
            result = true;

            mockedList.containsAll(withArgThat(hasSize(2)));
            result = true;
        }
    };

    assertEquals("element", mockedList.get(999));
    assertTrue(mockedList.contains("abc"));
    assertTrue(mockedList.containsAll(asList("a", "b")));

    new Verifications() {
        {
            mockedList.get(anyInt);
        }
    };
}
@Test // Uses of JMockit API: 3
public void stubAndVerifyInvocationWithoutRepeatingItInExpectationAndVerificationBlocks() {
    new Expectations() {
        {
            // Notice that this can't be done in Mockito, which requires the repetition of
            // "mockedList.get(0);" in the verification phase.
            mockedList.get(0);
            result = "first";
            times = 1;
        }
    };

    assertEquals("first", mockedList.get(0));
}
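// For contrast, a hedged sketch of the Mockito shape the comment above alludes to: the
// stubbed call has to be repeated in the verification phase. Assumes Mockito's static
// imports (when, verify) and a Mockito-mocked mockedList; not part of the original suite.
@Test
public void stubAndVerifyInvocationRepeatedInMockito() {
    when(mockedList.get(0)).thenReturn("first");

    assertEquals("first", mockedList.get(0));

    verify(mockedList).get(0); // "mockedList.get(0)" repeated here
}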
@Test // Uses of JMockit API: 3
public void stubAndVerifyInvocation() {
    // A recorded expectation is expected to occur at least once, by default.
    new Expectations() {
        {
            mockedList.get(0);
            result = "first";
        }
    };

    assertEquals("first", mockedList.get(0));

    // Note that verifying a stubbed invocation isn't "just redundant" if the test cares that the
    // invocation occurs at least once. If this is the case, then it's not safe to expect the test
    // to break without an explicit verification, because the method under test may never call the
    // stubbed one, and that would be a bug that the test should detect.
}
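// A minimal companion sketch (not from the original suite) of the point made in the comment
// above: the at-least-once requirement made explicit with a Verifications block, reusing the
// Expectations/Verifications API already shown in the tests above. The test name is illustrative.
@Test
public void stubAndVerifyInvocationExplicitly() {
    new Expectations() {
        {
            mockedList.get(0);
            result = "first";
        }
    };

    assertEquals("first", mockedList.get(0));

    // Explicit verification: fails if get(0) was never invoked, regardless of the stubbing.
    new Verifications() {
        {
            mockedList.get(0);
            times = 1;
        }
    };
}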
@Test
public void reads_a_list_of_entries() throws Exception {
    ZipContents zipContents = new ZipContents(new File("./src/test/resource/test.zip"));
    try {
        zipContents.open();
        List<ZipContentsEntry> entries = zipContents.entries();

        assertThat(entries.size(), is(6));

        ZipContentsEntry firstEntry = entries.get(0);
        assertThat(firstEntry.directory(), is("/"));
        assertThat(firstEntry.name(), is("file_1.txt"));
        assertThat(firstEntry.type(), is("txt"));
    } finally {
        zipContents.close();
    }
}
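// A hedged alternative shape for the test above, assuming ZipContents implements
// AutoCloseable (not confirmed by the original code): try-with-resources replaces
// the explicit try/finally close.
@Test
public void reads_entries_with_try_with_resources() throws Exception {
    try (ZipContents zipContents = new ZipContents(new File("./src/test/resource/test.zip"))) {
        zipContents.open();
        assertThat(zipContents.entries().size(), is(6));
    }
}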
public OrderWS buildOrder(int userId, List<Integer> itemIds, BigDecimal linePrice) {
    OrderWS order = new OrderWS();
    order.setUserId(userId);
    order.setBillingTypeId(Constants.ORDER_BILLING_POST_PAID);
    order.setPeriod(ORDER_PERIOD_ONCE); // once
    order.setCurrencyId(CURRENCY_USD);
    order.setActiveSince(new Date());
    order.setProrateFlag(Boolean.FALSE);

    ArrayList<OrderLineWS> lines = new ArrayList<OrderLineWS>(itemIds.size());
    for (int i = 0; i < itemIds.size(); i++) {
        OrderLineWS nextLine = new OrderLineWS();
        nextLine.setTypeId(Constants.ORDER_LINE_TYPE_ITEM);
        nextLine.setDescription("Order line: " + i);
        nextLine.setItemId(itemIds.get(i));
        nextLine.setQuantity(1);
        nextLine.setPrice(linePrice);
        nextLine.setAmount(nextLine.getQuantityAsDecimal().multiply(linePrice));
        lines.add(nextLine);
    }

    order.setOrderLines(lines.toArray(new OrderLineWS[lines.size()]));
    return order;
}
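// Hypothetical usage sketch for the builder above; the user id, item ids, and price are
// made-up illustration values, not fixtures from the original code.
OrderWS order = buildOrder(42, Arrays.asList(101, 102), new BigDecimal("19.99"));
// order now carries two post-paid, one-time order lines priced at 19.99 each,
// with each line's amount computed as quantity (1) times the line price.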
@Test
public void testRecoveryDiff() throws IOException, InterruptedException {
    int numDocs = 2 + random().nextInt(100);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
                random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        docs.add(doc);
    }
    long seed = random().nextLong();
    Store.MetadataSnapshot first;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        iwc.setMaxThreadStates(1);
        final ShardId shardId = new ShardId(new Index("index"), 1);
        DirectoryService directoryService = new LuceneManagedDirectoryService(random);
        Store store = new Store(shardId, ImmutableSettings.EMPTY, directoryService,
                randomDistributor(random, directoryService), new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.commit();
        writer.close();
        first = store.getMetadata();
        assertDeleteContent(store, directoryService);
        store.close();
    }
    long time = new Date().getTime();
    while (time == new Date().getTime()) {
        Thread.sleep(10); // bump the time
    }
    Store.MetadataSnapshot second;
    Store store;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        iwc.setMaxThreadStates(1);
        final ShardId shardId = new ShardId(new Index("index"), 1);
        DirectoryService directoryService = new LuceneManagedDirectoryService(random);
        store = new Store(shardId, ImmutableSettings.EMPTY, directoryService,
                randomDistributor(random, directoryService), new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.commit();
        writer.close();
        second = store.getMetadata();
    }
    Store.RecoveryDiff diff = first.recoveryDiff(second);
    assertThat(first.size(), equalTo(second.size()));
    for (StoreFileMetaData md : first) {
        assertThat(second.get(md.name()), notNullValue());
        // si files are different - containing timestamps etc
        assertThat(second.get(md.name()).isSame(md), equalTo(false));
    }
    assertThat(diff.different.size(), equalTo(first.size()));
    // in lucene 5 nothing is identical - we use random ids in file headers
    assertThat(diff.identical.size(), equalTo(0));
    assertThat(diff.missing, empty());

    // check the self diff
    Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
    assertThat(selfDiff.identical.size(), equalTo(first.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // let's add some deletes
    Random random = new Random(seed);
    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(random.nextBoolean());
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata();
    StoreFileMetaData delFile = null;
    for (StoreFileMetaData md : metadata) {
        if (md.name().endsWith(".liv")) {
            delFile = md;
            break;
        }
    }
    Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
    if (delFile != null) {
        assertThat(afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2)); // segments_N + del file
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        assertThat(afterDeleteDiff.missing.size(), equalTo(2));
    } else {
        // an entire segment must be missing (single doc segment got dropped)
        assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        assertThat(afterDeleteDiff.missing.size(), equalTo(1)); // the commit file is different
    }

    // check the self diff
    selfDiff = metadata.recoveryDiff(metadata);
    assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // add a new commit
    iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(true); // force CFS - easier to test here since we know it will add 3 files
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writer = new IndexWriter(store.directory(), iwc);
    writer.addDocument(docs.get(0));
    writer.close();

    Store.MetadataSnapshot newCommitMetaData = store.getMetadata();
    Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
    if (delFile != null) {
        // segments_N, del file, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5));
        assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different
        assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
        assertThat(newCommitDiff.missing.size(), equalTo(4)); // segments_N, cfs, cfe, si for the new segment
    } else {
        // segments_N, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 4));
        assertThat(newCommitDiff.different.size(), equalTo(0));
        // an entire segment must be missing (single doc segment got dropped) plus the commit is different
        assertThat(newCommitDiff.missing.size(), equalTo(4));
    }

    store.deleteContent();
    IOUtils.close(store);
}
@Test
public void testPendingUpdateTask() throws Exception {
    Settings settings = settingsBuilder().put("discovery.type", "local").build();
    String node_0 = internalCluster().startNode(settings);
    internalCluster().startNodeClient(settings);

    final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node_0);
    final CountDownLatch block1 = new CountDownLatch(1);
    final CountDownLatch invoked1 = new CountDownLatch(1);
    clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) {
            invoked1.countDown();
            try {
                block1.await();
            } catch (InterruptedException e) {
                fail();
            }
            return currentState;
        }

        @Override
        public void onFailure(String source, Throwable t) {
            invoked1.countDown();
            fail();
        }
    });
    invoked1.await();
    final CountDownLatch invoked2 = new CountDownLatch(9);
    for (int i = 2; i <= 10; i++) {
        clusterService.submitStateUpdateTask(Integer.toString(i), new ProcessedClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                return currentState;
            }

            @Override
            public void onFailure(String source, Throwable t) {
                fail();
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                invoked2.countDown();
            }
        });
    }

    // there might be other tasks in this node, make sure to only take the ones we add into account in this test
    // The tasks can be re-ordered, so we need to check out-of-order
    Set<String> controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"));
    List<PendingClusterTask> pendingClusterTasks = clusterService.pendingTasks();
    assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(10));
    assertThat(pendingClusterTasks.get(0).getSource().string(), equalTo("1"));
    assertThat(pendingClusterTasks.get(0).isExecuting(), equalTo(true));
    for (PendingClusterTask task : pendingClusterTasks) {
        controlSources.remove(task.getSource().string());
    }
    assertTrue(controlSources.isEmpty());

    controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"));
    PendingClusterTasksResponse response = internalCluster().clientNodeClient().admin().cluster()
            .preparePendingClusterTasks().execute().actionGet();
    assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(10));
    assertThat(response.pendingTasks().get(0).getSource().string(), equalTo("1"));
    assertThat(response.pendingTasks().get(0).isExecuting(), equalTo(true));
    for (PendingClusterTask task : response) {
        controlSources.remove(task.getSource().string());
    }
    assertTrue(controlSources.isEmpty());
    block1.countDown();
    invoked2.await();

    // whenever we test for no tasks, we need to awaitBusy since this is a live node
    assertTrue(awaitBusy(new Predicate<Object>() {
        @Override
        public boolean apply(Object input) {
            return clusterService.pendingTasks().isEmpty();
        }
    }));
    waitNoPendingTasksOnAll();

    final CountDownLatch block2 = new CountDownLatch(1);
    final CountDownLatch invoked3 = new CountDownLatch(1);
    clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) {
            invoked3.countDown();
            try {
                block2.await();
            } catch (InterruptedException e) {
                fail();
            }
            return currentState;
        }

        @Override
        public void onFailure(String source, Throwable t) {
            invoked3.countDown();
            fail();
        }
    });
    invoked3.await();
    for (int i = 2; i <= 5; i++) {
        clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                return currentState;
            }

            @Override
            public void onFailure(String source, Throwable t) {
                fail();
            }
        });
    }
    Thread.sleep(100);

    pendingClusterTasks = clusterService.pendingTasks();
    assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(5));
    controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5"));
    for (PendingClusterTask task : pendingClusterTasks) {
        controlSources.remove(task.getSource().string());
    }
    assertTrue(controlSources.isEmpty());

    response = internalCluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().get();
    assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5));
    controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5"));
    for (PendingClusterTask task : response) {
        if (controlSources.remove(task.getSource().string())) {
            assertThat(task.getTimeInQueueInMillis(), greaterThan(0L));
        }
    }
    assertTrue(controlSources.isEmpty());
    block2.countDown();
}