private void testParallelCollectionAPICalls() throws IOException, SolrServerException {
  SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
  for (int i = 1; i <= NUM_COLLECTIONS; i++) {
    CollectionAdminRequest.createCollection("ocptest" + i, 4, "conf1", server, i + "");
  }

  boolean pass = false;
  int counter = 0;
  while (true) {
    int numRunningTasks = 0;
    for (int i = 1; i <= NUM_COLLECTIONS; i++) {
      if (getRequestState(i + "", server).equals("running")) {
        numRunningTasks++;
      }
    }
    if (numRunningTasks > 1) {
      pass = true;
      break;
    } else if (counter++ > 100) {
      break;
    }
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
  assertTrue("More than one task was supposed to be running in parallel but wasn't.", pass);
  for (int i = 1; i <= NUM_COLLECTIONS; i++) {
    String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
    assertTrue("Task " + i + " did not complete, final state: " + state, state.equals("completed"));
  }
}
public void deleteAllCollections() throws Exception {
  try (ZkStateReader reader = new ZkStateReader(solrClient.getZkStateReader().getZkClient())) {
    reader.createClusterStateWatchersAndUpdate();
    for (String collection : reader.getClusterState().getCollectionStates().keySet()) {
      CollectionAdminRequest.deleteCollection(collection).process(solrClient);
    }
  }
}
private void testTaskExclusivity() throws IOException, SolrServerException {
  SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
  CollectionAdminRequest.createCollection("ocptest_shardsplit", 4, "conf1", server, "1000");

  CollectionAdminRequest.splitShard("ocptest_shardsplit", SHARD1, server, "1001");
  CollectionAdminRequest.splitShard("ocptest_shardsplit", SHARD2, server, "1002");

  int iterations = 0;
  while (true) {
    int runningTasks = 0;
    int completedTasks = 0;
    for (int i = 1001; i <= 1002; i++) {
      String state = getRequestState(i + "", server);
      if (state.equals("running")) {
        runningTasks++;
      }
      if (state.equals("completed")) {
        completedTasks++;
      }
      assertTrue("We have a failed SPLITSHARD task", !state.equals("failed"));
    }
    // TODO: REQUESTSTATUS might come back with more than one running task over multiple calls.
    // The only way to fix this is to support checking multiple requestids in a single
    // REQUESTSTATUS call.
    assertTrue(
        "Mutual exclusion failed. Found more than one task running for the same collection",
        runningTasks < 2);
    if (completedTasks == 2 || iterations++ > REQUEST_STATUS_TIMEOUT) {
      break;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return;
    }
  }
  for (int i = 1001; i <= 1002; i++) {
    String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
    assertTrue("Task " + i + " did not complete, final state: " + state, state.equals("completed"));
  }
}
@BeforeClass
public static void setupCluster() throws Exception {
  final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");
  String configName = "solrCloudCollectionConfig";
  int nodeCount = 5;
  configureCluster(nodeCount).addConfig(configName, configDir).configure();

  Map<String, String> collectionProperties = new HashMap<>();
  collectionProperties.put("config", "solrconfig-tlog.xml");
  collectionProperties.put("schema", "schema.xml");

  // create a collection holding data for the "to" side of the JOIN
  int shards = 2;
  int replicas = 2;
  CollectionAdminRequest.createCollection(toColl, configName, shards, replicas)
      .setProperties(collectionProperties)
      .process(cluster.getSolrClient());

  // get the set of nodes where replicas for the "to" collection exist
  Set<String> nodeSet = new HashSet<>();
  ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
  ClusterState cs = zkStateReader.getClusterState();
  for (Slice slice : cs.getCollection(toColl).getActiveSlices()) {
    for (Replica replica : slice.getReplicas()) {
      nodeSet.add(replica.getNodeName());
    }
  }
  assertTrue(nodeSet.size() > 0);

  // deploy the "from" collection to all nodes where the "to" collection exists
  CollectionAdminRequest.createCollection(fromColl, configName, 1, 4)
      .setCreateNodeSet(StringUtils.join(nodeSet, ","))
      .setProperties(collectionProperties)
      .process(cluster.getSolrClient());

  toDocId = indexDoc(toColl, 1001, "a", null, "b");
  indexDoc(fromColl, 2001, "a", "c", null);
  Thread.sleep(1000); // so the commits fire
}
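// The indexDoc(...) helper used above is not shown in this excerpt. Below is a minimal
// sketch of what it plausibly does, assuming the (collection, id, joinValue, matchValue,
// getValue) parameter order implied by the calls above and the join_s/match_s/get_s field
// names used in testJoins; the commitWithin value is an assumption, chosen to match the
// "so the commits fire" sleep above. Null field values are simply skipped.
static Integer indexDoc(String collection, int id, String joinValue, String matchValue, String getValue)
    throws Exception {
  UpdateRequest up = new UpdateRequest();
  up.setCommitWithin(50); // relies on the Thread.sleep(...) above to let the commit fire
  up.setParam("collection", collection);
  SolrInputDocument doc = new SolrInputDocument();
  Integer docId = id;
  doc.addField("id", docId);
  doc.addField("join_s", joinValue);
  if (matchValue != null) {
    doc.addField("match_s", matchValue);
  }
  if (getValue != null) {
    doc.addField("get_s", getValue);
  }
  up.add(doc);
  up.process(cluster.getSolrClient());
  return docId;
}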
@Test
public void test() throws Exception {
  final String collectionName = "customcollreplicadeletion";

  CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
      .setMaxShardsPerNode(5)
      .process(cluster.getSolrClient());

  DocCollection collectionState = getCollectionState(collectionName);
  Replica replica = getRandomReplica(collectionState.getSlice("a"));

  CollectionAdminRequest.deleteReplica(collectionName, "a", replica.getName())
      .process(cluster.getSolrClient());

  waitForState(
      "Expected shard 'a' to have no replicas",
      collectionName,
      (n, c) -> c.getSlice("a") == null || c.getSlice("a").getReplicas().size() == 0);
}
private void testDeduplicationOfSubmittedTasks() throws IOException, SolrServerException {
  SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
  CollectionAdminRequest.createCollection("ocptest_shardsplit2", 4, "conf1", server, "3000");

  CollectionAdminRequest.splitShard("ocptest_shardsplit2", SHARD1, server, "3001");
  CollectionAdminRequest.splitShard("ocptest_shardsplit2", SHARD2, server, "3002");

  // Now submit another task with the same id. At this point the previous "3002" task should
  // still be in the queue, so the duplicate must be rejected.
  CollectionAdminResponse response =
      CollectionAdminRequest.splitShard("ocptest_shardsplit2", SHARD1, server, "3002");

  NamedList r = response.getResponse();
  assertEquals(
      "Duplicate request was supposed to exist but wasn't found. De-duplication of submitted task failed.",
      "Task with the same requestid already exists.",
      r.get("error"));

  for (int i = 3001; i <= 3002; i++) {
    String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
    assertTrue("Task " + i + " did not complete, final state: " + state, state.equals("completed"));
  }
}
private void deleteAlias(String alias) throws SolrServerException, IOException {
  if (random().nextBoolean()) {
    // delete the alias via a raw Collections API request
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("name", alias);
    params.set("action", CollectionAction.DELETEALIAS.toString());
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0))).request(request);
  } else {
    // delete the alias via the SolrJ convenience method
    CollectionAdminRequest.deleteAlias(
        alias, createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0))));
  }
}
@BeforeClass
public static void setupCluster() throws Exception {
  configureCluster(2)
      .addConfig(
          "conf", TEST_PATH().resolve("configsets").resolve("exitable-directory").resolve("conf"))
      .configure();

  CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
      .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);

  cluster
      .getSolrClient()
      .waitForState(
          COLLECTION,
          DEFAULT_TIMEOUT,
          TimeUnit.SECONDS,
          (n, c) -> DocCollection.isFullyActive(n, c, 2, 1));
}
@AfterClass
public static void shutdown() {
  log.info(
      "DistribJoinFromCollectionTest logic complete ... deleting the {} and {} collections",
      toColl,
      fromColl);

  // try to clean up
  for (String c : new String[] {toColl, fromColl}) {
    try {
      CollectionAdminRequest.Delete req = CollectionAdminRequest.deleteCollection(c);
      req.process(cluster.getSolrClient());
    } catch (Exception e) {
      // don't fail the test
      log.warn("Could not delete collection {} after test completed", c, e);
    }
  }

  log.info("DistribJoinFromCollectionTest succeeded ... shutting down now!");
}
private void testJoins(String toColl, String fromColl, Integer toDocId, boolean isScoresTest)
    throws SolrServerException, IOException {
  // verify the join with fromIndex works
  final String fromQ = "match_s:c^2";
  CloudSolrClient client = cluster.getSolrClient();
  {
    final String joinQ =
        "{!join " + anyScoreMode(isScoresTest) + "from=join_s fromIndex=" + fromColl + " to=join_s}" + fromQ;
    QueryRequest qr =
        new QueryRequest(params("collection", toColl, "q", joinQ, "fl", "id,get_s,score"));
    QueryResponse rsp = new QueryResponse(client.request(qr), client);
    SolrDocumentList hits = rsp.getResults();
    assertTrue("Expected 1 doc, got " + hits, hits.getNumFound() == 1);
    SolrDocument doc = hits.get(0);
    assertEquals(toDocId, doc.getFirstValue("id"));
    assertEquals("b", doc.getFirstValue("get_s"));
    assertScore(isScoresTest, doc);
  }

  // negative test before creating an alias
  checkAbsentFromIndex(fromColl, toColl, isScoresTest);

  // create an alias for the fromIndex and then query through the alias
  String alias = fromColl + "Alias";
  CollectionAdminRequest.CreateAlias request = CollectionAdminRequest.createAlias(alias, fromColl);
  request.process(client);

  {
    final String joinQ =
        "{!join " + anyScoreMode(isScoresTest) + "from=join_s fromIndex=" + alias + " to=join_s}" + fromQ;
    final QueryRequest qr =
        new QueryRequest(params("collection", toColl, "q", joinQ, "fl", "id,get_s,score"));
    final QueryResponse rsp = new QueryResponse(client.request(qr), client);
    final SolrDocumentList hits = rsp.getResults();
    assertTrue("Expected 1 doc", hits.getNumFound() == 1);
    SolrDocument doc = hits.get(0);
    assertEquals(toDocId, doc.getFirstValue("id"));
    assertEquals("b", doc.getFirstValue("get_s"));
    assertScore(isScoresTest, doc);
  }

  // negative test after creating an alias
  checkAbsentFromIndex(fromColl, toColl, isScoresTest);

  {
    // verify the join doesn't match anything if there is no match in the "from" index
    final String joinQ =
        "{!join " + anyScoreMode(isScoresTest) + "from=join_s fromIndex=" + fromColl + " to=join_s}match_s:d";
    final QueryRequest qr =
        new QueryRequest(params("collection", toColl, "q", joinQ, "fl", "id,get_s,score"));
    final QueryResponse rsp = new QueryResponse(client.request(qr), client);
    final SolrDocumentList hits = rsp.getResults();
    assertTrue("Expected no hits", hits.getNumFound() == 0);
  }
}
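// The anyScoreMode(...) and assertScore(...) helpers referenced above (checkAbsentFromIndex is
// defined elsewhere in the class) are not part of this excerpt. Below are minimal sketches of
// plausible implementations, assuming the {!join} parser's "score=" local param syntax and
// that a non-scoring join returns a constant score of 1.0; the particular set of score modes
// sampled here is an assumption.
private static String anyScoreMode(boolean isScoresTest) {
  // when testing scores, pick an arbitrary scoring mode; otherwise emit no score local param
  String[] modes = {"avg", "max", "min", "total"};
  return isScoresTest ? "score=" + modes[random().nextInt(modes.length)] + " " : "";
}

private static void assertScore(boolean isScoresTest, SolrDocument doc) {
  if (isScoresTest) {
    // a real scoring join should not return the constant 1.0 score of a non-scoring join
    assertFalse(
        "score join should not return 1.0",
        "1.0".equals(doc.getFirstValue("score").toString()));
  }
}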
private void testSolrJAPICalls() throws Exception {
  SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
  CollectionAdminResponse response;
  Map<String, NamedList<Integer>> coresStatus;
  Map<String, NamedList<Integer>> nodesStatus;

  response = CollectionAdminRequest.createCollection(
      "solrj_collection", 2, 2, null, null, "conf1", "myOwnField", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  coresStatus = response.getCollectionCoresStatus();
  assertEquals(4, coresStatus.size());
  for (int i = 0; i < 4; i++) {
    NamedList<Integer> status =
        coresStatus.get("solrj_collection_shard" + (i / 2 + 1) + "_replica" + (i % 2 + 1));
    assertEquals(0, (int) status.get("status"));
    assertTrue(status.get("QTime") > 0);
  }

  response = CollectionAdminRequest.createCollection("solrj_implicit", "shardA,shardB", "conf1", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  coresStatus = response.getCollectionCoresStatus();
  assertEquals(2, coresStatus.size());

  response = CollectionAdminRequest.createShard("solrj_implicit", "shardC", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  coresStatus = response.getCollectionCoresStatus();
  assertEquals(1, coresStatus.size());
  assertEquals(0, (int) coresStatus.get("solrj_implicit_shardC_replica1").get("status"));

  response = CollectionAdminRequest.deleteShard("solrj_implicit", "shardC", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  nodesStatus = response.getCollectionNodesStatus();
  assertEquals(1, nodesStatus.size());

  response = CollectionAdminRequest.deleteCollection("solrj_implicit", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  nodesStatus = response.getCollectionNodesStatus();
  assertEquals(2, nodesStatus.size());

  response = CollectionAdminRequest.createCollection("conf1", 4, "conf1", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());

  response = CollectionAdminRequest.reloadCollection("conf1", server);
  assertEquals(0, response.getStatus());

  response = CollectionAdminRequest.createAlias("solrj_alias", "conf1,solrj_collection", server);
  assertEquals(0, response.getStatus());

  response = CollectionAdminRequest.deleteAlias("solrj_alias", server);
  assertEquals(0, response.getStatus());

  response = CollectionAdminRequest.splitShard("conf1", "shard1", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  coresStatus = response.getCollectionCoresStatus();
  assertEquals(0, (int) coresStatus.get("conf1_shard1_0_replica1").get("status"));
  assertEquals(0, (int) coresStatus.get("conf1_shard1_1_replica1").get("status"));

  response = CollectionAdminRequest.deleteCollection("conf1", server);
  assertEquals(0, response.getStatus());
  nodesStatus = response.getCollectionNodesStatus();
  assertTrue(response.isSuccess());
  assertEquals(4, nodesStatus.size());

  response = CollectionAdminRequest.deleteCollection("solrj_collection", server);
  assertEquals(0, response.getStatus());
  nodesStatus = response.getCollectionNodesStatus();
  assertTrue(response.isSuccess());
  assertEquals(4, nodesStatus.size());
}
/**
 * Starts a source cluster with no CDCR configuration and indexes enough documents that at
 * least one old tlog is closed and thrown away, so that the source cluster does not have all
 * updates available in tlogs alone.
 *
 * <p>Then we start a target cluster with CDCR configuration, change the source cluster
 * configuration to use CDCR (i.e. CdcrUpdateLog, CdcrRequestHandler and CdcrUpdateProcessor),
 * and restart it.
 *
 * <p>We test that all updates eventually make it to the target cluster and that the
 * collectioncheckpoint call returns the same version as the last update indexed on the source.
 */
@Test
public void testConvertClusterToCdcrAndBootstrap() throws Exception {
  // start the target first so that we know its zkhost
  MiniSolrCloudCluster target =
      new MiniSolrCloudCluster(1, createTempDir("cdcr-target"), buildJettyConfig("/solr"));
  try {
    target.waitForAllNodes(30);
    System.out.println("Target zkHost = " + target.getZkServer().getZkAddress());
    System.setProperty("cdcr.target.zkHost", target.getZkServer().getZkAddress());

    // start a cluster with no cdcr
    MiniSolrCloudCluster source =
        new MiniSolrCloudCluster(1, createTempDir("cdcr-source"), buildJettyConfig("/solr"));
    try {
      source.waitForAllNodes(30);
      source.uploadConfigSet(configset("cdcr-source-disabled"), "cdcr-source");

      // create a collection with the cdcr-source-disabled configset
      CollectionAdminRequest.createCollection("cdcr-source", "cdcr-source", 1, 1)
          // TODO: investigate why this is necessary; is it because the default is a RAM
          // directory, which deletes the tlogs on reloads?
          .withProperty("solr.directoryFactory", "solr.StandardDirectoryFactory")
          .process(source.getSolrClient());

      // index 10,000 docs in batches of 100, with a hard commit after every batch
      CloudSolrClient sourceSolrClient = source.getSolrClient();
      sourceSolrClient.setDefaultCollection("cdcr-source");
      int numDocs = 0;
      for (int k = 0; k < 100; k++) {
        UpdateRequest req = new UpdateRequest();
        for (; numDocs < (k + 1) * 100; numDocs++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "source_" + numDocs);
          doc.addField("xyz", numDocs);
          req.add(doc);
        }
        req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
        System.out.println("Adding 100 docs with commit=true, numDocs=" + numDocs);
        req.process(sourceSolrClient);
      }

      QueryResponse response = sourceSolrClient.query(new SolrQuery("*:*"));
      assertEquals("Document mismatch on source", numDocs, response.getResults().getNumFound());

      // let's find and keep the maximum version assigned by the source cluster across all our updates
      long maxVersion = Long.MIN_VALUE;
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set(CommonParams.QT, "/get");
      params.set("getVersions", numDocs);
      response = sourceSolrClient.query(params);
      List<Long> versions = (List<Long>) response.getResponse().get("versions");
      for (Long version : versions) {
        maxVersion = Math.max(maxVersion, version);
      }

      // upload the cdcr-enabled config and restart the source cluster
      source.uploadConfigSet(configset("cdcr-source"), "cdcr-source");
      JettySolrRunner runner = source.stopJettySolrRunner(0);
      source.startJettySolrRunner(runner);
      assertTrue(runner.isRunning());
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(
          "cdcr-source", source.getSolrClient().getZkStateReader(), true, true, 330);

      response = sourceSolrClient.query(new SolrQuery("*:*"));
      assertEquals(
          "Document mismatch on source after restart",
          numDocs,
          response.getResults().getNumFound());

      // setup the target cluster
      target.uploadConfigSet(configset("cdcr-target"), "cdcr-target");
      CollectionAdminRequest.createCollection("cdcr-target", "cdcr-target", 1, 1)
          .process(target.getSolrClient());
      CloudSolrClient targetSolrClient = target.getSolrClient();
      targetSolrClient.setDefaultCollection("cdcr-target");
      Thread.sleep(1000);

      cdcrStart(targetSolrClient);
      cdcrStart(sourceSolrClient);

      response = getCdcrQueue(sourceSolrClient);
      System.out.println("Cdcr queue response: " + response.getResponse());

      long foundDocs = waitForTargetToSync(numDocs, targetSolrClient);
      assertEquals("Document mismatch on target after sync", numDocs, foundDocs);

      params = new ModifiableSolrParams();
      params.set(CommonParams.ACTION, CdcrParams.CdcrAction.COLLECTIONCHECKPOINT.toString());
      params.set(CommonParams.QT, "/cdcr");
      response = targetSolrClient.query(params);
      Long checkpoint = (Long) response.getResponse().get(CdcrParams.CHECKPOINT);
      assertNotNull(checkpoint);
      assertEquals(
          "COLLECTIONCHECKPOINT from target cluster should have returned the maximum "
              + "version across all updates made to source",
          maxVersion,
          checkpoint.longValue());
    } finally {
      source.shutdown();
    }
  } finally {
    target.shutdown();
  }
}
@Test
public void testBootstrapWithContinousIndexingOnSourceCluster() throws Exception {
  // start the target first so that we know its zkhost
  MiniSolrCloudCluster target =
      new MiniSolrCloudCluster(1, createTempDir("cdcr-target"), buildJettyConfig("/solr"));
  target.waitForAllNodes(30);
  try {
    System.out.println("Target zkHost = " + target.getZkServer().getZkAddress());
    System.setProperty("cdcr.target.zkHost", target.getZkServer().getZkAddress());

    MiniSolrCloudCluster source =
        new MiniSolrCloudCluster(1, createTempDir("cdcr-source"), buildJettyConfig("/solr"));
    try {
      source.waitForAllNodes(30);
      source.uploadConfigSet(configset("cdcr-source"), "cdcr-source");

      CollectionAdminRequest.createCollection("cdcr-source", "cdcr-source", 1, 1)
          .withProperty("solr.directoryFactory", "solr.StandardDirectoryFactory")
          .process(source.getSolrClient());

      // index 10,000 docs in batches of 100, with a hard commit after every batch
      CloudSolrClient sourceSolrClient = source.getSolrClient();
      sourceSolrClient.setDefaultCollection("cdcr-source");
      int numDocs = 0;
      for (int k = 0; k < 100; k++) {
        UpdateRequest req = new UpdateRequest();
        for (; numDocs < (k + 1) * 100; numDocs++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "source_" + numDocs);
          doc.addField("xyz", numDocs);
          req.add(doc);
        }
        req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
        System.out.println("Adding 100 docs with commit=true, numDocs=" + numDocs);
        req.process(sourceSolrClient);
      }

      QueryResponse response = sourceSolrClient.query(new SolrQuery("*:*"));
      assertEquals("Document mismatch on source", numDocs, response.getResults().getNumFound());

      // setup the target cluster
      target.uploadConfigSet(configset("cdcr-target"), "cdcr-target");
      CollectionAdminRequest.createCollection("cdcr-target", "cdcr-target", 1, 1)
          .process(target.getSolrClient());
      CloudSolrClient targetSolrClient = target.getSolrClient();
      targetSolrClient.setDefaultCollection("cdcr-target");
      Thread.sleep(1000);

      cdcrStart(targetSolrClient);
      cdcrStart(sourceSolrClient);

      // keep indexing on the source while the bootstrap is in progress
      int c = 0;
      for (int k = 0; k < 100; k++) {
        UpdateRequest req = new UpdateRequest();
        for (; c < (k + 1) * 100; c++, numDocs++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "source_" + numDocs);
          doc.addField("xyz", numDocs);
          req.add(doc);
        }
        req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
        System.out.println("Adding 100 docs with commit=true, numDocs=" + numDocs);
        req.process(sourceSolrClient);
      }

      response = sourceSolrClient.query(new SolrQuery("*:*"));
      assertEquals(
          "Document mismatch on source after continued indexing",
          numDocs,
          response.getResults().getNumFound());

      response = getCdcrQueue(sourceSolrClient);
      System.out.println("Cdcr queue response: " + response.getResponse());

      long foundDocs = waitForTargetToSync(numDocs, targetSolrClient);
      assertEquals("Document mismatch on target after sync", numDocs, foundDocs);
    } finally {
      source.shutdown();
    }
  } finally {
    target.shutdown();
  }
}
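// The cdcrStart(...), getCdcrQueue(...) and waitForTargetToSync(...) helpers used by the two
// CDCR tests above are not included in this excerpt. Minimal sketches of plausible
// implementations follow, assuming the /cdcr request handler's START and QUEUES actions (the
// COLLECTIONCHECKPOINT action is used the same way above) and a simple poll-until-count-matches
// loop; the 120-iteration bound and 1-second sleep are assumptions, not values from the original.
private QueryResponse invokeCdcrAction(CloudSolrClient client, CdcrParams.CdcrAction action)
    throws IOException, SolrServerException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set(CommonParams.QT, "/cdcr");
  params.set(CommonParams.ACTION, action.toString());
  return client.query(params);
}

private void cdcrStart(CloudSolrClient client) throws IOException, SolrServerException {
  QueryResponse response = invokeCdcrAction(client, CdcrParams.CdcrAction.START);
  assertEquals("started", ((NamedList) response.getResponse().get("status")).get("process"));
}

private QueryResponse getCdcrQueue(CloudSolrClient client)
    throws IOException, SolrServerException {
  return invokeCdcrAction(client, CdcrParams.CdcrAction.QUEUES);
}

private long waitForTargetToSync(int numDocs, CloudSolrClient targetSolrClient) throws Exception {
  long foundDocs = 0;
  for (int i = 0; i < 120; i++) { // poll for up to ~2 minutes
    try {
      QueryResponse response = targetSolrClient.query(new SolrQuery("*:*"));
      foundDocs = response.getResults().getNumFound();
      if (foundDocs == numDocs) {
        break;
      }
    } catch (Exception e) {
      // the target may still be initializing; retry on the next iteration
    }
    Thread.sleep(1000);
  }
  return foundDocs;
}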
private String getRequestState(String requestId, SolrServer server)
    throws IOException, SolrServerException {
  CollectionAdminResponse response = CollectionAdminRequest.requestStatus(requestId, server);
  NamedList innerResponse = (NamedList) response.getResponse().get("status");
  return (String) innerResponse.get("state");
}
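// The getRequestStateAfterCompletion(...) helper used by several tests above is not shown in
// this excerpt. Below is a minimal sketch of a plausible implementation, assuming it polls
// getRequestState(...) once per second until the task reaches a terminal state ("completed"
// or "failed") or the given number of seconds has elapsed, returning the last state observed.
private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrServer server)
    throws IOException, SolrServerException {
  String state = null;
  while (waitForSeconds-- > 0) {
    state = getRequestState(requestId, server);
    if (state.equals("completed") || state.equals("failed")) {
      return state;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      break;
    }
  }
  return state;
}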
private void testLongAndShortRunningParallelApiCalls()
    throws InterruptedException, IOException, SolrServerException {
  Thread indexThread =
      new Thread() {
        @Override
        public void run() {
          Random random = random();
          int max = atLeast(random, 200);
          for (int id = 101; id < max; id++) {
            try {
              doAddDoc(String.valueOf(id));
            } catch (Exception e) {
              log.error("Exception while adding docs", e);
            }
          }
        }
      };
  indexThread.start();
  try {
    SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
    CollectionAdminRequest.splitShard("collection1", SHARD1, server, "2000");

    String state = getRequestState("2000", server);
    while (state.equals("submitted")) {
      state = getRequestState("2000", server);
      Thread.sleep(10);
    }
    assertTrue(
        "SplitShard task [2000] was supposed to be in [running] but isn't. It is [" + state + "]",
        state.equals("running"));

    // CLUSTERSTATUS is always mutually exclusive; it should return with a response before the
    // split completes
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
    params.set("collection", "collection1");
    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    server.request(request);

    state = getRequestState("2000", server);
    assertTrue(
        "After invoking CLUSTERSTATUS, SplitShard task [2000] was still supposed to be in [running] but isn't. "
            + "It is [" + state + "]",
        state.equals("running"));
  } finally {
    try {
      indexThread.join();
    } catch (InterruptedException e) {
      log.warn("Indexing thread interrupted.");
    }
  }
}
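// The doAddDoc(...) helper used by the indexing thread above is not shown in this excerpt. A
// minimal sketch of a plausible implementation, assuming it simply indexes a trivial document
// with the given id through one of the test's existing clients, which are assumed to point at
// "collection1":
private void doAddDoc(String id) throws Exception {
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", id);
  clients.get(0).add(doc);
}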