private void testParallelCollectionAPICalls() throws IOException, SolrServerException {
  SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
  for (int i = 1; i <= NUM_COLLECTIONS; i++) {
    CollectionAdminRequest.createCollection("ocptest" + i, 4, "conf1", server, i + "");
  }

  boolean pass = false;
  int counter = 0;
  while (true) {
    int numRunningTasks = 0;
    for (int i = 1; i <= NUM_COLLECTIONS; i++) {
      if (getRequestState(i + "", server).equals("running")) {
        numRunningTasks++;
      }
    }
    if (numRunningTasks > 1) {
      pass = true;
      break;
    } else if (counter++ > 100) {
      break;
    }
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
  assertTrue(
      "Expected more than one task to be running in parallel, but that never happened.", pass);
  for (int i = 1; i <= NUM_COLLECTIONS; i++) {
    String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
    assertTrue(
        "Task " + i + " did not complete, final state: " + state, state.equals("completed"));
  }
}
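
// getRequestState, used by the polling loop above, is not shown in this section. A minimal
// sketch, assuming this SolrJ version exposes a static CollectionAdminRequest.requestStatus
// helper for the REQUESTSTATUS collection API; adapt to however the surrounding test base
// issues that call:
private String getRequestState(String requestId, SolrServer server)
    throws IOException, SolrServerException {
  // REQUESTSTATUS responds with a "status" NamedList whose "state" entry is one of
  // "submitted", "running", "completed", "failed" or "notfound"
  CollectionAdminResponse response = CollectionAdminRequest.requestStatus(requestId, server);
  NamedList status = (NamedList) response.getResponse().get("status");
  return (String) status.get("state");
}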
@BeforeClass
public static void setupCluster() throws Exception {
  final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");
  String configName = "solrCloudCollectionConfig";
  int nodeCount = 5;
  configureCluster(nodeCount).addConfig(configName, configDir).configure();

  Map<String, String> collectionProperties = new HashMap<>();
  collectionProperties.put("config", "solrconfig-tlog.xml");
  collectionProperties.put("schema", "schema.xml");

  // create a collection holding data for the "to" side of the JOIN
  int shards = 2;
  int replicas = 2;
  CollectionAdminRequest.createCollection(toColl, configName, shards, replicas)
      .setProperties(collectionProperties)
      .process(cluster.getSolrClient());

  // get the set of nodes where replicas for the "to" collection exist
  Set<String> nodeSet = new HashSet<>();
  ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
  ClusterState cs = zkStateReader.getClusterState();
  for (Slice slice : cs.getCollection(toColl).getActiveSlices()) {
    for (Replica replica : slice.getReplicas()) {
      nodeSet.add(replica.getNodeName());
    }
  }
  assertTrue(nodeSet.size() > 0);

  // deploy the "from" collection to all nodes where the "to" collection exists
  CollectionAdminRequest.createCollection(fromColl, configName, 1, 4)
      .setCreateNodeSet(StringUtils.join(nodeSet, ","))
      .setProperties(collectionProperties)
      .process(cluster.getSolrClient());

  toDocId = indexDoc(toColl, 1001, "a", null, "b");
  indexDoc(fromColl, 2001, "a", "c", null);
  Thread.sleep(1000); // so the commits fire
}
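
// indexDoc is a test helper not shown in this section. A minimal sketch, assuming
// hypothetical field names (join_s for the join key, match_s and get_s for the "match" and
// "return" sides of the JOIN) and a commit-within so the Thread.sleep above can pick up the
// commits:
protected static Integer indexDoc(
    String collection, int id, String joinField, String matchField, String getField)
    throws Exception {
  UpdateRequest up = new UpdateRequest();
  up.setCommitWithin(50);
  up.setParam("collection", collection);
  SolrInputDocument doc = new SolrInputDocument();
  Integer docId = id;
  doc.addField("id", docId);
  doc.addField("join_s", joinField);
  if (matchField != null) {
    doc.addField("match_s", matchField);
  }
  if (getField != null) {
    doc.addField("get_s", getField);
  }
  up.add(doc);
  cluster.getSolrClient().request(up);
  return docId;
}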
@BeforeClass
public static void setupCluster() throws Exception {
  configureCluster(2)
      .addConfig(
          "conf", TEST_PATH().resolve("configsets").resolve("exitable-directory").resolve("conf"))
      .configure();
  CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
      .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
  cluster
      .getSolrClient()
      .waitForState(
          COLLECTION,
          DEFAULT_TIMEOUT,
          TimeUnit.SECONDS,
          (n, c) -> DocCollection.isFullyActive(n, c, 2, 1));
}
private void testTaskExclusivity() throws IOException, SolrServerException {
  SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
  CollectionAdminRequest.createCollection("ocptest_shardsplit", 4, "conf1", server, "1000");

  CollectionAdminRequest.splitShard("ocptest_shardsplit", SHARD1, server, "1001");
  CollectionAdminRequest.splitShard("ocptest_shardsplit", SHARD2, server, "1002");

  int iterations = 0;
  while (true) {
    int runningTasks = 0;
    int completedTasks = 0;
    for (int i = 1001; i <= 1002; i++) {
      String state = getRequestState(i + "", server);
      if (state.equals("running")) {
        runningTasks++;
      }
      if (state.equals("completed")) {
        completedTasks++;
      }
      assertTrue("We have a failed SPLITSHARD task", !state.equals("failed"));
    }
    // TODO: REQUESTSTATUS might report more than one running task over multiple calls.
    // The only way to fix this is to support checking multiple requestids in a single
    // REQUESTSTATUS call.
    assertTrue(
        "Mutual exclusion failed. Found more than one task running for the same collection",
        runningTasks < 2);

    if (completedTasks == 2 || iterations++ > REQUEST_STATUS_TIMEOUT) {
      break;
    }

    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return;
    }
  }
  for (int i = 1001; i <= 1002; i++) {
    String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
    assertTrue(
        "Task " + i + " did not complete, final state: " + state, state.equals("completed"));
  }
}
private void testDeduplicationOfSubmittedTasks() throws IOException, SolrServerException {
  SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
  CollectionAdminRequest.createCollection("ocptest_shardsplit2", 4, "conf1", server, "3000");

  CollectionAdminRequest.splitShard("ocptest_shardsplit2", SHARD1, server, "3001");
  CollectionAdminRequest.splitShard("ocptest_shardsplit2", SHARD2, server, "3002");

  // Now submit another task with the same request id. At this point the previous task 3002
  // should still be in the queue, so the duplicate must be rejected.
  CollectionAdminResponse response =
      CollectionAdminRequest.splitShard("ocptest_shardsplit2", SHARD1, server, "3002");

  NamedList r = response.getResponse();
  assertEquals(
      "Duplicate request was supposed to exist but wasn't found. De-duplication of submitted task failed.",
      "Task with the same requestid already exists.",
      r.get("error"));

  for (int i = 3001; i <= 3002; i++) {
    String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
    assertTrue(
        "Task " + i + " did not complete, final state: " + state, state.equals("completed"));
  }
}
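
// getRequestStateAfterCompletion, used by several of these tests, is also not shown in this
// section. A minimal sketch, assuming it simply polls getRequestState once per second until
// the task reaches a terminal state or the timeout (in seconds) elapses:
private String getRequestStateAfterCompletion(
    String requestId, int waitForSeconds, SolrServer server)
    throws IOException, SolrServerException {
  String state = null;
  while (waitForSeconds-- > 0) {
    state = getRequestState(requestId, server);
    if (state.equals("completed") || state.equals("failed")) {
      return state;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return state;
    }
  }
  return state;
}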
private void testSolrJAPICalls() throws Exception {
  SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
  CollectionAdminResponse response;
  Map<String, NamedList<Integer>> coresStatus;
  Map<String, NamedList<Integer>> nodesStatus;

  response =
      CollectionAdminRequest.createCollection(
          "solrj_collection", 2, 2, null, null, "conf1", "myOwnField", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  coresStatus = response.getCollectionCoresStatus();
  assertEquals(4, coresStatus.size());
  for (int i = 0; i < 4; i++) {
    NamedList<Integer> status =
        coresStatus.get("solrj_collection_shard" + (i / 2 + 1) + "_replica" + (i % 2 + 1));
    assertEquals(0, (int) status.get("status"));
    assertTrue(status.get("QTime") > 0);
  }

  response =
      CollectionAdminRequest.createCollection("solrj_implicit", "shardA,shardB", "conf1", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  coresStatus = response.getCollectionCoresStatus();
  assertEquals(2, coresStatus.size());

  response = CollectionAdminRequest.createShard("solrj_implicit", "shardC", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  coresStatus = response.getCollectionCoresStatus();
  assertEquals(1, coresStatus.size());
  assertEquals(0, (int) coresStatus.get("solrj_implicit_shardC_replica1").get("status"));

  response = CollectionAdminRequest.deleteShard("solrj_implicit", "shardC", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  nodesStatus = response.getCollectionNodesStatus();
  assertEquals(1, nodesStatus.size());

  response = CollectionAdminRequest.deleteCollection("solrj_implicit", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  nodesStatus = response.getCollectionNodesStatus();
  assertEquals(2, nodesStatus.size());

  response = CollectionAdminRequest.createCollection("conf1", 4, "conf1", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());

  response = CollectionAdminRequest.reloadCollection("conf1", server);
  assertEquals(0, response.getStatus());

  response = CollectionAdminRequest.createAlias("solrj_alias", "conf1,solrj_collection", server);
  assertEquals(0, response.getStatus());

  response = CollectionAdminRequest.deleteAlias("solrj_alias", server);
  assertEquals(0, response.getStatus());

  response = CollectionAdminRequest.splitShard("conf1", "shard1", server);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  coresStatus = response.getCollectionCoresStatus();
  // SPLITSHARD produces the two sub-shards shard1_0 and shard1_1
  assertEquals(0, (int) coresStatus.get("conf1_shard1_0_replica1").get("status"));
  assertEquals(0, (int) coresStatus.get("conf1_shard1_1_replica1").get("status"));

  response = CollectionAdminRequest.deleteCollection("conf1", server);
  assertEquals(0, response.getStatus());
  nodesStatus = response.getCollectionNodesStatus();
  assertTrue(response.isSuccess());
  assertEquals(4, nodesStatus.size());

  response = CollectionAdminRequest.deleteCollection("solrj_collection", server);
  assertEquals(0, response.getStatus());
  nodesStatus = response.getCollectionNodesStatus();
  assertTrue(response.isSuccess());
  assertEquals(4, nodesStatus.size());
}
/**
 * Starts a source cluster with no CDCR configuration and indexes enough documents that at
 * least one old tlog is closed and thrown away, so that the source cluster does not have all
 * updates available in tlogs alone.
 *
 * <p>Then we start a target cluster with CDCR configuration, change the source cluster
 * configuration to use CDCR (i.e. CdcrUpdateLog, CdcrRequestHandler and CdcrUpdateProcessor),
 * and restart it.
 *
 * <p>We test that all updates eventually make it to the target cluster and that the
 * COLLECTIONCHECKPOINT call returns the same version as the last update indexed on the source.
 */
@Test
public void testConvertClusterToCdcrAndBootstrap() throws Exception {
  // start the target first so that we know its zkhost
  MiniSolrCloudCluster target =
      new MiniSolrCloudCluster(1, createTempDir("cdcr-target"), buildJettyConfig("/solr"));
  try {
    target.waitForAllNodes(30);
    System.out.println("Target zkHost = " + target.getZkServer().getZkAddress());
    System.setProperty("cdcr.target.zkHost", target.getZkServer().getZkAddress());

    // start a cluster with no cdcr
    MiniSolrCloudCluster source =
        new MiniSolrCloudCluster(1, createTempDir("cdcr-source"), buildJettyConfig("/solr"));
    try {
      source.waitForAllNodes(30);
      source.uploadConfigSet(configset("cdcr-source-disabled"), "cdcr-source");

      // create a collection with the cdcr-source-disabled configset
      CollectionAdminRequest.createCollection("cdcr-source", "cdcr-source", 1, 1)
          // TODO: investigate why this is necessary. Is it because the default is a RAM
          // directory, which deletes the tlogs on reload?
          .withProperty("solr.directoryFactory", "solr.StandardDirectoryFactory")
          .process(source.getSolrClient());

      // index 10000 docs with a hard commit every 100 documents
      CloudSolrClient sourceSolrClient = source.getSolrClient();
      sourceSolrClient.setDefaultCollection("cdcr-source");
      int numDocs = 0;
      for (int k = 0; k < 100; k++) {
        UpdateRequest req = new UpdateRequest();
        for (; numDocs < (k + 1) * 100; numDocs++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "source_" + numDocs);
          doc.addField("xyz", numDocs);
          req.add(doc);
        }
        req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
        System.out.println("Adding 100 docs with commit=true, numDocs=" + numDocs);
        req.process(sourceSolrClient);
      }

      QueryResponse response = sourceSolrClient.query(new SolrQuery("*:*"));
      assertEquals("Document mismatch on source", numDocs, response.getResults().getNumFound());

      // let's find and keep the maximum version assigned by the source cluster across all
      // our updates
      long maxVersion = Long.MIN_VALUE;
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set(CommonParams.QT, "/get");
      params.set("getVersions", numDocs);
      response = sourceSolrClient.query(params);
      List<Long> versions = (List<Long>) response.getResponse().get("versions");
      for (Long version : versions) {
        maxVersion = Math.max(maxVersion, version);
      }

      // upload the cdcr-enabled config and restart the source cluster
      source.uploadConfigSet(configset("cdcr-source"), "cdcr-source");
      JettySolrRunner runner = source.stopJettySolrRunner(0);
      source.startJettySolrRunner(runner);
      assertTrue(runner.isRunning());
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(
          "cdcr-source", source.getSolrClient().getZkStateReader(), true, true, 330);

      response = sourceSolrClient.query(new SolrQuery("*:*"));
      assertEquals(
          "Document mismatch on source after restart",
          numDocs,
          response.getResults().getNumFound());

      // setup the target cluster
      target.uploadConfigSet(configset("cdcr-target"), "cdcr-target");
CollectionAdminRequest.createCollection("cdcr-target", "cdcr-target", 1, 1) .process(target.getSolrClient()); CloudSolrClient targetSolrClient = target.getSolrClient(); targetSolrClient.setDefaultCollection("cdcr-target"); Thread.sleep(1000); cdcrStart(targetSolrClient); cdcrStart(sourceSolrClient); response = getCdcrQueue(sourceSolrClient); System.out.println("Cdcr queue response: " + response.getResponse()); long foundDocs = waitForTargetToSync(numDocs, targetSolrClient); assertEquals("Document mismatch on target after sync", numDocs, foundDocs); params = new ModifiableSolrParams(); params.set(CommonParams.ACTION, CdcrParams.CdcrAction.COLLECTIONCHECKPOINT.toString()); params.set(CommonParams.QT, "/cdcr"); response = targetSolrClient.query(params); Long checkpoint = (Long) response.getResponse().get(CdcrParams.CHECKPOINT); assertNotNull(checkpoint); assertEquals( "COLLECTIONCHECKPOINT from target cluster should have returned the maximum " + "version across all updates made to source", maxVersion, checkpoint.longValue()); } finally { source.shutdown(); } } finally { target.shutdown(); } }
public void testBootstrapWithContinousIndexingOnSourceCluster() throws Exception {
  // start the target first so that we know its zkhost
  MiniSolrCloudCluster target =
      new MiniSolrCloudCluster(1, createTempDir("cdcr-target"), buildJettyConfig("/solr"));
  target.waitForAllNodes(30);
  try {
    System.out.println("Target zkHost = " + target.getZkServer().getZkAddress());
    System.setProperty("cdcr.target.zkHost", target.getZkServer().getZkAddress());

    MiniSolrCloudCluster source =
        new MiniSolrCloudCluster(1, createTempDir("cdcr-source"), buildJettyConfig("/solr"));
    try {
      source.waitForAllNodes(30);
      source.uploadConfigSet(configset("cdcr-source"), "cdcr-source");

      CollectionAdminRequest.createCollection("cdcr-source", "cdcr-source", 1, 1)
          .withProperty("solr.directoryFactory", "solr.StandardDirectoryFactory")
          .process(source.getSolrClient());

      // index 10000 docs with a hard commit every 100 documents
      CloudSolrClient sourceSolrClient = source.getSolrClient();
      sourceSolrClient.setDefaultCollection("cdcr-source");
      int numDocs = 0;
      for (int k = 0; k < 100; k++) {
        UpdateRequest req = new UpdateRequest();
        for (; numDocs < (k + 1) * 100; numDocs++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "source_" + numDocs);
          doc.addField("xyz", numDocs);
          req.add(doc);
        }
        req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
        System.out.println("Adding 100 docs with commit=true, numDocs=" + numDocs);
        req.process(sourceSolrClient);
      }

      QueryResponse response = sourceSolrClient.query(new SolrQuery("*:*"));
      assertEquals("Document mismatch on source", numDocs, response.getResults().getNumFound());

      // setup the target cluster
      target.uploadConfigSet(configset("cdcr-target"), "cdcr-target");
      CollectionAdminRequest.createCollection("cdcr-target", "cdcr-target", 1, 1)
          .process(target.getSolrClient());
      CloudSolrClient targetSolrClient = target.getSolrClient();
      targetSolrClient.setDefaultCollection("cdcr-target");
      Thread.sleep(1000);

      cdcrStart(targetSolrClient);
      cdcrStart(sourceSolrClient);

      // keep indexing 10000 more docs on the source while the bootstrap is in progress
      int c = 0;
      for (int k = 0; k < 100; k++) {
        UpdateRequest req = new UpdateRequest();
        for (; c < (k + 1) * 100; c++, numDocs++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "source_" + numDocs);
          doc.addField("xyz", numDocs);
          req.add(doc);
        }
        req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
        System.out.println("Adding 100 docs with commit=true, numDocs=" + numDocs);
        req.process(sourceSolrClient);
      }

      response = sourceSolrClient.query(new SolrQuery("*:*"));
      assertEquals(
          "Document mismatch on source after continued indexing",
          numDocs,
          response.getResults().getNumFound());

      response = getCdcrQueue(sourceSolrClient);
      System.out.println("Cdcr queue response: " + response.getResponse());
      long foundDocs = waitForTargetToSync(numDocs, targetSolrClient);
      assertEquals("Document mismatch on target after sync", numDocs, foundDocs);
    } finally {
      source.shutdown();
    }
  } finally {
    target.shutdown();
  }
}
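
// waitForTargetToSync is also not shown here. A minimal sketch, assuming a fixed 120-second
// budget (a hypothetical value) and that committing on the target makes bootstrapped and
// forwarded updates visible before each check:
private long waitForTargetToSync(int numDocs, CloudSolrClient targetSolrClient)
    throws Exception {
  long start = System.nanoTime();
  QueryResponse response = null;
  while (TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start) < 120) {
    try {
      targetSolrClient.commit();
      response = targetSolrClient.query(new SolrQuery("*:*"));
      if (response.getResults().getNumFound() == numDocs) {
        break;
      }
    } catch (Exception e) {
      // the target collection may not be fully up yet; retry until the timeout
      System.out.println("Exception while querying target, retrying: " + e);
    }
    Thread.sleep(1000);
  }
  return response != null ? response.getResults().getNumFound() : 0;
}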