Code Example #1
  public void testConcurrentCreateAndDeleteDoesNotFail() {
    final File configDir = getFile("solr").toPath().resolve("configsets/configset-2/conf").toFile();
    final AtomicReference<Exception> failure = new AtomicReference<>();
    final int timeToRunSec = 30;
    final Thread[] threads = new Thread[10];
    for (int i = 0; i < threads.length; i++) {
      final String collectionName = "collection" + i;
      uploadConfig(configDir, collectionName);
      final SolrClient solrClient =
          new HttpSolrClient.Builder(
                  solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString())
              .build();
      threads[i] =
          new CreateDeleteSearchCollectionThread(
              "create-delete-search-" + i,
              collectionName,
              collectionName,
              timeToRunSec,
              solrClient,
              failure);
    }

    startAll(threads);
    joinAll(threads);

    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
  }
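
The startAll and joinAll helpers used above are not shown in this excerpt. A minimal sketch of what they might look like, assuming plain java.lang.Thread arrays as used above, and that joinAll must not propagate a checked exception (the test methods above declare none):

  private void startAll(final Thread[] threads) {
    for (Thread t : threads) {
      t.start();
    }
  }

  private void joinAll(final Thread[] threads) {
    for (Thread t : threads) {
      try {
        t.join();
      } catch (InterruptedException e) {
        // restore the interrupt flag and fail fast
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    }
  }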
Code Example #2
  public void testConcurrentCreateAndDeleteOverTheSameConfig() {
    final String configName = "testconfig";
    final File configDir = getFile("solr").toPath().resolve("configsets/configset-2/conf").toFile();
    uploadConfig(configDir, configName); // upload config once, to be used by all collections
    final SolrClient solrClient =
        new HttpSolrClient.Builder(
                solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString())
            .build();
    final AtomicReference<Exception> failure = new AtomicReference<>();
    final int timeToRunSec = 30;
    final Thread[] threads = new Thread[2];
    for (int i = 0; i < threads.length; i++) {
      final String collectionName = "collection" + i;
      threads[i] =
          new CreateDeleteCollectionThread(
              "create-delete-" + i, collectionName, configName, timeToRunSec, solrClient, failure);
    }

    startAll(threads);
    joinAll(threads);

    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());

    try {
      solrClient.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
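
The CreateDeleteCollectionThread class referenced above (and its CreateDeleteSearchCollectionThread variant from Code Example #1) is not part of this excerpt. A rough sketch of its likely shape, assuming SolrJ's CollectionAdminRequest and a loop bounded by timeToRunSec that records only the first failure in the shared AtomicReference:

  private static class CreateDeleteCollectionThread extends Thread {
    protected final String collectionName;
    protected final String configName;
    protected final long timeToRunSec;
    protected final SolrClient solrClient;
    protected final AtomicReference<Exception> failure;

    CreateDeleteCollectionThread(String name, String collectionName, String configName,
        long timeToRunSec, SolrClient solrClient, AtomicReference<Exception> failure) {
      super(name);
      this.collectionName = collectionName;
      this.configName = configName;
      this.timeToRunSec = timeToRunSec;
      this.solrClient = solrClient;
      this.failure = failure;
    }

    @Override
    public void run() {
      final long stopAtMs = System.currentTimeMillis() + timeToRunSec * 1000;
      while (System.currentTimeMillis() < stopAtMs && failure.get() == null) {
        try {
          // repeatedly create and delete the same collection
          CollectionAdminRequest.createCollection(collectionName, configName, 1, 1)
              .process(solrClient);
          CollectionAdminRequest.deleteCollection(collectionName).process(solrClient);
        } catch (Exception e) {
          failure.compareAndSet(null, e); // keep only the first failure
        }
      }
    }
  }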
Code Example #3
 private void uploadConfig(File configDir, String configName) {
   try {
     solrCluster.uploadConfigDir(configDir, configName);
   } catch (IOException | KeeperException | InterruptedException e) {
     throw new RuntimeException(e);
   }
 }
Code Example #4
 @Override
 @After
 public void tearDown() throws Exception {
   solrCluster.shutdown();
   super.tearDown();
 }
Code Example #5
 /**
  * Sets the tracking queue for all nodes participating in this cluster. Once this method returns,
  * all search and core admin requests distributed to shards will be submitted to the given queue.
  *
  * <p>This is equivalent to calling: <code>
  * TrackingShardHandlerFactory.setTrackingQueue(cluster.getJettySolrRunners(), queue)</code>
  *
  * @see
  *     org.apache.solr.handler.component.TrackingShardHandlerFactory#setTrackingQueue(java.util.List,
  *     java.util.Queue)
  */
 public static void setTrackingQueue(
     MiniSolrCloudCluster cluster, Queue<ShardRequestAndParams> queue) {
   setTrackingQueue(cluster.getJettySolrRunners(), queue);
 }
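
A brief usage sketch, assuming the cluster's nodes are configured with TrackingShardHandlerFactory as their shard handler factory; the method name printShardRequests and the collection name "collection1" are illustrative, not part of the original API:

  public static void printShardRequests(MiniSolrCloudCluster cluster) throws Exception {
    // collect every shard-level request the cluster fans out
    Queue<ShardRequestAndParams> shardRequests = new ConcurrentLinkedQueue<>();
    setTrackingQueue(cluster, shardRequests);

    // any distributed search from here on is recorded in the queue
    cluster.getSolrClient().query("collection1", new SolrQuery("*:*"));

    for (ShardRequestAndParams shardRequest : shardRequests) {
      System.out.println("Tracked shard request: " + shardRequest);
    }
  }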
Code Example #6
  /**
   * Starts a source cluster with no CDCR configuration and indexes enough documents that at
   * least one old tlog is closed and thrown away, so the source cluster no longer has all
   * updates available in its tlogs.
   *
   * <p>Then we start a target cluster with CDCR configuration, change the source cluster
   * configuration to use CDCR (i.e. CdcrUpdateLog, CdcrRequestHandler and CdcrUpdateProcessor),
   * and restart it.
   *
   * <p>We test that all updates eventually make it to the target cluster and that the
   * COLLECTIONCHECKPOINT call returns the same version as the last update indexed on the source.
   */
  @Test
  public void testConvertClusterToCdcrAndBootstrap() throws Exception {
    // start the target first so that we know its zkhost
    MiniSolrCloudCluster target =
        new MiniSolrCloudCluster(1, createTempDir("cdcr-target"), buildJettyConfig("/solr"));
    try {
      target.waitForAllNodes(30);
      System.out.println("Target zkHost = " + target.getZkServer().getZkAddress());
      System.setProperty("cdcr.target.zkHost", target.getZkServer().getZkAddress());

      // start a cluster with no cdcr
      MiniSolrCloudCluster source =
          new MiniSolrCloudCluster(1, createTempDir("cdcr-source"), buildJettyConfig("/solr"));
      try {
        source.waitForAllNodes(30);
        source.uploadConfigSet(configset("cdcr-source-disabled"), "cdcr-source");

        // create a collection with the cdcr-source-disabled configset
        CollectionAdminRequest.createCollection("cdcr-source", "cdcr-source", 1, 1)
            // TODO: investigate why this is necessary; by default a RAM directory is
            // selected, which appears to delete the tlogs on reload
            .withProperty("solr.directoryFactory", "solr.StandardDirectoryFactory")
            .process(source.getSolrClient());

        // index 10,000 docs with a hard commit every 100 documents
        CloudSolrClient sourceSolrClient = source.getSolrClient();
        sourceSolrClient.setDefaultCollection("cdcr-source");
        int numDocs = 0;
        for (int k = 0; k < 100; k++) {
          UpdateRequest req = new UpdateRequest();
          for (; numDocs < (k + 1) * 100; numDocs++) {
            SolrInputDocument doc = new SolrInputDocument();
            doc.addField("id", "source_" + numDocs);
            doc.addField("xyz", numDocs);
            req.add(doc);
          }
          req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
          System.out.println("Adding 100 docs with commit=true, numDocs=" + numDocs);
          req.process(sourceSolrClient);
        }

        QueryResponse response = sourceSolrClient.query(new SolrQuery("*:*"));
        assertEquals("", numDocs, response.getResults().getNumFound());

        // find the maximum version assigned by the source cluster across all our updates
        long maxVersion = Long.MIN_VALUE;
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CommonParams.QT, "/get");
        params.set("getVersions", numDocs);
        response = sourceSolrClient.query(params);
        List<Long> versions = (List<Long>) response.getResponse().get("versions");
        for (Long version : versions) {
          maxVersion = Math.max(maxVersion, version);
        }

        // upload the cdcr-enabled config and restart the source cluster
        source.uploadConfigSet(configset("cdcr-source"), "cdcr-source");
        JettySolrRunner runner = source.stopJettySolrRunner(0);
        source.startJettySolrRunner(runner);
        assertTrue(runner.isRunning());
        AbstractDistribZkTestBase.waitForRecoveriesToFinish(
            "cdcr-source", source.getSolrClient().getZkStateReader(), true, true, 330);

        response = sourceSolrClient.query(new SolrQuery("*:*"));
        assertEquals(
            "Document mismatch on source after restart",
            numDocs,
            response.getResults().getNumFound());

        // setup the target cluster
        target.uploadConfigSet(configset("cdcr-target"), "cdcr-target");
        CollectionAdminRequest.createCollection("cdcr-target", "cdcr-target", 1, 1)
            .process(target.getSolrClient());
        CloudSolrClient targetSolrClient = target.getSolrClient();
        targetSolrClient.setDefaultCollection("cdcr-target");
        Thread.sleep(1000);

        cdcrStart(targetSolrClient);
        cdcrStart(sourceSolrClient);

        response = getCdcrQueue(sourceSolrClient);
        System.out.println("Cdcr queue response: " + response.getResponse());
        long foundDocs = waitForTargetToSync(numDocs, targetSolrClient);
        assertEquals("Document mismatch on target after sync", numDocs, foundDocs);

        params = new ModifiableSolrParams();
        params.set(CommonParams.ACTION, CdcrParams.CdcrAction.COLLECTIONCHECKPOINT.toString());
        params.set(CommonParams.QT, "/cdcr");
        response = targetSolrClient.query(params);
        Long checkpoint = (Long) response.getResponse().get(CdcrParams.CHECKPOINT);
        assertNotNull(checkpoint);
        assertEquals(
            "COLLECTIONCHECKPOINT from target cluster should have returned the maximum "
                + "version across all updates made to source",
            maxVersion,
            checkpoint.longValue());
      } finally {
        source.shutdown();
      }
    } finally {
      target.shutdown();
    }
  }
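
The cdcrStart, getCdcrQueue and waitForTargetToSync helpers used here are not shown in this excerpt. A plausible sketch, assuming they drive the /cdcr request handler with CdcrParams actions in the same style as the COLLECTIONCHECKPOINT call above, and that waitForTargetToSync polls the target for up to two minutes:

  private static QueryResponse invokeCdcrAction(CloudSolrClient client, CdcrParams.CdcrAction action)
      throws IOException, SolrServerException {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(CommonParams.QT, "/cdcr");
    params.set(CommonParams.ACTION, action.toLower());
    return client.query(params);
  }

  private static void cdcrStart(CloudSolrClient client) throws IOException, SolrServerException {
    QueryResponse response = invokeCdcrAction(client, CdcrParams.CdcrAction.START);
    assertEquals("started", ((NamedList) response.getResponse().get("status")).get("process"));
  }

  private static QueryResponse getCdcrQueue(CloudSolrClient client)
      throws IOException, SolrServerException {
    return invokeCdcrAction(client, CdcrParams.CdcrAction.QUEUES);
  }

  private static long waitForTargetToSync(int numDocs, CloudSolrClient targetSolrClient)
      throws Exception {
    final long deadline = System.nanoTime() + TimeUnit.MINUTES.toNanos(2);
    long numFound = 0;
    while (System.nanoTime() < deadline) {
      QueryResponse response = targetSolrClient.query(new SolrQuery("*:*"));
      numFound = response.getResults().getNumFound();
      if (numFound == numDocs) {
        break; // target has caught up with the source
      }
      Thread.sleep(1000);
    }
    return numFound;
  }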
Code Example #7
  @Test
  public void testBootstrapWithContinousIndexingOnSourceCluster() throws Exception {
    // start the target first so that we know its zkhost
    MiniSolrCloudCluster target =
        new MiniSolrCloudCluster(1, createTempDir("cdcr-target"), buildJettyConfig("/solr"));
    target.waitForAllNodes(30);
    try {
      System.out.println("Target zkHost = " + target.getZkServer().getZkAddress());
      System.setProperty("cdcr.target.zkHost", target.getZkServer().getZkAddress());

      MiniSolrCloudCluster source =
          new MiniSolrCloudCluster(1, createTempDir("cdcr-source"), buildJettyConfig("/solr"));
      try {
        source.waitForAllNodes(30);
        source.uploadConfigSet(configset("cdcr-source"), "cdcr-source");

        CollectionAdminRequest.createCollection("cdcr-source", "cdcr-source", 1, 1)
            .withProperty("solr.directoryFactory", "solr.StandardDirectoryFactory")
            .process(source.getSolrClient());

        // index 10,000 docs with a hard commit every 100 documents
        CloudSolrClient sourceSolrClient = source.getSolrClient();
        sourceSolrClient.setDefaultCollection("cdcr-source");
        int numDocs = 0;
        for (int k = 0; k < 100; k++) {
          UpdateRequest req = new UpdateRequest();
          for (; numDocs < (k + 1) * 100; numDocs++) {
            SolrInputDocument doc = new SolrInputDocument();
            doc.addField("id", "source_" + numDocs);
            doc.addField("xyz", numDocs);
            req.add(doc);
          }
          req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
          System.out.println("Adding 100 docs with commit=true, numDocs=" + numDocs);
          req.process(sourceSolrClient);
        }

        QueryResponse response = sourceSolrClient.query(new SolrQuery("*:*"));
        assertEquals("", numDocs, response.getResults().getNumFound());

        // setup the target cluster
        target.uploadConfigSet(configset("cdcr-target"), "cdcr-target");
        CollectionAdminRequest.createCollection("cdcr-target", "cdcr-target", 1, 1)
            .process(target.getSolrClient());
        CloudSolrClient targetSolrClient = target.getSolrClient();
        targetSolrClient.setDefaultCollection("cdcr-target");
        Thread.sleep(1000);

        cdcrStart(targetSolrClient);
        cdcrStart(sourceSolrClient);

        // keep indexing on the source while CDCR bootstraps and replicates to the target
        int c = 0;
        for (int k = 0; k < 100; k++) {
          UpdateRequest req = new UpdateRequest();
          for (; c < (k + 1) * 100; c++, numDocs++) {
            SolrInputDocument doc = new SolrInputDocument();
            doc.addField("id", "source_" + numDocs);
            doc.addField("xyz", numDocs);
            req.add(doc);
          }
          req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
          System.out.println("Adding 100 docs with commit=true, numDocs=" + numDocs);
          req.process(sourceSolrClient);
        }

        response = sourceSolrClient.query(new SolrQuery("*:*"));
        assertEquals("", numDocs, response.getResults().getNumFound());

        response = getCdcrQueue(sourceSolrClient);
        System.out.println("Cdcr queue response: " + response.getResponse());
        long foundDocs = waitForTargetToSync(numDocs, targetSolrClient);
        assertEquals("Document mismatch on target after sync", numDocs, foundDocs);

      } finally {
        source.shutdown();
      }
    } finally {
      target.shutdown();
    }
  }