private void handleGET() {
  if (parts.size() == 1) {
    resp.add("solrConfig", req.getCore().getSolrConfig().toMap());
  } else {
    if (ConfigOverlay.NAME.equals(parts.get(1))) {
      resp.add(ConfigOverlay.NAME, req.getCore().getSolrConfig().getOverlay().toMap());
    } else if (RequestParams.NAME.equals(parts.get(1))) {
      if (parts.size() == 3) {
        RequestParams params = req.getCore().getSolrConfig().getRequestParams();
        MapSolrParams p = params.getParams(parts.get(2));
        Map<String, Object> m = new LinkedHashMap<>();
        m.put(ConfigOverlay.ZNODEVER, params.getZnodeVersion());
        if (p != null) {
          m.put(RequestParams.NAME, ZkNodeProps.makeMap(parts.get(2), p.getMap()));
        }
        resp.add(SolrQueryResponse.NAME, m);
      } else {
        resp.add(
            SolrQueryResponse.NAME, req.getCore().getSolrConfig().getRequestParams().toMap());
      }
    } else {
      Map<String, Object> m = req.getCore().getSolrConfig().toMap();
      resp.add("solrConfig", ZkNodeProps.makeMap(parts.get(1), m.get(parts.get(1))));
    }
  }
}
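// Illustrative sketch only (restating the branches above, not a verified API
// contract): how the "parts" path segments are expected to dispatch.
//   parts.size() == 1                            -> the whole solrConfig map
//   parts.get(1) == ConfigOverlay.NAME           -> the current config overlay
//   parts.get(1) == RequestParams.NAME           -> all request param sets
//   parts.get(1) == RequestParams.NAME, size 3   -> one param set (parts.get(2)) plus its znode version
//   any other parts.get(1)                       -> that single section of the solrConfig map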
 private SolrCore checkProps(ZkNodeProps zkProps) {
   String corename;
   SolrCore core = null;
   if (cores.getZkController().getNodeName().equals(zkProps.getStr(NODE_NAME_PROP))) {
     corename = zkProps.getStr(CORE_NAME_PROP);
     core = cores.getCore(corename);
   }
   return core;
 }
 private void handleCreateCollMessage(byte[] bytes) {
   try {
     ZkNodeProps props = ZkNodeProps.load(bytes);
     if (CollectionParams.CollectionAction.CREATE.isEqual(props.getStr("operation"))) {
       String collName = props.getStr("name");
       if (collName != null) collectionsSet.add(collName);
     }
    } catch (Exception e) {
      // Ignore malformed messages; only well-formed CREATE operations are tracked.
    }
 }
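  // Sketch of the kind of message handleCreateCollMessage expects, built with the
  // same helpers used elsewhere in this code (ZkNodeProps.makeMap, ZkStateReader.toJSON).
  // The collection name is an arbitrary example value; this method exists only for illustration.
  private byte[] exampleCreateCollMessage() {
    Map<String, Object> msg =
        ZkNodeProps.makeMap(
            "operation", CollectionParams.CollectionAction.CREATE.toLower(),
            "name", "an_example_collection");
    return ZkStateReader.toJSON(new ZkNodeProps(msg));
  }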
  private void addShardToZk(SolrZkClient zkClient, String shardsPath, String zkNodeName, String url)
      throws IOException, KeeperException, InterruptedException {

    ZkNodeProps props = new ZkNodeProps();
    props.put(ZkStateReader.URL_PROP, url);
    props.put(ZkStateReader.NODE_NAME, TEST_NODE_NAME);
    byte[] bytes = props.store();

    zkClient.create(shardsPath + "/" + zkNodeName, bytes, CreateMode.PERSISTENT);
  }
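  // Data written at shardsPath + "/" + zkNodeName by addShardToZk (sketch): a
  // serialized ZkNodeProps with two entries, keyed by ZkStateReader.URL_PROP and
  // ZkStateReader.NODE_NAME. testReadShards below reads these back through the
  // cluster state and asserts on exactly those two properties.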
  @Test
  public void testReadConfigName() throws Exception {
    String zkDir = dataDir.getAbsolutePath() + File.separator + "zookeeper/server1/data";

    ZkTestServer server = new ZkTestServer(zkDir);
    try {
      server.run();

      AbstractZkTestCase.makeSolrZkNode(server.getZkHost());

      SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
      String actualConfigName = "firstConfig";

      zkClient.makePath(ZkController.CONFIGS_ZKNODE + "/" + actualConfigName);

      ZkNodeProps props = new ZkNodeProps();
      props.put("configName", actualConfigName);
      zkClient.makePath(
          ZkStateReader.COLLECTIONS_ZKNODE + "/" + COLLECTION_NAME,
          props.store(),
          CreateMode.PERSISTENT);

      if (DEBUG) {
        zkClient.printLayoutToStdOut();
      }
      zkClient.close();
      ZkController zkController =
          new ZkController(server.getZkAddress(), TIMEOUT, TIMEOUT, "localhost", "8983", "/solr");
      try {
        String configName = zkController.readConfigName(COLLECTION_NAME);
        assertEquals(actualConfigName, configName);
      } finally {
        zkController.close();
      }
    } finally {

      server.shutdown();
    }
  }
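  // ZooKeeper layout created by testReadConfigName (sketch; paths are built from
  // ZkController.CONFIGS_ZKNODE and ZkStateReader.COLLECTIONS_ZKNODE):
  //   <CONFIGS_ZKNODE>/firstConfig            -> the config node
  //   <COLLECTIONS_ZKNODE>/<COLLECTION_NAME>  -> props containing configName=firstConfig
  // readConfigName(COLLECTION_NAME) is expected to resolve "firstConfig" from the
  // collection node's configName property.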
  private String getUrlFromZk(String collection) {
    ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
    Map<String, Slice> slices = clusterState.getSlicesMap(collection);

    if (slices == null) {
      throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection: " + collection);
    }

    for (Map.Entry<String, Slice> entry : slices.entrySet()) {
      Slice slice = entry.getValue();
      Map<String, Replica> shards = slice.getReplicasMap();
      Set<Map.Entry<String, Replica>> shardEntries = shards.entrySet();
      for (Map.Entry<String, Replica> shardEntry : shardEntries) {
        final ZkNodeProps node = shardEntry.getValue();
        if (clusterState.liveNodesContain(node.getStr(ZkStateReader.NODE_NAME_PROP))) {
          return ZkCoreNodeProps.getCoreUrl(
              node.getStr(ZkStateReader.BASE_URL_PROP),
              collection); // new ZkCoreNodeProps(node).getCoreUrl();
        }
      }
    }

    throw new RuntimeException("Could not find a live node for collection: " + collection);
  }
  /**
   * Returns true if the queue contains a task whose message maps the given requestIdKey (e.g. the
   * async id key) to the specified requestId.
   */
  public boolean containsTaskWithRequestId(String requestIdKey, String requestId)
      throws KeeperException, InterruptedException {

    List<String> childNames = zookeeper.getChildren(dir, null, true);
    stats.setQueueLength(childNames.size());
    for (String childName : childNames) {
      if (childName != null && childName.startsWith(PREFIX)) {
        try {
          byte[] data = zookeeper.getData(dir + "/" + childName, null, null, true);
          if (data != null) {
            ZkNodeProps message = ZkNodeProps.load(data);
            if (message.containsKey(requestIdKey)) {
              LOG.debug("Queued task has {} = {}", requestIdKey, message.get(requestIdKey));
              if (message.get(requestIdKey).equals(requestId)) return true;
            }
          }
        } catch (KeeperException.NoNodeException e) {
          // Another client removed the node first, try next
        }
      }
    }

    return false;
  }
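  // Usage sketch (hypothetical caller, not from the original code): before
  // submitting a request carrying an async id, check whether a task with that id
  // is already queued. The "async" key name is an assumption for illustration.
  //
  //   if (queue.containsTaskWithRequestId("async", asyncId)) {
  //     // reject the request as a duplicate
  //   }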
  protected void issueCreateJob(
      Integer numberOfSlices,
      Integer replicationFactor,
      Integer maxShardsPerNode,
      List<String> createNodeList,
      boolean sendCreateNodeList,
      boolean createNodeSetShuffle) {
    Map<String, Object> propMap =
        ZkNodeProps.makeMap(
            Overseer.QUEUE_OPERATION,
            CollectionParams.CollectionAction.CREATE.toLower(),
            ZkStateReader.REPLICATION_FACTOR,
            replicationFactor.toString(),
            "name",
            COLLECTION_NAME,
            "collection.configName",
            CONFIG_NAME,
            OverseerCollectionProcessor.NUM_SLICES,
            numberOfSlices.toString(),
            ZkStateReader.MAX_SHARDS_PER_NODE,
            maxShardsPerNode.toString());
    if (sendCreateNodeList) {
      propMap.put(
          OverseerCollectionProcessor.CREATE_NODE_SET,
          (createNodeList != null) ? StrUtils.join(createNodeList, ',') : null);
      if (OverseerCollectionProcessor.CREATE_NODE_SET_SHUFFLE_DEFAULT != createNodeSetShuffle
          || random().nextBoolean()) {
        propMap.put(OverseerCollectionProcessor.CREATE_NODE_SET_SHUFFLE, createNodeSetShuffle);
      }
    }

    ZkNodeProps props = new ZkNodeProps(propMap);
    QueueEvent qe =
        new QueueEvent("id", ZkStateReader.toJSON(props), null) {
          @Override
          public void setBytes(byte[] bytes) {
            lastProcessMessageResult = SolrResponse.deserialize(bytes);
          }
        };
    queue.add(qe);
  }
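  // Illustrative call (arbitrary test values): enqueue a CREATE job for two
  // slices with a replication factor of one, letting the overseer pick nodes and
  // keeping the default shuffle behaviour.
  //
  //   issueCreateJob(2, 1, 1, null, false,
  //       OverseerCollectionProcessor.CREATE_NODE_SET_SHUFFLE_DEFAULT);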
  private void testCollectionsAPI() throws Exception {

    // TODO: fragile - because we don't pass collection.configName, this will only
    // find a default if a config set with a name matching the collection name is found, or
    // if there is only one config set. That, plus the fact that other tests run first in this
    // env, makes this pretty fragile.

    // create new collections rapid fire
    Map<String, List<Integer>> collectionInfos = new HashMap<String, List<Integer>>();
    int cnt = random().nextInt(TEST_NIGHTLY ? 6 : 3) + 1;

    for (int i = 0; i < cnt; i++) {
      int numShards = _TestUtil.nextInt(random(), 0, shardCount) + 1;
      int replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 1;
      int maxShardsPerNode =
          (((numShards * replicationFactor)
                  / getCommonCloudSolrServer()
                      .getZkStateReader()
                      .getClusterState()
                      .getLiveNodes()
                      .size()))
              + 1;

      CloudSolrServer client = null;
      try {
        if (i == 0) {
          // Test that we can create a collection through CloudSolrServer when
          // no default collection has been set.
          // This matters because you want to be able to create your first
          // collection using CloudSolrServer, and in that case there is
          // nothing reasonable to set as the default collection.
          client = createCloudClient(null);
        } else if (i == 1) {
          // Test that we can create a collection through CloudSolrServer when
          // the default collection is set to a collection that does not exist yet.
          // This matters because you want to be able to create your first
          // collection using CloudSolrServer, and in that case there is
          // nothing reasonable to set as the default collection. You might,
          // however, want to use the same CloudSolrServer for the entire
          // lifetime of your client application, so it is convenient to set
          // the default collection on it once and for all and then use that
          // same CloudSolrServer to create the collection.
          client = createCloudClient("awholynewcollection_" + i);
        }
        if (secondConfigSet) {
          createCollection(
              collectionInfos,
              "awholynewcollection_" + i,
              numShards,
              replicationFactor,
              maxShardsPerNode,
              client,
              null,
              "conf2");
        } else {
          createCollection(
              collectionInfos,
              "awholynewcollection_" + i,
              numShards,
              replicationFactor,
              maxShardsPerNode,
              client,
              null);
        }
      } finally {
        if (client != null) client.shutdown();
      }
    }

    Set<Entry<String, List<Integer>>> collectionInfosEntrySet = collectionInfos.entrySet();
    for (Entry<String, List<Integer>> entry : collectionInfosEntrySet) {
      String collection = entry.getKey();
      List<Integer> list = entry.getValue();
      checkForCollection(collection, list, null);

      String url = getUrlFromZk(collection);

      HttpSolrServer collectionClient = new HttpSolrServer(url);

      // poll for a second - it can take a moment before we are ready to serve
      waitForNon403or404or503(collectionClient);
    }

    // sometimes we restart one of the jetty nodes
    if (random().nextBoolean()) {
      JettySolrRunner jetty = jettys.get(random().nextInt(jettys.size()));
      ChaosMonkey.stop(jetty);
      ChaosMonkey.start(jetty);

      for (Entry<String, List<Integer>> entry : collectionInfosEntrySet) {
        String collection = entry.getKey();
        List<Integer> list = entry.getValue();
        checkForCollection(collection, list, null);

        String url = getUrlFromZk(collection);

        HttpSolrServer collectionClient = new HttpSolrServer(url);

        // poll for a second - it can take a moment before we are ready to serve
        waitForNon403or404or503(collectionClient);
      }
    }

    // sometimes we restart zookeeper
    if (random().nextBoolean()) {
      zkServer.shutdown();
      zkServer = new ZkTestServer(zkServer.getZkDir(), zkServer.getPort());
      zkServer.run();
    }

    // sometimes we cause a connection loss - sometimes it will hit the overseer
    if (random().nextBoolean()) {
      JettySolrRunner jetty = jettys.get(random().nextInt(jettys.size()));
      ChaosMonkey.causeConnectionLoss(jetty);
    }

    ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
    for (int j = 0; j < cnt; j++) {
      waitForRecoveriesToFinish("awholynewcollection_" + j, zkStateReader, false);

      if (secondConfigSet) {
        // let's see if they are using the second config set
        byte[] data =
            zkStateReader
                .getZkClient()
                .getData(
                    ZkStateReader.COLLECTIONS_ZKNODE + "/" + "awholynewcollection_" + j,
                    null,
                    null,
                    true);
        assertNotNull(data);
        ZkNodeProps props = ZkNodeProps.load(data);
        String configName = props.getStr(ZkController.CONFIGNAME_PROP);
        assertEquals("conf2", configName);
      }
    }

    checkInstanceDirs(jettys.get(0));

    List<String> collectionNameList = new ArrayList<String>();
    collectionNameList.addAll(collectionInfos.keySet());
    String collectionName = collectionNameList.get(random().nextInt(collectionNameList.size()));

    String url = getUrlFromZk(collectionName);

    HttpSolrServer collectionClient = new HttpSolrServer(url);

    // let's try using the SolrJ client to index a couple of documents
    SolrInputDocument doc1 = getDoc(id, 6, i1, -600, tlong, 600, t1, "humpty dumpy sat on a wall");
    SolrInputDocument doc2 =
        getDoc(id, 7, i1, -600, tlong, 600, t1, "humpty dumpy3 sat on a walls");
    SolrInputDocument doc3 =
        getDoc(id, 8, i1, -600, tlong, 600, t1, "humpty dumpy2 sat on a walled");

    collectionClient.add(doc1);

    collectionClient.add(doc2);

    collectionClient.add(doc3);

    collectionClient.commit();

    assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());

    // let's try a collection reload

    // get core open times
    Map<String, Long> urlToTimeBefore = new HashMap<String, Long>();
    collectStartTimes(collectionName, urlToTimeBefore);
    assertTrue(urlToTimeBefore.size() > 0);
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionAction.RELOAD.toString());
    params.set("name", collectionName);
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");

    // we can use this client because we just want the base url
    final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));

    createNewSolrServer("", baseUrl).request(request);

    // reloads may take a short while
    boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
    assertTrue("some core start times did not change on reload", allTimesAreCorrect);

    waitForRecoveriesToFinish("awholynewcollection_" + (cnt - 1), zkStateReader, false);

    // remove a collection
    params = new ModifiableSolrParams();
    params.set("action", CollectionAction.DELETE.toString());
    params.set("name", collectionName);
    request = new QueryRequest(params);
    request.setPath("/admin/collections");

    createNewSolrServer("", baseUrl).request(request);

    // ensure it's out of the cluster state
    checkForMissingCollection(collectionName);

    // collectionNameList.remove(collectionName);

    // remove an unknown collection
    params = new ModifiableSolrParams();
    params.set("action", CollectionAction.DELETE.toString());
    params.set("name", "unknown_collection");
    request = new QueryRequest(params);
    request.setPath("/admin/collections");

    boolean exp = false;
    try {
      createNewSolrServer("", baseUrl).request(request);
    } catch (SolrException e) {
      exp = true;
    }
    assertTrue("Expected exception", exp);

    // creating another collection should still work
    params = new ModifiableSolrParams();
    params.set("action", CollectionAction.CREATE.toString());

    params.set("numShards", 1);
    params.set(REPLICATION_FACTOR, 2);
    collectionName = "acollectionafterbaddelete";

    params.set("name", collectionName);
    if (secondConfigSet) {
      params.set("collection.configName", "conf1");
    }
    request = new QueryRequest(params);
    request.setPath("/admin/collections");
    createNewSolrServer("", baseUrl).request(request);

    List<Integer> list = new ArrayList<Integer>(2);
    list.add(1);
    list.add(2);
    checkForCollection(collectionName, list, null);

    url = getUrlFromZk(collectionName);

    collectionClient = new HttpSolrServer(url);

    // poll for a second - it can take a moment before we are ready to serve
    waitForNon403or404or503(collectionClient);

    for (int j = 0; j < cnt; j++) {
      waitForRecoveriesToFinish(collectionName, zkStateReader, false);
    }

    // test maxShardsPerNode
    int numLiveNodes =
        getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes().size();
    int numShards = (numLiveNodes / 2) + 1;
    int replicationFactor = 2;
    int maxShardsPerNode = 1;
    collectionInfos = new HashMap<String, List<Integer>>();
    CloudSolrServer client = createCloudClient("awholynewcollection_" + cnt);
    try {
      exp = false;
      try {
        createCollection(
            collectionInfos,
            "awholynewcollection_" + cnt,
            numShards,
            replicationFactor,
            maxShardsPerNode,
            client,
            null,
            "conf1");
      } catch (SolrException e) {
        exp = true;
      }
      assertTrue("expected exception", exp);
    } finally {
      client.shutdown();
    }

    // Test createNodeSet
    numLiveNodes =
        getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes().size();
    List<String> createNodeList = new ArrayList<String>();
    int numOfCreateNodes = numLiveNodes / 2;
    assertFalse(
        "createNodeSet test is pointless with only " + numLiveNodes + " nodes running",
        numOfCreateNodes == 0);
    int i = 0;
    for (String liveNode :
        getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes()) {
      if (i < numOfCreateNodes) {
        createNodeList.add(liveNode);
        i++;
      } else {
        break;
      }
    }
    maxShardsPerNode = 2;
    numShards = createNodeList.size() * maxShardsPerNode;
    replicationFactor = 1;
    collectionInfos = new HashMap<String, List<Integer>>();
    client = createCloudClient("awholynewcollection_" + (cnt + 1));
    try {
      createCollection(
          collectionInfos,
          "awholynewcollection_" + (cnt + 1),
          numShards,
          replicationFactor,
          maxShardsPerNode,
          client,
          StrUtils.join(createNodeList, ','),
          "conf1");
    } finally {
      client.shutdown();
    }
    checkForCollection(
        collectionInfos.keySet().iterator().next(),
        collectionInfos.entrySet().iterator().next().getValue(),
        createNodeList);

    checkNoTwoShardsUseTheSameIndexDir();
  }
  @Test
  public void testReadShards() throws Exception {
    String zkDir = dataDir.getAbsolutePath() + File.separator + "zookeeper/server1/data";
    ZkTestServer server = null;
    SolrZkClient zkClient = null;
    ZkController zkController = null;
    try {
      server = new ZkTestServer(zkDir);
      server.run();

      AbstractZkTestCase.makeSolrZkNode(server.getZkHost());

      zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
      String shardsPath = "/collections/collection1/shards/shardid1";
      zkClient.makePath(shardsPath);

      addShardToZk(zkClient, shardsPath, SHARD1, URL1);
      addShardToZk(zkClient, shardsPath, SHARD2, URL2);
      addShardToZk(zkClient, shardsPath, SHARD3, URL3);

      if (DEBUG) {
        zkClient.printLayoutToStdOut();
      }

      zkController =
          new ZkController(server.getZkAddress(), TIMEOUT, 1000, "localhost", "8983", "solr");

      zkController.getZkStateReader().updateCloudState(true);
      CloudState cloudInfo = zkController.getCloudState();
      Map<String, Slice> slices = cloudInfo.getSlices("collection1");
      assertNotNull(slices);

      for (Slice slice : slices.values()) {
        Map<String, ZkNodeProps> shards = slice.getShards();
        if (DEBUG) {
          for (String shardName : shards.keySet()) {
            ZkNodeProps props = shards.get(shardName);
            System.out.println("shard:" + shardName);
            System.out.println("props:" + props.toString());
          }
        }
        assertNotNull(shards.get(SHARD1));
        assertNotNull(shards.get(SHARD2));
        assertNotNull(shards.get(SHARD3));

        ZkNodeProps props = shards.get(SHARD1);
        assertEquals(URL1, props.get(ZkStateReader.URL_PROP));
        assertEquals(TEST_NODE_NAME, props.get(ZkStateReader.NODE_NAME));

        props = shards.get(SHARD2);
        assertEquals(URL2, props.get(ZkStateReader.URL_PROP));
        assertEquals(TEST_NODE_NAME, props.get(ZkStateReader.NODE_NAME));

        props = shards.get(SHARD3);
        assertEquals(URL3, props.get(ZkStateReader.URL_PROP));
        assertEquals(TEST_NODE_NAME, props.get(ZkStateReader.NODE_NAME));
      }

    } finally {
      if (zkClient != null) {
        zkClient.close();
      }
      if (zkController != null) {
        zkController.close();
      }
      if (server != null) {
        server.shutdown();
      }
    }
  }
  protected NamedList<Object> sendRequest(SolrRequest request)
      throws SolrServerException, IOException {
    connect();

    ClusterState clusterState = zkStateReader.getClusterState();

    boolean sendToLeaders = false;
    List<String> replicas = null;

    if (request instanceof IsUpdateRequest) {
      if (request instanceof UpdateRequest) {
        NamedList<Object> response = directUpdate((AbstractUpdateRequest) request, clusterState);
        if (response != null) {
          return response;
        }
      }
      sendToLeaders = true;
      replicas = new ArrayList<>();
    }

    SolrParams reqParams = request.getParams();
    if (reqParams == null) {
      reqParams = new ModifiableSolrParams();
    }
    List<String> theUrlList = new ArrayList<>();
    if (request.getPath().equals("/admin/collections")
        || request.getPath().equals("/admin/cores")) {
      Set<String> liveNodes = clusterState.getLiveNodes();
      for (String liveNode : liveNodes) {
        theUrlList.add(zkStateReader.getBaseUrlForNodeName(liveNode));
      }
    } else {
      String collection = reqParams.get(UpdateParams.COLLECTION, defaultCollection);

      if (collection == null) {
        throw new SolrServerException(
            "No collection param specified on request and no default collection has been set.");
      }

      Set<String> collectionsList = getCollectionList(clusterState, collection);
      if (collectionsList.size() == 0) {
        throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection: " + collection);
      }

      String shardKeys = reqParams.get(ShardParams._ROUTE_);
      if (shardKeys == null) {
        shardKeys = reqParams.get(ShardParams.SHARD_KEYS); // deprecated
      }

      // TODO: not a big deal because of the caching, but we could avoid looking
      // at every shard when getting leaders if we tweaked some things.

      // Retrieve slices from the cloud state and, for each collection specified,
      // add them to the map of slices.
      Map<String, Slice> slices = new HashMap<>();
      for (String collectionName : collectionsList) {
        DocCollection col = getDocCollection(clusterState, collectionName);
        Collection<Slice> routeSlices = col.getRouter().getSearchSlices(shardKeys, reqParams, col);
        ClientUtils.addSlices(slices, collectionName, routeSlices, true);
      }
      Set<String> liveNodes = clusterState.getLiveNodes();

      List<String> leaderUrlList = null;
      List<String> urlList = null;
      List<String> replicasList = null;

      // build a map of unique nodes
      // TODO: allow filtering by group, role, etc
      Map<String, ZkNodeProps> nodes = new HashMap<>();
      List<String> urlList2 = new ArrayList<>();
      for (Slice slice : slices.values()) {
        for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
          ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
          String node = coreNodeProps.getNodeName();
          if (!liveNodes.contains(coreNodeProps.getNodeName())
              || !coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) continue;
          if (nodes.put(node, nodeProps) == null) {
            // Build the core URL once, then route it to the leader list or the
            // replica fallback list depending on the request type.
            String url;
            if (reqParams.get(UpdateParams.COLLECTION) == null) {
              url =
                  ZkCoreNodeProps.getCoreUrl(
                      nodeProps.getStr(ZkStateReader.BASE_URL_PROP), defaultCollection);
            } else {
              url = coreNodeProps.getCoreUrl();
            }
            if (!sendToLeaders || coreNodeProps.isLeader()) {
              urlList2.add(url);
            } else {
              replicas.add(url);
            }
          }
        }
      }

      if (sendToLeaders) {
        leaderUrlList = urlList2;
        replicasList = replicas;
      } else {
        urlList = urlList2;
      }

      if (sendToLeaders) {
        theUrlList = new ArrayList<>(leaderUrlList.size());
        theUrlList.addAll(leaderUrlList);
      } else {
        theUrlList = new ArrayList<>(urlList.size());
        theUrlList.addAll(urlList);
      }
      if (theUrlList.isEmpty()) {
        throw new SolrException(
            SolrException.ErrorCode.INVALID_STATE, "Not enough nodes to handle the request");
      }

      Collections.shuffle(theUrlList, rand);
      if (sendToLeaders) {
        ArrayList<String> theReplicas = new ArrayList<>(replicasList.size());
        theReplicas.addAll(replicasList);
        Collections.shuffle(theReplicas, rand);
        theUrlList.addAll(theReplicas);
      }
    }

    LBHttpSolrServer.Req req = new LBHttpSolrServer.Req(request, theUrlList);
    LBHttpSolrServer.Rsp rsp = lbServer.request(req);
    return rsp.getResponse();
  }
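  // Routing summary for sendRequest above (restating the code, not new behaviour):
  //   - UpdateRequests are first offered to directUpdate(); if it handles the
  //     request, its response is returned immediately.
  //   - /admin/collections and /admin/cores requests are fanned out to the base
  //     URLs of all live nodes.
  //   - Everything else goes to the active replicas of the resolved collection(s):
  //     for updates, leader URLs are shuffled to the front and non-leader replicas
  //     appended as fallbacks; for other requests, all active replica URLs are shuffled.
  //   - The final URL list is handed to LBHttpSolrServer, which tries the entries in order.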