  /**
   * Builds the list of forwarding targets: one {@link StdNode} for every live replica of the
   * given collection, skipping the replica whose core node name equals {@code shardZkNodeName}.
   * Returns null when no such replica exists.
   */
  private List<Node> getCollectionUrls(
      SolrQueryRequest req, String collection, String shardZkNodeName) {
    ClusterState clusterState =
        req.getCore().getCoreDescriptor().getCoreContainer().getZkController().getClusterState();
    List<Node> urls = new ArrayList<Node>();
    Map<String, Slice> slices = clusterState.getSlicesMap(collection);
    if (slices == null) {
      throw new ZooKeeperException(
          ErrorCode.BAD_REQUEST, "Could not find collection in zk: " + collection);
    }
    for (Map.Entry<String, Slice> sliceEntry : slices.entrySet()) {
      Slice slice = sliceEntry.getValue();

      Map<String, Replica> shardMap = slice.getReplicasMap();

      for (Entry<String, Replica> entry : shardMap.entrySet()) {
        ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue());
        if (clusterState.liveNodesContain(nodeProps.getNodeName())
            && !entry.getKey().equals(shardZkNodeName)) {
          urls.add(new StdNode(nodeProps));
        }
      }
    }
    if (urls.isEmpty()) {
      return null;
    }
    return urls;
  }
    /**
     * Retrieve all requests recorded by this queue that were sent to the given collection and
     * shard.
     *
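     * <p>Illustrative usage sketch (the tracking queue and {@code zkStateReader} variables below
     * are hypothetical placeholders):
     *
     * <pre>{@code
     * List<ShardRequestAndParams> sent =
     *     trackingQueue.getShardRequests(zkStateReader, "collection1", "shard1");
     * System.out.println("requests sent to shard1: " + sent.size());
     * }</pre>
     *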
     * @param zkStateReader the {@link org.apache.solr.common.cloud.ZkStateReader} from which
     *     cluster state is read
     * @param collectionName the collection whose recorded requests should be returned
     * @param shardId the shard whose recorded requests should be returned
     * @return a list of {@link
     *     org.apache.solr.handler.component.TrackingShardHandlerFactory.ShardRequestAndParams},
     *     or an empty list if none are found
     */
    public List<ShardRequestAndParams> getShardRequests(
        ZkStateReader zkStateReader, String collectionName, String shardId) {
      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
      assert collection != null;
      Slice slice = collection.getSlice(shardId);
      assert slice != null;

      for (Map.Entry<String, List<ShardRequestAndParams>> entry : requests.entrySet()) {
        // multiple shard addresses may be present separated by '|'
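        // e.g. a key might look like (hypothetical URLs):
        //   "http://127.0.0.1:8983/solr/collection1_shard1_replica1/|http://127.0.0.1:7574/solr/collection1_shard1_replica2/"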
        List<String> list = StrUtils.splitSmart(entry.getKey(), '|');
        for (Map.Entry<String, Replica> replica : slice.getReplicasMap().entrySet()) {
          String coreUrl = new ZkCoreNodeProps(replica.getValue()).getCoreUrl();
          if (list.contains(coreUrl)) {
            return new ArrayList<>(entry.getValue());
          }
        }
      }
      return Collections.emptyList();
    }
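  /**
   * Looks up the cluster state and returns a URL pointing at the given collection on the first
   * live node found hosting one of its replicas; fails if the collection is unknown or has no
   * live replica.
   */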
  private String getUrlFromZk(String collection) {
    ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
    Map<String, Slice> slices = clusterState.getSlicesMap(collection);

    if (slices == null) {
      throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection:" + collection);
    }

    for (Map.Entry<String, Slice> entry : slices.entrySet()) {
      Slice slice = entry.getValue();
      Map<String, Replica> shards = slice.getReplicasMap();
      Set<Map.Entry<String, Replica>> shardEntries = shards.entrySet();
      for (Map.Entry<String, Replica> shardEntry : shardEntries) {
        final ZkNodeProps node = shardEntry.getValue();
        if (clusterState.liveNodesContain(node.getStr(ZkStateReader.NODE_NAME_PROP))) {
          return ZkCoreNodeProps.getCoreUrl(
              node.getStr(ZkStateReader.BASE_URL_PROP), collection);
        }
      }
    }

    throw new RuntimeException("Could not find a live node for collection:" + collection);
  }
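  /**
   * Resolves the list of URLs the request may be sent to and forwards it through the
   * load-balancing server: collection/core admin requests are offered to every live node,
   * updates are routed to shard leaders first (other replicas appended as fallbacks), and all
   * other requests go to any live, active replica of the target collection(s).
   */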
  protected NamedList<Object> sendRequest(SolrRequest request)
      throws SolrServerException, IOException {
    connect();

    ClusterState clusterState = zkStateReader.getClusterState();

    boolean sendToLeaders = false;
    List<String> replicas = null;

    if (request instanceof IsUpdateRequest) {
      if (request instanceof UpdateRequest) {
        NamedList<Object> response = directUpdate((AbstractUpdateRequest) request, clusterState);
        if (response != null) {
          return response;
        }
      }
      sendToLeaders = true;
      replicas = new ArrayList<>();
    }

    SolrParams reqParams = request.getParams();
    if (reqParams == null) {
      reqParams = new ModifiableSolrParams();
    }
    List<String> theUrlList = new ArrayList<>();
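    // Collection and core admin requests are not routed by collection; offer every live node.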
    if (request.getPath().equals("/admin/collections")
        || request.getPath().equals("/admin/cores")) {
      Set<String> liveNodes = clusterState.getLiveNodes();
      for (String liveNode : liveNodes) {
        theUrlList.add(zkStateReader.getBaseUrlForNodeName(liveNode));
      }
    } else {
      String collection = reqParams.get(UpdateParams.COLLECTION, defaultCollection);

      if (collection == null) {
        throw new SolrServerException(
            "No collection param specified on request and no default collection has been set.");
      }

      Set<String> collectionsList = getCollectionList(clusterState, collection);
      if (collectionsList.isEmpty()) {
        throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection: " + collection);
      }

      String shardKeys = reqParams.get(ShardParams._ROUTE_);
      if (shardKeys == null) {
        shardKeys = reqParams.get(ShardParams.SHARD_KEYS); // deprecated
      }

      // TODO: not a big deal because of the caching, but we could avoid looking at every shard
      // when getting leaders if we tweaked some things.

      // Retrieve slices from the cloud state and, for each collection specified, add its slices
      // to the map.
      Map<String, Slice> slices = new HashMap<>();
      for (String collectionName : collectionsList) {
        DocCollection col = getDocCollection(clusterState, collectionName);
        Collection<Slice> routeSlices = col.getRouter().getSearchSlices(shardKeys, reqParams, col);
        ClientUtils.addSlices(slices, collectionName, routeSlices, true);
      }
      Set<String> liveNodes = clusterState.getLiveNodes();

      // build a map of unique nodes
      // TODO: allow filtering by group, role, etc
      Map<String, ZkNodeProps> nodes = new HashMap<>();
      List<String> urlList2 = new ArrayList<>();
      for (Slice slice : slices.values()) {
        for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
          ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
          String node = coreNodeProps.getNodeName();
          // Skip replicas that are not live or not active.
          if (!liveNodes.contains(node)
              || !coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) {
            continue;
          }
          // Consider each node only once.
          if (nodes.put(node, nodeProps) == null) {
            // Build the core URL once: fall back to the default collection when the request
            // carries no explicit collection parameter.
            String url;
            if (reqParams.get(UpdateParams.COLLECTION) == null) {
              url =
                  ZkCoreNodeProps.getCoreUrl(
                      nodeProps.getStr(ZkStateReader.BASE_URL_PROP), defaultCollection);
            } else {
              url = coreNodeProps.getCoreUrl();
            }
            // Leaders (or all nodes, when not routing to leaders) go to the primary list;
            // non-leader replicas are kept aside as fallbacks.
            if (!sendToLeaders || coreNodeProps.isLeader()) {
              urlList2.add(url);
            } else {
              replicas.add(url);
            }
          }
        }
      }

      // The unique node URLs collected above become the request URL list. When routing to
      // leaders, the non-leader replicas are appended after the shuffled leaders so they act as
      // fallbacks.
      theUrlList = new ArrayList<>(urlList2);
      if (theUrlList.isEmpty()) {
        throw new SolrException(
            SolrException.ErrorCode.INVALID_STATE, "Not enough nodes to handle the request");
      }

      Collections.shuffle(theUrlList, rand);
      if (sendToLeaders) {
        List<String> theReplicas = new ArrayList<>(replicas);
        Collections.shuffle(theReplicas, rand);
        theUrlList.addAll(theReplicas);
      }
    }

    LBHttpSolrServer.Req req = new LBHttpSolrServer.Req(request, theUrlList);
    LBHttpSolrServer.Rsp rsp = lbServer.request(req);
    return rsp.getResponse();
  }