private String getRemotCoreUrl(String collectionName, String origCorename) { ClusterState clusterState = cores.getZkController().getClusterState(); Collection<Slice> slices = clusterState.getActiveSlices(collectionName); boolean byCoreName = false; if (slices == null) { slices = new ArrayList<>(); // look by core name byCoreName = true; getSlicesForCollections(clusterState, slices, true); if (slices.isEmpty()) { getSlicesForCollections(clusterState, slices, false); } } if (slices.isEmpty()) { return null; } if (collectionsList == null) collectionsList = new ArrayList<>(); collectionsList.add(collectionName); String coreUrl = getCoreUrl(collectionName, origCorename, clusterState, slices, byCoreName, true); if (coreUrl == null) { coreUrl = getCoreUrl(collectionName, origCorename, clusterState, slices, byCoreName, false); } return coreUrl; }
protected void constructStreams() throws IOException { try { Object pushStream = ((Expressible) tupleStream).toExpression(streamFactory); ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader(); ClusterState clusterState = zkStateReader.getClusterState(); Set<String> liveNodes = clusterState.getLiveNodes(); Collection<Slice> slices = clusterState.getActiveSlices(this.collection); List<Replica> shuffler = new ArrayList(); for (Slice slice : slices) { Collection<Replica> replicas = slice.getReplicas(); for (Replica replica : replicas) { if (replica.getState() == Replica.State.ACTIVE && liveNodes.contains(replica.getNodeName())) shuffler.add(replica); } } if (workers > shuffler.size()) { throw new IOException("Number of workers exceeds nodes in the worker collection"); } Collections.shuffle(shuffler, new Random()); for (int w = 0; w < workers; w++) { HashMap params = new HashMap(); params.put("distrib", "false"); // We are the aggregator. params.put("numWorkers", workers); params.put("workerID", w); params.put("expr", pushStream); params.put("qt", "/stream"); Replica rep = shuffler.get(w); ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep); String url = zkProps.getCoreUrl(); SolrStream solrStream = new SolrStream(url, params); solrStreams.add(solrStream); } assert (solrStreams.size() == workers); } catch (Exception e) { throw new IOException(e); } }
/**
 * Walks the NamedList response after performing an update request looking for the replication
 * factor that was achieved in each shard involved in the request. For single doc updates, there
 * will be only one shard in the return value.
 *
 * @param collection the collection the update was routed to
 * @param resp the update response; only {@code RouteResponse} instances carry per-route data
 * @return map of shard name (or the raw leader URL when the shard cannot be resolved)
 *     to the achieved replication factor; empty when {@code resp} is not a RouteResponse
 */
@SuppressWarnings("rawtypes")
public Map<String, Integer> getShardReplicationFactor(String collection, NamedList resp) {
  connect();

  Map<String, Integer> results = new HashMap<>();
  if (resp instanceof CloudSolrServer.RouteResponse) {
    NamedList routes = ((CloudSolrServer.RouteResponse) resp).getRouteResponses();
    ClusterState clusterState = zkStateReader.getClusterState();

    // Index both URL forms a route key might use — base/coreName and
    // base/collectionName — back to the slice (shard) name.
    Map<String, String> leaders = new HashMap<>();
    for (Slice slice : clusterState.getActiveSlices(collection)) {
      Replica leader = slice.getLeader();
      if (leader != null) {
        ZkCoreNodeProps zkProps = new ZkCoreNodeProps(leader);
        String leaderUrl = zkProps.getBaseUrl() + "/" + zkProps.getCoreName();
        leaders.put(leaderUrl, slice.getName());
        String altLeaderUrl = zkProps.getBaseUrl() + "/" + collection;
        leaders.put(altLeaderUrl, slice.getName());
      }
    }

    Iterator<Map.Entry<String, Object>> routeIter = routes.iterator();
    while (routeIter.hasNext()) {
      Map.Entry<String, Object> next = routeIter.next();
      String host = next.getKey();
      NamedList hostResp = (NamedList) next.getValue();
      // Guard against a response that carries no responseHeader (previously an NPE).
      NamedList header = (NamedList) hostResp.get("responseHeader");
      Integer rf = (header == null) ? null : (Integer) header.get(UpdateRequest.REPFACT);
      if (rf != null) {
        String shard = leaders.get(host);
        if (shard == null) {
          // Route keys may carry a trailing slash; retry without it before
          // falling back to reporting the raw host URL.
          if (host.endsWith("/")) shard = leaders.get(host.substring(0, host.length() - 1));
          if (shard == null) {
            shard = host;
          }
        }
        results.put(shard, rf);
      }
    }
  }
  return results;
}