  private void waitTillNodesActive() throws Exception {
    // Poll the cluster state for up to 60 * 3s = 3 minutes.
    for (int i = 0; i < 60; i++) {
      Thread.sleep(3000);
      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
      ClusterState clusterState = zkStateReader.getClusterState();
      DocCollection collection1 = clusterState.getCollection("collection1");
      Slice slice = collection1.getSlice("shard1");
      Collection<Replica> replicas = slice.getReplicas();
      boolean allActive = true;

      Collection<String> nodesDownNames =
          nodesDown.stream().map(n -> n.coreNodeName).collect(Collectors.toList());

      Collection<Replica> replicasToCheck =
          replicas
              .stream()
              .filter(r -> !nodesDownNames.contains(r.getName()))
              .collect(Collectors.toList());

      for (Replica replica : replicasToCheck) {
        if (!clusterState.liveNodesContain(replica.getNodeName())
            || replica.getState() != Replica.State.ACTIVE) {
          allActive = false;
          break;
        }
      }
      if (allActive) {
        return;
      }
    }
    printLayout();
    fail("timeout waiting to see all nodes active");
  }
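Note: in newer Solr releases the hand-rolled 60 x 3s polling loop above can usually be replaced by ZkStateReader.waitForState, which blocks until a collection-state predicate holds or a timeout expires. The following is only a sketch under that assumption; the method name is made up, and it reuses the cloudClient and nodesDown fields and the "collection1"/"shard1" names from the test above.

  // Sketch only, not part of the original example: assumes a Solr version where
  // ZkStateReader.waitForState(String, long, TimeUnit, CollectionStatePredicate) is available.
  // Additional imports needed: java.util.Set, java.util.concurrent.TimeUnit,
  // java.util.stream.Collectors.
  private void waitTillNodesActiveViaWaitForState() throws Exception {
    Set<String> nodesDownNames =
        nodesDown.stream().map(n -> n.coreNodeName).collect(Collectors.toSet());
    cloudClient
        .getZkStateReader()
        .waitForState(
            "collection1",
            180,
            TimeUnit.SECONDS,
            (liveNodes, collectionState) ->
                collectionState != null
                    && collectionState.getSlice("shard1") != null
                    && collectionState.getSlice("shard1").getReplicas().stream()
                        .filter(r -> !nodesDownNames.contains(r.getName()))
                        .allMatch(
                            r ->
                                liveNodes.contains(r.getNodeName())
                                    && r.getState() == Replica.State.ACTIVE));
  }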
Example No. 2
  private SolrCore getCoreByCollection(String collectionName) {
    ZkStateReader zkStateReader = cores.getZkController().getZkStateReader();

    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection = clusterState.getCollectionOrNull(collectionName);
    if (collection == null) {
      return null;
    }
    Map<String, Slice> slices = collection.getActiveSlicesMap();
    if (slices == null) {
      return null;
    }
    Set<String> liveNodes = clusterState.getLiveNodes();
    // look for a core on this node
    Set<Map.Entry<String, Slice>> entries = slices.entrySet();
    SolrCore core = null;

    // Hitting the leaders is useful when it's an update request.
    // For queries it doesn't matter and hence we don't distinguish here.
    for (Map.Entry<String, Slice> entry : entries) {
      // first see if we have the leader
      Replica leaderProps = collection.getLeader(entry.getKey());
      if (leaderProps != null
          && liveNodes.contains(leaderProps.getNodeName())
          && leaderProps.getState() == Replica.State.ACTIVE) {
        core = checkProps(leaderProps);
        if (core != null) {
          return core;
        }
      }

      // check everyone then
      Map<String, Replica> shards = entry.getValue().getReplicasMap();
      Set<Map.Entry<String, Replica>> shardEntries = shards.entrySet();
      for (Map.Entry<String, Replica> shardEntry : shardEntries) {
        Replica zkProps = shardEntry.getValue();
        if (liveNodes.contains(zkProps.getNodeName())
            && zkProps.getState() == Replica.State.ACTIVE) {
          core = checkProps(zkProps);
          if (core != null) {
            return core;
          }
        }
      }
    }
    return null;
  }
Example No. 3
  private static String findLocalReplicaForFromIndex(ZkController zkController, String fromIndex) {
    String fromReplica = null;

    String nodeName = zkController.getNodeName();
    for (Slice slice : zkController.getClusterState().getActiveSlices(fromIndex)) {
      if (fromReplica != null)
        throw new SolrException(
            SolrException.ErrorCode.BAD_REQUEST,
            "SolrCloud join: multiple shards not yet supported " + fromIndex);

      for (Replica replica : slice.getReplicas()) {
        if (replica.getNodeName().equals(nodeName)) {
          fromReplica = replica.getStr(ZkStateReader.CORE_NAME_PROP);
          // found local replica, but is it Active?
          if (replica.getState() != Replica.State.ACTIVE)
            throw new SolrException(
                SolrException.ErrorCode.BAD_REQUEST,
                "SolrCloud join: "
                    + fromIndex
                    + " has a local replica ("
                    + fromReplica
                    + ") on "
                    + nodeName
                    + ", but it is "
                    + replica.getState());

          break;
        }
      }
    }

    if (fromReplica == null)
      throw new SolrException(
          SolrException.ErrorCode.BAD_REQUEST,
          "SolrCloud join: No active replicas for " + fromIndex + " found in node " + nodeName);

    return fromReplica;
  }
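findLocalReplicaForFromIndex enforces the SolrCloud join constraint that the fromIndex collection must be a single shard with an active replica co-located on the node executing the query. Purely as an illustration of issuing such a join from SolrJ, here is a minimal sketch; the ZooKeeper address, collection names (orders, products) and field names (manu_id, id) are invented for the example.

// Illustrative sketch only: all names below are hypothetical; the
// {!join from=... to=... fromIndex=...} syntax itself is the standard Solr join query parser.
import java.util.Collections;
import java.util.Optional;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;

public class JoinQueryExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client =
        new CloudSolrClient.Builder(Collections.singletonList("localhost:9983"), Optional.empty())
            .build()) {
      // Join documents in "orders" against "products"; "products" must have an active
      // replica on the node that serves the query, which is what the method above checks.
      SolrQuery query =
          new SolrQuery("{!join from=manu_id to=id fromIndex=products}inStock:true");
      System.out.println(client.query("orders", query).getResults().getNumFound());
    }
  }
}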
Example No. 4
  private String getCoreUrl(
      String collectionName,
      String origCorename,
      ClusterState clusterState,
      Collection<Slice> slices,
      boolean byCoreName,
      boolean activeReplicas) {
    String coreUrl;
    Set<String> liveNodes = clusterState.getLiveNodes();
    List<Slice> randomizedSlices = new ArrayList<>(slices.size());
    randomizedSlices.addAll(slices);
    Collections.shuffle(randomizedSlices, random);

    for (Slice slice : randomizedSlices) {
      List<Replica> randomizedReplicas = new ArrayList<>();
      randomizedReplicas.addAll(slice.getReplicas());
      Collections.shuffle(randomizedReplicas, random);

      for (Replica replica : randomizedReplicas) {
        if (!activeReplicas
            || (liveNodes.contains(replica.getNodeName())
                && replica.getState() == Replica.State.ACTIVE)) {

          if (byCoreName && !collectionName.equals(replica.getStr(CORE_NAME_PROP))) {
            // if it's by core name, make sure they match
            continue;
          }
          if (replica.getStr(BASE_URL_PROP).equals(cores.getZkController().getBaseUrl())) {
            // don't count a local core
            continue;
          }

          if (origCorename != null) {
            coreUrl = replica.getStr(BASE_URL_PROP) + "/" + origCorename;
          } else {
            coreUrl = replica.getCoreUrl();
            if (coreUrl.endsWith("/")) {
              coreUrl = coreUrl.substring(0, coreUrl.length() - 1);
            }
          }

          return coreUrl;
        }
      }
    }
    return null;
  }
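getCoreUrl picks a random live, active (and non-local) replica and returns its core URL so the request can be forwarded there. As a hedged aside, a URL of that shape can also be queried directly with SolrJ's HttpSolrClient; the sketch below uses an invented host and core name.

// Sketch only: host, port and core name are hypothetical examples of a value
// that getCoreUrl(...) might return.
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class RemoteCoreQueryExample {
  public static void main(String[] args) throws Exception {
    String coreUrl = "http://host1:8983/solr/collection1_shard1_replica_n1";
    try (HttpSolrClient client = new HttpSolrClient.Builder(coreUrl).build()) {
      System.out.println(client.query(new SolrQuery("*:*")).getResults().getNumFound());
    }
  }
}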
Example No. 5
  protected void constructStreams() throws IOException {

    try {
      Object pushStream = ((Expressible) tupleStream).toExpression(streamFactory);

      ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
      ClusterState clusterState = zkStateReader.getClusterState();
      Set<String> liveNodes = clusterState.getLiveNodes();
      Collection<Slice> slices = clusterState.getActiveSlices(this.collection);
      List<Replica> shuffler = new ArrayList<>();
      for (Slice slice : slices) {
        Collection<Replica> replicas = slice.getReplicas();
        for (Replica replica : replicas) {
          if (replica.getState() == Replica.State.ACTIVE
              && liveNodes.contains(replica.getNodeName())) shuffler.add(replica);
        }
      }

      if (workers > shuffler.size()) {
        throw new IOException("Number of workers exceeds nodes in the worker collection");
      }

      Collections.shuffle(shuffler, new Random());

      for (int w = 0; w < workers; w++) {
        HashMap<String, Object> params = new HashMap<>();
        params.put("distrib", "false"); // We are the aggregator.
        params.put("numWorkers", workers);
        params.put("workerID", w);
        params.put("expr", pushStream);
        params.put("qt", "/stream");
        Replica rep = shuffler.get(w);
        ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep);
        String url = zkProps.getCoreUrl();
        SolrStream solrStream = new SolrStream(url, params);
        solrStreams.add(solrStream);
      }

      assert (solrStreams.size() == workers);

    } catch (Exception e) {
      throw new IOException(e);
    }
  }
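As an aside on the raw HashMap used for the per-worker parameters above: newer SolrJ releases also provide a SolrStream constructor that accepts SolrParams, so the same parameters can be built with ModifiableSolrParams. The helper below is only a sketch under that assumption; the method name is invented and the parameter values mirror the loop above.

  // Sketch only: assumes a SolrStream(String, SolrParams) constructor is available
  // (present in newer SolrJ releases). Needs org.apache.solr.common.params.ModifiableSolrParams
  // and org.apache.solr.client.solrj.io.stream.SolrStream.
  private SolrStream openWorkerStream(String url, int numWorkers, int workerId, Object pushStream) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("distrib", "false"); // we are the aggregator
    params.set("numWorkers", numWorkers);
    params.set("workerID", workerId);
    params.set("expr", pushStream.toString());
    params.set("qt", "/stream");
    return new SolrStream(url, params);
  }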