Code example #1
  private void waitTillNodesActive() throws Exception {
    for (int i = 0; i < 60; i++) {
      Thread.sleep(3000);
      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
      ClusterState clusterState = zkStateReader.getClusterState();
      DocCollection collection1 = clusterState.getCollection("collection1");
      Slice slice = collection1.getSlice("shard1");
      Collection<Replica> replicas = slice.getReplicas();
      boolean allActive = true;

      Collection<String> nodesDownNames =
          nodesDown.stream().map(n -> n.coreNodeName).collect(Collectors.toList());

      Collection<Replica> replicasToCheck =
          replicas
              .stream()
              .filter(r -> !nodesDownNames.contains(r.getName()))
              .collect(Collectors.toList());

      for (Replica replica : replicasToCheck) {
        if (!clusterState.liveNodesContain(replica.getNodeName())
            || replica.getState() != Replica.State.ACTIVE) {
          allActive = false;
          break;
        }
      }
      if (allActive) {
        return;
      }
    }
    printLayout();
    fail("timeout waiting to see all nodes active");
  }
Code example #2
 private void extractRemotePath(String corename, String origCorename, int idx)
     throws UnsupportedEncodingException, KeeperException, InterruptedException {
   if (core == null && idx > 0) {
     coreUrl = getRemotCoreUrl(corename, origCorename);
     // don't proxy for internal update requests
     invalidStates = checkStateIsValid(queryParams.get(CloudSolrClient.STATE_VERSION));
     if (coreUrl != null
         && queryParams.get(DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM) == null) {
       path = path.substring(idx);
       if (invalidStates != null) {
         // it does not make sense to send the request to a remote node
         throw new SolrException(
             SolrException.ErrorCode.INVALID_STATE,
             new String(Utils.toJSON(invalidStates), org.apache.lucene.util.IOUtils.UTF_8));
       }
       action = REMOTEQUERY;
     } else {
       if (!retry) {
         // we couldn't find a core to work with, try reloading aliases
         // TODO: it would be nice if admin ui elements skipped this...
         ZkStateReader reader = cores.getZkController().getZkStateReader();
         reader.updateAliases();
         action = RETRY;
       }
     }
   }
 }
Code example #3
 public void deleteAllCollections() throws Exception {
   try (ZkStateReader reader = new ZkStateReader(solrClient.getZkStateReader().getZkClient())) {
     reader.createClusterStateWatchersAndUpdate();
     for (String collection : reader.getClusterState().getCollectionStates().keySet()) {
       CollectionAdminRequest.deleteCollection(collection).process(solrClient);
     }
   }
 }
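A small usage sketch for a helper like this, assuming it lives on a MiniSolrCloudCluster-style object exposed to the test as a field named `cluster` (the field and test method names are assumptions):

  @After
  public void cleanupCollections() throws Exception {
    // assumes `cluster` exposes the deleteAllCollections() helper shown above
    cluster.deleteAllCollections();
  }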
Code example #4
  private SolrCore getCoreByCollection(String collectionName) {
    ZkStateReader zkStateReader = cores.getZkController().getZkStateReader();

    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection = clusterState.getCollectionOrNull(collectionName);
    if (collection == null) {
      return null;
    }
    Map<String, Slice> slices = collection.getActiveSlicesMap();
    if (slices == null) {
      return null;
    }
    Set<String> liveNodes = clusterState.getLiveNodes();
    // look for a core on this node
    Set<Map.Entry<String, Slice>> entries = slices.entrySet();
    SolrCore core = null;

    // Hitting the leaders is useful when it's an update request.
    // For queries it doesn't matter and hence we don't distinguish here.
    for (Map.Entry<String, Slice> entry : entries) {
      // first see if we have the leader
      Replica leaderProps = collection.getLeader(entry.getKey());
      if (leaderProps != null
          && liveNodes.contains(leaderProps.getNodeName())
          && leaderProps.getState() == Replica.State.ACTIVE) {
        core = checkProps(leaderProps);
        if (core != null) {
          return core;
        }
      }

      // check everyone then
      Map<String, Replica> shards = entry.getValue().getReplicasMap();
      Set<Map.Entry<String, Replica>> shardEntries = shards.entrySet();
      for (Map.Entry<String, Replica> shardEntry : shardEntries) {
        Replica zkProps = shardEntry.getValue();
        if (liveNodes.contains(zkProps.getNodeName())
            && zkProps.getState() == Replica.State.ACTIVE) {
          core = checkProps(zkProps);
          if (core != null) {
            return core;
          }
        }
      }
    }
    return null;
  }
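The `checkProps` helper called above is not shown in this example. A plausible sketch, assuming it only verifies that the replica lives on the local node and then asks the CoreContainer for the matching core (the exact property lookups are assumptions):

  private SolrCore checkProps(Replica zkProps) {
    // assumed: only resolve a core when the replica is hosted by this very node
    if (cores.getZkController().getNodeName().equals(zkProps.getNodeName())) {
      String coreName = zkProps.getStr(ZkStateReader.CORE_NAME_PROP);
      return cores.getCore(coreName);
    }
    return null;
  }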
Code example #5
  protected final synchronized DocCollection getDocCollection() {
    if (docCollection == null) {
      ZkStateReader zkStateReader = getCloudSolrServer().getZkStateReader();
      docCollection = zkStateReader.getClusterState().getCollection(collection);

      // do basic checks once
      DocRouter docRouter = docCollection.getRouter();
      if (docRouter instanceof ImplicitDocRouter)
        throw new IllegalStateException(
            "Implicit document routing not supported by this Partitioner!");
      Collection<Slice> shards = getDocCollection().getSlices();
      if (shards == null || shards.size() == 0)
        throw new IllegalStateException(
            "Collection '" + collection + "' does not have any shards!");
    }
    return docCollection;
  }
Code example #6
 /**
  * Connect to the zookeeper ensemble. This is an optional method that may be used to force a
  * connect before any other requests are sent.
  */
 public void connect() {
   if (zkStateReader == null) {
     synchronized (this) {
       if (zkStateReader == null) {
         ZkStateReader zk = null;
         try {
           zk = new ZkStateReader(zkHost, zkClientTimeout, zkConnectTimeout);
           zk.createClusterStateWatchersAndUpdate();
           zkStateReader = zk;
         } catch (InterruptedException e) {
           if (zk != null) zk.close();
           Thread.currentThread().interrupt();
           throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
         } catch (KeeperException e) {
           if (zk != null) zk.close();
           throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
         } catch (IOException e) {
           if (zk != null) zk.close();
           throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
         } catch (TimeoutException e) {
           if (zk != null) zk.close();
           throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
         } catch (Exception e) {
           if (zk != null) zk.close();
           // do not wrap because clients may be relying on the underlying exception being thrown
           throw e;
         }
       }
     }
   }
 }
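A minimal usage sketch for the documented behavior, assuming the SolrJ client of this era (CloudSolrServer built from a ZooKeeper connect string); calling connect() eagerly surfaces ZooKeeper or cluster-state problems before the first request:

  // eager connect: fail fast on ZooKeeper problems instead of on the first query
  CloudSolrServer server = new CloudSolrServer("zkhost1:2181,zkhost2:2181/solr");
  server.setDefaultCollection("collection1");
  server.connect(); // optional; the first request would trigger the same lazy initialization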
Code example #7
File: ParallelStream.java  Project: rmuir/lucene-solr
  protected void constructStreams() throws IOException {

    try {
      Object pushStream = ((Expressible) tupleStream).toExpression(streamFactory);

      ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
      ClusterState clusterState = zkStateReader.getClusterState();
      Set<String> liveNodes = clusterState.getLiveNodes();
      Collection<Slice> slices = clusterState.getActiveSlices(this.collection);
      List<Replica> shuffler = new ArrayList();
      for (Slice slice : slices) {
        Collection<Replica> replicas = slice.getReplicas();
        for (Replica replica : replicas) {
          if (replica.getState() == Replica.State.ACTIVE
              && liveNodes.contains(replica.getNodeName())) shuffler.add(replica);
        }
      }

      if (workers > shuffler.size()) {
        throw new IOException("Number of workers exceeds nodes in the worker collection");
      }

      Collections.shuffle(shuffler, new Random());

      for (int w = 0; w < workers; w++) {
        HashMap params = new HashMap();
        params.put("distrib", "false"); // We are the aggregator.
        params.put("numWorkers", workers);
        params.put("workerID", w);
        params.put("expr", pushStream);
        params.put("qt", "/stream");
        Replica rep = shuffler.get(w);
        ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep);
        String url = zkProps.getCoreUrl();
        SolrStream solrStream = new SolrStream(url, params);
        solrStreams.add(solrStream);
      }

      assert (solrStreams.size() == workers);

    } catch (Exception e) {
      throw new IOException(e);
    }
  }
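A sketch of how a stream like this is typically driven, assuming an already-constructed ParallelStream over the worker collection; opening the stream is what ends up invoking constructStreams() above (the method and variable names here are illustrative):

  // assumed usage: drain the parallel stream; tuples arrive already merged
  // from the workers' /stream responses
  void drain(ParallelStream parallelStream) throws IOException {
    parallelStream.open();
    try {
      for (Tuple tuple = parallelStream.read(); !tuple.EOF; tuple = parallelStream.read()) {
        // hand each tuple to whatever consumes the stream here
      }
    } finally {
      parallelStream.close();
    }
  }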
Code example #8
  private void closeZK() throws Exception {
    if (zkClient != null) {
      zkClient.close();
    }

    if (reader != null) {
      reader.close();
    }
    zkServer.shutdown();
  }
Code example #9
File: SolrRDD.java  Project: rahuldhote/spark-solr
  protected List<String> buildShardList(CloudSolrClient cloudSolrServer) {
    ZkStateReader zkStateReader = cloudSolrServer.getZkStateReader();

    ClusterState clusterState = zkStateReader.getClusterState();

    String[] collections = null;
    if (clusterState.hasCollection(collection)) {
      collections = new String[] {collection};
    } else {
      // might be a collection alias?
      Aliases aliases = zkStateReader.getAliases();
      String aliasedCollections = aliases.getCollectionAlias(collection);
      if (aliasedCollections == null)
        throw new IllegalArgumentException("Collection " + collection + " not found!");
      collections = aliasedCollections.split(",");
    }

    Set<String> liveNodes = clusterState.getLiveNodes();
    Random random = new Random(5150);

    List<String> shards = new ArrayList<String>();
    for (String coll : collections) {
      for (Slice slice : clusterState.getSlices(coll)) {
        List<String> replicas = new ArrayList<String>();
        for (Replica r : slice.getReplicas()) {
          ZkCoreNodeProps replicaCoreProps = new ZkCoreNodeProps(r);
          if (liveNodes.contains(replicaCoreProps.getNodeName()))
            replicas.add(replicaCoreProps.getCoreUrl());
        }
        int numReplicas = replicas.size();
        if (numReplicas == 0)
          throw new IllegalStateException(
              "Shard " + slice.getName() + " does not have any active replicas!");

        String replicaUrl =
            (numReplicas == 1) ? replicas.get(0) : replicas.get(random.nextInt(replicas.size()));
        shards.add(replicaUrl);
      }
    }
    return shards;
  }
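A sketch of how such a per-shard URL list is typically consumed, assuming each shard replica is then queried directly with distrib=false so no further fan-out happens (the method name and query are illustrative):

  // hypothetical consumer of the shard list built above
  protected void queryShardsDirectly(List<String> shardUrls) throws Exception {
    for (String shardUrl : shardUrls) {
      HttpSolrServer shardClient = new HttpSolrServer(shardUrl);
      try {
        SolrQuery query = new SolrQuery("*:*");
        query.set("distrib", "false"); // keep the query on this replica only
        QueryResponse rsp = shardClient.query(query);
        // hand rsp.getResults() to the per-partition consumer
      } finally {
        shardClient.shutdown();
      }
    }
  }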
Code example #10
  @BeforeClass
  public static void setupCluster() throws Exception {
    final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");

    String configName = "solrCloudCollectionConfig";
    int nodeCount = 5;
    configureCluster(nodeCount).addConfig(configName, configDir).configure();

    Map<String, String> collectionProperties = new HashMap<>();
    collectionProperties.put("config", "solrconfig-tlog.xml");
    collectionProperties.put("schema", "schema.xml");

    // create a collection holding data for the "to" side of the JOIN

    int shards = 2;
    int replicas = 2;
    CollectionAdminRequest.createCollection(toColl, configName, shards, replicas)
        .setProperties(collectionProperties)
        .process(cluster.getSolrClient());

    // get the set of nodes where replicas for the "to" collection exist
    Set<String> nodeSet = new HashSet<>();
    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
    ClusterState cs = zkStateReader.getClusterState();
    for (Slice slice : cs.getCollection(toColl).getActiveSlices())
      for (Replica replica : slice.getReplicas()) nodeSet.add(replica.getNodeName());
    assertTrue(nodeSet.size() > 0);

    // deploy the "from" collection to all nodes where the "to" collection exists
    CollectionAdminRequest.createCollection(fromColl, configName, 1, 4)
        .setCreateNodeSet(StringUtils.join(nodeSet, ","))
        .setProperties(collectionProperties)
        .process(cluster.getSolrClient());

    toDocId = indexDoc(toColl, 1001, "a", null, "b");
    indexDoc(fromColl, 2001, "a", "c", null);

    Thread.sleep(1000); // so the commits fire
  }
Code example #11
    @Override
    public boolean checkRetry() {
      ZkCoreNodeProps leaderProps;
      try {
        leaderProps = new ZkCoreNodeProps(zkStateReader.getLeaderProps(collection, shardId));
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return false;
      }

      this.url = leaderProps.getCoreUrl();

      return true;
    }
Code example #12
    @Override
    public boolean checkRetry() {
      ZkCoreNodeProps leaderProps;
      try {
        leaderProps = new ZkCoreNodeProps(zkStateReader.getLeaderRetry(collection, shardId));
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return false;
      } catch (Exception e) {
        // we retry with same info
        log.warn(null, e);
        return true;
      }

      this.nodeProps = leaderProps;

      return true;
    }
Code example #13
    /**
     * Retrieve all requests recorded by this queue which were sent to given collection and shard
     *
     * @param zkStateReader the {@link org.apache.solr.common.cloud.ZkStateReader} from which
     *     cluster state is read
     * @param collectionName the given collection name for which requests have to be extracted
     * @param shardId the given shard name for which requests have to be extracted
     * @return a list of {@link
     *     org.apache.solr.handler.component.TrackingShardHandlerFactory.ShardRequestAndParams} or
     *     empty list if none are found
     */
    public List<ShardRequestAndParams> getShardRequests(
        ZkStateReader zkStateReader, String collectionName, String shardId) {
      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
      assert collection != null;
      Slice slice = collection.getSlice(shardId);
      assert slice != null;

      for (Map.Entry<String, List<ShardRequestAndParams>> entry : requests.entrySet()) {
        // multiple shard addresses may be present separated by '|'
        List<String> list = StrUtils.splitSmart(entry.getKey(), '|');
        for (Map.Entry<String, Replica> replica : slice.getReplicasMap().entrySet()) {
          String coreUrl = new ZkCoreNodeProps(replica.getValue()).getCoreUrl();
          if (list.contains(coreUrl)) {
            return new ArrayList<>(entry.getValue());
          }
        }
      }
      return Collections.emptyList();
    }
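A brief usage sketch, assuming a RequestTrackingQueue named `trackingQueue` was installed through the TrackingShardHandlerFactory before the distributed request ran (the queue and collection/shard names are assumptions):

    // assumed: trackingQueue recorded the sub-requests of a previous distributed query
    List<ShardRequestAndParams> sent =
        trackingQueue.getShardRequests(zkStateReader, "collection1", "shard1");
    System.out.println("sub-requests recorded for collection1/shard1: " + sent.size());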
Code example #14
  /**
   * Walks the NamedList response after performing an update request looking for the replication
   * factor that was achieved in each shard involved in the request. For single doc updates, there
   * will be only one shard in the return value.
   */
  @SuppressWarnings("rawtypes")
  public Map<String, Integer> getShardReplicationFactor(String collection, NamedList resp) {
    connect();

    Map<String, Integer> results = new HashMap<String, Integer>();
    if (resp instanceof CloudSolrServer.RouteResponse) {
      NamedList routes = ((CloudSolrServer.RouteResponse) resp).getRouteResponses();
      ClusterState clusterState = zkStateReader.getClusterState();
      Map<String, String> leaders = new HashMap<String, String>();
      for (Slice slice : clusterState.getActiveSlices(collection)) {
        Replica leader = slice.getLeader();
        if (leader != null) {
          ZkCoreNodeProps zkProps = new ZkCoreNodeProps(leader);
          String leaderUrl = zkProps.getBaseUrl() + "/" + zkProps.getCoreName();
          leaders.put(leaderUrl, slice.getName());
          String altLeaderUrl = zkProps.getBaseUrl() + "/" + collection;
          leaders.put(altLeaderUrl, slice.getName());
        }
      }

      Iterator<Map.Entry<String, Object>> routeIter = routes.iterator();
      while (routeIter.hasNext()) {
        Map.Entry<String, Object> next = routeIter.next();
        String host = next.getKey();
        NamedList hostResp = (NamedList) next.getValue();
        Integer rf =
            (Integer) ((NamedList) hostResp.get("responseHeader")).get(UpdateRequest.REPFACT);
        if (rf != null) {
          String shard = leaders.get(host);
          if (shard == null) {
            if (host.endsWith("/")) shard = leaders.get(host.substring(0, host.length() - 1));
            if (shard == null) {
              shard = host;
            }
          }
          results.put(shard, rf);
        }
      }
    }
    return results;
  }
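A usage sketch, assuming documents are sent with the min_rf request parameter so each shard leader reports the achieved replication factor in its response header; the client, document, and collection names are illustrative:

  // hypothetical helper; `cloudSolrServer` and `doc` are assumptions
  void indexAndReportRf(SolrInputDocument doc) throws Exception {
    UpdateRequest update = new UpdateRequest();
    update.add(doc);
    update.setParam("min_rf", "2"); // ask shard leaders to report the achieved rf
    NamedList<Object> rsp = cloudSolrServer.request(update);
    Map<String, Integer> rfPerShard =
        cloudSolrServer.getShardReplicationFactor("collection1", rsp);
    for (Map.Entry<String, Integer> e : rfPerShard.entrySet()) {
      System.out.println("shard " + e.getKey() + " achieved rf=" + e.getValue());
    }
  }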
Code example #15
  protected void issueCreateJob(
      Integer numberOfSlices,
      Integer replicationFactor,
      Integer maxShardsPerNode,
      List<String> createNodeList,
      boolean sendCreateNodeList,
      boolean createNodeSetShuffle) {
    Map<String, Object> propMap =
        ZkNodeProps.makeMap(
            Overseer.QUEUE_OPERATION,
            CollectionParams.CollectionAction.CREATE.toLower(),
            ZkStateReader.REPLICATION_FACTOR,
            replicationFactor.toString(),
            "name",
            COLLECTION_NAME,
            "collection.configName",
            CONFIG_NAME,
            OverseerCollectionProcessor.NUM_SLICES,
            numberOfSlices.toString(),
            ZkStateReader.MAX_SHARDS_PER_NODE,
            maxShardsPerNode.toString());
    if (sendCreateNodeList) {
      propMap.put(
          OverseerCollectionProcessor.CREATE_NODE_SET,
          (createNodeList != null) ? StrUtils.join(createNodeList, ',') : null);
      if (OverseerCollectionProcessor.CREATE_NODE_SET_SHUFFLE_DEFAULT != createNodeSetShuffle
          || random().nextBoolean()) {
        propMap.put(OverseerCollectionProcessor.CREATE_NODE_SET_SHUFFLE, createNodeSetShuffle);
      }
    }

    ZkNodeProps props = new ZkNodeProps(propMap);
    QueueEvent qe =
        new QueueEvent("id", ZkStateReader.toJSON(props), null) {
          @Override
          public void setBytes(byte[] bytes) {
            lastProcessMessageResult = SolrResponse.deserialize(bytes);
          }
        };
    queue.add(qe);
  }
Code example #16
  @Override
  public void shutdown() {
    if (zkStateReader != null) {
      synchronized (this) {
        if (zkStateReader != null) zkStateReader.close();
        zkStateReader = null;
      }
    }

    if (shutdownLBHttpSolrServer) {
      lbServer.shutdown();
    }

    if (clientIsInternal && myClient != null) {
      myClient.getConnectionManager().shutdown();
    }

    if (this.threadPool != null && !this.threadPool.isShutdown()) {
      this.threadPool.shutdown();
    }
  }
Code example #17
  private Set<String> getCollectionList(ClusterState clusterState, String collection) {
    // Extract each comma separated collection name and store in a List.
    List<String> rawCollectionsList = StrUtils.splitSmart(collection, ",", true);
    Set<String> collectionsList = new HashSet<>();
    // validate collections
    for (String collectionName : rawCollectionsList) {
      if (!clusterState.getCollections().contains(collectionName)) {
        Aliases aliases = zkStateReader.getAliases();
        String alias = aliases.getCollectionAlias(collectionName);
        if (alias != null) {
          List<String> aliasList = StrUtils.splitSmart(alias, ",", true);
          collectionsList.addAll(aliasList);
          continue;
        }

        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection not found: " + collectionName);
      }

      collectionsList.add(collectionName);
    }
    return collectionsList;
  }
Code example #18
  @Override
  public void doTest() throws Exception {
    boolean testsSuccesful = false;
    try {
      handle.clear();
      handle.put("QTime", SKIPVAL);
      handle.put("timestamp", SKIPVAL);
      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
      // make sure we have leaders for each shard
      for (int j = 1; j < sliceCount; j++) {
        zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 10000);
      } // make sure we again have leaders for each shard

      waitForRecoveriesToFinish(false);

      // we cannot do delete by query
      // as it's not supported for recovery
      del("*:*");

      List<StopableThread> threads = new ArrayList<StopableThread>();
      int threadCount = 1;
      int i = 0;
      for (i = 0; i < threadCount; i++) {
        StopableIndexingThread indexThread = new StopableIndexingThread(Integer.toString(i), true);
        threads.add(indexThread);
        indexThread.start();
      }

      threadCount = 1;
      i = 0;
      for (i = 0; i < threadCount; i++) {
        StopableSearchThread searchThread = new StopableSearchThread();
        threads.add(searchThread);
        searchThread.start();
      }

      // TODO: we only do this sometimes so that we can sometimes compare against control,
      // it's currently hard to know what requests failed when using ConcurrentSolrUpdateServer
      boolean runFullThrottle = random().nextBoolean();
      if (runFullThrottle) {
        FullThrottleStopableIndexingThread ftIndexThread =
            new FullThrottleStopableIndexingThread(clients, "ft1", true);
        threads.add(ftIndexThread);
        ftIndexThread.start();
      }

      chaosMonkey.startTheMonkey(true, 10000);
      try {
        long runLength;
        if (RUN_LENGTH != -1) {
          runLength = RUN_LENGTH;
        } else {
          int[] runTimes =
              new int[] {5000, 6000, 10000, 15000, 25000, 30000, 30000, 45000, 90000, 120000};
          runLength = runTimes[random().nextInt(runTimes.length - 1)];
        }

        Thread.sleep(runLength);
      } finally {
        chaosMonkey.stopTheMonkey();
      }

      for (StopableThread indexThread : threads) {
        indexThread.safeStop();
      }

      // start any downed jetties to be sure we still will end up with a leader per shard...

      // wait for stop...
      for (StopableThread indexThread : threads) {
        indexThread.join();
      }

      // try and wait for any replications and what not to finish...

      Thread.sleep(2000);

      // wait until there are no recoveries...
      waitForThingsToLevelOut(Integer.MAX_VALUE); // Math.round((runLength / 1000.0f / 3.0f)));

      // make sure we again have leaders for each shard
      for (int j = 1; j < sliceCount; j++) {
        zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 30000);
      }

      commit();

      // TODO: assert we didn't kill everyone

      zkStateReader.updateClusterState(true);
      assertTrue(zkStateReader.getClusterState().getLiveNodes().size() > 0);

      // we expect full throttle fails, but cloud client should not easily fail
      for (StopableThread indexThread : threads) {
        if (indexThread instanceof StopableIndexingThread
            && !(indexThread instanceof FullThrottleStopableIndexingThread)) {
          assertFalse(
              "There were too many update fails - we expect it can happen, but shouldn't easily",
              ((StopableIndexingThread) indexThread).getFailCount() > 10);
        }
      }

      // full throttle thread can
      // have request fails
      checkShardConsistency(!runFullThrottle, true);

      long ctrlDocs = controlClient.query(new SolrQuery("*:*")).getResults().getNumFound();

      // ensure we have added more than 0 docs
      long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();

      assertTrue("Found " + ctrlDocs + " control docs", cloudClientDocs > 0);

      if (VERBOSE)
        System.out.println(
            "control docs:"
                + controlClient.query(new SolrQuery("*:*")).getResults().getNumFound()
                + "\n\n");

      // try and make a collection to make sure the overseer has survived the expiration and session
      // loss

      // sometimes we restart zookeeper as well
      if (random().nextBoolean()) {
        zkServer.shutdown();
        zkServer = new ZkTestServer(zkServer.getZkDir(), zkServer.getPort());
        zkServer.run();
      }

      CloudSolrServer client = createCloudClient("collection1");
      try {
        createCollection(null, "testcollection", 1, 1, 1, client, null, "conf1");

      } finally {
        client.shutdown();
      }
      List<Integer> numShardsNumReplicas = new ArrayList<Integer>(2);
      numShardsNumReplicas.add(1);
      numShardsNumReplicas.add(1);
      checkForCollection("testcollection", numShardsNumReplicas, null);

      testsSuccesful = true;
    } finally {
      if (!testsSuccesful) {
        printLayout();
      }
    }
  }
Code example #19
  private void init() throws Exception {
    // The states of the client that are invalid in this request
    Aliases aliases = null;
    String corename = "";
    String origCorename = null;
    // set a request timer which can be reused by requests if needed
    req.setAttribute(SolrRequestParsers.REQUEST_TIMER_SERVLET_ATTRIBUTE, new RTimerTree());
    // put the core container in request attribute
    req.setAttribute("org.apache.solr.CoreContainer", cores);
    path = req.getServletPath();
    if (req.getPathInfo() != null) {
      // this lets you handle /update/commit when /update is a servlet
      path += req.getPathInfo();
    }
    // check for management path
    String alternate = cores.getManagementPath();
    if (alternate != null && path.startsWith(alternate)) {
      path = path.substring(0, alternate.length());
    }
    // unused feature ?
    int idx = path.indexOf(':');
    if (idx > 0) {
      // save the portion after the ':' for a 'handler' path parameter
      path = path.substring(0, idx);
    }

    boolean usingAliases = false;

    // Check for container handlers
    handler = cores.getRequestHandler(path);
    if (handler != null) {
      solrReq = SolrRequestParsers.DEFAULT.parse(null, path, req);
      solrReq.getContext().put(CoreContainer.class.getName(), cores);
      requestType = RequestType.ADMIN;
      action = ADMIN;
      return;
    } else {
      // otherwise, we should find a core from the path
      idx = path.indexOf("/", 1);
      if (idx > 1) {
        // try to get the corename as a request parameter first
        corename = path.substring(1, idx);

        // look at aliases
        if (cores.isZooKeeperAware()) {
          origCorename = corename;
          ZkStateReader reader = cores.getZkController().getZkStateReader();
          aliases = reader.getAliases();
          if (aliases != null && aliases.collectionAliasSize() > 0) {
            usingAliases = true;
            String alias = aliases.getCollectionAlias(corename);
            if (alias != null) {
              collectionsList = StrUtils.splitSmart(alias, ",", true);
              corename = collectionsList.get(0);
            }
          }
        }

        core = cores.getCore(corename);
        if (core != null) {
          path = path.substring(idx);
        } else if (cores.isCoreLoading(
            corename)) { // extra mem barriers, so don't look at this before trying to get core
          throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "SolrCore is loading");
        } else {
          // the core may have just finished loading
          core = cores.getCore(corename);
          if (core != null) {
            path = path.substring(idx);
          }
        }
      }
      if (core == null) {
        if (!cores.isZooKeeperAware()) {
          core = cores.getCore("");
        }
      }
    }

    if (core == null && cores.isZooKeeperAware()) {
      // we couldn't find the core - lets make sure a collection was not specified instead
      core = getCoreByCollection(corename);
      if (core != null) {
        // we found a core, update the path
        path = path.substring(idx);
        if (collectionsList == null) collectionsList = new ArrayList<>();
        collectionsList.add(corename);
      }

      // if we couldn't find it locally, look on other nodes
      extractRemotePath(corename, origCorename, idx);
      if (action != null) return;
    }

    // With a valid core...
    if (core != null) {
      MDCLoggingContext.setCore(core);
      config = core.getSolrConfig();
      // get or create/cache the parser for the core
      SolrRequestParsers parser = config.getRequestParsers();

      // Determine the handler from the url path if not set
      // (we might already have selected the cores handler)
      extractHandlerFromURLPath(parser);
      if (action != null) return;

      // With a valid handler and a valid core...
      if (handler != null) {
        // if not a /select, create the request
        if (solrReq == null) {
          solrReq = parser.parse(core, path, req);
        }

        if (usingAliases) {
          processAliases(aliases, collectionsList);
        }

        action = PROCESS;
        return; // we are done with a valid handler
      }
    }
    log.debug("no handler or core retrieved for " + path + ", follow through...");

    action = PASSTHROUGH;
  }
Code example #20
  private void testCollectionsAPI() throws Exception {

    // TODO: fragile - because we don't pass collection.confName, it will only
    // find a default if a conf set with a name matching the collection name is found, or
    // if there is only one conf set. That and the fact that other tests run first in this
    // env make this pretty fragile

    // create new collections rapid fire
    Map<String, List<Integer>> collectionInfos = new HashMap<String, List<Integer>>();
    int cnt = random().nextInt(TEST_NIGHTLY ? 6 : 3) + 1;

    for (int i = 0; i < cnt; i++) {
      int numShards = _TestUtil.nextInt(random(), 0, shardCount) + 1;
      int replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 1;
      int maxShardsPerNode =
          (((numShards * replicationFactor)
                  / getCommonCloudSolrServer()
                      .getZkStateReader()
                      .getClusterState()
                      .getLiveNodes()
                      .size()))
              + 1;

      CloudSolrServer client = null;
      try {
        if (i == 0) {
          // Test if we can create a collection through CloudSolrServer where
          // you haven't set default-collection
          // This is nice because you want to be able to create your first
          // collection using CloudSolrServer, and in such a case there is
          // nothing reasonable to set as default-collection
          client = createCloudClient(null);
        } else if (i == 1) {
          // Test if we can create a collection through CloudSolrServer where
          // you have set default-collection to a non-existing collection
          // This is nice because you want to be able to create your first
          // collection using CloudSolrServer, and in such a case there is
          // nothing reasonable to set as default-collection, but you might want
          // to use the same CloudSolrServer throughout the entire
          // lifetime of your client-application, so it is nice to be able to
          // set a default-collection on this CloudSolrServer once and for all
          // and use this CloudSolrServer to create the collection
          client = createCloudClient("awholynewcollection_" + i);
        }
        if (secondConfigSet) {
          createCollection(
              collectionInfos,
              "awholynewcollection_" + i,
              numShards,
              replicationFactor,
              maxShardsPerNode,
              client,
              null,
              "conf2");
        } else {
          createCollection(
              collectionInfos,
              "awholynewcollection_" + i,
              numShards,
              replicationFactor,
              maxShardsPerNode,
              client,
              null);
        }
      } finally {
        if (client != null) client.shutdown();
      }
    }

    Set<Entry<String, List<Integer>>> collectionInfosEntrySet = collectionInfos.entrySet();
    for (Entry<String, List<Integer>> entry : collectionInfosEntrySet) {
      String collection = entry.getKey();
      List<Integer> list = entry.getValue();
      checkForCollection(collection, list, null);

      String url = getUrlFromZk(collection);

      HttpSolrServer collectionClient = new HttpSolrServer(url);

      // poll for a second - it can take a moment before we are ready to serve
      waitForNon403or404or503(collectionClient);
    }

    // sometimes we restart one of the jetty nodes
    if (random().nextBoolean()) {
      JettySolrRunner jetty = jettys.get(random().nextInt(jettys.size()));
      ChaosMonkey.stop(jetty);
      ChaosMonkey.start(jetty);

      for (Entry<String, List<Integer>> entry : collectionInfosEntrySet) {
        String collection = entry.getKey();
        List<Integer> list = entry.getValue();
        checkForCollection(collection, list, null);

        String url = getUrlFromZk(collection);

        HttpSolrServer collectionClient = new HttpSolrServer(url);

        // poll for a second - it can take a moment before we are ready to serve
        waitForNon403or404or503(collectionClient);
      }
    }

    // sometimes we restart zookeeper
    if (random().nextBoolean()) {
      zkServer.shutdown();
      zkServer = new ZkTestServer(zkServer.getZkDir(), zkServer.getPort());
      zkServer.run();
    }

    // sometimes we cause a connection loss - sometimes it will hit the overseer
    if (random().nextBoolean()) {
      JettySolrRunner jetty = jettys.get(random().nextInt(jettys.size()));
      ChaosMonkey.causeConnectionLoss(jetty);
    }

    ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
    for (int j = 0; j < cnt; j++) {
      waitForRecoveriesToFinish("awholynewcollection_" + j, zkStateReader, false);

      if (secondConfigSet) {
        // let's see if they are using the second config set
        byte[] data =
            zkStateReader
                .getZkClient()
                .getData(
                    ZkStateReader.COLLECTIONS_ZKNODE + "/" + "awholynewcollection_" + j,
                    null,
                    null,
                    true);
        assertNotNull(data);
        ZkNodeProps props = ZkNodeProps.load(data);
        String configName = props.getStr(ZkController.CONFIGNAME_PROP);
        assertEquals("conf2", configName);
      }
    }

    checkInstanceDirs(jettys.get(0));

    List<String> collectionNameList = new ArrayList<String>();
    collectionNameList.addAll(collectionInfos.keySet());
    String collectionName = collectionNameList.get(random().nextInt(collectionNameList.size()));

    String url = getUrlFromZk(collectionName);

    HttpSolrServer collectionClient = new HttpSolrServer(url);

    // lets try and use the solrj client to index a couple documents
    SolrInputDocument doc1 = getDoc(id, 6, i1, -600, tlong, 600, t1, "humpty dumpy sat on a wall");
    SolrInputDocument doc2 =
        getDoc(id, 7, i1, -600, tlong, 600, t1, "humpty dumpy3 sat on a walls");
    SolrInputDocument doc3 =
        getDoc(id, 8, i1, -600, tlong, 600, t1, "humpty dumpy2 sat on a walled");

    collectionClient.add(doc1);

    collectionClient.add(doc2);

    collectionClient.add(doc3);

    collectionClient.commit();

    assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());

    // lets try a collection reload

    // get core open times
    Map<String, Long> urlToTimeBefore = new HashMap<String, Long>();
    collectStartTimes(collectionName, urlToTimeBefore);
    assertTrue(urlToTimeBefore.size() > 0);
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionAction.RELOAD.toString());
    params.set("name", collectionName);
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");

    // we can use this client because we just want base url
    final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));

    createNewSolrServer("", baseUrl).request(request);

    // reloads may take a short while
    boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
    assertTrue("some core start times did not change on reload", allTimesAreCorrect);

    waitForRecoveriesToFinish("awholynewcollection_" + (cnt - 1), zkStateReader, false);

    // remove a collection
    params = new ModifiableSolrParams();
    params.set("action", CollectionAction.DELETE.toString());
    params.set("name", collectionName);
    request = new QueryRequest(params);
    request.setPath("/admin/collections");

    createNewSolrServer("", baseUrl).request(request);

    // ensure its out of the state
    checkForMissingCollection(collectionName);

    // collectionNameList.remove(collectionName);

    // remove an unknown collection
    params = new ModifiableSolrParams();
    params.set("action", CollectionAction.DELETE.toString());
    params.set("name", "unknown_collection");
    request = new QueryRequest(params);
    request.setPath("/admin/collections");

    boolean exp = false;
    try {
      createNewSolrServer("", baseUrl).request(request);
    } catch (SolrException e) {
      exp = true;
    }
    assertTrue("Expected exception", exp);

    // create another collection should still work
    params = new ModifiableSolrParams();
    params.set("action", CollectionAction.CREATE.toString());

    params.set("numShards", 1);
    params.set(REPLICATION_FACTOR, 2);
    collectionName = "acollectionafterbaddelete";

    params.set("name", collectionName);
    if (secondConfigSet) {
      params.set("collection.configName", "conf1");
    }
    request = new QueryRequest(params);
    request.setPath("/admin/collections");
    createNewSolrServer("", baseUrl).request(request);

    List<Integer> list = new ArrayList<Integer>(2);
    list.add(1);
    list.add(2);
    checkForCollection(collectionName, list, null);

    url = getUrlFromZk(collectionName);

    collectionClient = new HttpSolrServer(url);

    // poll for a second - it can take a moment before we are ready to serve
    waitForNon403or404or503(collectionClient);

    for (int j = 0; j < cnt; j++) {
      waitForRecoveriesToFinish(collectionName, zkStateReader, false);
    }

    // test maxShardsPerNode
    int numLiveNodes =
        getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes().size();
    int numShards = (numLiveNodes / 2) + 1;
    int replicationFactor = 2;
    int maxShardsPerNode = 1;
    collectionInfos = new HashMap<String, List<Integer>>();
    CloudSolrServer client = createCloudClient("awholynewcollection_" + cnt);
    try {
      exp = false;
      try {
        createCollection(
            collectionInfos,
            "awholynewcollection_" + cnt,
            numShards,
            replicationFactor,
            maxShardsPerNode,
            client,
            null,
            "conf1");
      } catch (SolrException e) {
        exp = true;
      }
      assertTrue("expected exception", exp);
    } finally {
      client.shutdown();
    }

    // Test createNodeSet
    numLiveNodes =
        getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes().size();
    List<String> createNodeList = new ArrayList<String>();
    int numOfCreateNodes = numLiveNodes / 2;
    assertFalse(
        "createNodeSet test is pointless with only " + numLiveNodes + " nodes running",
        numOfCreateNodes == 0);
    int i = 0;
    for (String liveNode :
        getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes()) {
      if (i < numOfCreateNodes) {
        createNodeList.add(liveNode);
        i++;
      } else {
        break;
      }
    }
    maxShardsPerNode = 2;
    numShards = createNodeList.size() * maxShardsPerNode;
    replicationFactor = 1;
    collectionInfos = new HashMap<String, List<Integer>>();
    client = createCloudClient("awholynewcollection_" + (cnt + 1));
    try {
      createCollection(
          collectionInfos,
          "awholynewcollection_" + (cnt + 1),
          numShards,
          replicationFactor,
          maxShardsPerNode,
          client,
          StrUtils.join(createNodeList, ','),
          "conf1");
    } finally {
      client.shutdown();
    }
    checkForCollection(
        collectionInfos.keySet().iterator().next(),
        collectionInfos.entrySet().iterator().next().getValue(),
        createNodeList);

    checkNoTwoShardsUseTheSameIndexDir();
  }
Code example #21
  private NamedList<Object> directUpdate(AbstractUpdateRequest request, ClusterState clusterState)
      throws SolrServerException {
    UpdateRequest updateRequest = (UpdateRequest) request;
    ModifiableSolrParams params = (ModifiableSolrParams) request.getParams();
    ModifiableSolrParams routableParams = new ModifiableSolrParams();
    ModifiableSolrParams nonRoutableParams = new ModifiableSolrParams();

    if (params != null) {
      nonRoutableParams.add(params);
      routableParams.add(params);
      for (String param : NON_ROUTABLE_PARAMS) {
        routableParams.remove(param);
      }
    }

    String collection = nonRoutableParams.get(UpdateParams.COLLECTION, defaultCollection);
    if (collection == null) {
      throw new SolrServerException(
          "No collection param specified on request and no default collection has been set.");
    }

    // Check to see if the collection is an alias.
    Aliases aliases = zkStateReader.getAliases();
    if (aliases != null) {
      Map<String, String> collectionAliases = aliases.getCollectionAliasMap();
      if (collectionAliases != null && collectionAliases.containsKey(collection)) {
        collection = collectionAliases.get(collection);
      }
    }

    DocCollection col = getDocCollection(clusterState, collection);

    DocRouter router = col.getRouter();

    if (router instanceof ImplicitDocRouter) {
      // short circuit as optimization
      return null;
    }

    // Create the URL map, which is keyed on slice name.
    // The value is a list of URLs for each replica in the slice.
    // The first value in the list is the leader for the slice.
    Map<String, List<String>> urlMap = buildUrlMap(col);
    if (urlMap == null) {
      // we could not find a leader yet - use unoptimized general path
      return null;
    }

    NamedList<Throwable> exceptions = new NamedList<>();
    NamedList<NamedList> shardResponses = new NamedList<>();

    Map<String, LBHttpSolrServer.Req> routes =
        updateRequest.getRoutes(router, col, urlMap, routableParams, this.idField);
    if (routes == null) {
      return null;
    }

    long start = System.nanoTime();

    if (parallelUpdates) {
      final Map<String, Future<NamedList<?>>> responseFutures = new HashMap<>(routes.size());
      for (final Map.Entry<String, LBHttpSolrServer.Req> entry : routes.entrySet()) {
        final String url = entry.getKey();
        final LBHttpSolrServer.Req lbRequest = entry.getValue();
        responseFutures.put(
            url,
            threadPool.submit(
                new Callable<NamedList<?>>() {
                  @Override
                  public NamedList<?> call() throws Exception {
                    return lbServer.request(lbRequest).getResponse();
                  }
                }));
      }

      for (final Map.Entry<String, Future<NamedList<?>>> entry : responseFutures.entrySet()) {
        final String url = entry.getKey();
        final Future<NamedList<?>> responseFuture = entry.getValue();
        try {
          shardResponses.add(url, responseFuture.get());
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        } catch (ExecutionException e) {
          exceptions.add(url, e.getCause());
        }
      }

      if (exceptions.size() > 0) {
        throw new RouteException(ErrorCode.SERVER_ERROR, exceptions, routes);
      }
    } else {
      for (Map.Entry<String, LBHttpSolrServer.Req> entry : routes.entrySet()) {
        String url = entry.getKey();
        LBHttpSolrServer.Req lbRequest = entry.getValue();
        try {
          NamedList<Object> rsp = lbServer.request(lbRequest).getResponse();
          shardResponses.add(url, rsp);
        } catch (Exception e) {
          throw new SolrServerException(e);
        }
      }
    }

    UpdateRequest nonRoutableRequest = null;
    List<String> deleteQuery = updateRequest.getDeleteQuery();
    if (deleteQuery != null && deleteQuery.size() > 0) {
      UpdateRequest deleteQueryRequest = new UpdateRequest();
      deleteQueryRequest.setDeleteQuery(deleteQuery);
      nonRoutableRequest = deleteQueryRequest;
    }

    Set<String> paramNames = nonRoutableParams.getParameterNames();

    Set<String> intersection = new HashSet<>(paramNames);
    intersection.retainAll(NON_ROUTABLE_PARAMS);

    if (nonRoutableRequest != null || intersection.size() > 0) {
      if (nonRoutableRequest == null) {
        nonRoutableRequest = new UpdateRequest();
      }
      nonRoutableRequest.setParams(nonRoutableParams);
      List<String> urlList = new ArrayList<>();
      urlList.addAll(routes.keySet());
      Collections.shuffle(urlList, rand);
      LBHttpSolrServer.Req req = new LBHttpSolrServer.Req(nonRoutableRequest, urlList);
      try {
        LBHttpSolrServer.Rsp rsp = lbServer.request(req);
        shardResponses.add(urlList.get(0), rsp.getResponse());
      } catch (Exception e) {
        throw new SolrException(ErrorCode.SERVER_ERROR, urlList.get(0), e);
      }
    }

    long end = System.nanoTime();

    RouteResponse rr = condenseResponse(shardResponses, (long) ((end - start) / 1000000));
    rr.setRouteResponses(shardResponses);
    rr.setRoutes(routes);
    return rr;
  }
Code example #22
  protected Set<String> commonMocks(int liveNodesCount) throws Exception {

    shardHandlerFactoryMock.getShardHandler();
    expectLastCall()
        .andAnswer(
            new IAnswer<ShardHandler>() {
              @Override
              public ShardHandler answer() throws Throwable {
                log.info("SHARDHANDLER");
                return shardHandlerMock;
              }
            })
        .anyTimes();
    workQueueMock.peekTopN(EasyMock.anyInt(), anyObject(Set.class), EasyMock.anyLong());
    expectLastCall()
        .andAnswer(
            new IAnswer<List>() {
              @Override
              public List answer() throws Throwable {
                Object result;
                int count = 0;
                while ((result = queue.peek()) == null) {
                  Thread.sleep(1000);
                  count++;
                  if (count > 1) return null;
                }

                return Arrays.asList(result);
              }
            })
        .anyTimes();

    workQueueMock.getTailId();
    expectLastCall()
        .andAnswer(
            new IAnswer<Object>() {
              @Override
              public Object answer() throws Throwable {
                Object result = null;
                Iterator iter = queue.iterator();
                while (iter.hasNext()) {
                  result = iter.next();
                }
                return result == null ? null : ((QueueEvent) result).getId();
              }
            })
        .anyTimes();

    workQueueMock.peek(true);
    expectLastCall()
        .andAnswer(
            new IAnswer<Object>() {
              @Override
              public Object answer() throws Throwable {
                Object result;
                while ((result = queue.peek()) == null) {
                  Thread.sleep(1000);
                }
                return result;
              }
            })
        .anyTimes();

    workQueueMock.remove(anyObject(QueueEvent.class));
    expectLastCall()
        .andAnswer(
            new IAnswer<Object>() {
              @Override
              public Object answer() throws Throwable {
                queue.remove((QueueEvent) getCurrentArguments()[0]);
                return null;
              }
            })
        .anyTimes();

    workQueueMock.poll();
    expectLastCall()
        .andAnswer(
            new IAnswer<Object>() {
              @Override
              public Object answer() throws Throwable {
                return queue.poll();
              }
            })
        .anyTimes();

    zkStateReaderMock.getClusterState();
    expectLastCall()
        .andAnswer(
            new IAnswer<Object>() {
              @Override
              public Object answer() throws Throwable {
                return clusterStateMock;
              }
            })
        .anyTimes();

    zkStateReaderMock.getZkClient();
    expectLastCall()
        .andAnswer(
            new IAnswer<Object>() {
              @Override
              public Object answer() throws Throwable {
                return solrZkClientMock;
              }
            })
        .anyTimes();

    zkStateReaderMock.updateClusterState(anyBoolean());

    clusterStateMock.getCollections();
    expectLastCall()
        .andAnswer(
            new IAnswer<Object>() {
              @Override
              public Object answer() throws Throwable {
                return collectionsSet;
              }
            })
        .anyTimes();
    final Set<String> liveNodes = new HashSet<>();
    for (int i = 0; i < liveNodesCount; i++) {
      final String address = "localhost:" + (8963 + i) + "_solr";
      liveNodes.add(address);

      zkStateReaderMock.getBaseUrlForNodeName(address);
      expectLastCall()
          .andAnswer(
              new IAnswer<Object>() {
                @Override
                public Object answer() throws Throwable {
                  // This works as long as this test does not use a
                  // webapp context with an underscore in it
                  return address.replaceAll("_", "/");
                }
              })
          .anyTimes();
    }
    zkStateReaderMock.getClusterProps();
    expectLastCall()
        .andAnswer(
            new IAnswer<Map>() {
              @Override
              public Map answer() throws Throwable {
                return new HashMap();
              }
            });

    solrZkClientMock.getZkClientTimeout();
    expectLastCall()
        .andAnswer(
            new IAnswer<Object>() {
              @Override
              public Object answer() throws Throwable {
                return 30000;
              }
            })
        .anyTimes();

    clusterStateMock.hasCollection(anyObject(String.class));
    expectLastCall()
        .andAnswer(
            new IAnswer<Boolean>() {
              @Override
              public Boolean answer() throws Throwable {
                String key = (String) getCurrentArguments()[0];
                return collectionsSet.contains(key);
              }
            })
        .anyTimes();

    clusterStateMock.getLiveNodes();
    expectLastCall()
        .andAnswer(
            new IAnswer<Object>() {
              @Override
              public Object answer() throws Throwable {
                return liveNodes;
              }
            })
        .anyTimes();
    solrZkClientMock.create(
        anyObject(String.class),
        anyObject(byte[].class),
        anyObject(CreateMode.class),
        anyBoolean());
    expectLastCall()
        .andAnswer(
            new IAnswer<String>() {
              @Override
              public String answer() throws Throwable {
                String key = (String) getCurrentArguments()[0];
                zkMap.put(key, null);
                handleCreateCollMessage((byte[]) getCurrentArguments()[1]);
                return key;
              }
            })
        .anyTimes();

    solrZkClientMock.makePath(anyObject(String.class), anyObject(byte[].class), anyBoolean());
    expectLastCall()
        .andAnswer(
            new IAnswer<String>() {
              @Override
              public String answer() throws Throwable {
                String key = (String) getCurrentArguments()[0];
                return key;
              }
            })
        .anyTimes();

    solrZkClientMock.makePath(
        anyObject(String.class),
        anyObject(byte[].class),
        anyObject(CreateMode.class),
        anyBoolean());
    expectLastCall()
        .andAnswer(
            new IAnswer<String>() {
              @Override
              public String answer() throws Throwable {
                String key = (String) getCurrentArguments()[0];
                return key;
              }
            })
        .anyTimes();

    solrZkClientMock.exists(anyObject(String.class), anyBoolean());
    expectLastCall()
        .andAnswer(
            new IAnswer<Boolean>() {
              @Override
              public Boolean answer() throws Throwable {
                String key = (String) getCurrentArguments()[0];
                return zkMap.containsKey(key);
              }
            })
        .anyTimes();

    zkMap.put("/configs/myconfig", null);

    return liveNodes;
  }
Code example #23
  /**
   * As this class doesn't watch external collections on the client side, there's a chance that the
   * request will fail due to cached stale state, which means the state must be refreshed from ZK
   * and retried.
   */
  protected NamedList<Object> requestWithRetryOnStaleState(
      SolrRequest request, int retryCount, String collection)
      throws SolrServerException, IOException {

    connect(); // important to call this before you start working with the ZkStateReader

    // build up a _stateVer_ param to pass to the server containing all of the
    // external collection state versions involved in this request, which allows
    // the server to notify us that our cached state for one or more of the external
    // collections is stale and needs to be refreshed ... this code has no impact on internal
    // collections
    String stateVerParam = null;
    List<DocCollection> requestedCollections = null;
    if (collection != null
        && !request
            .getPath()
            .startsWith("/admin")) { // don't do _stateVer_ checking for admin requests
      Set<String> requestedCollectionNames =
          getCollectionList(getZkStateReader().getClusterState(), collection);

      StringBuilder stateVerParamBuilder = null;
      for (String requestedCollection : requestedCollectionNames) {
        // track the version of state we're using on the client side using the _stateVer_ param
        DocCollection coll =
            getDocCollection(getZkStateReader().getClusterState(), requestedCollection);
        int collVer = coll.getZNodeVersion();
        if (coll.getStateFormat() > 1) {
          if (requestedCollections == null)
            requestedCollections = new ArrayList<>(requestedCollectionNames.size());
          requestedCollections.add(coll);

          if (stateVerParamBuilder == null) {
            stateVerParamBuilder = new StringBuilder();
          } else {
            stateVerParamBuilder.append(
                "|"); // hopefully pipe is not an allowed char in a collection name
          }

          stateVerParamBuilder.append(coll.getName()).append(":").append(collVer);
        }
      }

      if (stateVerParamBuilder != null) {
        stateVerParam = stateVerParamBuilder.toString();
      }
    }

    if (request.getParams() instanceof ModifiableSolrParams) {
      ModifiableSolrParams params = (ModifiableSolrParams) request.getParams();
      if (stateVerParam != null) {
        params.set(STATE_VERSION, stateVerParam);
      } else {
        params.remove(STATE_VERSION);
      }
    } // else: ??? how to set this ???

    NamedList<Object> resp = null;
    try {
      resp = sendRequest(request);
    } catch (Exception exc) {

      Throwable rootCause = SolrException.getRootCause(exc);
      // don't do retry support for admin requests or if the request doesn't have a collection
      // specified
      if (collection == null || request.getPath().startsWith("/admin")) {
        if (exc instanceof SolrServerException) {
          throw (SolrServerException) exc;
        } else if (exc instanceof IOException) {
          throw (IOException) exc;
        } else if (exc instanceof RuntimeException) {
          throw (RuntimeException) exc;
        } else {
          throw new SolrServerException(rootCause);
        }
      }

      int errorCode =
          (rootCause instanceof SolrException)
              ? ((SolrException) rootCause).code()
              : SolrException.ErrorCode.UNKNOWN.code;

      log.error(
          "Request to collection {} failed due to (" + errorCode + ") {}, retry? " + retryCount,
          collection,
          rootCause.toString());

      boolean wasCommError =
          (rootCause instanceof ConnectException
              || rootCause instanceof ConnectTimeoutException
              || rootCause instanceof NoHttpResponseException
              || rootCause instanceof SocketException);

      boolean stateWasStale = false;
      if (retryCount < MAX_STALE_RETRIES
          && requestedCollections != null
          && !requestedCollections.isEmpty()
          && SolrException.ErrorCode.getErrorCode(errorCode)
              == SolrException.ErrorCode.INVALID_STATE) {
        // cached state for one or more external collections was stale
        // re-issue request using updated state
        stateWasStale = true;

        // just re-read state for all of them, which is a little heavy handed but hopefully a rare
        // occurrence
        for (DocCollection ext : requestedCollections) {
          collectionStateCache.remove(ext.getName());
        }
      }

      // if we experienced a communication error, it's worth checking the state
      // with ZK just to make sure the node we're trying to hit is still part of the collection
      if (retryCount < MAX_STALE_RETRIES
          && !stateWasStale
          && requestedCollections != null
          && !requestedCollections.isEmpty()
          && wasCommError) {
        for (DocCollection ext : requestedCollections) {
          DocCollection latestStateFromZk =
              getDocCollection(zkStateReader.getClusterState(), ext.getName());
          if (latestStateFromZk.getZNodeVersion() != ext.getZNodeVersion()) {
            // looks like we couldn't reach the server because the state was stale == retry
            stateWasStale = true;
            // we just pulled state from ZK, so update the cache so that the retry uses it
            collectionStateCache.put(
                ext.getName(), new ExpiringCachedDocCollection(latestStateFromZk));
          }
        }
      }

      if (requestedCollections != null) {
        requestedCollections.clear(); // done with this
      }

      // if the state was stale, then we retry the request once with new state pulled from Zk
      if (stateWasStale) {
        log.warn(
            "Re-trying request to collection(s) "
                + collection
                + " after stale state error from server.");
        resp = requestWithRetryOnStaleState(request, retryCount + 1, collection);
      } else {
        if (exc instanceof SolrServerException) {
          throw (SolrServerException) exc;
        } else if (exc instanceof IOException) {
          throw (IOException) exc;
        } else {
          throw new SolrServerException(rootCause);
        }
      }
    }

    return resp;
  }
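
A minimal usage sketch for the retry path above. The ZooKeeper address ("localhost:9983") and the collection name ("collection1") are placeholders, and the builder-style construction assumes a SolrJ 6.x-era API (older releases expose a plain CloudSolrClient(zkHost) constructor instead). Every request issued through the client funnels into requestWithRetryOnStaleState, so a stale cached DocCollection is dropped and the request re-issued at most MAX_STALE_RETRIES times.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class StaleStateRetryDemo {
  public static void main(String[] args) throws Exception {
    // zkHost and collection name are placeholders for this sketch
    try (CloudSolrClient client =
        new CloudSolrClient.Builder().withZkHost("localhost:9983").build()) {
      client.setDefaultCollection("collection1");
      // The query carries the _stateVer_ param built above; if the server answers with
      // INVALID_STATE, the cached collection state is evicted and the request retried.
      QueryResponse rsp = client.query(new SolrQuery("*:*"));
      System.out.println("numFound=" + rsp.getResults().getNumFound());
    }
  }
}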
Code Example #24
  protected NamedList<Object> sendRequest(SolrRequest request)
      throws SolrServerException, IOException {
    connect();

    ClusterState clusterState = zkStateReader.getClusterState();

    boolean sendToLeaders = false;
    List<String> replicas = null;

    if (request instanceof IsUpdateRequest) {
      if (request instanceof UpdateRequest) {
        NamedList<Object> response = directUpdate((AbstractUpdateRequest) request, clusterState);
        if (response != null) {
          return response;
        }
      }
      sendToLeaders = true;
      replicas = new ArrayList<>();
    }

    SolrParams reqParams = request.getParams();
    if (reqParams == null) {
      reqParams = new ModifiableSolrParams();
    }
    List<String> theUrlList = new ArrayList<>();
    if (request.getPath().equals("/admin/collections")
        || request.getPath().equals("/admin/cores")) {
      Set<String> liveNodes = clusterState.getLiveNodes();
      for (String liveNode : liveNodes) {
        theUrlList.add(zkStateReader.getBaseUrlForNodeName(liveNode));
      }
    } else {
      String collection = reqParams.get(UpdateParams.COLLECTION, defaultCollection);

      if (collection == null) {
        throw new SolrServerException(
            "No collection param specified on request and no default collection has been set.");
      }

      Set<String> collectionsList = getCollectionList(clusterState, collection);
      if (collectionsList.size() == 0) {
        throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection: " + collection);
      }

      String shardKeys = reqParams.get(ShardParams._ROUTE_);
      if (shardKeys == null) {
        shardKeys = reqParams.get(ShardParams.SHARD_KEYS); // deprecated
      }

      // TODO: not a big deal because of the caching, but we could avoid looking
      // at every shard when getting leaders if we tweaked some things

      // Retrieve slices from the cloud state and, for each collection specified,
      // add them to the Map of slices.
      Map<String, Slice> slices = new HashMap<>();
      for (String collectionName : collectionsList) {
        DocCollection col = getDocCollection(clusterState, collectionName);
        Collection<Slice> routeSlices = col.getRouter().getSearchSlices(shardKeys, reqParams, col);
        ClientUtils.addSlices(slices, collectionName, routeSlices, true);
      }
      Set<String> liveNodes = clusterState.getLiveNodes();

      List<String> leaderUrlList = null;
      List<String> urlList = null;
      List<String> replicasList = null;

      // build a map of unique nodes
      // TODO: allow filtering by group, role, etc
      Map<String, ZkNodeProps> nodes = new HashMap<>();
      List<String> urlList2 = new ArrayList<>();
      for (Slice slice : slices.values()) {
        for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
          ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
          String node = coreNodeProps.getNodeName();
          if (!liveNodes.contains(coreNodeProps.getNodeName())
              || !coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) continue;
          if (nodes.put(node, nodeProps) == null) {
            if (!sendToLeaders || coreNodeProps.isLeader()) {
              String url;
              if (reqParams.get(UpdateParams.COLLECTION) == null) {
                url =
                    ZkCoreNodeProps.getCoreUrl(
                        nodeProps.getStr(ZkStateReader.BASE_URL_PROP), defaultCollection);
              } else {
                url = coreNodeProps.getCoreUrl();
              }
              urlList2.add(url);
            } else if (sendToLeaders) {
              String url;
              if (reqParams.get(UpdateParams.COLLECTION) == null) {
                url =
                    ZkCoreNodeProps.getCoreUrl(
                        nodeProps.getStr(ZkStateReader.BASE_URL_PROP), defaultCollection);
              } else {
                url = coreNodeProps.getCoreUrl();
              }
              replicas.add(url);
            }
          }
        }
      }

      if (sendToLeaders) {
        leaderUrlList = urlList2;
        replicasList = replicas;
      } else {
        urlList = urlList2;
      }

      if (sendToLeaders) {
        theUrlList = new ArrayList<>(leaderUrlList.size());
        theUrlList.addAll(leaderUrlList);
      } else {
        theUrlList = new ArrayList<>(urlList.size());
        theUrlList.addAll(urlList);
      }
      if (theUrlList.isEmpty()) {
        throw new SolrException(
            SolrException.ErrorCode.INVALID_STATE, "Not enough nodes to handle the request");
      }

      Collections.shuffle(theUrlList, rand);
      if (sendToLeaders) {
        ArrayList<String> theReplicas = new ArrayList<>(replicasList.size());
        theReplicas.addAll(replicasList);
        Collections.shuffle(theReplicas, rand);
        theUrlList.addAll(theReplicas);
      }
    }

    LBHttpSolrServer.Req req = new LBHttpSolrServer.Req(request, theUrlList);
    LBHttpSolrServer.Rsp rsp = lbServer.request(req);
    return rsp.getResponse();
  }
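
A short usage sketch for the routing logic above, assuming the Solr 4.x-era SolrJ API this snippet comes from (CloudSolrServer wrapping an LBHttpSolrServer); the ZooKeeper address and collection name are placeholders. An update request takes the sendToLeaders branch and is tried against shard leaders first, while a plain query is shuffled across all active replicas.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.common.SolrInputDocument;

public class SendRequestRoutingDemo {
  public static void main(String[] args) throws Exception {
    CloudSolrServer server = new CloudSolrServer("localhost:9983"); // placeholder zkHost
    server.setDefaultCollection("collection1"); // placeholder collection
    try {
      // Update: sendRequest() sees an IsUpdateRequest, so sendToLeaders == true and
      // leader core URLs are tried first, with the remaining replicas as fallbacks.
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("id", "1");
      server.add(doc);
      server.commit();

      // Query: no leader preference; the list of all active replica URLs is shuffled.
      long numFound = server.query(new SolrQuery("id:1")).getResults().getNumFound();
      System.out.println("numFound=" + numFound);
    } finally {
      server.shutdown();
    }
  }
}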
Code Example #25
File: RequestProcesser.java Project: vootoo/vootoo
  public void handleRequest(RequestGetter requestGetter) {
    MDCLoggingContext.reset();
    MDCLoggingContext.setNode(cores);

    String path = requestGetter.getPath();
    solrParams = requestGetter.getSolrParams();
    SolrRequestHandler handler = null;
    String corename = "";
    String origCorename = null;
    try {
      // set a request timer which can be reused by requests if needed
      // req.setAttribute(SolrRequestParsers.REQUEST_TIMER_SERVLET_ATTRIBUTE, new RTimer());
      // put the core container in request attribute
      // req.setAttribute("org.apache.solr.CoreContainer", cores);
      // check for management path
      String alternate = cores.getManagementPath();
      if (alternate != null && path.startsWith(alternate)) {
        path = path.substring(0, alternate.length());
      }
      // unused feature ?
      int idx = path.indexOf(':');
      if (idx > 0) {
        // save the portion after the ':' for a 'handler' path parameter
        path = path.substring(0, idx);
      }

      boolean usingAliases = false;
      List<String> collectionsList = null;

      // Check for container handlers
      handler = cores.getRequestHandler(path);
      if (handler != null) {
        solrReq = parseSolrQueryRequest(SolrRequestParsers.DEFAULT, requestGetter);
        handleAdminRequest(handler, solrReq);
        return;
      } else {
        // otherwise, we should find a core from the path
        idx = path.indexOf("/", 1);
        if (idx > 1) {
          // try to get the corename as a request parameter first
          corename = path.substring(1, idx);

          // look at aliases
          if (cores.isZooKeeperAware()) {
            origCorename = corename;
            ZkStateReader reader = cores.getZkController().getZkStateReader();
            aliases = reader.getAliases();
            if (aliases != null && aliases.collectionAliasSize() > 0) {
              usingAliases = true;
              String alias = aliases.getCollectionAlias(corename);
              if (alias != null) {
                collectionsList = StrUtils.splitSmart(alias, ",", true);
                corename = collectionsList.get(0);
              }
            }
          }

          core = cores.getCore(corename);

          if (core != null) {
            path = path.substring(idx);
          }
        }

        // add collection name
        if (core == null && StringUtils.isNotBlank(requestGetter.getCollection())) {
          corename = requestGetter.getCollection();
          core = cores.getCore(corename);
        }

        if (core == null) {
          if (!cores.isZooKeeperAware()) {
            core = cores.getCore("");
          }
        }
      }

      if (core == null && cores.isZooKeeperAware()) {
        // we couldn't find the core - lets make sure a collection was not specified instead
        core = getCoreByCollection(cores, corename);

        if (core != null) {
          // we found a core, update the path
          path = path.substring(idx);
        }

        // try the default core
        if (core == null) {
          core = cores.getCore("");
        }
      }

      // With a valid core...
      if (core != null) {
        MDCLoggingContext.setCore(core);
        final SolrConfig config = core.getSolrConfig();
        // get or create/cache the parser for the core
        SolrRequestParsers parser = config.getRequestParsers();

        // Determine the handler from the url path if not set
        // (we might already have selected the cores handler)
        if (handler == null && path.length() > 1) { // don't match "" or "/" as valid path
          handler = core.getRequestHandler(path);

          if (handler == null) {
            // may be a restlet path
            // Handle /schema/* paths via Restlet
            if (path.equals("/schema") || path.startsWith("/schema/")) {
              throw new SolrException(
                  SolrException.ErrorCode.BAD_REQUEST, "unsupport /schema/**, use http solr");
            }
          }
          // no handler yet but allowed to handle select; let's check
          if (handler == null && parser.isHandleSelect()) {
            if ("/select".equals(path) || "/select/".equals(path)) {
              solrReq = parseSolrQueryRequest(parser, requestGetter);

              invalidStates =
                  checkStateIsValid(cores, solrReq.getParams().get(CloudSolrClient.STATE_VERSION));
              String qt = solrReq.getParams().get(CommonParams.QT);
              handler = core.getRequestHandler(qt);
              if (handler == null) {
                throw new SolrException(
                    SolrException.ErrorCode.BAD_REQUEST, "unknown handler: " + qt);
              }
              if (qt != null
                  && qt.startsWith("/")
                  && (handler instanceof ContentStreamHandlerBase)) {
                // For security reasons it's a bad idea to allow a leading '/', ex:
                // /select?qt=/update see SOLR-3161
                // There was no restriction from Solr 1.4 thru 3.5 and it's not supported for update
                // handlers.
                throw new SolrException(
                    SolrException.ErrorCode.BAD_REQUEST,
                    "Invalid Request Handler ('qt').  Do not use /select to access: " + qt);
              }
            }
          }
        }

        // With a valid handler and a valid core...
        if (handler != null) {
          // if not a /select, create the request
          if (solrReq == null) {
            solrReq = parseSolrQueryRequest(parser, requestGetter);
          }

          if (usingAliases) {
            processAliases(solrReq, aliases, collectionsList);
          }

          SolrQueryResponse solrRsp = new SolrQueryResponse();
          SolrRequestInfo.setRequestInfo(new SolrRequestInfo(solrReq, solrRsp));
          this.execute(handler, solrReq, solrRsp);
          QueryResponseWriter responseWriter = core.getQueryResponseWriter(solrReq);
          if (invalidStates != null)
            solrReq.getContext().put(CloudSolrClient.STATE_VERSION, invalidStates);
          writeResponse(solrRsp, responseWriter, solrReq);

          return; // we are done with a valid handler
        }
      }
      logger.debug("no handler or core retrieved for {}, follow through...", path);
      throw new SolrException(
          SolrException.ErrorCode.BAD_REQUEST, "no handler or core retrieved for " + path);
    } catch (Throwable ex) {
      sendError(core, solrReq, ex);
      // walk the entire cause chain to search for an Error
      Throwable t = ex;
      while (t != null) {
        if (t instanceof Error) {
          if (t != ex) {
            logger.error(
                "An Error was wrapped in another exception - please report complete stacktrace on SOLR-6161",
                ex);
          }
          throw (Error) t;
        }
        t = t.getCause();
      }
      return;
    } finally {
      try {
        if (solrReq != null) {
          logger.debug("Closing out SolrRequest: {}", solrReq);
          solrReq.close();
        }
      } finally {
        try {
          if (core != null) {
            core.close();
          }
        } finally {
          SolrRequestInfo.clearRequestInfo();
        }
      }
      MDCLoggingContext.clear();
    }
  }
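
The method above packs core-name extraction, alias resolution, and handler lookup into one flow. A standalone sketch of just the path parsing (a hypothetical helper, not part of the vootoo project) shows the order in which the ':' suffix and the leading path segment are handled:

public class PathParsingSketch {

  /** Returns the core/collection name embedded in a request path such as "/collection1/select". */
  static String extractCoreName(String path) {
    // 1) strip any ":handler" suffix, mirroring the indexOf(':') block in handleRequest()
    int colon = path.indexOf(':');
    if (colon > 0) {
      path = path.substring(0, colon);
    }
    // 2) the segment between the first and second '/' is treated as the core name;
    //    if there is no second '/', the default core ("") is used instead
    int idx = path.indexOf('/', 1);
    return (idx > 1) ? path.substring(1, idx) : "";
  }

  public static void main(String[] args) {
    System.out.println(extractCoreName("/collection1/select"));      // collection1
    System.out.println(extractCoreName("/collection1/update:json")); // collection1
    System.out.println(extractCoreName("/select"));                  // "" -> default core
  }
}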