Code example #1
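Deleting a shard's data directory while holding its shard lock. This looks like NodeEnvironment#deleteShardDirectoryUnderLock from Elasticsearch: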
 /**
  * Deletes a shard's data directory. Note: this method assumes that the shard lock is already
  * acquired. It will also attempt to acquire the write.lock for each of the shard's paths before
  * deleting the data, but this is best effort only: each write.lock has to be released again
  * before the deletion so that the folder itself can be removed.
  *
  * @param lock the shard's lock
  * @param indexSettings settings of the index to which the shard belongs
  * @throws IOException if deleting the shard data fails
  * @throws ElasticsearchException if the write.lock is not acquirable
  */
 public void deleteShardDirectoryUnderLock(ShardLock lock, IndexSettings indexSettings)
     throws IOException {
   final ShardId shardId = lock.getShardId();
   assert isShardLocked(shardId) : "shard " + shardId + " is not locked";
   final Path[] paths = availableShardPaths(shardId);
   logger.trace("acquiring locks for {}, paths: [{}]", shardId, paths);
   acquireFSLockForPaths(indexSettings, paths);
   IOUtils.rm(paths);
   if (indexSettings.hasCustomDataPath()) {
     Path customLocation = resolveCustomLocation(indexSettings, shardId);
     logger.trace("acquiring lock for {}, custom path: [{}]", shardId, customLocation);
     acquireFSLockForPaths(indexSettings, customLocation);
     logger.trace("deleting custom shard {} directory [{}]", shardId, customLocation);
     IOUtils.rm(customLocation);
   }
   logger.trace("deleted shard {} directory, paths: [{}]", shardId, paths);
    assert FileSystemUtils.exists(paths) == false; // no shard path may still exist
 }
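For context, a minimal caller sketch (not part of the original example, imports omitted as in the examples above): it assumes a NodeEnvironment instance named nodeEnv and an IndexSettings value named indexSettings are in scope, and uses the shardLock(ShardId, long) overload that NodeEnvironment exposes in this era of Elasticsearch. ShardLock is Closeable, so try-with-resources releases it.

// Hypothetical usage sketch; nodeEnv and indexSettings are assumptions, not taken from the example.
ShardId shardId = new ShardId("test", 0);
// Acquire the shard lock first: deleteShardDirectoryUnderLock asserts that it is held.
try (ShardLock lock = nodeEnv.shardLock(shardId, 5000 /* lock timeout in ms, arbitrary */)) {
  nodeEnv.deleteShardDirectoryUnderLock(lock, indexSettings);
} // the shard lock is released here, after the directory has been removed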
Code example #2
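An integration test that exercises explicit shard allocation through the cluster reroute API: it allocates a fresh primary, shuts the cluster down, deletes the shard's data on disk, and then force-allocates the now-empty primary again. This resembles the rerouteWithAllocateLocalGateway test from Elasticsearch's cluster reroute test suite: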
  private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exception {
    logger.info("--> starting 2 nodes");
    String node_1 = cluster().startNode(commonSettings);
    cluster().startNode(commonSettings);
    assertThat(cluster().size(), equalTo(2));
    ClusterHealthResponse healthResponse =
        client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));

    logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate");
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(settingsBuilder().put("index.number_of_shards", 1))
        .execute()
        .actionGet();

    ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
    assertThat(state.routingNodes().unassigned().size(), equalTo(2)); // 1 primary + 1 replica (the default), neither assigned yet

    logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
    state =
        client()
            .admin()
            .cluster()
            .prepareReroute()
            .setExplain(randomBoolean())
            .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true)) // last arg: allowPrimary
            .execute()
            .actionGet()
            .getState();
    assertThat(state.routingNodes().unassigned().size(), equalTo(1)); // only the replica is still unassigned
    assertThat(
        state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(),
        equalTo(ShardRoutingState.INITIALIZING));

    healthResponse =
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForYellowStatus()
            .execute()
            .actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));

    logger.info("--> get the state, verify shard 1 primary allocated");
    state = client().admin().cluster().prepareState().execute().actionGet().getState();
    assertThat(state.routingNodes().unassigned().size(), equalTo(1));
    assertThat(
        state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(),
        equalTo(ShardRoutingState.STARTED));

    client()
        .prepareIndex("test", "type", "1")
        .setSource("field", "value")
        .setRefresh(true)
        .execute()
        .actionGet();

    logger.info("--> closing all nodes");
    File[] shardLocation =
        cluster().getInstance(NodeEnvironment.class, node_1).shardLocations(new ShardId("test", 0));
    assertThat(
        FileSystemUtils.exists(shardLocation), equalTo(true)); // make sure the data is there!
    cluster()
        .closeNonSharedNodes(false); // don't wipe data directories; the index needs to be there!

    logger.info("--> deleting the shard data [{}] ", Arrays.toString(shardLocation));
    assertThat(
        FileSystemUtils.exists(shardLocation),
        equalTo(true)); // verify again after cluster was shut down
    assertThat(FileSystemUtils.deleteRecursively(shardLocation), equalTo(true));

    logger.info(
        "--> starting nodes back, will not allocate the shard since it has no data, but the index will be there");
    node_1 = cluster().startNode(commonSettings);
    cluster().startNode(commonSettings);
    // wait a bit for the cluster to realize that the shard is not there...
    // TODO can we get around this? the cluster is RED, so what do we wait for?
    client().admin().cluster().prepareReroute().get();
    assertThat(
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForNodes("2")
            .execute()
            .actionGet()
            .getStatus(),
        equalTo(ClusterHealthStatus.RED));
    logger.info("--> explicitly allocate primary");
    state =
        client()
            .admin()
            .cluster()
            .prepareReroute()
            .setExplain(randomBoolean())
            .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true)) // allowPrimary: accept the data loss
            .execute()
            .actionGet()
            .getState();
    assertThat(state.routingNodes().unassigned().size(), equalTo(1));
    assertThat(
        state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(),
        equalTo(ShardRoutingState.INITIALIZING));

    healthResponse =
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForYellowStatus()
            .execute()
            .actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));

    logger.info("--> get the state, verify shard 1 primary allocated");
    state = client().admin().cluster().prepareState().execute().actionGet().getState();
    assertThat(state.routingNodes().unassigned().size(), equalTo(1));
    assertThat(
        state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(),
        equalTo(ShardRoutingState.STARTED));
  }
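A note on the allowPrimary flag: the final true passed to AllocateAllocationCommand is what lets the reroute command assign a primary shard at all. By default Elasticsearch refuses to force-allocate a primary through reroute, because a forced primary starts out empty and any data the shard previously held is lost. The test depends on this twice: once to assign the brand-new primary, and again, after the on-disk data has been deleted, to bring the shard back as an empty primary.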