/**
 * Verifies that publishing a cluster state fails with {@link
 * Discovery.FailedToCommitClusterStateException} when the required number of master nodes
 * (min_master_nodes) is strictly larger than the number of master-eligible nodes in the cluster.
 */
public void testFailToPublishWithLessThanMinMasterNodes() throws Exception {
  final int masterNodes = randomIntBetween(1, 10);

  MockNode master = createMockNode("master");
  DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder().add(master.discoveryNode);
  // Master-eligible nodes beyond the master itself.
  for (int node = 1; node < masterNodes; node++) {
    nodesBuilder.add(createMockNode("node" + node).discoveryNode);
  }
  // A random number of data-only nodes; these do not count towards min_master_nodes.
  final int dataNodes = randomIntBetween(0, 5);
  final Settings nonMasterSettings =
      Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build();
  for (int node = 0; node < dataNodes; node++) {
    nodesBuilder.add(createMockNode("data_" + node, nonMasterSettings, null).discoveryNode);
  }
  nodesBuilder
      .localNodeId(master.discoveryNode.getId())
      .masterNodeId(master.discoveryNode.getId());

  ClusterState newClusterState =
      ClusterState.builder(CLUSTER_NAME)
          .metaData(MetaData.EMPTY_META_DATA)
          .nodes(nodesBuilder.build())
          .build();
  ClusterState previousState = master.clusterState;
  try {
    // Demand strictly more master nodes than exist, so committing can never succeed.
    publishState(
        master.action, newClusterState, previousState, masterNodes + randomIntBetween(1, 5));
    fail("cluster state publishing didn't fail despite of not having enough nodes");
  } catch (Discovery.FailedToCommitClusterStateException expected) {
    logger.debug("failed to publish as expected", expected);
  }
}
Ejemplo n.º 2
0
 /**
  * Determines whether local storage is enabled for a node with the given settings.
  *
  * @param settings the node's settings
  * @return the value of {@code Node.NODE_LOCAL_STORAGE_SETTING}
  * @throws IllegalArgumentException if local storage is disabled while the node is configured as
  *     a master or data node, both of which require storage
  */
 public static boolean nodeRequiresLocalStorage(Settings settings) {
   final boolean localStorageEnabled = Node.NODE_LOCAL_STORAGE_SETTING.get(settings);
   if (!localStorageEnabled) {
     if (Node.NODE_DATA_SETTING.get(settings) || Node.NODE_MASTER_SETTING.get(settings)) {
       // TODO: make this a proper setting validation logic, requiring multi-settings validation
       throw new IllegalArgumentException("storage can not be disabled for master and data nodes");
     }
   }
   return localStorageEnabled;
 }
Ejemplo n.º 3
0
  /**
   * Builds the {@link DiscoveryNode} that represents this local node.
   *
   * <p>The node's roles are derived from the ingest/master/data role settings, and its custom
   * attributes come from {@code Node.NODE_ATTRIBUTES}.
   *
   * @param settings the node's settings
   * @param publishAddress the transport address this node publishes to the cluster
   * @param nodeIdSupplier the node id to assign
   * @return a {@link DiscoveryNode} describing the local node at {@link Version#CURRENT}
   */
  public static DiscoveryNode createLocal(
      Settings settings, TransportAddress publishAddress, String nodeIdSupplier) {
    final Map<String, String> nodeAttributes =
        new HashMap<>(Node.NODE_ATTRIBUTES.get(settings).getAsMap());

    // Collect the roles this node is configured to play.
    final Set<DiscoveryNode.Role> nodeRoles = new HashSet<>();
    if (Node.NODE_INGEST_SETTING.get(settings)) {
      nodeRoles.add(DiscoveryNode.Role.INGEST);
    }
    if (Node.NODE_MASTER_SETTING.get(settings)) {
      nodeRoles.add(DiscoveryNode.Role.MASTER);
    }
    if (Node.NODE_DATA_SETTING.get(settings)) {
      nodeRoles.add(DiscoveryNode.Role.DATA);
    }

    final String nodeName = Node.NODE_NAME_SETTING.get(settings);
    return new DiscoveryNode(
        nodeName, nodeIdSupplier, publishAddress, nodeAttributes, nodeRoles, Version.CURRENT);
  }
  /**
   * Publishes a cluster state from a master to a random mix of well-behaved nodes, nodes that
   * error on send, and nodes that time out on send, and verifies that the publish commits exactly
   * when enough healthy master nodes (min_master_nodes) can ack it — otherwise it must fail or
   * time out with {@link Discovery.FailedToCommitClusterStateException}.
   */
  public void testPublishingWithSendingErrors() throws Exception {
    int goodNodes = randomIntBetween(2, 5);
    int errorNodes = randomIntBetween(1, 5);
    int timeOutNodes =
        randomBoolean()
            ? 0
            : randomIntBetween(1, 5); // adding timeout nodes will force timeout errors
    final int numberOfMasterNodes = goodNodes + errorNodes + timeOutNodes + 1; // master
    final boolean expectingToCommit = randomBoolean();
    Settings.Builder settings = Settings.builder();
    // make sure we have a reasonable timeout if we expect to timeout, o.w. one that will make the
    // test "hang"
    settings
        .put(
            DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(),
            expectingToCommit == false && timeOutNodes > 0 ? "100ms" : "1h")
        .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "5ms"); // test is about committing

    MockNode master = createMockNode("master", settings.build(), null);

    // randomize things a bit: 0 = good node, 1 = error on send, 2 = timeout on send
    int[] nodeTypes = new int[goodNodes + errorNodes + timeOutNodes];
    for (int i = 0; i < goodNodes; i++) {
      nodeTypes[i] = 0;
    }
    for (int i = goodNodes; i < goodNodes + errorNodes; i++) {
      nodeTypes[i] = 1;
    }
    for (int i = goodNodes + errorNodes; i < nodeTypes.length; i++) {
      nodeTypes[i] = 2;
    }
    // FIX: the previous Collections.shuffle(Arrays.asList(nodeTypes), random()) was a no-op —
    // Arrays.asList on an int[] boxes the whole array into a single-element List<int[]>, so the
    // node types were never interleaved. Shuffle the primitive array in place (Fisher-Yates).
    for (int i = nodeTypes.length - 1; i > 0; i--) {
      final int j = random().nextInt(i + 1);
      final int tmp = nodeTypes[i];
      nodeTypes[i] = nodeTypes[j];
      nodeTypes[j] = tmp;
    }

    DiscoveryNodes.Builder discoveryNodesBuilder =
        DiscoveryNodes.builder().add(master.discoveryNode);
    for (int i = 0; i < nodeTypes.length; i++) {
      final MockNode mockNode = createMockNode("node" + i);
      discoveryNodesBuilder.add(mockNode.discoveryNode);
      switch (nodeTypes[i]) {
        case 1:
          mockNode.action.errorOnSend.set(true);
          break;
        case 2:
          mockNode.action.timeoutOnSend.set(true);
          break;
      }
    }
    final int dataNodes = randomIntBetween(0, 3); // data nodes don't matter
    for (int i = 0; i < dataNodes; i++) {
      final MockNode mockNode =
          createMockNode(
              "data_" + i,
              Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(),
              null);
      discoveryNodesBuilder.add(mockNode.discoveryNode);
      if (randomBoolean()) {
        // we really don't care - just chaos monkey
        mockNode.action.errorOnCommit.set(randomBoolean());
        mockNode.action.errorOnSend.set(randomBoolean());
        mockNode.action.timeoutOnCommit.set(randomBoolean());
        mockNode.action.timeoutOnSend.set(randomBoolean());
      }
    }

    // Choose min_master_nodes so the outcome is forced: at most (goodNodes + master) when we
    // expect success, strictly more than that when we expect failure/timeout.
    final int minMasterNodes;
    final String expectedBehavior;
    if (expectingToCommit) {
      minMasterNodes = randomIntBetween(0, goodNodes + 1); // count master
      expectedBehavior = "succeed";
    } else {
      minMasterNodes = randomIntBetween(goodNodes + 2, numberOfMasterNodes); // +2 because of master
      expectedBehavior = timeOutNodes > 0 ? "timeout" : "fail";
    }
    logger.info(
        "--> expecting commit to {}. good nodes [{}], errors [{}], timeouts [{}]. min_master_nodes [{}]",
        expectedBehavior,
        goodNodes + 1,
        errorNodes,
        timeOutNodes,
        minMasterNodes);

    discoveryNodesBuilder
        .localNodeId(master.discoveryNode.getId())
        .masterNodeId(master.discoveryNode.getId());
    DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
    MetaData metaData = MetaData.EMPTY_META_DATA;
    ClusterState clusterState =
        ClusterState.builder(CLUSTER_NAME).metaData(metaData).nodes(discoveryNodes).build();
    ClusterState previousState = master.clusterState;
    try {
      publishState(master.action, clusterState, previousState, minMasterNodes);
      if (expectingToCommit == false) {
        fail("cluster state publishing didn't fail despite of not have enough nodes");
      }
    } catch (Discovery.FailedToCommitClusterStateException exception) {
      logger.debug("failed to publish as expected", exception);
      if (expectingToCommit) {
        throw exception;
      }
      assertThat(exception.getMessage(), containsString(timeOutNodes > 0 ? "timed out" : "failed"));
    }
  }
  /**
   * Tests corruption that happens on the network layer and that the primary does not get affected
   * by corruption that happens on the way to the replica. The file on disk stays uncorrupted
   */
  public void testCorruptionOnNetworkLayer() throws ExecutionException, InterruptedException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    internalCluster().ensureAtLeastNumDataNodes(2);
    // Ensure a third data-only node exists so the replica has somewhere to go besides the
    // "unlucky" target picked below.
    if (cluster().numDataNodes() < 3) {
      internalCluster()
          .startNode(
              Settings.builder()
                  .put(Node.NODE_DATA_SETTING.getKey(), true)
                  .put(Node.NODE_MASTER_SETTING.getKey(), false));
    }
    // Collect stats for data nodes only; the primary host and the corruption target are drawn
    // from this list.
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
      if (stat.getNode().isDataNode()) {
        dataNodeStats.add(stat);
      }
    }

    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());
    // primariesNode hosts all primaries via the allocation include filter below; unluckyNode is
    // the recovery target whose incoming file chunks get corrupted in flight.
    NodeStats primariesNode = dataNodeStats.get(0);
    NodeStats unluckyNode = dataNodeStats.get(1);

    assertAcked(
        prepareCreate("test")
            .setSettings(
                Settings.builder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
                    .put(
                        IndexMetaData.SETTING_NUMBER_OF_SHARDS,
                        between(1, 4)) // don't go crazy here it must recovery fast
                    // This does corrupt files on the replica, so we can't check:
                    .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
                    .put(
                        "index.routing.allocation.include._name", primariesNode.getNode().getName())
                    .put(
                        EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
                        EnableAllocationDecider.Rebalance.NONE)));
    ensureGreen();
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
      builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    assertAllSuccessful(
        client()
            .admin()
            .indices()
            .prepareFlush()
            .setForce(true)
            .setWaitIfOngoing(true)
            .execute()
            .actionGet());
    // we have to flush at least once here since we don't corrupt the translog
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    // Two corruption modes: truncate the chunk (drop its last byte) or flip one byte in place.
    final boolean truncate = randomBoolean();
    // Intercept recovery FILE_CHUNK requests sent from any data node to the unlucky node and
    // corrupt them on the wire; the sender's on-disk files stay intact.
    for (NodeStats dataNode : dataNodeStats) {
      MockTransportService mockTransportService =
          ((MockTransportService)
              internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
      mockTransportService.addDelegate(
          internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
          new MockTransportService.DelegateTransport(mockTransportService.original()) {

            @Override
            public void sendRequest(
                DiscoveryNode node,
                long requestId,
                String action,
                TransportRequest request,
                TransportRequestOptions options)
                throws IOException, TransportException {
              if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) {
                RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                if (truncate && req.length() > 1) {
                  // Truncation: rebuild the chunk request with one byte chopped off the content.
                  BytesRef bytesRef = req.content().toBytesRef();
                  BytesArray array =
                      new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1);
                  request =
                      new RecoveryFileChunkRequest(
                          req.recoveryId(),
                          req.shardId(),
                          req.metadata(),
                          req.position(),
                          array,
                          req.lastChunk(),
                          req.totalTranslogOps(),
                          req.sourceThrottleTimeInNanos());
                } else {
                  // Bit-flip: mutate the buffer in place — this only corrupts the outgoing
                  // request if toBytesRef() exposes the internal array rather than a copy, which
                  // the assert below is meant to catch.
                  assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes
                      : "no internal reference!!";
                  final byte[] array = req.content().toBytesRef().bytes;
                  int i = randomIntBetween(0, req.content().length() - 1);
                  array[i] = (byte) ~array[i]; // flip one byte in the content
                }
              }
              super.sendRequest(node, requestId, action, request, options);
            }
          });
    }

    // Now allow one replica and let it allocate anywhere; recoveries towards the unlucky node
    // will see corrupted chunks, so the replica should end up on some other node.
    Settings build =
        Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
            .put("index.routing.allocation.include._name", "*")
            .build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    client().admin().cluster().prepareReroute().get();
    ClusterHealthResponse actionGet =
        client()
            .admin()
            .cluster()
            .health(Requests.clusterHealthRequest("test").waitForGreenStatus())
            .actionGet();
    if (actionGet.isTimedOut()) {
      logger.info(
          "ensureGreen timed out, cluster state:\n{}\n{}",
          client().admin().cluster().prepareState().get().getState().prettyPrint(),
          client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
      assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
    }
    // we are green so primaries got not corrupted.
    // ensure that no shard is actually allocated on the unlucky node
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
    for (IndexShardRoutingTable table :
        clusterStateResponse.getState().getRoutingTable().index("test")) {
      for (ShardRouting routing : table) {
        if (unluckyNode.getNode().getId().equals(routing.currentNodeId())) {
          assertThat(routing.state(), not(equalTo(ShardRoutingState.STARTED)));
          assertThat(routing.state(), not(equalTo(ShardRoutingState.RELOCATING)));
        }
      }
    }
    // Finally verify that searches still return every document, i.e. primary data is intact.
    final int numIterations = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIterations; i++) {
      SearchResponse response = client().prepareSearch().setSize(numDocs).get();
      assertHitCount(response, numDocs);
    }
  }
  /**
   * Scenario: a dedicated master plus four data nodes host a 3-shard/1-replica index that is
   * initially excluded from the fourth data node. Two of the three hosting data nodes are stopped
   * (the index goes red), the exclusion is lifted so shard copies recover onto the fourth node,
   * and the two old nodes are then brought back. The cluster must end up green again — per the
   * test name, a shard copy that became active elsewhere must not cause another shard's data to
   * be deleted.
   */
  public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception {
    // Dedicated master-only node, started asynchronously.
    InternalTestCluster.Async<String> masterFuture =
        internalCluster()
            .startNodeAsync(
                Settings.builder()
                    .put(
                        Node.NODE_MASTER_SETTING.getKey(),
                        true,
                        Node.NODE_DATA_SETTING.getKey(),
                        false)
                    .build());
    // Four data-only nodes.
    InternalTestCluster.Async<List<String>> nodesFutures =
        internalCluster()
            .startNodesAsync(
                4,
                Settings.builder()
                    .put(
                        Node.NODE_MASTER_SETTING.getKey(),
                        false,
                        Node.NODE_DATA_SETTING.getKey(),
                        true)
                    .build());

    final String masterNode = masterFuture.get();
    final String node1 = nodesFutures.get().get(0);
    final String node2 = nodesFutures.get().get(1);
    final String node3 = nodesFutures.get().get(2);
    // we will use this later on, handy to start now to make sure it has a different data folder
    // that node 1,2 &3
    final String node4 = nodesFutures.get().get(3);

    // 3 primaries with 1 replica each; the exclude filter keeps all copies off node4 for now.
    assertAcked(
        prepareCreate("test")
            .setSettings(
                Settings.builder()
                    .put(indexSettings())
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                    .put(
                        IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_name",
                        node4)));
    assertFalse(
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForNoRelocatingShards(true)
            .setWaitForGreenStatus()
            .setWaitForNodes("5")
            .get()
            .isTimedOut());

    // disable allocation to control the situation more easily
    assertAcked(
        client()
            .admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(
                Settings.builder()
                    .put(
                        EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
                        "none")));

    logger.debug("--> shutting down two random nodes");
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3));
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3));

    logger.debug("--> verifying index is red");
    ClusterHealthResponse health =
        client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
    if (health.getStatus() != ClusterHealthStatus.RED) {
      logClusterState();
      fail("cluster didn't become red, despite of shutting 2 of 3 nodes");
    }

    logger.debug("--> allowing index to be assigned to node [{}]", node4);
    // Replace the exclusion with "NONE", a value that matches no node name, making node4
    // eligible for allocation.
    assertAcked(
        client()
            .admin()
            .indices()
            .prepareUpdateSettings("test")
            .setSettings(
                Settings.builder()
                    .put(
                        IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_name",
                        "NONE")));

    // Re-enable allocation so the surviving copies can be assigned.
    assertAcked(
        client()
            .admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(
                Settings.builder()
                    .put(
                        EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
                        "all")));

    logger.debug("--> waiting for shards to recover on [{}]", node4);
    // we have to do this in two steps as we now do async shard fetching before assigning, so the
    // change to the
    // allocation filtering may not have immediate effect
    // TODO: we should add an easier to do this. It's too much of a song and dance..
    Index index = resolveIndex("test");
    assertBusy(
        new Runnable() {
          @Override
          public void run() {
            assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex(index));
          }
        });

    // wait for 4 active shards - we should have lost one shard
    assertFalse(
        client().admin().cluster().prepareHealth().setWaitForActiveShards(4).get().isTimedOut());

    // disable allocation again to control concurrency a bit and allow shard active to kick in
    // before allocation
    assertAcked(
        client()
            .admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(
                Settings.builder()
                    .put(
                        EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
                        "none")));

    logger.debug("--> starting the two old nodes back");

    internalCluster()
        .startNodesAsync(
            2,
            Settings.builder()
                .put(
                    Node.NODE_MASTER_SETTING.getKey(), false, Node.NODE_DATA_SETTING.getKey(), true)
                .build());

    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("5").get().isTimedOut());

    // Re-enable allocation; with the old nodes back the remaining shard copy must recover.
    assertAcked(
        client()
            .admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(
                Settings.builder()
                    .put(
                        EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
                        "all")));

    logger.debug("--> waiting for the lost shard to be recovered");

    ensureGreen("test");
  }
  /**
   * Checks that a node's on-disk shard and index directories are cleaned up once its shard has
   * been relocated away: a 1-shard/1-replica index lives on node_1 and node_2, the shard is moved
   * from node_1 to node_3 (sometimes while cluster-state processing on node_3 is deliberately
   * delayed), and node_1's directories must be deleted while node_2 and node_3 keep theirs.
   */
  public void testIndexCleanup() throws Exception {
    // Dedicated master (no data) plus two data-capable, non-master nodes.
    final String masterNode =
        internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false));
    final String node_1 =
        internalCluster()
            .startNode(Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false));
    final String node_2 =
        internalCluster()
            .startNode(Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false));
    logger.info("--> creating index [test] with one shard and on replica");
    assertAcked(
        prepareCreate("test")
            .setSettings(
                Settings.builder()
                    .put(indexSettings())
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)));
    ensureGreen("test");
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    Index index = state.metaData().index("test").getIndex();

    logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2");
    assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
    assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true));
    assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true));
    assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(true));

    logger.info("--> starting node server3");
    final String node_3 =
        internalCluster()
            .startNode(Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false));
    logger.info("--> running cluster_health");
    ClusterHealthResponse clusterHealth =
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForNodes("4")
            .setWaitForNoRelocatingShards(true)
            .get();
    assertThat(clusterHealth.isTimedOut(), equalTo(false));

    // node_3 joined but holds no copy yet, so its shard/index directories must not exist.
    assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
    assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true));
    assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true));
    assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(true));
    assertThat(Files.exists(shardDirectory(node_3, index, 0)), equalTo(false));
    assertThat(Files.exists(indexDirectory(node_3, index)), equalTo(false));

    logger.info("--> move shard from node_1 to node_3, and wait for relocation to finish");

    if (randomBoolean()) { // sometimes add cluster-state delay to trigger observers in
      // IndicesStore.ShardActiveRequestHandler
      SingleNodeDisruption disruption = new BlockClusterStateProcessing(node_3, random());
      internalCluster().setDisruptionScheme(disruption);
      MockTransportService transportServiceNode3 =
          (MockTransportService) internalCluster().getInstance(TransportService.class, node_3);
      // Latches opened by the tracer when the relocation begins/ends on node_3.
      CountDownLatch beginRelocationLatch = new CountDownLatch(1);
      CountDownLatch endRelocationLatch = new CountDownLatch(1);
      transportServiceNode3.addTracer(
          new ReclocationStartEndTracer(logger, beginRelocationLatch, endRelocationLatch));
      internalCluster()
          .client()
          .admin()
          .cluster()
          .prepareReroute()
          .add(new MoveAllocationCommand("test", 0, node_1, node_3))
          .get();
      // wait for relocation to start
      beginRelocationLatch.await();
      disruption.startDisrupting();
      // wait for relocation to finish
      endRelocationLatch.await();
      // wait a little so that cluster state observer is registered
      sleep(50);
      disruption.stopDisrupting();
    } else {
      internalCluster()
          .client()
          .admin()
          .cluster()
          .prepareReroute()
          .add(new MoveAllocationCommand("test", 0, node_1, node_3))
          .get();
    }
    clusterHealth =
        client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).get();
    assertThat(clusterHealth.isTimedOut(), equalTo(false));

    // After the move: node_1's copies should be gone (waitForShardDeletion/waitForIndexDeletion
    // presumably block until deletion and return whether the path still exists — confirm against
    // the helper implementations), node_2 keeps the replica, node_3 holds the relocated shard.
    assertThat(waitForShardDeletion(node_1, index, 0), equalTo(false));
    assertThat(waitForIndexDeletion(node_1, index), equalTo(false));
    assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true));
    assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(true));
    assertThat(Files.exists(shardDirectory(node_3, index, 0)), equalTo(true));
    assertThat(Files.exists(indexDirectory(node_3, index)), equalTo(true));
  }
Ejemplo n.º 8
0
 /**
  * Checks whether the given settings configure the node as master-eligible.
  *
  * @param settings the node's settings
  * @return the value of {@code Node.NODE_MASTER_SETTING} (its default if unset)
  */
 public static boolean isMasterNode(Settings settings) {
   final boolean masterEligible = Node.NODE_MASTER_SETTING.get(settings);
   return masterEligible;
 }
Ejemplo n.º 9
0
  /**
   * Starts the nodes used by these tests: one plain node (for replicas), one non-master node with
   * a single data path, and one non-master node with two data paths. Resolves each node's indices
   * folder, asserts it does not exist yet, creates it, and records the paths in the
   * singleDataPath / multiDataPath fields (with the node names in singleDataPathNodeName /
   * multiDataPathNodeName).
   */
  void setupCluster() throws Exception {
    InternalTestCluster.Async<List<String>> replicas =
        internalCluster().startNodesAsync(1); // for replicas

    Path baseTempDir = createTempDir();
    // start single data path node
    Settings.Builder nodeSettings =
        Settings.builder()
            .put(
                Environment.PATH_DATA_SETTING.getKey(),
                baseTempDir.resolve("single-path").toAbsolutePath())
            .put(
                Node.NODE_MASTER_SETTING.getKey(),
                false); // workaround for dangling index loading issue when node is master
    InternalTestCluster.Async<String> singleDataPathNode =
        internalCluster().startNodeAsync(nodeSettings.build());

    // start multi data path node
    nodeSettings =
        Settings.builder()
            .put(
                Environment.PATH_DATA_SETTING.getKey(),
                // two comma-separated data paths under the same temp dir
                baseTempDir.resolve("multi-path1").toAbsolutePath()
                    + ","
                    + baseTempDir.resolve("multi-path2").toAbsolutePath())
            .put(
                Node.NODE_MASTER_SETTING.getKey(),
                false); // workaround for dangling index loading issue when node is master
    InternalTestCluster.Async<String> multiDataPathNode =
        internalCluster().startNodeAsync(nodeSettings.build());

    // find single data path dir
    singleDataPathNodeName = singleDataPathNode.get();
    Path[] nodePaths =
        internalCluster()
            .getInstance(NodeEnvironment.class, singleDataPathNodeName)
            .nodeDataPaths();
    assertEquals(1, nodePaths.length);
    singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);
    assertFalse(Files.exists(singleDataPath));
    // Pre-create the indices folder — presumably so tests can place index data there directly;
    // confirm against the tests that use singleDataPath.
    Files.createDirectories(singleDataPath);
    logger.info("--> Single data path: {}", singleDataPath);

    // find multi data path dirs
    multiDataPathNodeName = multiDataPathNode.get();
    nodePaths =
        internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName).nodeDataPaths();
    assertEquals(2, nodePaths.length);
    multiDataPath =
        new Path[] {
          nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER),
          nodePaths[1].resolve(NodeEnvironment.INDICES_FOLDER)
        };
    assertFalse(Files.exists(multiDataPath[0]));
    assertFalse(Files.exists(multiDataPath[1]));
    Files.createDirectories(multiDataPath[0]);
    Files.createDirectories(multiDataPath[1]);
    logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]);

    replicas.get(); // wait for replicas
  }