/** Adds access to all configurable paths. */
static void addFilePermissions(Permissions policy, Environment environment) {
  // read-only dirs
  addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.binFile(), "read,readlink");
  addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink");
  addPath(
      policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink");
  addPath(
      policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink");
  addPath(
      policy, Environment.PATH_CONF_SETTING.getKey(), environment.configFile(), "read,readlink");
  addPath(
      policy,
      Environment.PATH_SCRIPTS_SETTING.getKey(),
      environment.scriptsFile(),
      "read,readlink");
  // read-write dirs
  addPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete");
  addPath(
      policy,
      Environment.PATH_LOGS_SETTING.getKey(),
      environment.logsFile(),
      "read,readlink,write,delete");
  if (environment.sharedDataFile() != null) {
    addPath(
        policy,
        Environment.PATH_SHARED_DATA_SETTING.getKey(),
        environment.sharedDataFile(),
        "read,readlink,write,delete");
  }
  for (Path path : environment.dataFiles()) {
    addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
  }
  // TODO: this should be removed in ES 6.0! We will no longer support data paths with the cluster
  // name as a folder
  assert Version.CURRENT.major < 6 : "cluster name is no longer used in data path";
  for (Path path : environment.dataWithClusterFiles()) {
    addPathIfExists(
        policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
  }
  for (Path path : environment.repoFiles()) {
    addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
  }
  if (environment.pidFile() != null) {
    // we just need permission to remove the file if it is elsewhere.
    policy.add(new FilePermission(environment.pidFile().toString(), "delete"));
  }
}
private void setupNode() throws Exception {
  Path dataDir = createTempDir();
  Path clusterDir = Files.createDirectory(dataDir.resolve(cluster().getClusterName()));
  // unpack the 2.0.0 backwards-compatibility index into the cluster data directory
  try (InputStream stream =
      PercolatorBackwardsCompatibilityIT.class.getResourceAsStream(
          "/indices/percolator/bwc_index_2.0.0.zip")) {
    TestUtil.unzip(stream, clusterDir);
  }
  // start a node on the restored data path and wait for the old index to become usable
  Settings.Builder nodeSettings =
      Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), dataDir);
  internalCluster().startNode(nodeSettings.build());
  ensureGreen(INDEX_NAME);
}
@Override
protected Settings nodeSettings(int nodeOrdinal) {
  // simplify this and only use a single data path
  return Settings.builder()
      .put(super.nodeSettings(nodeOrdinal))
      .put(Environment.PATH_DATA_SETTING.getKey(), "")
      // by default this value is 1 sec in tests (30 sec in practice), but the disruption added
      // here lasts between 1 and 2 sec and can cause each of the shard deletion requests to time
      // out. To prevent this we set the timeout to something high, i.e. the default used in
      // practice.
      .put(
          IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(),
          new TimeValue(30, TimeUnit.SECONDS))
      .build();
}
void setupCluster() throws Exception {
  InternalTestCluster.Async<List<String>> replicas =
      internalCluster().startNodesAsync(1); // for replicas

  Path baseTempDir = createTempDir();

  // start single data path node
  Settings.Builder nodeSettings =
      Settings.builder()
          .put(
              Environment.PATH_DATA_SETTING.getKey(),
              baseTempDir.resolve("single-path").toAbsolutePath())
          // workaround for dangling index loading issue when node is master
          .put(Node.NODE_MASTER_SETTING.getKey(), false);
  InternalTestCluster.Async<String> singleDataPathNode =
      internalCluster().startNodeAsync(nodeSettings.build());

  // start multi data path node
  nodeSettings =
      Settings.builder()
          .put(
              Environment.PATH_DATA_SETTING.getKey(),
              baseTempDir.resolve("multi-path1").toAbsolutePath()
                  + ","
                  + baseTempDir.resolve("multi-path2").toAbsolutePath())
          // workaround for dangling index loading issue when node is master
          .put(Node.NODE_MASTER_SETTING.getKey(), false);
  InternalTestCluster.Async<String> multiDataPathNode =
      internalCluster().startNodeAsync(nodeSettings.build());

  // find single data path dir
  singleDataPathNodeName = singleDataPathNode.get();
  Path[] nodePaths =
      internalCluster().getInstance(NodeEnvironment.class, singleDataPathNodeName).nodeDataPaths();
  assertEquals(1, nodePaths.length);
  singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);
  assertFalse(Files.exists(singleDataPath));
  Files.createDirectories(singleDataPath);
  logger.info("--> Single data path: {}", singleDataPath);

  // find multi data path dirs
  multiDataPathNodeName = multiDataPathNode.get();
  nodePaths =
      internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName).nodeDataPaths();
  assertEquals(2, nodePaths.length);
  multiDataPath =
      new Path[] {
        nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER),
        nodePaths[1].resolve(NodeEnvironment.INDICES_FOLDER)
      };
  assertFalse(Files.exists(multiDataPath[0]));
  assertFalse(Files.exists(multiDataPath[1]));
  Files.createDirectories(multiDataPath[0]);
  Files.createDirectories(multiDataPath[1]);
  logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]);

  replicas.get(); // wait for replicas
}