public void testThatCachingWorksForCachingStrategyOne() { PreBuiltTokenFilterFactoryFactory factory = new PreBuiltTokenFilterFactoryFactory( PreBuiltTokenFilters.WORD_DELIMITER.getTokenFilterFactory(Version.CURRENT)); TokenFilterFactory former090TokenizerFactory = factory.create( "word_delimiter", Settings.settingsBuilder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1) .build()); TokenFilterFactory former090TokenizerFactoryCopy = factory.create( "word_delimiter", Settings.settingsBuilder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2) .build()); TokenFilterFactory currentTokenizerFactory = factory.create( "word_delimiter", Settings.settingsBuilder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build()); assertThat(currentTokenizerFactory, is(former090TokenizerFactory)); assertThat(currentTokenizerFactory, is(former090TokenizerFactoryCopy)); }
static void addBindPermissions(Permissions policy, Settings settings) throws IOException { // http is simple String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString(); // listen is always called with 'localhost' but use wildcard to be sure, no name service is // consulted. // see SocketPermission implies() code policy.add(new SocketPermission("*:" + httpRange, "listen,resolve")); // transport is waaaay overengineered Map<String, Settings> profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups(); if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) { profiles = new HashMap<>(profiles); profiles.put(TransportSettings.DEFAULT_PROFILE, Settings.EMPTY); } // loop through all profiles and add permissions for each one, if it's valid. // (otherwise NettyTransport is lenient and ignores it) for (Map.Entry<String, Settings> entry : profiles.entrySet()) { Settings profileSettings = entry.getValue(); String name = entry.getKey(); String transportRange = profileSettings.get("port", TransportSettings.PORT.get(settings)); // a profile is only valid if it is the default profile, or if it has an actual name and // specifies a port boolean valid = TransportSettings.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get("port") != null); if (valid) { // listen is always called with 'localhost' but use wildcard to be sure, no name service is // consulted. // see SocketPermission implies() code policy.add(new SocketPermission("*:" + transportRange, "listen,resolve")); } } }
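// A minimal standalone sketch (not the Elasticsearch API) of the profile-validity
// rule applied above: a profile earns a listen permission only if it is the default
// profile, or if it has a non-empty name and an explicit port. All names here
// (ProfileValiditySketch, isValidProfile) are illustrative.
final class ProfileValiditySketch {
  static final String DEFAULT_PROFILE = "default";

  static boolean isValidProfile(String name, String explicitPort) {
    // valid if it is the default profile, or has a real name and an explicit port
    return DEFAULT_PROFILE.equals(name)
        || (name != null && !name.isEmpty() && explicitPort != null);
  }

  public static void main(String[] args) {
    System.out.println(isValidProfile("default", null)); // true
    System.out.println(isValidProfile("client", "9700-9800")); // true
    System.out.println(isValidProfile("", "9700")); // false: unnamed
    System.out.println(isValidProfile("client", null)); // false: no explicit port
  }
}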
private void assertMMNinClusterSetting(InternalTestCluster cluster, int masterNodes) { final int minMasterNodes = masterNodes / 2 + 1; for (final String node : cluster.getNodeNames()) { Settings stateSettings = cluster .client(node) .admin() .cluster() .prepareState() .setLocal(true) .get() .getState() .getMetaData() .settings(); assertThat( "dynamic setting for node [" + node + "] has the wrong min_master_nodes setting: [" + stateSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) + "]", stateSettings.getAsMap(), hasEntry( DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes))); } }
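// A standalone sketch of the quorum arithmetic asserted above: minimum_master_nodes
// is a strict majority of the master-eligible nodes, i.e. masterNodes / 2 + 1.
// The class and method names here are illustrative, not part of the test suite.
final class MinMasterNodesSketch {
  static int minMasterNodes(int masterEligibleNodes) {
    return masterEligibleNodes / 2 + 1;
  }

  public static void main(String[] args) {
    for (int n : new int[] {1, 2, 3, 4, 5}) {
      System.out.println(n + " master-eligible nodes -> min_master_nodes " + minMasterNodes(n));
    }
    // prints: 1 -> 1, 2 -> 2, 3 -> 2, 4 -> 3, 5 -> 3
  }
}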
public Plugin(Settings settings) { String priorPath = settings.get(PRIOR_PATH_KEY); enabled = settings.getAsBoolean(PRIOR_STORE_ENABLED_KEY, true) && null != priorPath && Files.isDirectory(Paths.get(priorPath)); }
@BeforeClass public static void createTribes() throws NodeValidationException { Settings baseSettings = Settings.builder() .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) .build(); tribe1 = new TribeClientNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe1") .put("node.name", "tribe1_node") .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) .build(), Collections.singleton(MockTcpTransportPlugin.class)) .start(); tribe2 = new TribeClientNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe2") .put("node.name", "tribe2_node") .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) .build(), Collections.singleton(MockTcpTransportPlugin.class)) .start(); }
public void testPrimaryRelocation() throws Exception { Path dataPath = createTempDir(); Settings nodeSettings = nodeSettings(dataPath); String node1 = internalCluster().startNode(nodeSettings); String IDX = "test"; Settings idxSettings = Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) .build(); prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get(); GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get(); assertTrue(gResp1.isExists()); assertTrue(gResp2.isExists()); assertThat(gResp1.getSource().get("foo"), equalTo("bar")); assertThat(gResp2.getSource().get("foo"), equalTo("bar")); // node1 has the primary, now node2 has the replica String node2 = internalCluster().startNode(nodeSettings); ensureGreen(IDX); client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); flushAndRefresh(IDX); // now prevent the primary from being allocated on node1 so that it moves to node3 String node3 = internalCluster().startNode(nodeSettings); Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build(); client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet(); ensureGreen(IDX); logger.info("--> performing query"); SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); assertHitCount(resp, 2); gResp1 = client().prepareGet(IDX, "doc", "1").get(); gResp2 = client().prepareGet(IDX, "doc", "2").get(); assertTrue(gResp1.isExists()); assertTrue(gResp2.toString(), gResp2.isExists()); assertThat(gResp1.getSource().get("foo"), equalTo("bar")); assertThat(gResp2.getSource().get("foo"), equalTo("bar")); client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get(); client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get(); gResp1 = client().prepareGet(IDX, "doc", "3").setPreference("_primary").get(); gResp2 = client().prepareGet(IDX, "doc", "4").setPreference("_primary").get(); assertTrue(gResp1.isExists()); assertTrue(gResp2.isExists()); assertThat(gResp1.getSource().get("foo"), equalTo("bar")); assertThat(gResp2.getSource().get("foo"), equalTo("bar")); }
public static Settings processSettings(Settings settings) { if (settings.get(TRIBE_NAME) != null) { // if it's a node client started by this service as a tribe, remove any tribe group setting // to avoid recursive configuration Settings.Builder sb = Settings.builder().put(settings); for (String s : settings.getAsMap().keySet()) { if (s.startsWith("tribe.") && !s.equals(TRIBE_NAME)) { sb.remove(s); } } return sb.build(); } Map<String, Settings> nodesSettings = settings.getGroups("tribe", true); if (nodesSettings.isEmpty()) { return settings; } // it's a tribe-configured node, so force the relevant settings Settings.Builder sb = Settings.builder().put(settings); sb.put(Node.NODE_CLIENT_SETTING.getKey(), true); // this node should just act as a node client sb.put( DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery sb.put( DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); // nothing is going to be discovered, since no master will be elected if (sb.get("cluster.name") == null) { sb.put( "cluster.name", "tribe_" + Strings .randomBase64UUID()); // make sure it won't join other tribe nodes in the same JVM } sb.put(TransportMasterNodeReadAction.FORCE_LOCAL_SETTING, true); return sb.build(); }
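// A standalone, map-based sketch of the key-stripping step in processSettings above:
// drop every "tribe."-prefixed key except the tribe name itself, so an inner tribe
// client cannot recursively spawn more tribes. The TRIBE_NAME value "tribe.name" is
// an assumption here; the real constant lives in the tribe service.
import java.util.HashMap;
import java.util.Map;

final class TribeSettingsSketch {
  static final String TRIBE_NAME = "tribe.name"; // assumed value of the TRIBE_NAME constant

  static Map<String, String> stripTribeGroups(Map<String, String> settings) {
    Map<String, String> result = new HashMap<>(settings);
    result.keySet().removeIf(k -> k.startsWith("tribe.") && !k.equals(TRIBE_NAME));
    return result;
  }

  public static void main(String[] args) {
    Map<String, String> in = new HashMap<>();
    in.put("tribe.name", "t1"); // kept: identifies this tribe client
    in.put("tribe.t2.cluster.name", "other"); // removed: would recurse
    in.put("cluster.name", "tribe_abc"); // kept: not a tribe group setting
    System.out.println(stripTribeGroups(in));
  }
}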
public void testFailLoadShardPathOnMultiState() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) { final String indexUUID = "0xDEADBEEF"; Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_INDEX_UUID, indexUUID) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); Settings settings = builder.build(); ShardId shardId = new ShardId("foo", indexUUID, 0); Path[] paths = env.availableShardPaths(shardId); assumeTrue("This test tests multi data.path but we only got one", paths.length > 1); int id = randomIntBetween(1, 10); ShardStateMetaData.FORMAT.write( new ShardStateMetaData(id, true, indexUUID, AllocationId.newInitializing()), paths); Exception e = expectThrows( IllegalStateException.class, () -> ShardPath.loadShardPath( logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings))); assertThat(e.getMessage(), containsString("more than one shard state found")); } }
@Override public Node stop() { if (!lifecycle.moveToStopped()) { return this; } ESLogger logger = Loggers.getLogger(Node.class, settings.get("name")); logger.info("{{}}[{}]: stopping ...", Version.full(), JvmInfo.jvmInfo().pid()); if (settings.getAsBoolean("http.enabled", true)) { injector.getInstance(HttpServer.class).stop(); } injector.getInstance(RoutingService.class).stop(); injector.getInstance(ClusterService.class).stop(); injector.getInstance(DiscoveryService.class).stop(); injector.getInstance(MonitorService.class).stop(); injector.getInstance(GatewayService.class).stop(); injector.getInstance(SearchService.class).stop(); injector.getInstance(RiversManager.class).stop(); injector.getInstance(IndicesClusterStateService.class).stop(); injector.getInstance(IndicesService.class).stop(); injector.getInstance(RestController.class).stop(); injector.getInstance(TransportService.class).stop(); injector.getInstance(JmxService.class).close(); for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) { injector.getInstance(plugin).stop(); } logger.info("{{}}[{}]: stopped", Version.full(), JvmInfo.jvmInfo().pid()); return this; }
protected TransportShardReplicationOperationAction( Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction) { super(settings, threadPool); this.transportService = transportService; this.clusterService = clusterService; this.indicesService = indicesService; this.shardStateAction = shardStateAction; this.transportAction = transportAction(); this.transportReplicaAction = transportReplicaAction(); this.executor = executor(); this.checkWriteConsistency = checkWriteConsistency(); transportService.registerHandler(transportAction, new OperationTransportHandler()); transportService.registerHandler( transportReplicaAction, new ReplicaOperationTransportHandler()); this.transportOptions = transportOptions(); this.defaultReplicationType = ReplicationType.fromString(settings.get("action.replication_type", "sync")); this.defaultWriteConsistencyLevel = WriteConsistencyLevel.fromString(settings.get("action.write_consistency", "quorum")); }
void assertRealtimeGetWorks(String indexName) { assertAcked( client() .admin() .indices() .prepareUpdateSettings(indexName) .setSettings(Settings.builder().put("refresh_interval", -1).build())); SearchRequestBuilder searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.matchAllQuery()); SearchHit hit = searchReq.get().getHits().getAt(0); String docId = hit.getId(); // foo is new, it is not a field in the generated index client().prepareUpdate(indexName, "doc", docId).setDoc("foo", "bar").get(); GetResponse getRsp = client().prepareGet(indexName, "doc", docId).get(); Map<String, Object> source = getRsp.getSourceAsMap(); assertThat(source, Matchers.hasKey("foo")); assertAcked( client() .admin() .indices() .prepareUpdateSettings(indexName) .setSettings( Settings.builder() .put("refresh_interval", EngineConfig.DEFAULT_REFRESH_INTERVAL) .build())); }
@SuppressForbidden(reason = "do not know what this method does") public static ESLogger getLogger(String loggerName, Settings settings, String... prefixes) { List<String> prefixesList = new ArrayList<>(); if (settings.getAsBoolean("logger.logHostAddress", false)) { final InetAddress addr = getHostAddress(); if (addr != null) { prefixesList.add(addr.getHostAddress()); } } if (settings.getAsBoolean("logger.logHostName", false)) { final InetAddress addr = getHostAddress(); if (addr != null) { prefixesList.add(addr.getHostName()); } } String name = settings.get("node.name"); if (name != null) { prefixesList.add(name); } if (prefixes != null && prefixes.length > 0) { prefixesList.addAll(asList(prefixes)); } return getLogger( getLoggerName(loggerName), prefixesList.toArray(new String[prefixesList.size()])); }
public void testRegisterHttpTransport() { Settings settings = Settings.builder().put(NetworkModule.HTTP_TYPE_SETTING.getKey(), "custom").build(); NetworkModule module = new NetworkModule( new NetworkService(settings), settings, false, new NamedWriteableRegistry()); module.registerHttpTransport("custom", FakeHttpTransport.class); assertBinding(module, HttpServerTransport.class, FakeHttpTransport.class); // check registration not allowed for transport only module = new NetworkModule( new NetworkService(settings), settings, true, new NamedWriteableRegistry()); try { module.registerHttpTransport("custom", FakeHttpTransport.class); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Cannot register http transport")); assertTrue(e.getMessage().contains("for transport client")); } // not added if http is disabled settings = Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false).build(); module = new NetworkModule( new NetworkService(settings), settings, false, new NamedWriteableRegistry()); assertNotBound(module, HttpServerTransport.class); }
public void testThatDifferentVersionsCanBeLoaded() { PreBuiltTokenFilterFactoryFactory factory = new PreBuiltTokenFilterFactoryFactory( PreBuiltTokenFilters.STOP.getTokenFilterFactory(Version.CURRENT)); TokenFilterFactory former090TokenizerFactory = factory.create( "stop", Settings.settingsBuilder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1) .build()); TokenFilterFactory former090TokenizerFactoryCopy = factory.create( "stop", Settings.settingsBuilder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2) .build()); TokenFilterFactory currentTokenizerFactory = factory.create( "stop", Settings.settingsBuilder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build()); assertThat(currentTokenizerFactory, is(not(former090TokenizerFactory))); assertThat(former090TokenizerFactory, is(former090TokenizerFactoryCopy)); }
List<String> getIndexSettingsValidationErrors(Settings settings) { String customPath = IndexMetaData.INDEX_DATA_PATH_SETTING.get(settings); List<String> validationErrors = new ArrayList<>(); if (Strings.isEmpty(customPath) == false && env.sharedDataFile() == null) { validationErrors.add("path.shared_data must be set in order to use custom data paths"); } else if (Strings.isEmpty(customPath) == false) { Path resolvedPath = PathUtils.get(new Path[] {env.sharedDataFile()}, customPath); if (resolvedPath == null) { validationErrors.add( "custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]"); } } // norelease - this can be removed? Integer numberOfPrimaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); Integer numberOfReplicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null); if (numberOfPrimaries != null && numberOfPrimaries <= 0) { validationErrors.add("index must have 1 or more primary shards"); } if (numberOfReplicas != null && numberOfReplicas < 0) { validationErrors.add("index must have 0 or more replica shards"); } return validationErrors; }
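// A standalone sketch of the shard/replica count checks above, using plain Integers
// in place of the Settings API. Names are illustrative; unset (null) values are
// deliberately skipped, exactly as in the method above.
import java.util.ArrayList;
import java.util.List;

final class ShardCountValidationSketch {
  static List<String> validate(Integer primaries, Integer replicas) {
    List<String> errors = new ArrayList<>();
    if (primaries != null && primaries <= 0) {
      errors.add("index must have 1 or more primary shards");
    }
    if (replicas != null && replicas < 0) {
      errors.add("index must have 0 or more replica shards");
    }
    return errors;
  }

  public static void main(String[] args) {
    System.out.println(validate(0, null)); // [index must have 1 or more primary shards]
    System.out.println(validate(5, -1)); // [index must have 0 or more replica shards]
    System.out.println(validate(null, null)); // [] - unset values are not validated here
  }
}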
/** * This test verifies that the test configuration is set up in a manner that does not make the * test {@link #testRepositoryInRemoteRegion()} pointless. */ @Test(expected = RepositoryVerificationException.class) public void assertRepositoryInRemoteRegionIsRemote() { Client client = client(); Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.remote-bucket."); logger.info( "--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath); client .admin() .cluster() .preparePutRepository("test-repo") .setType("s3") .setSettings( Settings.settingsBuilder() .put("base_path", basePath) .put("bucket", bucketSettings.get("bucket")) // Below setting intentionally omitted to assert bucket is not available in default // region. // .put("region", privateBucketSettings.get("region")) ) .get(); fail("repository verification should have raised an exception!"); }
public void testLoadShardPath() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF") .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); Settings settings = builder.build(); ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); ShardStateMetaData.FORMAT.write( new ShardStateMetaData(2, true, "0xDEADBEEF", AllocationId.newInitializing()), path); ShardPath shardPath = ShardPath.loadShardPath( logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); assertEquals(path, shardPath.getDataPath()); assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID()); assertEquals("foo", shardPath.getShardId().getIndexName()); assertEquals(path.resolve("translog"), shardPath.resolveTranslog()); assertEquals(path.resolve("index"), shardPath.resolveIndex()); } }
public void testKeepWordsPathSettings() { Settings settings = Settings.settingsBuilder() .put("path.home", createTempDir().toString()) .put("index.analysis.filter.non_broken_keep_filter.type", "keep") .put( "index.analysis.filter.non_broken_keep_filter.keep_words_path", "does/not/exists.txt") .build(); try { // test that the non-existent keep_words_path is rejected AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); fail("expected an exception due to non existent keep_words_path"); } catch (IllegalArgumentException e) { // expected } catch (IOException e) { fail("expected IAE"); } settings = Settings.settingsBuilder() .put(settings) .put("index.analysis.filter.non_broken_keep_filter.keep_words", new String[] {"test"}) .build(); try { // test that specifying both keep_words_path and keep_words is rejected AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); fail( "expected an exception indicating that you can't use [keep_words_path] with [keep_words]"); } catch (IllegalArgumentException e) { // expected } catch (IOException e) { fail("expected IAE"); } }
/** * Fetches a list of words from the given settings. The list should either be available inline * at the key specified by settingPrefix, or in a file whose path is given by settingPrefix + * "_path". Returns null if neither key is set. * * @throws IllegalArgumentException If the word list file is configured but cannot be read. */ public static List<String> getWordList(Environment env, Settings settings, String settingPrefix) { String wordListPath = settings.get(settingPrefix + "_path", null); if (wordListPath == null) { String[] explicitWordList = settings.getAsArray(settingPrefix, null); if (explicitWordList == null) { return null; } else { return Arrays.asList(explicitWordList); } } final Path wordListFile = env.configFile().resolve(wordListPath); try (BufferedReader reader = FileSystemUtils.newBufferedReader(wordListFile.toUri().toURL(), StandardCharsets.UTF_8)) { return loadWordList(reader, "#"); } catch (IOException ioe) { String message = String.format( Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, ioe.getMessage()); throw new IllegalArgumentException(message); } }
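// A standalone sketch of the two-step lookup in getWordList above: prefer the
// "<prefix>_path" file setting, else fall back to the inline "<prefix>" list, else
// return null. A plain map stands in for the Settings object, and actual file
// loading is elided; all names here are illustrative.
import java.util.Arrays;
import java.util.List;
import java.util.Map;

final class WordListLookupSketch {
  static List<String> resolve(Map<String, Object> settings, String prefix) {
    Object path = settings.get(prefix + "_path");
    if (path != null) {
      // the real method reads and parses the file here
      throw new UnsupportedOperationException("would read words from file: " + path);
    }
    String[] inline = (String[]) settings.get(prefix);
    return inline == null ? null : Arrays.asList(inline);
  }

  public static void main(String[] args) {
    Map<String, Object> s = Map.of("filter.stop.stopwords", new String[] {"a", "the"});
    System.out.println(resolve(s, "filter.stop.stopwords")); // [a, the]
    System.out.println(resolve(Map.of(), "filter.stop.stopwords")); // null
  }
}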
/** * Creates a new TranslogConfig instance * * @param shardId the shard ID this translog belongs to * @param translogPath the path to use for the transaction log files * @param indexSettings the index settings used to set internal variables * @param durabilty the default durability setting for the translog * @param bigArrays a bigArrays instance used for temporarily allocating write operations * @param threadPool a {@link ThreadPool} to schedule async sync durability */ public TranslogConfig( ShardId shardId, Path translogPath, Settings indexSettings, Translog.Durabilty durabilty, BigArrays bigArrays, @Nullable ThreadPool threadPool) { this.indexSettings = indexSettings; this.shardId = shardId; this.translogPath = translogPath; this.durabilty = durabilty; this.threadPool = threadPool; this.bigArrays = bigArrays; this.type = TranslogWriter.Type.fromString( indexSettings.get(INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.BUFFERED.name())); this.bufferSize = (int) indexSettings .getAsBytesSize( INDEX_TRANSLOG_BUFFER_SIZE, IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER) .bytes(); // Not really interesting, updated by IndexingMemoryController... syncInterval = indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5)); if (syncInterval.millis() > 0 && threadPool != null) { syncOnEachOperation = false; } else if (syncInterval.millis() == 0) { syncOnEachOperation = true; } else { syncOnEachOperation = false; } }
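// A standalone sketch of the sync-mode decision at the end of the TranslogConfig
// constructor above: a positive interval (with a thread pool available) means
// scheduled async syncs, an interval of exactly zero means fsync on every
// operation, and anything else disables per-operation syncing. Names are illustrative.
final class SyncModeSketch {
  static boolean syncOnEachOperation(long syncIntervalMillis, boolean hasThreadPool) {
    if (syncIntervalMillis > 0 && hasThreadPool) {
      return false; // async: a scheduled task fsyncs the translog periodically
    }
    return syncIntervalMillis == 0; // exactly 0 forces an fsync per operation
  }

  public static void main(String[] args) {
    System.out.println(syncOnEachOperation(5000, true)); // false: async every 5s
    System.out.println(syncOnEachOperation(0, true)); // true: per-operation fsync
    System.out.println(syncOnEachOperation(-1, true)); // false: neither mode
  }
}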
@Test public void testDefaultRecoverAfterTime() throws IOException { // check that the default is not set GatewayService service = createService(Settings.builder()); assertNull(service.recoverAfterTime()); // ensure default is set when setting expected_nodes service = createService(Settings.builder().put("gateway.expected_nodes", 1)); assertThat( service.recoverAfterTime(), Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET)); // ensure default is set when setting expected_data_nodes service = createService(Settings.builder().put("gateway.expected_data_nodes", 1)); assertThat( service.recoverAfterTime(), Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET)); // ensure default is set when setting expected_master_nodes service = createService(Settings.builder().put("gateway.expected_master_nodes", 1)); assertThat( service.recoverAfterTime(), Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET)); // ensure an explicit recover_after_time overrides the default TimeValue timeValue = TimeValue.timeValueHours(3); service = createService( Settings.builder() .put("gateway.expected_nodes", 1) .put("gateway.recover_after_time", timeValue.toString())); assertThat(service.recoverAfterTime().millis(), Matchers.equalTo(timeValue.millis())); }
public MetaDataDiff(StreamInput in) throws IOException { clusterUUID = in.readString(); version = in.readLong(); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); indices = DiffableUtils.readImmutableOpenMapDiff( in, DiffableUtils.getStringKeySerializer(), IndexMetaData.PROTO); templates = DiffableUtils.readImmutableOpenMapDiff( in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData.PROTO); customs = DiffableUtils.readImmutableOpenMapDiff( in, DiffableUtils.getStringKeySerializer(), new DiffableUtils.DiffableValueSerializer<String, Custom>() { @Override public Custom read(StreamInput in, String key) throws IOException { return lookupPrototypeSafe(key).readFrom(in); } @Override public Diff<Custom> readDiff(StreamInput in, String key) throws IOException { return lookupPrototypeSafe(key).readDiffFrom(in); } }); }
@BeforeClass public static void createTribes() { Settings baseSettings = Settings.builder() .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true) .put("http.enabled", false) .put("node.mode", NODE_MODE) .put("path.home", createTempDir()) .build(); tribe1 = NodeBuilder.nodeBuilder() .settings( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe1") .put("node.name", "tribe1_node")) .node(); tribe2 = NodeBuilder.nodeBuilder() .settings( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe2") .put("node.name", "tribe2_node")) .node(); }
public void testBuiltInAnalyzersAreCached() throws IOException { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); IndexAnalyzers indexAnalyzers = new AnalysisRegistry( new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()) .build(idxSettings); IndexAnalyzers otherIndexAnalyzers = new AnalysisRegistry( new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()) .build(idxSettings); final int numIters = randomIntBetween(5, 20); for (int i = 0; i < numIters; i++) { PreBuiltAnalyzers preBuiltAnalyzers = RandomPicks.randomFrom(random(), PreBuiltAnalyzers.values()); assertSame( indexAnalyzers.get(preBuiltAnalyzers.name()), otherIndexAnalyzers.get(preBuiltAnalyzers.name())); } }
@Inject public DiskThresholdDecider( Settings settings, NodeSettingsService nodeSettingsService, ClusterInfoService infoService, Client client) { super(settings); String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "85%"); String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "90%"); if (!validWatermarkSetting(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) { throw new ElasticsearchParseException("unable to parse low watermark [{}]", lowWatermark); } if (!validWatermarkSetting(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) { throw new ElasticsearchParseException("unable to parse high watermark [{}]", highWatermark); } // Watermark is expressed in terms of used data, but we need "free" data watermark this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark); this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark); this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK); this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK); this.includeRelocations = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true); this.rerouteInterval = settings.getAsTime( CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, TimeValue.timeValueSeconds(60)); this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true); nodeSettingsService.addListener(new ApplySettings()); infoService.addListener(new DiskListener(client)); }
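// A standalone sketch of the watermark inversion in the constructor above:
// watermarks are configured as "used disk" percentages, while the decider compares
// against free space, so "85%" used becomes a 15.0% free-space threshold. Only the
// percentage form is handled here; the real code also accepts byte values.
final class WatermarkSketch {
  static double freeThresholdFromWatermark(String watermark) {
    double usedPercent = Double.parseDouble(watermark.replace("%", ""));
    return 100.0 - usedPercent;
  }

  public static void main(String[] args) {
    System.out.println(freeThresholdFromWatermark("85%")); // 15.0 (low watermark)
    System.out.println(freeThresholdFromWatermark("90%")); // 10.0 (high watermark)
  }
}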
public void testFailLoadShardPathIndexUUIDMismatch() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_INDEX_UUID, "foobar") .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); Settings settings = builder.build(); ShardId shardId = new ShardId("foo", "foobar", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); int id = randomIntBetween(1, 10); ShardStateMetaData.FORMAT.write( new ShardStateMetaData(id, true, "0xDEADBEEF", AllocationId.newInitializing()), path); Exception e = expectThrows( IllegalStateException.class, () -> ShardPath.loadShardPath( logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings))); assertThat(e.getMessage(), containsString("expected: foobar on shard path")); } }
@Inject public LocalGatewayMetaState( Settings settings, ThreadPool threadPool, NodeEnvironment nodeEnv, TransportNodesListGatewayMetaState nodesListGatewayMetaState, LocalAllocateDangledIndices allocateDangledIndices, NodeIndexDeletedAction nodeIndexDeletedAction) throws Exception { super(settings); this.nodeEnv = nodeEnv; this.threadPool = threadPool; this.format = XContentType.fromRestContentType(settings.get("format", "smile")); this.allocateDangledIndices = allocateDangledIndices; this.nodeIndexDeletedAction = nodeIndexDeletedAction; nodesListGatewayMetaState.init(this); if (this.format == XContentType.SMILE) { Map<String, String> params = Maps.newHashMap(); params.put("binary", "true"); formatParams = new ToXContent.MapParams(params); Map<String, String> globalOnlyParams = Maps.newHashMap(); globalOnlyParams.put("binary", "true"); globalOnlyParams.put(MetaData.PERSISTENT_ONLY_PARAM, "true"); globalOnlyParams.put(MetaData.GLOBAL_ONLY_PARAM, "true"); globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams); } else { formatParams = ToXContent.EMPTY_PARAMS; Map<String, String> globalOnlyParams = Maps.newHashMap(); globalOnlyParams.put(MetaData.PERSISTENT_ONLY_PARAM, "true"); globalOnlyParams.put(MetaData.GLOBAL_ONLY_PARAM, "true"); globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams); } this.autoImportDangled = AutoImportDangledState.fromString( settings.get( "gateway.local.auto_import_dangled", AutoImportDangledState.YES.toString())); this.danglingTimeout = settings.getAsTime("gateway.local.dangling_timeout", TimeValue.timeValueHours(2)); logger.debug( "using gateway.local.auto_import_dangled [{}], with gateway.local.dangling_timeout [{}]", this.autoImportDangled, this.danglingTimeout); if (DiscoveryNode.masterNode(settings)) { try { pre019Upgrade(); long start = System.currentTimeMillis(); loadState(); logger.debug( "took {} to load state", TimeValue.timeValueMillis(System.currentTimeMillis() - start)); } catch (Exception e) { logger.error("failed to read local state, exiting...", e); throw e; } } }
MergeSchedulerConfig(Settings settings) { maxThreadCount = settings.getAsInt( MAX_THREAD_COUNT, Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(settings) / 2))); maxMergeCount = settings.getAsInt(MAX_MERGE_COUNT, maxThreadCount + 5); this.autoThrottle = settings.getAsBoolean(AUTO_THROTTLE, true); }
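// A standalone sketch of the default max_thread_count formula above: half the
// bounded processor count, clamped to the range [1, 4]; max_merge_count then
// defaults to that value plus 5. Names here are illustrative.
final class MergeSchedulerDefaultsSketch {
  static int defaultMaxThreadCount(int processors) {
    return Math.max(1, Math.min(4, processors / 2));
  }

  public static void main(String[] args) {
    for (int p : new int[] {1, 2, 4, 8, 32}) {
      int threads = defaultMaxThreadCount(p);
      System.out.println(
          p + " processors -> " + threads + " merge threads, " + (threads + 5) + " max merges");
    }
    // 1 -> 1/6, 2 -> 1/6, 4 -> 2/7, 8 -> 4/9, 32 -> 4/9
  }
}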
public IcuTransformTokenFilterFactory( IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); // "Null" is ICU's built-in no-op transliterator ID, used when no id is configured this.id = settings.get("id", "Null"); String s = settings.get("dir", "forward"); this.dir = "forward".equals(s) ? Transliterator.FORWARD : Transliterator.REVERSE; this.transliterator = Transliterator.getInstance(id, dir); }
@Inject public SearchService( Settings settings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesQueryCache indicesQueryCache) { super(settings); this.threadPool = threadPool; this.clusterService = clusterService; this.indicesService = indicesService; indicesService .indicesLifecycle() .addListener( new IndicesLifecycle.Listener() { @Override public void afterIndexDeleted(Index index, @IndexSettings Settings indexSettings) { // once an index is closed we can just clean up all the pending search context // information // to release memory and let references to the filesystem go etc. freeAllContextForIndex(index); } }); this.indicesWarmer = indicesWarmer; this.scriptService = scriptService; this.pageCacheRecycler = pageCacheRecycler; this.bigArrays = bigArrays; this.dfsPhase = dfsPhase; this.queryPhase = queryPhase; this.fetchPhase = fetchPhase; this.indicesQueryCache = indicesQueryCache; TimeValue keepAliveInterval = settings.getAsTime(KEEPALIVE_INTERVAL_KEY, timeValueMinutes(1)); // we can have 5 minutes here, since we make sure to clean with search requests and when // shard/index closes this.defaultKeepAlive = settings.getAsTime(DEFAULT_KEEPALIVE_KEY, timeValueMinutes(5)).millis(); Map<String, SearchParseElement> elementParsers = new HashMap<>(); elementParsers.putAll(dfsPhase.parseElements()); elementParsers.putAll(queryPhase.parseElements()); elementParsers.putAll(fetchPhase.parseElements()); elementParsers.put("stats", new StatsGroupsParseElement()); this.elementParsers = ImmutableMap.copyOf(elementParsers); this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval); this.indicesWarmer.addListener(new NormsWarmer()); this.indicesWarmer.addListener(new FieldDataWarmer()); this.indicesWarmer.addListener(new SearchWarmer()); }