@Inject
public SearchService(
    Settings settings,
    ClusterService clusterService,
    IndicesService indicesService,
    IndicesWarmer indicesWarmer,
    ThreadPool threadPool,
    ScriptService scriptService,
    PageCacheRecycler pageCacheRecycler,
    BigArrays bigArrays,
    DfsPhase dfsPhase,
    QueryPhase queryPhase,
    FetchPhase fetchPhase,
    IndicesQueryCache indicesQueryCache) {
  super(settings);
  this.threadPool = threadPool;
  this.clusterService = clusterService;
  this.indicesService = indicesService;
  indicesService
      .indicesLifecycle()
      .addListener(
          new IndicesLifecycle.Listener() {
            @Override
            public void afterIndexDeleted(Index index, @IndexSettings Settings indexSettings) {
              // once an index is deleted we can just clean up all the pending search context
              // information to release memory and let references to the filesystem go, etc.
              freeAllContextForIndex(index);
            }
          });
  this.indicesWarmer = indicesWarmer;
  this.scriptService = scriptService;
  this.pageCacheRecycler = pageCacheRecycler;
  this.bigArrays = bigArrays;
  this.dfsPhase = dfsPhase;
  this.queryPhase = queryPhase;
  this.fetchPhase = fetchPhase;
  this.indicesQueryCache = indicesQueryCache;

  TimeValue keepAliveInterval = settings.getAsTime(KEEPALIVE_INTERVAL_KEY, timeValueMinutes(1));
  // we can have 5 minutes here, since we make sure to clean with search requests and when
  // shard/index closes
  this.defaultKeepAlive = settings.getAsTime(DEFAULT_KEEPALIVE_KEY, timeValueMinutes(5)).millis();

  Map<String, SearchParseElement> elementParsers = new HashMap<>();
  elementParsers.putAll(dfsPhase.parseElements());
  elementParsers.putAll(queryPhase.parseElements());
  elementParsers.putAll(fetchPhase.parseElements());
  elementParsers.put("stats", new StatsGroupsParseElement());
  this.elementParsers = ImmutableMap.copyOf(elementParsers);

  this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval);

  this.indicesWarmer.addListener(new NormsWarmer());
  this.indicesWarmer.addListener(new FieldDataWarmer());
  this.indicesWarmer.addListener(new SearchWarmer());
}
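// A minimal standalone sketch of the keep-alive reaper pattern above, assuming plain
// java.util.concurrent instead of the Elasticsearch ThreadPool API; IdleReaper and Context
// are hypothetical illustrations, not Elasticsearch types. Contexts record their last-access
// time and a periodic task frees any context idle longer than its keep-alive.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class IdleReaper {
  static final class Context {
    volatile long lastAccessMillis = System.currentTimeMillis();
    final long keepAliveMillis;

    Context(long keepAliveMillis) {
      this.keepAliveMillis = keepAliveMillis;
    }
  }

  private final Map<Long, Context> contexts = new ConcurrentHashMap<>();
  private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

  void start(long intervalMillis) {
    // same shape as scheduleWithFixedDelay(new Reaper(), keepAliveInterval) above
    scheduler.scheduleWithFixedDelay(this::reap, intervalMillis, intervalMillis, TimeUnit.MILLISECONDS);
  }

  private void reap() {
    long now = System.currentTimeMillis();
    // free every context that has been idle longer than its keep-alive
    contexts.entrySet().removeIf(e -> now - e.getValue().lastAccessMillis > e.getValue().keepAliveMillis);
  }
}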
@Inject
public DiskThresholdDecider(
    Settings settings,
    NodeSettingsService nodeSettingsService,
    ClusterInfoService infoService,
    Client client) {
  super(settings);
  String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "85%");
  String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "90%");

  if (!validWatermarkSetting(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) {
    throw new ElasticsearchParseException("unable to parse low watermark [{}]", lowWatermark);
  }
  if (!validWatermarkSetting(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) {
    throw new ElasticsearchParseException("unable to parse high watermark [{}]", highWatermark);
  }
  // Watermark is expressed in terms of used data, but we need "free" data watermark
  this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
  this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);

  this.freeBytesThresholdLow =
      thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
  this.freeBytesThresholdHigh =
      thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
  this.includeRelocations =
      settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true);
  this.rerouteInterval =
      settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, TimeValue.timeValueSeconds(60));

  this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true);
  nodeSettingsService.addListener(new ApplySettings());
  infoService.addListener(new DiskListener(client));
}
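// A self-contained sketch of the watermark arithmetic above: watermarks are expressed as
// *used* disk ("85%"), while the decider compares against *free* disk, hence the
// 100.0 - threshold conversion. parsePercent is a hypothetical stand-in for the real
// thresholdPercentageFromWatermark parsing.
class WatermarkMath {
  static double parsePercent(String watermark) {
    return Double.parseDouble(watermark.replace("%", "").trim());
  }

  public static void main(String[] args) {
    double freeDiskThresholdLow = 100.0 - parsePercent("85%");  // 15.0% free
    double freeDiskThresholdHigh = 100.0 - parsePercent("90%"); // 10.0% free
    // allocation to a node is restricted once its free space drops below the threshold
    System.out.println(freeDiskThresholdLow + " / " + freeDiskThresholdHigh);
  }
}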
/**
 * Creates a new TranslogConfig instance
 *
 * @param shardId the shard ID this translog belongs to
 * @param translogPath the path to use for the transaction log files
 * @param indexSettings the index settings used to set internal variables
 * @param durabilty the default durability setting for the translog
 * @param bigArrays a bigArrays instance used for temporarily allocating write operations
 * @param threadPool a {@link ThreadPool} to schedule async sync durability
 */
public TranslogConfig(
    ShardId shardId,
    Path translogPath,
    Settings indexSettings,
    Translog.Durabilty durabilty,
    BigArrays bigArrays,
    @Nullable ThreadPool threadPool) {
  this.indexSettings = indexSettings;
  this.shardId = shardId;
  this.translogPath = translogPath;
  this.durabilty = durabilty;
  this.threadPool = threadPool;
  this.bigArrays = bigArrays;
  this.type =
      TranslogWriter.Type.fromString(
          indexSettings.get(INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.BUFFERED.name()));
  this.bufferSize =
      (int)
          indexSettings
              .getAsBytesSize(
                  INDEX_TRANSLOG_BUFFER_SIZE, IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER)
              .bytes(); // Not really interesting, updated by IndexingMemoryController...

  syncInterval =
      indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5));
  if (syncInterval.millis() > 0 && threadPool != null) {
    syncOnEachOperation = false;
  } else if (syncInterval.millis() == 0) {
    syncOnEachOperation = true;
  } else {
    syncOnEachOperation = false;
  }
}
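// A minimal sketch of the durability decision encoded in the if/else chain above: a positive
// interval with a thread pool available means periodic async fsync; an interval of exactly 0
// means fsync after every operation; otherwise no per-operation sync. SyncPolicy is a
// hypothetical helper, not an Elasticsearch class.
class SyncPolicy {
  static boolean syncOnEachOperation(long syncIntervalMillis, boolean hasThreadPool) {
    if (syncIntervalMillis > 0 && hasThreadPool) {
      return false; // periodic background sync scheduled on the thread pool
    } else if (syncIntervalMillis == 0) {
      return true;  // sync after every write operation
    }
    return false;   // negative interval or no thread pool: no scheduled sync
  }

  public static void main(String[] args) {
    System.out.println(syncOnEachOperation(5000, true)); // false
    System.out.println(syncOnEachOperation(0, true));    // true
  }
}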
@Inject
public LocalGatewayMetaState(
    Settings settings,
    ThreadPool threadPool,
    NodeEnvironment nodeEnv,
    TransportNodesListGatewayMetaState nodesListGatewayMetaState,
    LocalAllocateDangledIndices allocateDangledIndices,
    NodeIndexDeletedAction nodeIndexDeletedAction)
    throws Exception {
  super(settings);
  this.nodeEnv = nodeEnv;
  this.threadPool = threadPool;
  this.format = XContentType.fromRestContentType(settings.get("format", "smile"));
  this.allocateDangledIndices = allocateDangledIndices;
  this.nodeIndexDeletedAction = nodeIndexDeletedAction;
  nodesListGatewayMetaState.init(this);

  if (this.format == XContentType.SMILE) {
    Map<String, String> params = Maps.newHashMap();
    params.put("binary", "true");
    formatParams = new ToXContent.MapParams(params);
    Map<String, String> globalOnlyParams = Maps.newHashMap();
    globalOnlyParams.put("binary", "true");
    globalOnlyParams.put(MetaData.PERSISTENT_ONLY_PARAM, "true");
    globalOnlyParams.put(MetaData.GLOBAL_ONLY_PARAM, "true");
    globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
  } else {
    formatParams = ToXContent.EMPTY_PARAMS;
    Map<String, String> globalOnlyParams = Maps.newHashMap();
    globalOnlyParams.put(MetaData.PERSISTENT_ONLY_PARAM, "true");
    globalOnlyParams.put(MetaData.GLOBAL_ONLY_PARAM, "true");
    globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
  }

  this.autoImportDangled =
      AutoImportDangledState.fromString(
          settings.get("gateway.local.auto_import_dangled", AutoImportDangledState.YES.toString()));
  this.danglingTimeout =
      settings.getAsTime("gateway.local.dangling_timeout", TimeValue.timeValueHours(2));

  logger.debug(
      "using gateway.local.auto_import_dangled [{}], with gateway.local.dangling_timeout [{}]",
      this.autoImportDangled,
      this.danglingTimeout);

  if (DiscoveryNode.masterNode(settings)) {
    try {
      pre019Upgrade();
      long start = System.currentTimeMillis();
      loadState();
      logger.debug(
          "took {} to load state",
          TimeValue.timeValueMillis(System.currentTimeMillis() - start));
    } catch (Exception e) {
      logger.error("failed to read local state, exiting...", e);
      throw e;
    }
  }
}
@Inject
public IndicesRequestCache(
    Settings settings, ClusterService clusterService, ThreadPool threadPool) {
  super(settings);
  this.clusterService = clusterService;
  this.threadPool = threadPool;
  this.cleanInterval =
      settings.getAsTime(INDICES_CACHE_REQUEST_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60));

  String size = settings.get(INDICES_CACHE_QUERY_SIZE);
  if (size == null) {
    size = settings.get(DEPRECATED_INDICES_CACHE_QUERY_SIZE);
    if (size != null) {
      deprecationLogger.deprecated(
          "The ["
              + DEPRECATED_INDICES_CACHE_QUERY_SIZE
              + "] setting is now deprecated, use ["
              + INDICES_CACHE_QUERY_SIZE
              + "] instead");
    }
  }
  if (size == null) {
    // this cache can be very small yet still be very effective
    size = "1%";
  }
  this.size = size;

  this.expire = settings.getAsTime(INDICES_CACHE_QUERY_EXPIRE, null);
  // defaults to 4, but this is a busy map for all indices, increase it a bit by default
  this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, 16);
  if (concurrencyLevel <= 0) {
    throw new IllegalArgumentException(
        "concurrency_level must be > 0 but was: " + concurrencyLevel);
  }
  buildCache();

  this.reaper = new Reaper();
  threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper);
}
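// A standalone sketch of the deprecated-setting fallback above: prefer the new key, fall back
// to the deprecated key (warning if it is used), then to a built-in default. The plain
// java.util.Map stands in for the Elasticsearch Settings class, and the key strings here are
// illustrative, not the exact values of the constants above.
import java.util.Map;

class DeprecatedSettingFallback {
  static String readCacheSize(Map<String, String> settings) {
    String size = settings.get("indices.requests.cache.size"); // current key (assumed name)
    if (size == null) {
      size = settings.get("indices.cache.query.size"); // deprecated key (assumed name)
      if (size != null) {
        System.err.println("deprecated setting used; prefer the new key");
      }
    }
    // this cache can be very small yet still be very effective
    return size != null ? size : "1%";
  }
}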
@Inject
public RoutingService(
    Settings settings,
    ThreadPool threadPool,
    ClusterService clusterService,
    AllocationService allocationService) {
  super(settings);
  this.threadPool = threadPool;
  this.clusterService = clusterService;
  this.allocationService = allocationService;
  this.schedule = settings.getAsTime("cluster.routing.schedule", timeValueSeconds(10));
  clusterService.addFirst(this);
}
/** Creates a new {@link org.elasticsearch.index.engine.EngineConfig} */
public EngineConfig(
    ShardId shardId,
    boolean optimizeAutoGenerateId,
    ThreadPool threadPool,
    ShardIndexingService indexingService,
    IndexSettingsService indexSettingsService,
    IndicesWarmer warmer,
    Store store,
    SnapshotDeletionPolicy deletionPolicy,
    Translog translog,
    MergePolicyProvider mergePolicyProvider,
    MergeSchedulerProvider mergeScheduler,
    Analyzer analyzer,
    Similarity similarity,
    CodecService codecService,
    Engine.FailedEngineListener failedEngineListener) {
  this.shardId = shardId;
  this.optimizeAutoGenerateId = optimizeAutoGenerateId;
  this.threadPool = threadPool;
  this.indexingService = indexingService;
  this.indexSettingsService = indexSettingsService;
  this.warmer = warmer;
  this.store = store;
  this.deletionPolicy = deletionPolicy;
  this.translog = translog;
  this.mergePolicyProvider = mergePolicyProvider;
  this.mergeScheduler = mergeScheduler;
  this.analyzer = analyzer;
  this.similarity = similarity;
  this.codecService = codecService;
  this.failedEngineListener = failedEngineListener;

  Settings indexSettings = indexSettingsService.getSettings();
  this.compoundOnFlush =
      indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
  this.indexConcurrency =
      indexSettings.getAsInt(
          EngineConfig.INDEX_CONCURRENCY_SETTING,
          Math.max(
              IndexWriterConfig.DEFAULT_MAX_THREAD_STATES,
              (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65)));
  codecName =
      indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
  indexingBufferSize =
      indexSettings.getAsBytesSize(INDEX_BUFFER_SIZE_SETTING, DEFAUTL_INDEX_BUFFER_SIZE);
  failEngineOnCorruption = indexSettings.getAsBoolean(INDEX_FAIL_ON_CORRUPTION_SETTING, true);
  failOnMergeFailure = indexSettings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE_SETTING, true);
  gcDeletesInMillis =
      indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis();
}
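// A hedged sketch of the index-concurrency default above: take the larger of Lucene's default
// max thread states and 65% of the available processors. The constant 8 is an assumed value
// standing in for IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, and boundedProcessors stands in
// for EsExecutors.boundedNumberOfProcessors(settings).
class ConcurrencyDefault {
  static int defaultIndexConcurrency(int boundedProcessors) {
    int luceneDefaultMaxThreadStates = 8; // assumed value, for illustration only
    return Math.max(luceneDefaultMaxThreadStates, (int) (boundedProcessors * 0.65));
  }

  public static void main(String[] args) {
    System.out.println(defaultIndexConcurrency(32)); // 20: big boxes get more writer threads
    System.out.println(defaultIndexConcurrency(4));  // 8: small boxes keep the Lucene default
  }
}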
@Override
public void onRefreshSettings(Settings settings) {
  TimeValue snapshotInterval =
      settings.getAsTime(
          INDEX_GATEWAY_SNAPSHOT_INTERVAL, IndexShardGatewayService.this.snapshotInterval);
  if (!snapshotInterval.equals(IndexShardGatewayService.this.snapshotInterval)) {
    logger.info(
        "updating snapshot_interval from [{}] to [{}]",
        IndexShardGatewayService.this.snapshotInterval,
        snapshotInterval);
    IndexShardGatewayService.this.snapshotInterval = snapshotInterval;
    if (snapshotScheduleFuture != null) {
      snapshotScheduleFuture.cancel(false);
      snapshotScheduleFuture = null;
    }
    scheduleSnapshotIfNeeded();
  }
}
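// A self-contained sketch of the cancel-and-reschedule pattern above: when the interval setting
// changes, cancel the currently scheduled task (without interrupting a run in progress, hence
// cancel(false)) and schedule a fresh one at the new interval. Pure java.util.concurrent;
// Rescheduler is a hypothetical stand-in, not the Elasticsearch service.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

class Rescheduler {
  private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
  private long intervalMillis = 60_000;
  private ScheduledFuture<?> future;

  synchronized void onIntervalChanged(long newIntervalMillis) {
    if (newIntervalMillis == intervalMillis) {
      return; // unchanged, keep the existing schedule
    }
    intervalMillis = newIntervalMillis;
    if (future != null) {
      future.cancel(false); // mirrors snapshotScheduleFuture.cancel(false)
      future = null;
    }
    future = scheduler.scheduleWithFixedDelay(
        this::runTask, newIntervalMillis, newIntervalMillis, TimeUnit.MILLISECONDS);
  }

  private void runTask() {
    // the snapshot work itself would go here
  }
}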
@Inject
public GatewayAllocator(
    Settings settings,
    TransportNodesListGatewayStartedShards listGatewayStartedShards,
    TransportNodesListShardStoreMetaData listShardStoreMetaData) {
  super(settings);
  this.listGatewayStartedShards = listGatewayStartedShards;
  this.listShardStoreMetaData = listShardStoreMetaData;

  this.listTimeout =
      componentSettings.getAsTime(
          "list_timeout",
          settings.getAsTime("gateway.local.list_timeout", TimeValue.timeValueSeconds(30)));
  this.initialShards =
      componentSettings.get("initial_shards", settings.get("gateway.local.initial_shards", "quorum"));

  logger.debug("using initial_shards [{}], list_timeout [{}]", initialShards, listTimeout);
}
protected TransportReplicationAction(
    Settings settings,
    String actionName,
    TransportService transportService,
    ClusterService clusterService,
    IndicesService indicesService,
    ThreadPool threadPool,
    ShardStateAction shardStateAction,
    MappingUpdatedAction mappingUpdatedAction,
    ActionFilters actionFilters,
    IndexNameExpressionResolver indexNameExpressionResolver,
    Supplier<Request> request,
    Supplier<ReplicaRequest> replicaRequest,
    String executor) {
  super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver);
  this.transportService = transportService;
  this.clusterService = clusterService;
  this.indicesService = indicesService;
  this.shardStateAction = shardStateAction;
  this.mappingUpdatedAction = mappingUpdatedAction;

  this.transportReplicaAction = actionName + "[r]";
  this.executor = executor;
  this.checkWriteConsistency = checkWriteConsistency();

  transportService.registerRequestHandler(
      actionName, request, ThreadPool.Names.SAME, new OperationTransportHandler());
  // we must never reject a replica operation because of thread pool capacity
  transportService.registerRequestHandler(
      transportReplicaAction,
      replicaRequest,
      executor,
      true,
      new ReplicaOperationTransportHandler());

  this.transportOptions = transportOptions();

  this.defaultWriteConsistencyLevel =
      WriteConsistencyLevel.fromString(settings.get("action.write_consistency", "quorum"));
  // TODO: set a default timeout
  shardFailedTimeout = settings.getAsTime(SHARD_FAILURE_TIMEOUT, null);
}
@Override
public void onRefreshSettings(Settings settings) {
  String newLowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, null);
  String newHighWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, null);
  Boolean newRelocationsSetting =
      settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, null);
  Boolean newEnableSetting =
      settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null);
  TimeValue newRerouteInterval =
      settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, null);

  if (newEnableSetting != null) {
    logger.info(
        "updating [{}] from [{}] to [{}]",
        CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED,
        DiskThresholdDecider.this.enabled,
        newEnableSetting);
    DiskThresholdDecider.this.enabled = newEnableSetting;
  }
  if (newRelocationsSetting != null) {
    logger.info(
        "updating [{}] from [{}] to [{}]",
        CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS,
        DiskThresholdDecider.this.includeRelocations,
        newRelocationsSetting);
    DiskThresholdDecider.this.includeRelocations = newRelocationsSetting;
  }
  if (newLowWatermark != null) {
    if (!validWatermarkSetting(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) {
      throw new ElasticsearchParseException(
          "unable to parse low watermark [{}]", newLowWatermark);
    }
    logger.info(
        "updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, newLowWatermark);
    DiskThresholdDecider.this.freeDiskThresholdLow =
        100.0 - thresholdPercentageFromWatermark(newLowWatermark);
    DiskThresholdDecider.this.freeBytesThresholdLow =
        thresholdBytesFromWatermark(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
  }
  if (newHighWatermark != null) {
    if (!validWatermarkSetting(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) {
      throw new ElasticsearchParseException(
          "unable to parse high watermark [{}]", newHighWatermark);
    }
    logger.info(
        "updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, newHighWatermark);
    DiskThresholdDecider.this.freeDiskThresholdHigh =
        100.0 - thresholdPercentageFromWatermark(newHighWatermark);
    DiskThresholdDecider.this.freeBytesThresholdHigh =
        thresholdBytesFromWatermark(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
  }
  if (newRerouteInterval != null) {
    logger.info(
        "updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, newRerouteInterval);
    DiskThresholdDecider.this.rerouteInterval = newRerouteInterval;
  }
}
@Override
public final void onRefreshSettings(Settings settings) {
  boolean change = false;
  long gcDeletesInMillis =
      settings
          .getAsTime(
              EngineConfig.INDEX_GC_DELETES_SETTING,
              TimeValue.timeValueMillis(config.getGcDeletesInMillis()))
          .millis();
  if (gcDeletesInMillis != config.getGcDeletesInMillis()) {
    logger.info(
        "updating {} from [{}] to [{}]",
        EngineConfig.INDEX_GC_DELETES_SETTING,
        TimeValue.timeValueMillis(config.getGcDeletesInMillis()),
        TimeValue.timeValueMillis(gcDeletesInMillis));
    config.gcDeletesInMillis = gcDeletesInMillis;
    change = true;
  }

  final boolean compoundOnFlush =
      settings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, config.isCompoundOnFlush());
  if (compoundOnFlush != config.isCompoundOnFlush()) {
    logger.info(
        "updating {} from [{}] to [{}]",
        EngineConfig.INDEX_COMPOUND_ON_FLUSH,
        config.isCompoundOnFlush(),
        compoundOnFlush);
    config.compoundOnFlush = compoundOnFlush;
    change = true;
  }

  final boolean failEngineOnCorruption =
      settings.getAsBoolean(
          EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, config.isFailEngineOnCorruption());
  if (failEngineOnCorruption != config.isFailEngineOnCorruption()) {
    logger.info(
        "updating {} from [{}] to [{}]",
        EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING,
        config.isFailEngineOnCorruption(),
        failEngineOnCorruption);
    config.failEngineOnCorruption = failEngineOnCorruption;
    change = true;
  }

  int indexConcurrency =
      settings.getAsInt(EngineConfig.INDEX_CONCURRENCY_SETTING, config.getIndexConcurrency());
  if (indexConcurrency != config.getIndexConcurrency()) {
    logger.info(
        "updating index.index_concurrency from [{}] to [{}]",
        config.getIndexConcurrency(),
        indexConcurrency);
    config.setIndexConcurrency(indexConcurrency);
    // we have to flush in this case, since it only applies on a new index writer
    change = true;
  }

  final String codecName = settings.get(EngineConfig.INDEX_CODEC_SETTING, config.codecName);
  if (!codecName.equals(config.codecName)) {
    logger.info(
        "updating {} from [{}] to [{}]",
        EngineConfig.INDEX_CODEC_SETTING,
        config.codecName,
        codecName);
    config.codecName = codecName;
    // we want to flush in this case, so the new codec will be reflected right away...
    change = true;
  }

  final boolean failOnMergeFailure =
      settings.getAsBoolean(
          EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING, config.isFailOnMergeFailure());
  if (failOnMergeFailure != config.isFailOnMergeFailure()) {
    logger.info(
        "updating {} from [{}] to [{}]",
        EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING,
        config.isFailOnMergeFailure(),
        failOnMergeFailure);
    config.failOnMergeFailure = failOnMergeFailure;
    change = true;
  }

  if (change) {
    onChange();
  }
}
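// A minimal sketch of the compare-log-apply pattern repeated above: each refreshed value is
// compared to the live one, applied only on change, and a single flag defers the expensive
// side effect (onChange, e.g. flushing so a new IndexWriter picks the settings up) until all
// settings have been processed. ChangeFlagPattern is a hypothetical illustration.
class ChangeFlagPattern {
  private boolean compoundOnFlush = true;
  private String codecName = "default";

  void onRefreshSettings(boolean newCompoundOnFlush, String newCodecName) {
    boolean change = false;
    if (newCompoundOnFlush != compoundOnFlush) {
      compoundOnFlush = newCompoundOnFlush;
      change = true;
    }
    if (!newCodecName.equals(codecName)) {
      codecName = newCodecName;
      change = true;
    }
    if (change) {
      onChange(); // one flush/reopen regardless of how many settings changed
    }
  }

  void onChange() {
    // the expensive reaction to a settings change would go here
  }
}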
@Inject
public ZenDiscovery(
    Settings settings,
    ClusterName clusterName,
    ThreadPool threadPool,
    TransportService transportService,
    ClusterService clusterService,
    NodeSettingsService nodeSettingsService,
    DiscoveryNodeService discoveryNodeService,
    ZenPingService pingService,
    Version version) {
  super(settings);
  this.clusterName = clusterName;
  this.threadPool = threadPool;
  this.clusterService = clusterService;
  this.transportService = transportService;
  this.discoveryNodeService = discoveryNodeService;
  this.pingService = pingService;
  this.version = version;

  // also support direct discovery.zen settings, for cases when it gets extended
  this.pingTimeout =
      settings.getAsTime(
          "discovery.zen.ping.timeout",
          settings.getAsTime(
              "discovery.zen.ping_timeout",
              componentSettings.getAsTime(
                  "ping_timeout",
                  componentSettings.getAsTime("initial_ping_timeout", timeValueSeconds(3)))));
  this.sendLeaveRequest = componentSettings.getAsBoolean("send_leave_request", true);

  this.masterElectionFilterClientNodes =
      settings.getAsBoolean("discovery.zen.master_election.filter_client", true);
  this.masterElectionFilterDataNodes =
      settings.getAsBoolean("discovery.zen.master_election.filter_data", false);

  logger.debug(
      "using ping.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]",
      pingTimeout,
      masterElectionFilterClientNodes,
      masterElectionFilterDataNodes);

  this.electMaster = new ElectMasterService(settings);
  nodeSettingsService.addListener(new ApplySettings());

  this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, this);
  this.masterFD.addListener(new MasterNodeFailureListener());

  this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService);
  this.nodesFD.addListener(new NodeFailureListener());

  this.publishClusterState =
      new PublishClusterStateAction(settings, transportService, this, new NewClusterStateListener());
  this.pingService.setNodesProvider(this);
  this.membership =
      new MembershipAction(settings, transportService, this, new MembershipListener());

  transportService.registerHandler(
      RejoinClusterRequestHandler.ACTION, new RejoinClusterRequestHandler());
}
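// A standalone sketch of the layered fallback chain used for the ping timeout above: newer keys
// take precedence, older or component-scoped keys act as defaults, and only the last level
// supplies the hard-coded default. A plain java.util.Map stands in for Settings, and
// SettingsFallbackChain/firstNonNull are hypothetical helpers.
import java.util.Map;

class SettingsFallbackChain {
  static String pingTimeout(Map<String, String> settings, Map<String, String> componentSettings) {
    return firstNonNull(
        settings.get("discovery.zen.ping.timeout"),
        settings.get("discovery.zen.ping_timeout"),
        componentSettings.get("ping_timeout"),
        componentSettings.get("initial_ping_timeout"),
        "3s"); // final default, mirroring timeValueSeconds(3)
  }

  static String firstNonNull(String... values) {
    for (String v : values) {
      if (v != null) {
        return v;
      }
    }
    throw new IllegalArgumentException("at least one value must be non-null");
  }
}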
private ExecutorHolder rebuild(
    String name,
    ExecutorHolder previousExecutorHolder,
    @Nullable Settings settings,
    Settings defaultSettings) {
  if (Names.SAME.equals(name)) {
    // Don't allow changing the "same" thread executor
    return previousExecutorHolder;
  }
  if (settings == null) {
    settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
  }
  Info previousInfo = previousExecutorHolder != null ? previousExecutorHolder.info : null;
  String type =
      settings.get(
          "type", previousInfo != null ? previousInfo.getType() : defaultSettings.get("type"));
  ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, name);
  if ("same".equals(type)) {
    if (previousExecutorHolder != null) {
      logger.debug("updating thread_pool [{}], type [{}]", name, type);
    } else {
      logger.debug("creating thread_pool [{}], type [{}]", name, type);
    }
    return new ExecutorHolder(MoreExecutors.sameThreadExecutor(), new Info(name, type));
  } else if ("cached".equals(type)) {
    TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
    if (previousExecutorHolder != null) {
      if ("cached".equals(previousInfo.getType())) {
        TimeValue updatedKeepAlive = settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
        if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
          logger.debug(
              "updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, updatedKeepAlive);
          ((EsThreadPoolExecutor) previousExecutorHolder.executor)
              .setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
          return new ExecutorHolder(
              previousExecutorHolder.executor, new Info(name, type, -1, -1, updatedKeepAlive, null));
        }
        return previousExecutorHolder;
      }
      if (previousInfo.getKeepAlive() != null) {
        defaultKeepAlive = previousInfo.getKeepAlive();
      }
    }
    TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
    if (previousExecutorHolder != null) {
      logger.debug("updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
    } else {
      logger.debug("creating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
    }
    Executor executor =
        EsExecutors.newCached(keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
    return new ExecutorHolder(executor, new Info(name, type, -1, -1, keepAlive, null));
  } else if ("fixed".equals(type)) {
    int defaultSize =
        defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
    SizeValue defaultQueueSize =
        defaultSettings.getAsSize("queue", defaultSettings.getAsSize("queue_size", null));

    if (previousExecutorHolder != null) {
      if ("fixed".equals(previousInfo.getType())) {
        SizeValue updatedQueueSize =
            settings.getAsSize(
                "capacity",
                settings.getAsSize(
                    "queue", settings.getAsSize("queue_size", previousInfo.getQueueSize())));
        if (Objects.equal(previousInfo.getQueueSize(), updatedQueueSize)) {
          int updatedSize = settings.getAsInt("size", previousInfo.getMax());
          if (previousInfo.getMax() != updatedSize) {
            logger.debug(
                "updating thread_pool [{}], type [{}], size [{}], queue_size [{}]",
                name,
                type,
                updatedSize,
                updatedQueueSize);
            ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedSize);
            ((EsThreadPoolExecutor) previousExecutorHolder.executor).setMaximumPoolSize(updatedSize);
            return new ExecutorHolder(
                previousExecutorHolder.executor,
                new Info(name, type, updatedSize, updatedSize, null, updatedQueueSize));
          }
          return previousExecutorHolder;
        }
      }
      if (previousInfo.getMax() >= 0) {
        defaultSize = previousInfo.getMax();
      }
      defaultQueueSize = previousInfo.getQueueSize();
    }

    int size = settings.getAsInt("size", defaultSize);
    SizeValue queueSize =
        settings.getAsSize(
            "capacity", settings.getAsSize("queue", settings.getAsSize("queue_size", defaultQueueSize)));
    logger.debug(
        "creating thread_pool [{}], type [{}], size [{}], queue_size [{}]",
        name,
        type,
        size,
        queueSize);
    Executor executor =
        EsExecutors.newFixed(size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory);
    return new ExecutorHolder(executor, new Info(name, type, size, size, null, queueSize));
  } else if ("scaling".equals(type)) {
    TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
    int defaultMin = defaultSettings.getAsInt("min", 1);
    int defaultSize =
        defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
    if (previousExecutorHolder != null) {
      if ("scaling".equals(previousInfo.getType())) {
        TimeValue updatedKeepAlive = settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
        int updatedMin = settings.getAsInt("min", previousInfo.getMin());
        int updatedSize = settings.getAsInt("max", settings.getAsInt("size", previousInfo.getMax()));
        if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)
            || previousInfo.getMin() != updatedMin
            || previousInfo.getMax() != updatedSize) {
          logger.debug(
              "updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, updatedKeepAlive);
          if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
            ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                .setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
          }
          if (previousInfo.getMin() != updatedMin) {
            ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedMin);
          }
          if (previousInfo.getMax() != updatedSize) {
            ((EsThreadPoolExecutor) previousExecutorHolder.executor).setMaximumPoolSize(updatedSize);
          }
          return new ExecutorHolder(
              previousExecutorHolder.executor,
              new Info(name, type, updatedMin, updatedSize, updatedKeepAlive, null));
        }
        return previousExecutorHolder;
      }
      if (previousInfo.getKeepAlive() != null) {
        defaultKeepAlive = previousInfo.getKeepAlive();
      }
      if (previousInfo.getMin() >= 0) {
        defaultMin = previousInfo.getMin();
      }
      if (previousInfo.getMax() >= 0) {
        defaultSize = previousInfo.getMax();
      }
    }
    TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
    int min = settings.getAsInt("min", defaultMin);
    int size = settings.getAsInt("max", settings.getAsInt("size", defaultSize));
    if (previousExecutorHolder != null) {
      logger.debug(
          "updating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]",
          name,
          type,
          min,
          size,
          keepAlive);
    } else {
      logger.debug(
          "creating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]",
          name,
          type,
          min,
          size,
          keepAlive);
    }
    Executor executor =
        EsExecutors.newScaling(min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
    return new ExecutorHolder(executor, new Info(name, type, min, size, keepAlive, null));
  }
  throw new ElasticSearchIllegalArgumentException("No type found [" + type + "], for [" + name + "]");
}
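// A hedged sketch of the type dispatch that rebuild() performs above: pick an executor shape
// from a type string, here using stock java.util.concurrent factories. This deliberately
// simplifies the real EsExecutors factories and omits the in-place update paths; keepAliveMillis
// is assumed to be > 0 for the scaling case.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class ExecutorFactory {
  static ExecutorService build(String type, int size, long keepAliveMillis) {
    switch (type) {
      case "fixed":
        // fixed pool with an unbounded queue (queue_size == -1 in the snippet above)
        return Executors.newFixedThreadPool(size);
      case "cached":
        return Executors.newCachedThreadPool();
      case "scaling": {
        // a pool of up to `size` threads that time out when idle; true min-to-max scaling
        // needs the custom queue that EsExecutors provides, which this sketch omits
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
            size, size, keepAliveMillis, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());
        executor.allowCoreThreadTimeOut(true);
        return executor;
      }
      default:
        throw new IllegalArgumentException("No type found [" + type + "]");
    }
  }
}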