/**
   * Creates a new TranslogConfig instance
   *
   * @param shardId the shard ID this translog belongs to
   * @param translogPath the path to use for the transaction log files
   * @param indexSettings the index settings used to set internal variables
   * @param durabilty the default durability setting for the translog
   * @param bigArrays a bigArrays instance used for temporarily allocating write operations
   * @param threadPool a {@link ThreadPool} to schedule async sync durability
   */
  public TranslogConfig(
      ShardId shardId,
      Path translogPath,
      Settings indexSettings,
      Translog.Durabilty durabilty,
      BigArrays bigArrays,
      @Nullable ThreadPool threadPool) {
    this.indexSettings = indexSettings;
    this.shardId = shardId;
    this.translogPath = translogPath;
    this.durabilty = durabilty;
    this.threadPool = threadPool;
    this.bigArrays = bigArrays;
    this.type =
        TranslogWriter.Type.fromString(
            indexSettings.get(INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.BUFFERED.name()));
    this.bufferSize =
        (int)
            indexSettings
                .getAsBytesSize(
                    INDEX_TRANSLOG_BUFFER_SIZE,
                    IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER)
                .bytes(); // Not really interesting, updated by IndexingMemoryController...

    syncInterval =
        indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5));
    if (syncInterval.millis() > 0 && threadPool != null) {
      syncOnEachOperation = false;
    } else if (syncInterval.millis() == 0) {
      syncOnEachOperation = true;
    } else {
      syncOnEachOperation = false;
    }
  }
// Example #2
 /**
  * Constructs new read-only URL-based blob store
  *
  * <p>The following settings are supported
  *
  * <dl>
  *   <dt>buffer_size
  *   <dd>- size of the read buffer, defaults to 100KB
  * </dl>
  *
  * @param settings settings
  * @param path base URL
  */
 public URLBlobStore(Settings settings, URL path) {
   super(settings);
   this.path = path;
   this.bufferSizeInBytes =
       (int)
           settings
               .getAsBytesSize(
                   "repositories.uri.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB))
               .bytes();
 }
  /**
   * Constructor. Sets up the container mostly.
   *
   * @param settings Settings for our repository. Only care about buffer size.
   * @param auth
   * @param container
   * @param executor
   */
  public SwiftBlobStore(Settings settings, Account auth, String container, Executor executor) {
    super(settings);
    this.executor = executor;
    this.bufferSizeInBytes =
        (int)
            settings.getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes();

    swift = auth.getContainer(container);
    if (!swift.exists()) {
      swift.create();
      swift.makePublic();
    }
  }
  /**
   * Wires up the percolator service: stores injected dependencies, sizes the per-thread
   * {@code MemoryIndex} reuse budget from settings, and registers the known percolator
   * implementations by their id.
   */
  @Inject
  public PercolatorService(
      Settings settings,
      IndicesService indicesService,
      CacheRecycler cacheRecycler,
      PageCacheRecycler pageCacheRecycler,
      BigArrays bigArrays,
      HighlightPhase highlightPhase,
      ClusterService clusterService,
      FacetPhase facetPhase,
      AggregationPhase aggregationPhase,
      ScriptService scriptService,
      MappingUpdatedAction mappingUpdatedAction) {
    super(settings);
    // Plain dependency wiring; no logic beyond storing the injected references.
    this.indicesService = indicesService;
    this.cacheRecycler = cacheRecycler;
    this.pageCacheRecycler = pageCacheRecycler;
    this.bigArrays = bigArrays;
    this.clusterService = clusterService;
    this.highlightPhase = highlightPhase;
    this.facetPhase = facetPhase;
    this.aggregationPhase = aggregationPhase;
    this.scriptService = scriptService;
    this.mappingUpdatedAction = mappingUpdatedAction;
    this.sortParseElement = new SortParseElement();

    // Per-thread memory budget for the reusable MemoryIndex, defaulting to 1MB.
    final long maxReuseBytes =
        settings
            .getAsBytesSize(
                "indices.memory.memory_index.size_per_thread",
                new ByteSizeValue(1, ByteSizeUnit.MB))
            .bytes();
    // Thread-local cache so each search thread reuses one MemoryIndex instance.
    cache =
        new CloseableThreadLocal<MemoryIndex>() {
          @Override
          protected MemoryIndex initialValue() {
            return new ExtendedMemoryIndex(true, maxReuseBytes);
          }
        };
    single = new SingleDocumentPercolatorIndex(cache);
    multi = new MultiDocumentPercolatorIndex(cache);

    // Registry mapping each percolator's id byte to its implementation
    // (the percolator instances are declared elsewhere in this class).
    percolatorTypes = new ByteObjectOpenHashMap<>(6);
    percolatorTypes.put(countPercolator.id(), countPercolator);
    percolatorTypes.put(queryCountPercolator.id(), queryCountPercolator);
    percolatorTypes.put(matchPercolator.id(), matchPercolator);
    percolatorTypes.put(queryPercolator.id(), queryPercolator);
    percolatorTypes.put(scoringPercolator.id(), scoringPercolator);
    percolatorTypes.put(topMatchingPercolator.id(), topMatchingPercolator);
  }
 /**
  * Creates a new {@link org.elasticsearch.index.engine.EngineConfig}.
  *
  * <p>Stores the injected engine collaborators, then resolves tunable engine settings
  * (compound-on-flush, index concurrency, codec, indexing buffer size, failure flags and
  * GC-deletes interval) from the current index settings.
  */
 public EngineConfig(
     ShardId shardId,
     boolean optimizeAutoGenerateId,
     ThreadPool threadPool,
     ShardIndexingService indexingService,
     IndexSettingsService indexSettingsService,
     IndicesWarmer warmer,
     Store store,
     SnapshotDeletionPolicy deletionPolicy,
     Translog translog,
     MergePolicyProvider mergePolicyProvider,
     MergeSchedulerProvider mergeScheduler,
     Analyzer analyzer,
     Similarity similarity,
     CodecService codecService,
     Engine.FailedEngineListener failedEngineListener) {
   // Plain dependency wiring.
   this.shardId = shardId;
   this.optimizeAutoGenerateId = optimizeAutoGenerateId;
   this.threadPool = threadPool;
   this.indexingService = indexingService;
   this.indexSettingsService = indexSettingsService;
   this.warmer = warmer;
   this.store = store;
   this.deletionPolicy = deletionPolicy;
   this.translog = translog;
   this.mergePolicyProvider = mergePolicyProvider;
   this.mergeScheduler = mergeScheduler;
   this.analyzer = analyzer;
   this.similarity = similarity;
   this.codecService = codecService;
   this.failedEngineListener = failedEngineListener;
   // Snapshot of the current index settings used to resolve the values below.
   Settings indexSettings = indexSettingsService.getSettings();
   this.compoundOnFlush =
       indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
   // Concurrency defaults to the larger of Lucene's max thread states and 65% of the
   // bounded processor count.
   this.indexConcurrency =
       indexSettings.getAsInt(
           EngineConfig.INDEX_CONCURRENCY_SETTING,
           Math.max(
               IndexWriterConfig.DEFAULT_MAX_THREAD_STATES,
               (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65)));
   codecName =
       indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
   // NOTE(review): the constant name "DEFAUTL_INDEX_BUFFER_SIZE" is misspelled at its
   // declaration site elsewhere; renaming must happen there, not here.
   indexingBufferSize =
       indexSettings.getAsBytesSize(INDEX_BUFFER_SIZE_SETTING, DEFAUTL_INDEX_BUFFER_SIZE);
   failEngineOnCorruption = indexSettings.getAsBoolean(INDEX_FAIL_ON_CORRUPTION_SETTING, true);
   failOnMergeFailure = indexSettings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE_SETTING, true);
   gcDeletesInMillis =
       indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis();
 }
    /**
     * Applies dynamically refreshed merge-policy settings.
     *
     * <p>For each tunable (min/max merge size, max merge docs, merge factor, compound
     * format) the new value is compared with the provider's current one; when it changed
     * the change is logged, stored on the provider, and pushed to every live policy.
     */
    @Override
    public void onRefreshSettings(Settings settings) {
      ByteSizeValue minMergeSize =
          settings.getAsBytesSize(
              "index.merge.policy.min_merge_size",
              LogByteSizeMergePolicyProvider.this.minMergeSize);
      if (!minMergeSize.equals(LogByteSizeMergePolicyProvider.this.minMergeSize)) {
        logger.info(
            "updating min_merge_size from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.minMergeSize,
            minMergeSize);
        LogByteSizeMergePolicyProvider.this.minMergeSize = minMergeSize;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMinMergeMB(minMergeSize.mbFrac());
        }
      }

      ByteSizeValue maxMergeSize =
          settings.getAsBytesSize(
              "index.merge.policy.max_merge_size",
              LogByteSizeMergePolicyProvider.this.maxMergeSize);
      if (!maxMergeSize.equals(LogByteSizeMergePolicyProvider.this.maxMergeSize)) {
        logger.info(
            "updating max_merge_size from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.maxMergeSize,
            maxMergeSize);
        LogByteSizeMergePolicyProvider.this.maxMergeSize = maxMergeSize;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMaxMergeMB(maxMergeSize.mbFrac());
        }
      }

      int maxMergeDocs =
          settings.getAsInt(
              "index.merge.policy.max_merge_docs",
              LogByteSizeMergePolicyProvider.this.maxMergeDocs);
      if (maxMergeDocs != LogByteSizeMergePolicyProvider.this.maxMergeDocs) {
        logger.info(
            "updating max_merge_docs from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.maxMergeDocs,
            maxMergeDocs);
        LogByteSizeMergePolicyProvider.this.maxMergeDocs = maxMergeDocs;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMaxMergeDocs(maxMergeDocs);
        }
      }

      int mergeFactor =
          settings.getAsInt(
              "index.merge.policy.merge_factor", LogByteSizeMergePolicyProvider.this.mergeFactor);
      if (mergeFactor != LogByteSizeMergePolicyProvider.this.mergeFactor) {
        logger.info(
            "updating merge_factor from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.mergeFactor,
            mergeFactor);
        LogByteSizeMergePolicyProvider.this.mergeFactor = mergeFactor;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMergeFactor(mergeFactor);
        }
      }

      boolean compoundFormat =
          settings.getAsBoolean(
              "index.compound_format", LogByteSizeMergePolicyProvider.this.compoundFormat);
      if (compoundFormat != LogByteSizeMergePolicyProvider.this.compoundFormat) {
        logger.info(
            "updating index.compound_format from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.compoundFormat,
            compoundFormat);
        LogByteSizeMergePolicyProvider.this.compoundFormat = compoundFormat;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setUseCompoundFile(compoundFormat);
        }
      }
    }
  /**
   * Netty-based HTTP transport constructor: resolves every "http.netty.*" setting (falling
   * back to the generic "http.*" and TCP settings where applicable), derives receive-buffer
   * predictor sizes from direct memory, and caps max_content_length at Integer.MAX_VALUE.
   */
  @Inject
  public NettyHttpServerTransport(
      Settings settings, NetworkService networkService, BigArrays bigArrays) {
    super(settings);
    this.networkService = networkService;
    this.bigArrays = bigArrays;

    // Opt-in workaround for the JDK epoll spin bug, forwarded to Netty via system property.
    if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
      System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
    }

    // HTTP message limits; each netty-specific key falls back to the generic http.* key.
    ByteSizeValue maxContentLength =
        settings.getAsBytesSize(
            "http.netty.max_content_length",
            settings.getAsBytesSize(
                "http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB)));
    this.maxChunkSize =
        settings.getAsBytesSize(
            "http.netty.max_chunk_size",
            settings.getAsBytesSize("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
    this.maxHeaderSize =
        settings.getAsBytesSize(
            "http.netty.max_header_size",
            settings.getAsBytesSize("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
    this.maxInitialLineLength =
        settings.getAsBytesSize(
            "http.netty.max_initial_line_length",
            settings.getAsBytesSize(
                "http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB)));
    // don't reset cookies by default, since I don't think we really need to
    // note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still,
    // currently, we don't need cookies
    this.resetCookies =
        settings.getAsBoolean(
            "http.netty.reset_cookies", settings.getAsBoolean("http.reset_cookies", false));
    this.maxCumulationBufferCapacity =
        settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null);
    this.maxCompositeBufferComponents =
        settings.getAsInt("http.netty.max_composite_buffer_components", -1);
    this.workerCount =
        settings.getAsInt(
            "http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
    this.blockingServer =
        settings.getAsBoolean(
            "http.netty.http.blocking_server",
            settings.getAsBoolean(TCP_BLOCKING_SERVER, settings.getAsBoolean(TCP_BLOCKING, false)));
    // Bind/publish addressing and TCP socket options, again with http.* fallbacks.
    this.port = settings.get("http.netty.port", settings.get("http.port", "9200-9300"));
    this.bindHost =
        settings.get(
            "http.netty.bind_host", settings.get("http.bind_host", settings.get("http.host")));
    this.publishHost =
        settings.get(
            "http.netty.publish_host",
            settings.get("http.publish_host", settings.get("http.host")));
    this.publishPort =
        settings.getAsInt("http.netty.publish_port", settings.getAsInt("http.publish_port", 0));
    this.tcpNoDelay = settings.get("http.netty.tcp_no_delay", settings.get(TCP_NO_DELAY, "true"));
    this.tcpKeepAlive =
        settings.get("http.netty.tcp_keep_alive", settings.get(TCP_KEEP_ALIVE, "true"));
    this.reuseAddress =
        settings.getAsBoolean(
            "http.netty.reuse_address",
            settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
    this.tcpSendBufferSize =
        settings.getAsBytesSize(
            "http.netty.tcp_send_buffer_size",
            settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
    this.tcpReceiveBufferSize =
        settings.getAsBytesSize(
            "http.netty.tcp_receive_buffer_size",
            settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));
    this.detailedErrorsEnabled = settings.getAsBoolean(SETTING_HTTP_DETAILED_ERRORS_ENABLED, true);

    // Default receive predictor: 512KB, shrunk when direct memory is known so that
    // workerCount predictors together stay within ~30% of direct memory (floor 64KB).
    long defaultReceiverPredictor = 512 * 1024;
    if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
      // we can guess a better default...
      long l =
          (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / workerCount);
      defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
    }

    // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use
    // higher ones for us, even fixed one
    ByteSizeValue receivePredictorMin =
        settings.getAsBytesSize(
            "http.netty.receive_predictor_min",
            settings.getAsBytesSize(
                "http.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
    ByteSizeValue receivePredictorMax =
        settings.getAsBytesSize(
            "http.netty.receive_predictor_max",
            settings.getAsBytesSize(
                "http.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
    if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
      receiveBufferSizePredictorFactory =
          new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
    } else {
      // NOTE(review): min is passed as both the minimum and the initial predictor size;
      // appears intentional — confirm against Netty's (minimum, initial, maximum) ctor.
      receiveBufferSizePredictorFactory =
          new AdaptiveReceiveBufferSizePredictorFactory(
              (int) receivePredictorMin.bytes(),
              (int) receivePredictorMin.bytes(),
              (int) receivePredictorMax.bytes());
    }

    this.compression = settings.getAsBoolean(SETTING_HTTP_COMPRESSION, false);
    this.compressionLevel = settings.getAsInt(SETTING_HTTP_COMPRESSION_LEVEL, 6);
    this.pipelining = settings.getAsBoolean(SETTING_PIPELINING, DEFAULT_SETTING_PIPELINING);
    this.pipeliningMaxEvents =
        settings.getAsInt(SETTING_PIPELINING_MAX_EVENTS, DEFAULT_SETTING_PIPELINING_MAX_EVENTS);

    // validate max content length
    if (maxContentLength.bytes() > Integer.MAX_VALUE) {
      logger.warn(
          "maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]");
      maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
    }
    this.maxContentLength = maxContentLength;

    logger.debug(
        "using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], receive_predictor[{}->{}], pipelining[{}], pipelining_max_events[{}]",
        maxChunkSize,
        maxHeaderSize,
        maxInitialLineLength,
        this.maxContentLength,
        receivePredictorMin,
        receivePredictorMax,
        pipelining,
        pipeliningMaxEvents);
  }
  /**
   * Tomcat-based HTTP transport constructor: resolves SSL/Kerberos security options,
   * addressing, HTTP message limits and TCP socket options from component settings with
   * fallbacks to the generic "http.*" keys, and caps max_content_length at
   * Integer.MAX_VALUE.
   */
  @Inject
  public TomcatHttpServerTransport(
      final Settings settings,
      final Environment environment,
      final NetworkService networkService,
      final ClusterName clusterName,
      final Client client,
      final SecurityService securityService) {
    super(settings);

    this.settings = settings;
    this.securityService = securityService;

    /*
     * TODO check if keep alive is managed by tomcat copy custom headers to
     * response check that user under tomcat/ea is running is not a
     * privilieged iuser tomcat props apply: tomcat.XXX
     */

    // _aliases test with more than one index mapped to an alias

    /*
     *
     * http.max_initial_line_length not respected http.reset_cookies not
     * respected workerCount http.cors.enabled http.cors.allow-origin
     * http.cors.max-age http.cors.allow-methods http.cors.allow-headers
     *
     *
     *
     * http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/
     * modules-network.html
     *
     * http://stackoverflow.com/questions/8038718/serializing-generic-java-
     * object-to-json-using-jackson
     * http://tomcatspnegoad.sourceforge.net/realms.html
     *
     * SSL options
     *
     *
     *
     * Realm options/login waffle, spnego ... security.kerberos.provider:
     * waffle
     *
     * Hardening EA - dynamic script disable
     */

    /*
    *
    *

    *
    *
    *

    */

    // Security toggles: componentSettings keys are relative to this component's prefix
    // (presumably set by the superclass) and fall back to the global security.* keys.
    useSSL =
        componentSettings.getAsBoolean(
            "ssl.enabled", settings.getAsBoolean("security.ssl.enabled", false));

    useClientAuth =
        componentSettings.getAsBoolean(
            "ssl.clientauth.enabled",
            settings.getAsBoolean("security.ssl.clientauth.enabled", false));

    kerberosMode =
        componentSettings.get("kerberos.mode", settings.get("security.kerberos.mode", "none"));

    // Addressing: component keys fall back to the generic http.* keys.
    port = componentSettings.get("port", settings.get("http.port", "8080"));
    bindHost =
        componentSettings.get(
            "bind_host", settings.get("http.bind_host", settings.get("http.host")));
    publishHost =
        componentSettings.get(
            "publish_host", settings.get("http.publish_host", settings.get("http.host")));
    this.networkService = networkService;

    // HTTP message limits.
    ByteSizeValue maxContentLength =
        componentSettings.getAsBytesSize(
            "max_content_length",
            settings.getAsBytesSize(
                "http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB)));
    maxChunkSize =
        componentSettings.getAsBytesSize(
            "max_chunk_size",
            settings.getAsBytesSize("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
    maxHeaderSize =
        componentSettings.getAsBytesSize(
            "max_header_size",
            settings.getAsBytesSize("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB)));

    blockingServer =
        settings.getAsBoolean(
            "http.blocking_server",
            settings.getAsBoolean(TCP_BLOCKING_SERVER, settings.getAsBoolean(TCP_BLOCKING, false)));

    // TCP socket options.
    tcpNoDelay =
        componentSettings.getAsBoolean("tcp_no_delay", settings.getAsBoolean(TCP_NO_DELAY, true));
    tcpKeepAlive =
        componentSettings.getAsBoolean(
            "tcp_keep_alive", settings.getAsBoolean(TCP_KEEP_ALIVE, true));
    reuseAddress =
        componentSettings.getAsBoolean(
            "reuse_address",
            settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
    tcpSendBufferSize =
        componentSettings.getAsBytesSize(
            "tcp_send_buffer_size",
            settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
    tcpReceiveBufferSize =
        componentSettings.getAsBytesSize(
            "tcp_receive_buffer_size",
            settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));

    compression = settings.getAsBoolean("http.compression", false);
    compressionLevel = settings.getAsInt("http.compression_level", 6);

    // validate max content length
    if (maxContentLength.bytes() > Integer.MAX_VALUE) {
      logger.warn(
          "maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]");
      maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
    }
    this.maxContentLength = maxContentLength;

    logger.debug("port: " + port);
    logger.debug("bindHost: " + bindHost);
    logger.debug("publishHost: " + publishHost);

    logger.debug("componentsettings: " + componentSettings.getAsMap());
    logger.debug("settings: " + settings.getAsMap());
  }
    /**
     * Applies dynamically refreshed tiered merge-policy settings.
     *
     * <p>For each tunable (expunge_deletes_allowed, floor_segment, max_merge_at_once,
     * max_merge_at_once_explicit, max_merged_segment, segments_per_tier,
     * reclaim_deletes_weight, compound_format) the new value is compared with the
     * provider's current one; when changed it is logged, stored on the provider, and
     * pushed to every live policy. Finally {@code fixSettingsIfNeeded()} re-validates
     * cross-setting invariants.
     */
    @Override
    public void onRefreshSettings(Settings settings) {
      double expungeDeletesPctAllowed =
          settings.getAsDouble(
              "index.merge.policy.expunge_deletes_allowed",
              TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed);
      if (expungeDeletesPctAllowed != TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed) {
        logger.info(
            "updating [expunge_deletes_allowed] from [{}] to [{}]",
            TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed,
            expungeDeletesPctAllowed);
        TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed = expungeDeletesPctAllowed;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setForceMergeDeletesPctAllowed(expungeDeletesPctAllowed);
        }
      }

      ByteSizeValue floorSegment =
          settings.getAsBytesSize(
              "index.merge.policy.floor_segment", TieredMergePolicyProvider.this.floorSegment);
      if (!floorSegment.equals(TieredMergePolicyProvider.this.floorSegment)) {
        logger.info(
            "updating [floor_segment] from [{}] to [{}]",
            TieredMergePolicyProvider.this.floorSegment,
            floorSegment);
        TieredMergePolicyProvider.this.floorSegment = floorSegment;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setFloorSegmentMB(floorSegment.mbFrac());
        }
      }

      int maxMergeAtOnce =
          settings.getAsInt(
              "index.merge.policy.max_merge_at_once",
              TieredMergePolicyProvider.this.maxMergeAtOnce);
      if (maxMergeAtOnce != TieredMergePolicyProvider.this.maxMergeAtOnce) {
        logger.info(
            "updating [max_merge_at_once] from [{}] to [{}]",
            TieredMergePolicyProvider.this.maxMergeAtOnce,
            maxMergeAtOnce);
        TieredMergePolicyProvider.this.maxMergeAtOnce = maxMergeAtOnce;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setMaxMergeAtOnce(maxMergeAtOnce);
        }
      }

      int maxMergeAtOnceExplicit =
          settings.getAsInt(
              "index.merge.policy.max_merge_at_once_explicit",
              TieredMergePolicyProvider.this.maxMergeAtOnceExplicit);
      if (maxMergeAtOnceExplicit != TieredMergePolicyProvider.this.maxMergeAtOnceExplicit) {
        logger.info(
            "updating [max_merge_at_once_explicit] from [{}] to [{}]",
            TieredMergePolicyProvider.this.maxMergeAtOnceExplicit,
            maxMergeAtOnceExplicit);
        TieredMergePolicyProvider.this.maxMergeAtOnceExplicit = maxMergeAtOnceExplicit;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
        }
      }

      ByteSizeValue maxMergedSegment =
          settings.getAsBytesSize(
              "index.merge.policy.max_merged_segment",
              TieredMergePolicyProvider.this.maxMergedSegment);
      if (!maxMergedSegment.equals(TieredMergePolicyProvider.this.maxMergedSegment)) {
        logger.info(
            "updating [max_merged_segment] from [{}] to [{}]",
            TieredMergePolicyProvider.this.maxMergedSegment,
            maxMergedSegment);
        TieredMergePolicyProvider.this.maxMergedSegment = maxMergedSegment;
        for (CustomTieredMergePolicyProvider policy : policies) {
          // BUG FIX: this previously called setFloorSegmentMB(...), clobbering the
          // floor-segment size with the max_merged_segment value instead of updating
          // the max merged segment size on the policy.
          policy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
        }
      }

      double segmentsPerTier =
          settings.getAsDouble(
              "index.merge.policy.segments_per_tier",
              TieredMergePolicyProvider.this.segmentsPerTier);
      if (segmentsPerTier != TieredMergePolicyProvider.this.segmentsPerTier) {
        logger.info(
            "updating [segments_per_tier] from [{}] to [{}]",
            TieredMergePolicyProvider.this.segmentsPerTier,
            segmentsPerTier);
        TieredMergePolicyProvider.this.segmentsPerTier = segmentsPerTier;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setSegmentsPerTier(segmentsPerTier);
        }
      }

      double reclaimDeletesWeight =
          settings.getAsDouble(
              "index.merge.policy.reclaim_deletes_weight",
              TieredMergePolicyProvider.this.reclaimDeletesWeight);
      if (reclaimDeletesWeight != TieredMergePolicyProvider.this.reclaimDeletesWeight) {
        logger.info(
            "updating [reclaim_deletes_weight] from [{}] to [{}]",
            TieredMergePolicyProvider.this.reclaimDeletesWeight,
            reclaimDeletesWeight);
        TieredMergePolicyProvider.this.reclaimDeletesWeight = reclaimDeletesWeight;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setReclaimDeletesWeight(reclaimDeletesWeight);
        }
      }

      boolean compoundFormat =
          settings.getAsBoolean(
              "index.compound_format", TieredMergePolicyProvider.this.compoundFormat);
      if (compoundFormat != TieredMergePolicyProvider.this.compoundFormat) {
        logger.info(
            "updating index.compound_format from [{}] to [{}]",
            TieredMergePolicyProvider.this.compoundFormat,
            compoundFormat);
        TieredMergePolicyProvider.this.compoundFormat = compoundFormat;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setUseCompoundFile(compoundFormat);
        }
      }

      fixSettingsIfNeeded();
    }