List<String> getIndexSettingsValidationErrors(Settings settings) {
   String customPath = IndexMetaData.INDEX_DATA_PATH_SETTING.get(settings);
   List<String> validationErrors = new ArrayList<>();
   if (Strings.isEmpty(customPath) == false && env.sharedDataFile() == null) {
     validationErrors.add("path.shared_data must be set in order to use custom data paths");
   } else if (Strings.isEmpty(customPath) == false) {
     Path resolvedPath = PathUtils.get(new Path[] {env.sharedDataFile()}, customPath);
     if (resolvedPath == null) {
       validationErrors.add(
           "custom path ["
               + customPath
               + "] is not a sub-path of path.shared_data ["
               + env.sharedDataFile()
               + "]");
     }
   }
   // norelease - this can be removed?
    Integer numberOfPrimaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
    Integer numberOfReplicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null);
    if (numberOfPrimaries != null && numberOfPrimaries <= 0) {
      validationErrors.add("index must have 1 or more primary shards");
    }
    if (numberOfReplicas != null && numberOfReplicas < 0) {
      validationErrors.add("index must have 0 or more replica shards");
    }
   return validationErrors;
 }
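 // Hedged usage sketch (the wrapper name is illustrative, not from the source): callers
 // typically reject the request when the returned list is non-empty.
 void validateIndexSettings(Settings settings) {
   List<String> validationErrors = getIndexSettingsValidationErrors(settings);
   if (validationErrors.isEmpty() == false) {
     throw new IllegalArgumentException("invalid index settings: " + validationErrors);
   }
 }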
 MergeSchedulerConfig(Settings settings) {
   maxThreadCount =
       settings.getAsInt(
           MAX_THREAD_COUNT,
           Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(settings) / 2)));
   maxMergeCount = settings.getAsInt(MAX_MERGE_COUNT, maxThreadCount + 5);
   this.autoThrottle = settings.getAsBoolean(AUTO_THROTTLE, true);
 }
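 // A minimal sketch of the default derived above: half the bounded processor count, clamped
 // to the range [1, 4]; maxMergeCount then defaults to that value plus 5.
 static int defaultMaxMergeThreads(int boundedProcessors) {
   return Math.max(1, Math.min(4, boundedProcessors / 2)); // 1 core -> 1, 4 -> 2, 16 -> 4
 }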
 /**
   * Tries to lock all local shards for the given index. If any of the shard locks can't be
   * acquired, a {@link LockObtainFailedException} is thrown and all previously acquired locks are
   * released.
   *
   * @param index the index to lock shards for
   * @param settings the index settings, used to read the number of shards
   * @param lockTimeoutMS how long to wait for acquiring the index's shard locks
   * @return the {@link ShardLock} instances for this index.
   * @throws IOException if an IOException occurs.
   */
 public List<ShardLock> lockAllForIndex(
     Index index, @IndexSettings Settings settings, long lockTimeoutMS) throws IOException {
   final Integer numShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
   if (numShards == null || numShards <= 0) {
     throw new IllegalArgumentException("settings must contain a non-null > 0 number of shards");
   }
   logger.trace("locking all shards for index {} - [{}]", index, numShards);
   List<ShardLock> allLocks = new ArrayList<>(numShards);
   boolean success = false;
   long startTimeNS = System.nanoTime();
   try {
     for (int i = 0; i < numShards; i++) {
       long timeoutLeftMS =
           Math.max(0, lockTimeoutMS - TimeValue.nsecToMSec((System.nanoTime() - startTimeNS)));
       allLocks.add(shardLock(new ShardId(index, i), timeoutLeftMS));
     }
     success = true;
   } finally {
     if (success == false) {
       logger.trace("unable to lock all shards for index {}", index);
       IOUtils.closeWhileHandlingException(allLocks);
     }
   }
   return allLocks;
 }
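  // Hedged usage sketch: the method above releases shard locks via IOUtils on failure, so a
  // caller would release them the same way once its critical section is done. The wrapper name
  // and the 5s timeout are illustrative.
  void withAllShardsLocked(Index index, Settings indexSettings) throws IOException {
    List<ShardLock> locks = lockAllForIndex(index, indexSettings, 5000);
    try {
      // ... work that requires exclusive access to every shard of the index ...
    } finally {
      IOUtils.closeWhileHandlingException(locks);
    }
  }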
 @Inject
 public LengthTokenFilterFactory(
     IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
   super(indexSettings, name, settings);
   min = settings.getAsInt("min", 0);
   max = settings.getAsInt("max", Integer.MAX_VALUE);
   if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) {
     throw new IllegalArgumentException(
         ENABLE_POS_INC_KEY
             + " is not supported anymore. Please fix your analysis chain or use"
             + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
   }
   enablePositionIncrements =
       version.onOrAfter(Version.LUCENE_4_4)
           ? true
           : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);
 }
 @Inject
 public KeywordTokenizerFactory(
     Index index,
     IndexSettingsService indexSettingsService,
     @Assisted String name,
     @Assisted Settings settings) {
   super(index, indexSettingsService.getSettings(), name, settings);
   bufferSize = settings.getAsInt("buffer_size", 256);
 }
 @Inject
 public StandardTokenizerFactory(
     Index index,
     @IndexSettings Settings indexSettings,
     @Assisted String name,
     @Assisted Settings settings) {
   super(index, indexSettings, name, settings);
   maxTokenLength =
       settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
 }
 private DocIndexMetaData buildDocIndexMetaDataFromTemplate(String index, String templateName) {
   IndexTemplateMetaData indexTemplateMetaData = metaData.getTemplates().get(templateName);
   DocIndexMetaData docIndexMetaData;
   try {
     IndexMetaData.Builder builder = new IndexMetaData.Builder(index);
     builder.putMapping(
         Constants.DEFAULT_MAPPING_TYPE,
         indexTemplateMetaData.getMappings().get(Constants.DEFAULT_MAPPING_TYPE).toString());
     Settings settings = indexTemplateMetaData.settings();
     builder.settings(settings);
     // default values
     builder.numberOfShards(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5));
     builder.numberOfReplicas(settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
     docIndexMetaData = new DocIndexMetaData(functions, builder.build(), ident);
   } catch (IOException e) {
     throw new UnhandledServerException("Unable to build DocIndexMetaData from template", e);
   }
   return docIndexMetaData.build();
 }
  private IndexMetaData(
      String index,
      long version,
      State state,
      Settings settings,
      ImmutableMap<String, MappingMetaData> mappings,
      ImmutableMap<String, AliasMetaData> aliases,
      ImmutableMap<String, Custom> customs) {
    Preconditions.checkArgument(
        settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1) != -1,
        "must specify numberOfShards for index [" + index + "]");
    Preconditions.checkArgument(
        settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1) != -1,
        "must specify numberOfReplicas for index [" + index + "]");
    this.index = index;
    this.version = version;
    this.state = state;
    this.settings = settings;
    this.mappings = mappings;
    this.customs = customs;
    this.totalNumberOfShards = numberOfShards() * (numberOfReplicas() + 1);

    this.aliases = aliases;

    ImmutableMap<String, String> includeMap =
        settings.getByPrefix("index.routing.allocation.include.").getAsMap();
    if (includeMap.isEmpty()) {
      includeFilters = null;
    } else {
      includeFilters = DiscoveryNodeFilters.buildFromKeyValue(includeMap);
    }
    ImmutableMap<String, String> excludeMap =
        settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
    if (excludeMap.isEmpty()) {
      excludeFilters = null;
    } else {
      excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(excludeMap);
    }
  }
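   // Worked example of the shard math above (comment added for illustration): 5 primaries with
   // 1 replica give totalNumberOfShards = 5 * (1 + 1) = 10 shard copies cluster-wide.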
  @Inject
  public PatternTokenizerFactory(
      IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
    super(indexSettings, name, settings);

    String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
    if (sPattern == null) {
      throw new IllegalArgumentException(
          "pattern is missing for [" + name + "] tokenizer of type 'pattern'");
    }

    this.pattern = Regex.compile(sPattern, settings.get("flags"));
    this.group = settings.getAsInt("group", -1);
  }
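   // Hedged example (not from the source) of the settings this factory reads; the key names are
   // taken from the get/getAsInt calls above, and group = -1 means "split on pattern matches".
   // The builder call and flag string are assumptions based on this codebase's conventions.
   static Settings examplePatternTokenizerSettings() {
     return ImmutableSettings.settingsBuilder()
         .put("pattern", ",")
         .put("flags", "CASE_INSENSITIVE")
         .put("group", -1)
         .build();
   }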
 @Override
 public void onRefreshSettings(Settings settings) {
   int minimumMasterNodes =
       settings.getAsInt(
           "discovery.zen.minimum_master_nodes",
           ZenDiscovery.this.electMaster.minimumMasterNodes());
   if (minimumMasterNodes != ZenDiscovery.this.electMaster.minimumMasterNodes()) {
     logger.info(
         "updating discovery.zen.minimum_master_nodes from [{}] to [{}]",
         ZenDiscovery.this.electMaster.minimumMasterNodes(),
         minimumMasterNodes);
     handleMinimumMasterNodesChanged(minimumMasterNodes);
   }
 }
 /** Creates a new {@link org.elasticsearch.index.engine.EngineConfig} */
 public EngineConfig(
     ShardId shardId,
     boolean optimizeAutoGenerateId,
     ThreadPool threadPool,
     ShardIndexingService indexingService,
     IndexSettingsService indexSettingsService,
     IndicesWarmer warmer,
     Store store,
     SnapshotDeletionPolicy deletionPolicy,
     Translog translog,
     MergePolicyProvider mergePolicyProvider,
     MergeSchedulerProvider mergeScheduler,
     Analyzer analyzer,
     Similarity similarity,
     CodecService codecService,
     Engine.FailedEngineListener failedEngineListener) {
   this.shardId = shardId;
   this.optimizeAutoGenerateId = optimizeAutoGenerateId;
   this.threadPool = threadPool;
   this.indexingService = indexingService;
   this.indexSettingsService = indexSettingsService;
   this.warmer = warmer;
   this.store = store;
   this.deletionPolicy = deletionPolicy;
   this.translog = translog;
   this.mergePolicyProvider = mergePolicyProvider;
   this.mergeScheduler = mergeScheduler;
   this.analyzer = analyzer;
   this.similarity = similarity;
   this.codecService = codecService;
   this.failedEngineListener = failedEngineListener;
   Settings indexSettings = indexSettingsService.getSettings();
   this.compoundOnFlush =
       indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
   this.indexConcurrency =
       indexSettings.getAsInt(
           EngineConfig.INDEX_CONCURRENCY_SETTING,
           Math.max(
               IndexWriterConfig.DEFAULT_MAX_THREAD_STATES,
               (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65)));
   codecName =
       indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
   indexingBufferSize =
       indexSettings.getAsBytesSize(INDEX_BUFFER_SIZE_SETTING, DEFAUTL_INDEX_BUFFER_SIZE);
   failEngineOnCorruption = indexSettings.getAsBoolean(INDEX_FAIL_ON_CORRUPTION_SETTING, true);
   failOnMergeFailure = indexSettings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE_SETTING, true);
   gcDeletesInMillis =
       indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis();
 }
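  // A minimal sketch of the index_concurrency default above: 65% of the bounded processor
  // count, floored at IndexWriterConfig.DEFAULT_MAX_THREAD_STATES (8 in Lucene 4.x; the exact
  // constant value is an assumption here, not shown in the source).
  static int defaultIndexConcurrency(int boundedProcessors, int defaultMaxThreadStates) {
    return Math.max(defaultMaxThreadStates, (int) (boundedProcessors * 0.65));
  }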
 @Inject
 public StandardAnalyzerProvider(
     Index index,
     @IndexSettings Settings indexSettings,
     Environment env,
     @Assisted String name,
     @Assisted Settings settings) {
   super(index, indexSettings, name, settings);
   CharArraySet stopWords =
       Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, version);
   int maxTokenLength =
       settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
   standardAnalyzer = new StandardAnalyzer(version, stopWords);
   standardAnalyzer.setMaxTokenLength(maxTokenLength);
 }
 @Inject
 public EdgeNGramTokenizerFactory(
     Index index,
     @IndexSettings Settings indexSettings,
     @Assisted String name,
     @Assisted Settings settings) {
   super(index, indexSettings, name, settings);
   this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
   this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
   this.side =
       Lucene43EdgeNGramTokenizer.Side.getSide(
           settings.get("side", Lucene43EdgeNGramTokenizer.DEFAULT_SIDE.getLabel()));
   this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
   this.esVersion =
       indexSettings.getAsVersion(
           IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT);
 }
  @Inject
  public GatewayService(
      Settings settings,
      AllocationService allocationService,
      ClusterService clusterService,
      ThreadPool threadPool,
      GatewayMetaState metaState,
      TransportNodesListGatewayMetaState listGatewayMetaState,
      Discovery discovery,
      IndicesService indicesService) {
    super(settings);
    this.gateway =
        new Gateway(
            settings, clusterService, metaState, listGatewayMetaState, discovery, indicesService);
    this.allocationService = allocationService;
    this.clusterService = clusterService;
    this.threadPool = threadPool;
     // allow controlling a delay before indices get created
    this.expectedNodes = EXPECTED_NODES_SETTING.get(this.settings);
    this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(this.settings);
    this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(this.settings);

    if (RECOVER_AFTER_TIME_SETTING.exists(this.settings)) {
      recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(this.settings);
    } else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
      recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
    } else {
      recoverAfterTime = null;
    }
    this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(this.settings);
    this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(this.settings);
    // default the recover after master nodes to the minimum master nodes in the discovery
    if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(this.settings)) {
      recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(this.settings);
    } else {
      // TODO: change me once the minimum_master_nodes is changed too
      recoverAfterMasterNodes = settings.getAsInt("discovery.zen.minimum_master_nodes", -1);
    }

     // Add the not-recovered block as an initial state block; we don't allow anything until
     // the state is recovered
    this.clusterService.addInitialStateBlock(STATE_NOT_RECOVERED_BLOCK);
  }
    public static TermsEnum filter(
        TermsEnum toFilter, Terms terms, AtomicReader reader, Settings settings)
        throws IOException {
      int docCount = terms.getDocCount();
      if (docCount == -1) {
        docCount = reader.maxDoc();
      }
      final double minFrequency = settings.getAsDouble("min", 0d);
      final double maxFrequency = settings.getAsDouble("max", docCount + 1d);
       final int minSegmentSize = settings.getAsInt("min_segment_size", 0);
      if (minSegmentSize < docCount) {
        final int minFreq =
            minFrequency >= 1.0 ? (int) minFrequency : (int) (docCount * minFrequency);
        final int maxFreq =
            maxFrequency >= 1.0 ? (int) maxFrequency : (int) (docCount * maxFrequency);
        assert minFreq < maxFreq;
        return new FrequencyFilter(toFilter, minFreq, maxFreq);
      }

      return toFilter;
    }
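    // Hedged helper mirroring the threshold logic above (the helper name is illustrative):
    // values >= 1 are absolute document frequencies, values below 1 are fractions of the
    // doc count.
    static int resolveFrequencyThreshold(double frequency, int docCount) {
      return frequency >= 1.0 ? (int) frequency : (int) (docCount * frequency);
    }
    // e.g. resolveFrequencyThreshold(0.01, 50000) == 500; resolveFrequencyThreshold(5, 50000) == 5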
  @Inject
  public IndicesRequestCache(
      Settings settings, ClusterService clusterService, ThreadPool threadPool) {
    super(settings);
    this.clusterService = clusterService;
    this.threadPool = threadPool;
    this.cleanInterval =
        settings.getAsTime(INDICES_CACHE_REQUEST_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60));

    String size = settings.get(INDICES_CACHE_QUERY_SIZE);
    if (size == null) {
      size = settings.get(DEPRECATED_INDICES_CACHE_QUERY_SIZE);
      if (size != null) {
        deprecationLogger.deprecated(
            "The ["
                + DEPRECATED_INDICES_CACHE_QUERY_SIZE
                + "] settings is now deprecated, use ["
                + INDICES_CACHE_QUERY_SIZE
                + "] instead");
      }
    }
    if (size == null) {
      // this cache can be very small yet still be very effective
      size = "1%";
    }
    this.size = size;

    this.expire = settings.getAsTime(INDICES_CACHE_QUERY_EXPIRE, null);
     // the underlying cache defaults to 4, but this is a busy map shared by all indices, so
     // increase it a bit by default
    this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, 16);
    if (concurrencyLevel <= 0) {
      throw new IllegalArgumentException(
          "concurrency_level must be > 0 but was: " + concurrencyLevel);
    }
    buildCache();

    this.reaper = new Reaper();
    threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper);
  }
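   // Hedged sketch of the size fallback above as a standalone helper (names illustrative):
   // prefer the new key, fall back to the deprecated key, and default to the "1%" used above.
   static String resolveCacheSize(Settings settings, String newKey, String deprecatedKey) {
     String size = settings.get(newKey);
     if (size == null) {
       size = settings.get(deprecatedKey); // the real code also logs a deprecation warning here
     }
     return size != null ? size : "1%";
   }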
  @Before
  public void setupElasticAndRedis() throws Exception {

    Settings globalSettings = settingsBuilder().loadFromClasspath("settings.yml").build();
    String json = copyToStringFromClasspath("/simple-redis-river.json");
    Settings riverSettings = settingsBuilder().loadFromSource(json).build();

    index = riverSettings.get("index.name");
    channel = riverSettings.get("redis.channels").split(",")[0];

    String hostname = riverSettings.get("redis.hostname");
    int port = riverSettings.getAsInt("redis.port", 6379);
    logger.debug("Connecting to Redis [hostname={} port={}]...", hostname, port);
    jedis = new Jedis(hostname, port, 0);
    logger.debug("... connected");

    logger.debug("Starting local elastic...");
    node = nodeBuilder().local(true).settings(globalSettings).node();

    logger.info("Create river [{}]", river);
    node.client().prepareIndex("_river", river, "_meta").setSource(json).execute().actionGet();

    logger.debug("Running Cluster Health");
    ClusterHealthResponse clusterHealth =
        node.client()
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());

    GetResponse response = node.client().prepareGet("_river", river, "_meta").execute().actionGet();
    assertTrue(response.isExists());

    logger.debug("...elasticized ok");
  }
  private ExecutorHolder rebuild(
      String name,
      ExecutorHolder previousExecutorHolder,
      @Nullable Settings settings,
      Settings defaultSettings) {
    if (Names.SAME.equals(name)) {
      // Don't allow changing the "same" thread executor
      return previousExecutorHolder;
    }
    if (settings == null) {
      settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
    }
    Info previousInfo = previousExecutorHolder != null ? previousExecutorHolder.info : null;
    String type =
        settings.get(
            "type", previousInfo != null ? previousInfo.getType() : defaultSettings.get("type"));
    ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, name);
    if ("same".equals(type)) {
      if (previousExecutorHolder != null) {
        logger.debug("updating thread_pool [{}], type [{}]", name, type);
      } else {
        logger.debug("creating thread_pool [{}], type [{}]", name, type);
      }
      return new ExecutorHolder(MoreExecutors.sameThreadExecutor(), new Info(name, type));
    } else if ("cached".equals(type)) {
      TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
      if (previousExecutorHolder != null) {
        if ("cached".equals(previousInfo.getType())) {
          TimeValue updatedKeepAlive =
              settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
          if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
            logger.debug(
                "updating thread_pool [{}], type [{}], keep_alive [{}]",
                name,
                type,
                updatedKeepAlive);
            ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                .setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
            return new ExecutorHolder(
                previousExecutorHolder.executor,
                new Info(name, type, -1, -1, updatedKeepAlive, null));
          }
          return previousExecutorHolder;
        }
        if (previousInfo.getKeepAlive() != null) {
          defaultKeepAlive = previousInfo.getKeepAlive();
        }
      }
      TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
      if (previousExecutorHolder != null) {
        logger.debug(
            "updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
      } else {
        logger.debug(
            "creating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
      }
      Executor executor =
          EsExecutors.newCached(keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
      return new ExecutorHolder(executor, new Info(name, type, -1, -1, keepAlive, null));
    } else if ("fixed".equals(type)) {
      int defaultSize =
          defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
      SizeValue defaultQueueSize =
          defaultSettings.getAsSize("queue", defaultSettings.getAsSize("queue_size", null));

      if (previousExecutorHolder != null) {
        if ("fixed".equals(previousInfo.getType())) {
          SizeValue updatedQueueSize =
              settings.getAsSize(
                  "capacity",
                  settings.getAsSize(
                      "queue", settings.getAsSize("queue_size", previousInfo.getQueueSize())));
          if (Objects.equal(previousInfo.getQueueSize(), updatedQueueSize)) {
            int updatedSize = settings.getAsInt("size", previousInfo.getMax());
            if (previousInfo.getMax() != updatedSize) {
              logger.debug(
                  "updating thread_pool [{}], type [{}], size [{}], queue_size [{}]",
                  name,
                  type,
                  updatedSize,
                  updatedQueueSize);
              ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedSize);
              ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                  .setMaximumPoolSize(updatedSize);
              return new ExecutorHolder(
                  previousExecutorHolder.executor,
                  new Info(name, type, updatedSize, updatedSize, null, updatedQueueSize));
            }
            return previousExecutorHolder;
          }
        }
        if (previousInfo.getMax() >= 0) {
          defaultSize = previousInfo.getMax();
        }
        defaultQueueSize = previousInfo.getQueueSize();
      }

      int size = settings.getAsInt("size", defaultSize);
      SizeValue queueSize =
          settings.getAsSize(
              "capacity",
              settings.getAsSize("queue", settings.getAsSize("queue_size", defaultQueueSize)));
      logger.debug(
          "creating thread_pool [{}], type [{}], size [{}], queue_size [{}]",
          name,
          type,
          size,
          queueSize);
      Executor executor =
          EsExecutors.newFixed(
              size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory);
      return new ExecutorHolder(executor, new Info(name, type, size, size, null, queueSize));
    } else if ("scaling".equals(type)) {
      TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
      int defaultMin = defaultSettings.getAsInt("min", 1);
      int defaultSize =
          defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
      if (previousExecutorHolder != null) {
        if ("scaling".equals(previousInfo.getType())) {
          TimeValue updatedKeepAlive =
              settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
          int updatedMin = settings.getAsInt("min", previousInfo.getMin());
          int updatedSize =
              settings.getAsInt("max", settings.getAsInt("size", previousInfo.getMax()));
          if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)
              || previousInfo.getMin() != updatedMin
              || previousInfo.getMax() != updatedSize) {
            logger.debug(
                "updating thread_pool [{}], type [{}], keep_alive [{}]",
                name,
                type,
                updatedKeepAlive);
            if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
              ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                  .setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
            }
            if (previousInfo.getMin() != updatedMin) {
              ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedMin);
            }
            if (previousInfo.getMax() != updatedSize) {
              ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                  .setMaximumPoolSize(updatedSize);
            }
            return new ExecutorHolder(
                previousExecutorHolder.executor,
                new Info(name, type, updatedMin, updatedSize, updatedKeepAlive, null));
          }
          return previousExecutorHolder;
        }
        if (previousInfo.getKeepAlive() != null) {
          defaultKeepAlive = previousInfo.getKeepAlive();
        }
        if (previousInfo.getMin() >= 0) {
          defaultMin = previousInfo.getMin();
        }
        if (previousInfo.getMax() >= 0) {
          defaultSize = previousInfo.getMax();
        }
      }
      TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
      int min = settings.getAsInt("min", defaultMin);
      int size = settings.getAsInt("max", settings.getAsInt("size", defaultSize));
      if (previousExecutorHolder != null) {
        logger.debug(
            "updating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]",
            name,
            type,
            min,
            size,
            keepAlive);
      } else {
        logger.debug(
            "creating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]",
            name,
            type,
            min,
            size,
            keepAlive);
      }
      Executor executor =
          EsExecutors.newScaling(
              min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
      return new ExecutorHolder(executor, new Info(name, type, min, size, keepAlive, null));
    }
    throw new ElasticSearchIllegalArgumentException(
        "No type found [" + type + "], for [" + name + "]");
  }
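   // Hedged example of dynamic settings the "fixed" branch above consumes; the key names
   // (type, size, queue_size) come straight from the get/getAsInt/getAsSize calls, while the
   // builder call is assumed from this codebase's conventions:
   static Settings exampleFixedPoolSettings() {
     return ImmutableSettings.settingsBuilder()
         .put("type", "fixed")
         .put("size", 8)          // both core and max pool size
         .put("queue_size", "1k") // resolved through capacity -> queue -> queue_size
         .build();
   }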
    @Override
    public final void onRefreshSettings(Settings settings) {
      boolean change = false;
      long gcDeletesInMillis =
          settings
              .getAsTime(
                  EngineConfig.INDEX_GC_DELETES_SETTING,
                  TimeValue.timeValueMillis(config.getGcDeletesInMillis()))
              .millis();
      if (gcDeletesInMillis != config.getGcDeletesInMillis()) {
        logger.info(
            "updating {} from [{}] to [{}]",
            EngineConfig.INDEX_GC_DELETES_SETTING,
            TimeValue.timeValueMillis(config.getGcDeletesInMillis()),
            TimeValue.timeValueMillis(gcDeletesInMillis));
        config.gcDeletesInMillis = gcDeletesInMillis;
        change = true;
      }

      final boolean compoundOnFlush =
          settings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, config.isCompoundOnFlush());
      if (compoundOnFlush != config.isCompoundOnFlush()) {
        logger.info(
            "updating {} from [{}] to [{}]",
            EngineConfig.INDEX_COMPOUND_ON_FLUSH,
            config.isCompoundOnFlush(),
            compoundOnFlush);
        config.compoundOnFlush = compoundOnFlush;
        change = true;
      }

      final boolean failEngineOnCorruption =
          settings.getAsBoolean(
              EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, config.isFailEngineOnCorruption());
      if (failEngineOnCorruption != config.isFailEngineOnCorruption()) {
        logger.info(
            "updating {} from [{}] to [{}]",
            EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING,
            config.isFailEngineOnCorruption(),
            failEngineOnCorruption);
        config.failEngineOnCorruption = failEngineOnCorruption;
        change = true;
      }
      int indexConcurrency =
          settings.getAsInt(EngineConfig.INDEX_CONCURRENCY_SETTING, config.getIndexConcurrency());
      if (indexConcurrency != config.getIndexConcurrency()) {
        logger.info(
            "updating index.index_concurrency from [{}] to [{}]",
            config.getIndexConcurrency(),
            indexConcurrency);
        config.setIndexConcurrency(indexConcurrency);
        // we have to flush in this case, since it only applies on a new index writer
        change = true;
      }
      final String codecName = settings.get(EngineConfig.INDEX_CODEC_SETTING, config.codecName);
      if (!codecName.equals(config.codecName)) {
        logger.info(
            "updating {} from [{}] to [{}]",
            EngineConfig.INDEX_CODEC_SETTING,
            config.codecName,
            codecName);
        config.codecName = codecName;
        // we want to flush in this case, so the new codec will be reflected right away...
        change = true;
      }
      final boolean failOnMergeFailure =
          settings.getAsBoolean(
              EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING, config.isFailOnMergeFailure());
      if (failOnMergeFailure != config.isFailOnMergeFailure()) {
        logger.info(
            "updating {} from [{}] to [{}]",
            EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING,
            config.isFailOnMergeFailure(),
            failOnMergeFailure);
        config.failOnMergeFailure = failOnMergeFailure;
        change = true;
      }

      if (change) {
        onChange();
      }
    }
  @Inject
  public NettyHttpServerTransport(
      Settings settings, NetworkService networkService, BigArrays bigArrays) {
    super(settings);
    this.networkService = networkService;
    this.bigArrays = bigArrays;

    if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
      System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
    }

    ByteSizeValue maxContentLength =
        settings.getAsBytesSize(
            "http.netty.max_content_length",
            settings.getAsBytesSize(
                "http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB)));
    this.maxChunkSize =
        settings.getAsBytesSize(
            "http.netty.max_chunk_size",
            settings.getAsBytesSize("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
    this.maxHeaderSize =
        settings.getAsBytesSize(
            "http.netty.max_header_size",
            settings.getAsBytesSize("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
    this.maxInitialLineLength =
        settings.getAsBytesSize(
            "http.netty.max_initial_line_length",
            settings.getAsBytesSize(
                "http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB)));
    // don't reset cookies by default, since we don't really need to
    // note: cookie parsing was fixed in netty 3.5.1 with regard to stack allocation, but we
    // currently have no need for cookies anyway
    this.resetCookies =
        settings.getAsBoolean(
            "http.netty.reset_cookies", settings.getAsBoolean("http.reset_cookies", false));
    this.maxCumulationBufferCapacity =
        settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null);
    this.maxCompositeBufferComponents =
        settings.getAsInt("http.netty.max_composite_buffer_components", -1);
    this.workerCount =
        settings.getAsInt(
            "http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
    this.blockingServer =
        settings.getAsBoolean(
            "http.netty.http.blocking_server",
            settings.getAsBoolean(TCP_BLOCKING_SERVER, settings.getAsBoolean(TCP_BLOCKING, false)));
    this.port = settings.get("http.netty.port", settings.get("http.port", "9200-9300"));
    this.bindHost =
        settings.get(
            "http.netty.bind_host", settings.get("http.bind_host", settings.get("http.host")));
    this.publishHost =
        settings.get(
            "http.netty.publish_host",
            settings.get("http.publish_host", settings.get("http.host")));
    this.publishPort =
        settings.getAsInt("http.netty.publish_port", settings.getAsInt("http.publish_port", 0));
    this.tcpNoDelay = settings.get("http.netty.tcp_no_delay", settings.get(TCP_NO_DELAY, "true"));
    this.tcpKeepAlive =
        settings.get("http.netty.tcp_keep_alive", settings.get(TCP_KEEP_ALIVE, "true"));
    this.reuseAddress =
        settings.getAsBoolean(
            "http.netty.reuse_address",
            settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
    this.tcpSendBufferSize =
        settings.getAsBytesSize(
            "http.netty.tcp_send_buffer_size",
            settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
    this.tcpReceiveBufferSize =
        settings.getAsBytesSize(
            "http.netty.tcp_receive_buffer_size",
            settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));
    this.detailedErrorsEnabled = settings.getAsBoolean(SETTING_HTTP_DETAILED_ERRORS_ENABLED, true);

    long defaultReceiverPredictor = 512 * 1024;
    if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
      // we can guess a better default...
      long l =
          (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / workerCount);
      defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
    }

    // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for netty's default values; we can use
    // higher ones here, or even a fixed size
    ByteSizeValue receivePredictorMin =
        settings.getAsBytesSize(
            "http.netty.receive_predictor_min",
            settings.getAsBytesSize(
                "http.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
    ByteSizeValue receivePredictorMax =
        settings.getAsBytesSize(
            "http.netty.receive_predictor_max",
            settings.getAsBytesSize(
                "http.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
    if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
      receiveBufferSizePredictorFactory =
          new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
    } else {
      receiveBufferSizePredictorFactory =
          new AdaptiveReceiveBufferSizePredictorFactory(
              (int) receivePredictorMin.bytes(),
              (int) receivePredictorMin.bytes(),
              (int) receivePredictorMax.bytes());
    }

    this.compression = settings.getAsBoolean(SETTING_HTTP_COMPRESSION, false);
    this.compressionLevel = settings.getAsInt(SETTING_HTTP_COMPRESSION_LEVEL, 6);
    this.pipelining = settings.getAsBoolean(SETTING_PIPELINING, DEFAULT_SETTING_PIPELINING);
    this.pipeliningMaxEvents =
        settings.getAsInt(SETTING_PIPELINING_MAX_EVENTS, DEFAULT_SETTING_PIPELINING_MAX_EVENTS);

    // validate max content length
    if (maxContentLength.bytes() > Integer.MAX_VALUE) {
      logger.warn(
          "maxContentLength[" + maxContentLength + "] set to a high value, resetting it to [100mb]");
      maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
    }
    this.maxContentLength = maxContentLength;

    logger.debug(
        "using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], receive_predictor[{}->{}], pipelining[{}], pipelining_max_events[{}]",
        maxChunkSize,
        maxHeaderSize,
        maxInitialLineLength,
        this.maxContentLength,
        receivePredictorMin,
        receivePredictorMax,
        pipelining,
        pipeliningMaxEvents);
  }
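   // Worked sketch of the receive-predictor default computed above: start at 512 KB; when the
   // JVM reports a direct memory limit, cap at 30% of it divided across the workers, floored
   // at 64 KB.
   static long defaultReceiverPredictor(long directMemoryMaxBytes, int workerCount) {
     long dflt = 512 * 1024;
     if (directMemoryMaxBytes > 0) {
       long l = (long) ((0.3 * directMemoryMaxBytes) / workerCount);
       dflt = Math.min(dflt, Math.max(l, 64 * 1024));
     }
     return dflt;
   }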
 public int numberOfReplicas() {
   return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
 }
 public int numberOfShards() {
   return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
 }
  @Inject
  public CacheRecycler(Settings settings) {
    super(settings);
    final Type type = Type.parse(settings.get("type"));
    int limit = settings.getAsInt("limit", 10);
    int smartSize = settings.getAsInt("smart_size", 1024);
    final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);

    hashMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<ObjectObjectOpenHashMap>() {
              @Override
              public ObjectObjectOpenHashMap newInstance(int sizing) {
                return new ObjectObjectOpenHashMap(size(sizing));
              }

              @Override
              public void clear(ObjectObjectOpenHashMap value) {
                value.clear();
              }
            });
    hashSet =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<ObjectOpenHashSet>() {
              @Override
              public ObjectOpenHashSet newInstance(int sizing) {
                return new ObjectOpenHashSet(size(sizing), 0.5f);
              }

              @Override
              public void clear(ObjectOpenHashSet value) {
                value.clear();
              }
            });
    doubleObjectMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<DoubleObjectOpenHashMap>() {
              @Override
              public DoubleObjectOpenHashMap newInstance(int sizing) {
                return new DoubleObjectOpenHashMap(size(sizing));
              }

              @Override
              public void clear(DoubleObjectOpenHashMap value) {
                value.clear();
              }
            });
    longObjectMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<LongObjectOpenHashMap>() {
              @Override
              public LongObjectOpenHashMap newInstance(int sizing) {
                return new LongObjectOpenHashMap(size(sizing));
              }

              @Override
              public void clear(LongObjectOpenHashMap value) {
                value.clear();
              }
            });
    longLongMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<LongLongOpenHashMap>() {
              @Override
              public LongLongOpenHashMap newInstance(int sizing) {
                return new LongLongOpenHashMap(size(sizing));
              }

              @Override
              public void clear(LongLongOpenHashMap value) {
                value.clear();
              }
            });
    intIntMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<IntIntOpenHashMap>() {
              @Override
              public IntIntOpenHashMap newInstance(int sizing) {
                return new IntIntOpenHashMap(size(sizing));
              }

              @Override
              public void clear(IntIntOpenHashMap value) {
                value.clear();
              }
            });
    floatIntMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<FloatIntOpenHashMap>() {
              @Override
              public FloatIntOpenHashMap newInstance(int sizing) {
                return new FloatIntOpenHashMap(size(sizing));
              }

              @Override
              public void clear(FloatIntOpenHashMap value) {
                value.clear();
              }
            });
    doubleIntMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<DoubleIntOpenHashMap>() {
              @Override
              public DoubleIntOpenHashMap newInstance(int sizing) {
                return new DoubleIntOpenHashMap(size(sizing));
              }

              @Override
              public void clear(DoubleIntOpenHashMap value) {
                value.clear();
              }
            });
    longIntMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<LongIntOpenHashMap>() {
              @Override
              public LongIntOpenHashMap newInstance(int sizing) {
                return new LongIntOpenHashMap(size(sizing));
              }

              @Override
              public void clear(LongIntOpenHashMap value) {
                value.clear();
              }
            });
    objectIntMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<ObjectIntOpenHashMap>() {
              @Override
              public ObjectIntOpenHashMap newInstance(int sizing) {
                return new ObjectIntOpenHashMap(size(sizing));
              }

              @Override
              public void clear(ObjectIntOpenHashMap value) {
                value.clear();
              }
            });
    intObjectMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<IntObjectOpenHashMap>() {
              @Override
              public IntObjectOpenHashMap newInstance(int sizing) {
                return new IntObjectOpenHashMap(size(sizing));
              }

              @Override
              public void clear(IntObjectOpenHashMap value) {
                value.clear();
              }
            });
    objectFloatMap =
        build(
            type,
            limit,
            smartSize,
            availableProcessors,
            new Recycler.C<ObjectFloatOpenHashMap>() {
              @Override
              public ObjectFloatOpenHashMap newInstance(int sizing) {
                return new ObjectFloatOpenHashMap(size(sizing));
              }

              @Override
              public void clear(ObjectFloatOpenHashMap value) {
                value.clear();
              }
            });
  }
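   // The anonymous Recycler.C instances above differ only in the collection they construct; a
   // hedged sketch of a generic adapter that would collapse the repetition. IntFunction and
   // Consumer require Java 8, and Recycler.C having exactly these two methods is an assumption
   // based on the overrides shown above.
   static <T> Recycler.C<T> recyclerFor(final IntFunction<T> factory, final Consumer<T> clearer) {
     return new Recycler.C<T>() {
       @Override
       public T newInstance(int sizing) {
         return factory.apply(sizing);
       }

       @Override
       public void clear(T value) {
         clearer.accept(value);
       }
     };
   }
   // usage sketch: build(type, limit, smartSize, availableProcessors,
   //     recyclerFor(sizing -> new IntIntOpenHashMap(size(sizing)), IntIntOpenHashMap::clear));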
    @Override
    public void onRefreshSettings(Settings settings) {
      ByteSizeValue minMergeSize =
          settings.getAsBytesSize(
              "index.merge.policy.min_merge_size",
              LogByteSizeMergePolicyProvider.this.minMergeSize);
      if (!minMergeSize.equals(LogByteSizeMergePolicyProvider.this.minMergeSize)) {
        logger.info(
            "updating min_merge_size from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.minMergeSize,
            minMergeSize);
        LogByteSizeMergePolicyProvider.this.minMergeSize = minMergeSize;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMinMergeMB(minMergeSize.mbFrac());
        }
      }

      ByteSizeValue maxMergeSize =
          settings.getAsBytesSize(
              "index.merge.policy.max_merge_size",
              LogByteSizeMergePolicyProvider.this.maxMergeSize);
      if (!maxMergeSize.equals(LogByteSizeMergePolicyProvider.this.maxMergeSize)) {
        logger.info(
            "updating max_merge_size from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.maxMergeSize,
            maxMergeSize);
        LogByteSizeMergePolicyProvider.this.maxMergeSize = maxMergeSize;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMaxMergeMB(maxMergeSize.mbFrac());
        }
      }

      int maxMergeDocs =
          settings.getAsInt(
              "index.merge.policy.max_merge_docs",
              LogByteSizeMergePolicyProvider.this.maxMergeDocs);
      if (maxMergeDocs != LogByteSizeMergePolicyProvider.this.maxMergeDocs) {
        logger.info(
            "updating max_merge_docs from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.maxMergeDocs,
            maxMergeDocs);
        LogByteSizeMergePolicyProvider.this.maxMergeDocs = maxMergeDocs;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMaxMergeDocs(maxMergeDocs);
        }
      }

      int mergeFactor =
          settings.getAsInt(
              "index.merge.policy.merge_factor", LogByteSizeMergePolicyProvider.this.mergeFactor);
      if (mergeFactor != LogByteSizeMergePolicyProvider.this.mergeFactor) {
        logger.info(
            "updating merge_factor from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.mergeFactor,
            mergeFactor);
        LogByteSizeMergePolicyProvider.this.mergeFactor = mergeFactor;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMergeFactor(mergeFactor);
        }
      }

      boolean compoundFormat =
          settings.getAsBoolean(
              "index.compound_format", LogByteSizeMergePolicyProvider.this.compoundFormat);
      if (compoundFormat != LogByteSizeMergePolicyProvider.this.compoundFormat) {
        logger.info(
            "updating index.compound_format from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.compoundFormat,
            compoundFormat);
        LogByteSizeMergePolicyProvider.this.compoundFormat = compoundFormat;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setUseCompoundFile(compoundFormat);
        }
      }
    }
    public IndexMetaData build() {
      ImmutableOpenMap.Builder<String, AliasMetaData> tmpAliases = aliases;
      Settings tmpSettings = settings;

      // update default mapping on the MappingMetaData
      if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
        MappingMetaData defaultMapping = mappings.get(MapperService.DEFAULT_MAPPING);
        for (ObjectCursor<MappingMetaData> cursor : mappings.values()) {
          cursor.value.updateDefaultMapping(defaultMapping);
        }
      }

      Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
      if (maybeNumberOfShards == null) {
        throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
      }
      int numberOfShards = maybeNumberOfShards;
      if (numberOfShards <= 0) {
        throw new IllegalArgumentException(
            "must specify positive number of shards for index [" + index + "]");
      }

      Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
      if (maybeNumberOfReplicas == null) {
        throw new IllegalArgumentException(
            "must specify numberOfReplicas for index [" + index + "]");
      }
      int numberOfReplicas = maybeNumberOfReplicas;
      if (numberOfReplicas < 0) {
        throw new IllegalArgumentException(
            "must specify non-negative number of shards for index [" + index + "]");
      }

      // fill missing slots in activeAllocationIds with empty set if needed and make all entries
      // immutable
      ImmutableOpenIntMap.Builder<Set<String>> filledActiveAllocationIds =
          ImmutableOpenIntMap.builder();
      for (int i = 0; i < numberOfShards; i++) {
        if (activeAllocationIds.containsKey(i)) {
          filledActiveAllocationIds.put(
              i, Collections.unmodifiableSet(new HashSet<>(activeAllocationIds.get(i))));
        } else {
          filledActiveAllocationIds.put(i, Collections.emptySet());
        }
      }
      final Map<String, String> requireMap =
          INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(settings).getAsMap();
      final DiscoveryNodeFilters requireFilters;
      if (requireMap.isEmpty()) {
        requireFilters = null;
      } else {
        requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
      }
      Map<String, String> includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.get(settings).getAsMap();
      final DiscoveryNodeFilters includeFilters;
      if (includeMap.isEmpty()) {
        includeFilters = null;
      } else {
        includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
      }
      Map<String, String> excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.get(settings).getAsMap();
      final DiscoveryNodeFilters excludeFilters;
      if (excludeMap.isEmpty()) {
        excludeFilters = null;
      } else {
        excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
      }
      Version indexCreatedVersion = Version.indexCreated(settings);
      Version indexUpgradedVersion =
          settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
      String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
      final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
      if (stringLuceneVersion != null) {
        try {
          minimumCompatibleLuceneVersion =
              org.apache.lucene.util.Version.parse(stringLuceneVersion);
        } catch (ParseException ex) {
          throw new IllegalStateException(
              "Cannot parse lucene version ["
                  + stringLuceneVersion
                  + "] in the ["
                  + SETTING_VERSION_MINIMUM_COMPATIBLE
                  + "] setting",
              ex);
        }
      } else {
        minimumCompatibleLuceneVersion = null;
      }

      final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
      return new IndexMetaData(
          new Index(index, uuid),
          version,
          state,
          numberOfShards,
          numberOfReplicas,
          tmpSettings,
          mappings.build(),
          tmpAliases.build(),
          customs.build(),
          filledActiveAllocationIds.build(),
          requireFilters,
          includeFilters,
          excludeFilters,
          indexCreatedVersion,
          indexUpgradedVersion,
          minimumCompatibleLuceneVersion);
    }
  @Override
  public IndexFieldData.WithOrdinals build(
      final IndexReader indexReader,
      IndexFieldData.WithOrdinals indexFieldData,
      Settings settings,
      CircuitBreakerService breakerService)
      throws IOException {
    assert indexReader.leaves().size() > 1;
    long startTime = System.currentTimeMillis();

    // It makes sense to make the overhead ratio configurable for the mapping from segment ords
    // to global ords. However, the other mappings are never the bottleneck and are only used to
    // get the original value from an ord, so it makes sense to force COMPACT for them.
    final float acceptableOverheadRatio =
        settings.getAsFloat("acceptable_overhead_ratio", PackedInts.FAST);
    final AppendingPackedLongBuffer globalOrdToFirstSegment =
        new AppendingPackedLongBuffer(PackedInts.COMPACT);
    final MonotonicAppendingLongBuffer globalOrdToFirstSegmentDelta =
        new MonotonicAppendingLongBuffer(PackedInts.COMPACT);

    FieldDataType fieldDataType = indexFieldData.getFieldDataType();
    int defaultThreshold =
        settings.getAsInt(
            ORDINAL_MAPPING_THRESHOLD_INDEX_SETTING_KEY, ORDINAL_MAPPING_THRESHOLD_DEFAULT);
    int threshold =
        fieldDataType.getSettings().getAsInt(ORDINAL_MAPPING_THRESHOLD_KEY, defaultThreshold);
    OrdinalMappingSourceBuilder ordinalMappingBuilder =
        new OrdinalMappingSourceBuilder(
            indexReader.leaves().size(), acceptableOverheadRatio, threshold);

    long currentGlobalOrdinal = 0;
    final AtomicFieldData.WithOrdinals[] withOrdinals =
        new AtomicFieldData.WithOrdinals[indexReader.leaves().size()];
    TermIterator termIterator =
        new TermIterator(indexFieldData, indexReader.leaves(), withOrdinals);
    for (BytesRef term = termIterator.next(); term != null; term = termIterator.next()) {
      globalOrdToFirstSegment.add(termIterator.firstReaderIndex());
      long globalOrdinalDelta = currentGlobalOrdinal - termIterator.firstLocalOrdinal();
      globalOrdToFirstSegmentDelta.add(globalOrdinalDelta);
      for (TermIterator.LeafSource leafSource : termIterator.competitiveLeafs()) {
        ordinalMappingBuilder.onOrdinal(
            leafSource.context.ord, leafSource.tenum.ord(), currentGlobalOrdinal);
      }
      currentGlobalOrdinal++;
    }

    // ram used for the globalOrd to segmentOrd and segmentOrd to firstReaderIndex lookups
    long memorySizeInBytesCounter = 0;
    globalOrdToFirstSegment.freeze();
    memorySizeInBytesCounter += globalOrdToFirstSegment.ramBytesUsed();
    globalOrdToFirstSegmentDelta.freeze();
    memorySizeInBytesCounter += globalOrdToFirstSegmentDelta.ramBytesUsed();

    final long maxOrd = currentGlobalOrdinal;
    OrdinalMappingSource[] segmentOrdToGlobalOrdLookups = ordinalMappingBuilder.build(maxOrd);
    // add ram used for the main segmentOrd to globalOrd lookups
    memorySizeInBytesCounter += ordinalMappingBuilder.getMemorySizeInBytes();

    final long memorySizeInBytes = memorySizeInBytesCounter;
    breakerService.getBreaker().addWithoutBreaking(memorySizeInBytes);

    if (logger.isDebugEnabled()) {
      // the impl name includes the [] suffix from the array type
      String implName = segmentOrdToGlobalOrdLookups.getClass().getSimpleName();
      logger.debug(
          "Global-ordinals[{}][{}][{}] took {} ms",
          implName,
          indexFieldData.getFieldNames().fullName(),
          maxOrd,
          (System.currentTimeMillis() - startTime));
    }
    return new InternalGlobalOrdinalsIndexFieldData(
        indexFieldData.index(),
        settings,
        indexFieldData.getFieldNames(),
        fieldDataType,
        withOrdinals,
        globalOrdToFirstSegment,
        globalOrdToFirstSegmentDelta,
        segmentOrdToGlobalOrdLookups,
        memorySizeInBytes);
  }
  @Inject
  public TomcatHttpServerTransport(
      final Settings settings,
      final Environment environment,
      final NetworkService networkService,
      final ClusterName clusterName,
      final Client client,
      final SecurityService securityService) {
    super(settings);

    this.settings = settings;
    this.securityService = securityService;

    /*
     * TODO:
     *  - check if keep-alive is managed by tomcat
     *  - copy custom headers to the response
     *  - check that the user tomcat/ea runs under is not a privileged user
     *  - apply tomcat props: tomcat.XXX
     */

    // _aliases test with more than one index mapped to an alias

    /*
     * Not respected yet: http.max_initial_line_length, http.reset_cookies, workerCount,
     * http.cors.enabled, http.cors.allow-origin, http.cors.max-age, http.cors.allow-methods,
     * http.cors.allow-headers
     *
     * http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-network.html
     * http://stackoverflow.com/questions/8038718/serializing-generic-java-object-to-json-using-jackson
     * http://tomcatspnegoad.sourceforge.net/realms.html
     *
     * SSL options
     *
     * Realm options/login: waffle, spnego ... security.kerberos.provider: waffle
     *
     * Hardening EA - dynamic script disable
     */

    useSSL =
        componentSettings.getAsBoolean(
            "ssl.enabled", settings.getAsBoolean("security.ssl.enabled", false));

    useClientAuth =
        componentSettings.getAsBoolean(
            "ssl.clientauth.enabled",
            settings.getAsBoolean("security.ssl.clientauth.enabled", false));

    kerberosMode =
        componentSettings.get("kerberos.mode", settings.get("security.kerberos.mode", "none"));

    port = componentSettings.get("port", settings.get("http.port", "8080"));
    bindHost =
        componentSettings.get(
            "bind_host", settings.get("http.bind_host", settings.get("http.host")));
    publishHost =
        componentSettings.get(
            "publish_host", settings.get("http.publish_host", settings.get("http.host")));
    this.networkService = networkService;

    ByteSizeValue maxContentLength =
        componentSettings.getAsBytesSize(
            "max_content_length",
            settings.getAsBytesSize(
                "http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB)));
    maxChunkSize =
        componentSettings.getAsBytesSize(
            "max_chunk_size",
            settings.getAsBytesSize("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
    maxHeaderSize =
        componentSettings.getAsBytesSize(
            "max_header_size",
            settings.getAsBytesSize("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB)));

    blockingServer =
        settings.getAsBoolean(
            "http.blocking_server",
            settings.getAsBoolean(TCP_BLOCKING_SERVER, settings.getAsBoolean(TCP_BLOCKING, false)));

    tcpNoDelay =
        componentSettings.getAsBoolean("tcp_no_delay", settings.getAsBoolean(TCP_NO_DELAY, true));
    tcpKeepAlive =
        componentSettings.getAsBoolean(
            "tcp_keep_alive", settings.getAsBoolean(TCP_KEEP_ALIVE, true));
    reuseAddress =
        componentSettings.getAsBoolean(
            "reuse_address",
            settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
    tcpSendBufferSize =
        componentSettings.getAsBytesSize(
            "tcp_send_buffer_size",
            settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
    tcpReceiveBufferSize =
        componentSettings.getAsBytesSize(
            "tcp_receive_buffer_size",
            settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));

    compression = settings.getAsBoolean("http.compression", false);
    compressionLevel = settings.getAsInt("http.compression_level", 6);

    // validate max content length
    if (maxContentLength.bytes() > Integer.MAX_VALUE) {
      logger.warn(
          "maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]");
      maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
    }
    this.maxContentLength = maxContentLength;

    logger.debug("port: " + port);
    logger.debug("bindHost: " + bindHost);
    logger.debug("publishHost: " + publishHost);

    logger.debug("componentsettings: " + componentSettings.getAsMap());
    logger.debug("settings: " + settings.getAsMap());
  }
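  /*
   * A minimal sketch (not part of the transport above): the constructor
   * resolves every option with a two-level fallback -- component-scoped
   * setting first, then the global setting, then a hard-coded default. The
   * helper below is hypothetical and uses plain java.util.Map in place of the
   * real Settings API to keep the example self-contained.
   */
  static final class FallbackResolutionSketch {
    static String resolve(
        java.util.Map<String, String> componentSettings,
        java.util.Map<String, String> settings,
        String componentKey,
        String globalKey,
        String defaultValue) {
      // The component-scoped value wins if present ...
      String value = componentSettings.get(componentKey);
      if (value != null) {
        return value;
      }
      // ... otherwise fall back to the global value ...
      value = settings.get(globalKey);
      if (value != null) {
        return value;
      }
      // ... and finally to the default.
      return defaultValue;
    }

    public static void main(String[] args) {
      java.util.Map<String, String> component = java.util.Map.of();
      java.util.Map<String, String> global = java.util.Map.of("http.port", "9090");
      // Prints "9090": no component override, so the global value applies.
      System.out.println(resolve(component, global, "port", "http.port", "8080"));
    }
  }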
  @Inject
  public CouchbaseCAPITransportImpl(
      Settings settings,
      RestController restController,
      NetworkService networkService,
      IndicesService indicesService,
      MetaDataMappingService metaDataMappingService,
      Client client) {
    super(settings);
    this.networkService = networkService;
    this.indicesService = indicesService;
    this.metaDataMappingService = metaDataMappingService;
    this.client = client;
    this.port = settings.get("couchbase.port", "9091-10091");
    this.bindHost = componentSettings.get("bind_host");
    this.publishHost = componentSettings.get("publish_host");
    this.username = settings.get("couchbase.username", "Administrator");
    this.password = settings.get("couchbase.password", "");

    this.bucketUUIDCacheEvictMs = settings.getAsLong("couchbase.bucketUUIDCacheEvictMs", 300000L);
    this.bucketUUIDCache =
        CacheBuilder.newBuilder()
            .expireAfterWrite(this.bucketUUIDCacheEvictMs, TimeUnit.MILLISECONDS)
            .build();

    int defaultNumVbuckets = 1024;
    if (System.getProperty("os.name").toLowerCase(java.util.Locale.ROOT).contains("mac")) {
      logger.info("Detected platform is Mac, changing default num_vbuckets to 64");
      defaultNumVbuckets = 64;
    }
    this.numVbuckets = settings.getAsInt("couchbase.num_vbuckets", defaultNumVbuckets);

    pluginSettings = new PluginSettings();
    pluginSettings.setCheckpointDocumentType(
        settings.get(
            "couchbase.typeSelector.checkpointDocumentType",
            PluginSettings.DEFAULT_DOCUMENT_TYPE_CHECKPOINT));
    pluginSettings.setDynamicTypePath(settings.get("couchbase.dynamicTypePath"));
    pluginSettings.setResolveConflicts(settings.getAsBoolean("couchbase.resolveConflicts", true));
    pluginSettings.setWrapCounters(settings.getAsBoolean("couchbase.wrapCounters", false));
    pluginSettings.setMaxConcurrentRequests(
        settings.getAsLong("couchbase.maxConcurrentRequests", 1024L));
    pluginSettings.setBulkIndexRetries(settings.getAsLong("couchbase.bulkIndexRetries", 10L));
    pluginSettings.setBulkIndexRetryWaitMs(
        settings.getAsLong("couchbase.bulkIndexRetryWaitMs", 1000L));
    pluginSettings.setIgnoreDeletes(
        new ArrayList<String>(
            Arrays.asList(settings.get("couchbase.ignoreDeletes", "").split("[:,;\\s]"))));
    pluginSettings.getIgnoreDeletes().removeAll(Arrays.asList("", null));
    pluginSettings.setIgnoreFailures(settings.getAsBoolean("couchbase.ignoreFailures", false));
    pluginSettings.setDocumentTypeRoutingFields(
        settings.getByPrefix("couchbase.documentTypeRoutingFields.").getAsMap());
    pluginSettings.setIgnoreDotIndexes(settings.getAsBoolean("couchbase.ignoreDotIndexes", true));
    pluginSettings.setIncludeIndexes(
        new ArrayList<String>(
            Arrays.asList(settings.get("couchbase.includeIndexes", "").split("[:,;\\s]"))));
    pluginSettings.getIncludeIndexes().removeAll(Arrays.asList("", null));

    TypeSelector typeSelector;
    Class<? extends TypeSelector> typeSelectorClass =
        settings.<TypeSelector>getAsClass("couchbase.typeSelector", DefaultTypeSelector.class);
    try {
      typeSelector = typeSelectorClass.newInstance();
    } catch (Exception e) {
      throw new ElasticsearchException("couchbase.typeSelector", e);
    }
    typeSelector.configure(settings);
    pluginSettings.setTypeSelector(typeSelector);

    ParentSelector parentSelector;
    Class<? extends ParentSelector> parentSelectorClass =
        settings.<ParentSelector>getAsClass(
            "couchbase.parentSelector", DefaultParentSelector.class);
    try {
      parentSelector = parentSelectorClass.newInstance();
    } catch (Exception e) {
      throw new ElasticsearchException("couchbase.parentSelector", e);
    }
    parentSelector.configure(settings);
    pluginSettings.setParentSelector(parentSelector);

    KeyFilter keyFilter;
    Class<? extends KeyFilter> keyFilterClass =
        settings.<KeyFilter>getAsClass("couchbase.keyFilter", DefaultKeyFilter.class);
    try {
      keyFilter = keyFilterClass.newInstance();
    } catch (Exception e) {
      throw new ElasticsearchException("couchbase.keyFilter", e);
    }
    keyFilter.configure(settings);
    pluginSettings.setKeyFilter(keyFilter);

    // Log settings info
    logger.info(
        "Couchbase transport will ignore delete/expiration operations for these buckets: {}",
        pluginSettings.getIgnoreDeletes());
    logger.info(
        "Couchbase transport will ignore indexing failures and not throw exception to Couchbase: {}",
        pluginSettings.getIgnoreFailures());
    logger.info(
        "Couchbase transport is using type selector: {}",
        typeSelector.getClass().getCanonicalName());
    logger.info(
        "Couchbase transport is using parent selector: {}",
        parentSelector.getClass().getCanonicalName());
    logger.info(
        "Couchbase transport is using key filter: {}", keyFilter.getClass().getCanonicalName());
    for (String key : pluginSettings.getDocumentTypeRoutingFields().keySet()) {
      String routingField = pluginSettings.getDocumentTypeRoutingFields().get(key);
      logger.info("Using field {} as routing for type {}", routingField, key);
    }
    logger.info("Plugin Settings: {}", pluginSettings.toString());
  }
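  /*
   * A minimal sketch (not part of the plugin above): the type selector, parent
   * selector, and key filter all follow one pattern -- resolve a class with a
   * default, instantiate it reflectively via its no-arg constructor, then let
   * the instance configure itself from the settings. All names below
   * (Strategy, DefaultStrategy, instantiate) are hypothetical stand-ins.
   */
  static final class PluggableStrategySketch {
    interface Strategy {
      void configure(java.util.Map<String, String> settings);
    }

    static final class DefaultStrategy implements Strategy {
      @Override
      public void configure(java.util.Map<String, String> settings) {
        // the default implementation needs no configuration
      }
    }

    static Strategy instantiate(String className, java.util.Map<String, String> settings) {
      try {
        Class<? extends Strategy> clazz =
            className == null
                ? DefaultStrategy.class
                : Class.forName(className).asSubclass(Strategy.class);
        Strategy strategy = clazz.getDeclaredConstructor().newInstance();
        strategy.configure(settings);
        return strategy;
      } catch (ReflectiveOperationException e) {
        // Mirror the plugin: wrap any instantiation failure with a descriptive message.
        throw new IllegalStateException("failed to instantiate [" + className + "]", e);
      }
    }
    // Usage: instantiate(null, java.util.Map.of()) yields a configured DefaultStrategy.
  }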
  @Inject
  @SuppressForbidden(reason = "System.out.*")
  public NodeEnvironment(Settings settings, Environment environment) throws IOException {
    super(settings);

    this.addNodeId = settings.getAsBoolean(ADD_NODE_ID_TO_CUSTOM_PATH, true);
    this.customPathsEnabled = settings.getAsBoolean(SETTING_CUSTOM_DATA_PATH_ENABLED, false);

    if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) {
      nodePaths = null;
      locks = null;
      localNodeId = -1;
      return;
    }

    final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
    final Lock[] locks = new Lock[nodePaths.length];

    int localNodeId = -1;
    IOException lastException = null;
    int maxLocalStorageNodes = settings.getAsInt("node.max_local_storage_nodes", 50);
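    // Scan candidate lock ids 0..maxLocalStorageNodes-1: an id is claimed only
    // if the "nodes/<id>" directory can be locked under every data path; on any
    // failure the partially acquired locks are released and the next id is tried.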
    for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) {
      for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) {
        Path dir =
            environment
                .dataWithClusterFiles()[dirIndex]
                .resolve(NODES_FOLDER)
                .resolve(Integer.toString(possibleLockId));
        Files.createDirectories(dir);

        try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
          logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
          try {
            locks[dirIndex] = Lucene.acquireLock(luceneDir, NODE_LOCK_FILENAME, 0);
            nodePaths[dirIndex] = new NodePath(dir, environment);
            localNodeId = possibleLockId;
          } catch (LockObtainFailedException ex) {
            logger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
            // release all the ones that were obtained up until now
            releaseAndNullLocks(locks);
            break;
          }

        } catch (IOException e) {
          logger.trace("failed to obtain node lock on {}", e, dir.toAbsolutePath());
          lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
          // release all the ones that were obtained up until now
          releaseAndNullLocks(locks);
          break;
        }
      }
      if (locks[0] != null) {
        // we found a lock, break
        break;
      }
    }

    if (locks[0] == null) {
      throw new IllegalStateException(
          "Failed to obtain node lock, is the following location writable?: "
              + Arrays.toString(environment.dataWithClusterFiles()),
          lastException);
    }

    this.localNodeId = localNodeId;
    this.locks = locks;
    this.nodePaths = nodePaths;

    if (logger.isDebugEnabled()) {
      logger.debug("using node location [{}], local_node_id [{}]", nodePaths, localNodeId);
    }

    maybeLogPathDetails();

    if (settings.getAsBoolean(SETTING_ENABLE_LUCENE_SEGMENT_INFOS_TRACE, false)) {
      SegmentInfos.setInfoStream(System.out);
    }
  }
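  /*
   * A minimal sketch (not part of NodeEnvironment above) of the same lock-scan
   * shape, using java.nio file locks in place of Lucene's NativeFSLockFactory.
   * An id is claimed only if "nodes/<id>" can be locked under every data path;
   * partially acquired locks are released before the next id is tried. All
   * names here are illustrative.
   */
  static final class NodeLockScanSketch {
    static int acquireNodeId(java.nio.file.Path[] dataPaths, int maxLocalStorageNodes)
        throws java.io.IOException {
      for (int id = 0; id < maxLocalStorageNodes; id++) {
        java.nio.channels.FileLock[] held = new java.nio.channels.FileLock[dataPaths.length];
        boolean allLocked = true;
        for (int i = 0; i < dataPaths.length; i++) {
          java.nio.file.Path dir = dataPaths[i].resolve("nodes").resolve(Integer.toString(id));
          java.nio.file.Files.createDirectories(dir);
          java.nio.channels.FileChannel channel =
              java.nio.channels.FileChannel.open(
                  dir.resolve("node.lock"),
                  java.nio.file.StandardOpenOption.CREATE,
                  java.nio.file.StandardOpenOption.WRITE);
          java.nio.channels.FileLock lock;
          try {
            lock = channel.tryLock(); // null if another process holds the lock
          } catch (java.nio.channels.OverlappingFileLockException e) {
            lock = null; // already held within this JVM
          }
          if (lock == null) {
            channel.close();
            // release everything acquired for this id before trying the next one
            for (java.nio.channels.FileLock h : held) {
              if (h != null) {
                h.channel().close(); // closing the channel releases the lock
              }
            }
            allLocked = false;
            break;
          }
          held[i] = lock;
        }
        if (allLocked) {
          return id; // locks stay held for the lifetime of the process
        }
      }
      throw new IllegalStateException(
          "failed to obtain a node lock under " + java.util.Arrays.toString(dataPaths));
    }
  }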
    @Override
    public void onRefreshSettings(Settings settings) {
      double expungeDeletesPctAllowed =
          settings.getAsDouble(
              "index.merge.policy.expunge_deletes_allowed",
              TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed);
      if (expungeDeletesPctAllowed != TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed) {
        logger.info(
            "updating [expunge_deletes_allowed] from [{}] to [{}]",
            TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed,
            expungeDeletesPctAllowed);
        TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed = expungeDeletesPctAllowed;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setForceMergeDeletesPctAllowed(expungeDeletesPctAllowed);
        }
      }

      ByteSizeValue floorSegment =
          settings.getAsBytesSize(
              "index.merge.policy.floor_segment", TieredMergePolicyProvider.this.floorSegment);
      if (!floorSegment.equals(TieredMergePolicyProvider.this.floorSegment)) {
        logger.info(
            "updating [floor_segment] from [{}] to [{}]",
            TieredMergePolicyProvider.this.floorSegment,
            floorSegment);
        TieredMergePolicyProvider.this.floorSegment = floorSegment;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setFloorSegmentMB(floorSegment.mbFrac());
        }
      }

      int maxMergeAtOnce =
          settings.getAsInt(
              "index.merge.policy.max_merge_at_once",
              TieredMergePolicyProvider.this.maxMergeAtOnce);
      if (maxMergeAtOnce != TieredMergePolicyProvider.this.maxMergeAtOnce) {
        logger.info(
            "updating [max_merge_at_once] from [{}] to [{}]",
            TieredMergePolicyProvider.this.maxMergeAtOnce,
            maxMergeAtOnce);
        TieredMergePolicyProvider.this.maxMergeAtOnce = maxMergeAtOnce;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setMaxMergeAtOnce(maxMergeAtOnce);
        }
      }

      int maxMergeAtOnceExplicit =
          settings.getAsInt(
              "index.merge.policy.max_merge_at_once_explicit",
              TieredMergePolicyProvider.this.maxMergeAtOnceExplicit);
      if (maxMergeAtOnceExplicit != TieredMergePolicyProvider.this.maxMergeAtOnceExplicit) {
        logger.info(
            "updating [max_merge_at_once_explicit] from [{}] to [{}]",
            TieredMergePolicyProvider.this.maxMergeAtOnceExplicit,
            maxMergeAtOnceExplicit);
        TieredMergePolicyProvider.this.maxMergeAtOnceExplicit = maxMergeAtOnceExplicit;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
        }
      }

      ByteSizeValue maxMergedSegment =
          settings.getAsBytesSize(
              "index.merge.policy.max_merged_segment",
              TieredMergePolicyProvider.this.maxMergedSegment);
      if (!maxMergedSegment.equals(TieredMergePolicyProvider.this.maxMergedSegment)) {
        logger.info(
            "updating [max_merged_segment] from [{}] to [{}]",
            TieredMergePolicyProvider.this.maxMergedSegment,
            maxMergedSegment);
        TieredMergePolicyProvider.this.maxMergedSegment = maxMergedSegment;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
        }
      }

      double segmentsPerTier =
          settings.getAsDouble(
              "index.merge.policy.segments_per_tier",
              TieredMergePolicyProvider.this.segmentsPerTier);
      if (segmentsPerTier != TieredMergePolicyProvider.this.segmentsPerTier) {
        logger.info(
            "updating [segments_per_tier] from [{}] to [{}]",
            TieredMergePolicyProvider.this.segmentsPerTier,
            segmentsPerTier);
        TieredMergePolicyProvider.this.segmentsPerTier = segmentsPerTier;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setSegmentsPerTier(segmentsPerTier);
        }
      }

      double reclaimDeletesWeight =
          settings.getAsDouble(
              "index.merge.policy.reclaim_deletes_weight",
              TieredMergePolicyProvider.this.reclaimDeletesWeight);
      if (reclaimDeletesWeight != TieredMergePolicyProvider.this.reclaimDeletesWeight) {
        logger.info(
            "updating [reclaim_deletes_weight] from [{}] to [{}]",
            TieredMergePolicyProvider.this.reclaimDeletesWeight,
            reclaimDeletesWeight);
        TieredMergePolicyProvider.this.reclaimDeletesWeight = reclaimDeletesWeight;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setReclaimDeletesWeight(reclaimDeletesWeight);
        }
      }

      boolean compoundFormat =
          settings.getAsBoolean(
              "index.compound_format", TieredMergePolicyProvider.this.compoundFormat);
      if (compoundFormat != TieredMergePolicyProvider.this.compoundFormat) {
        logger.info(
            "updating index.compound_format from [{}] to [{}]",
            TieredMergePolicyProvider.this.compoundFormat,
            compoundFormat);
        TieredMergePolicyProvider.this.compoundFormat = compoundFormat;
        for (CustomTieredMergePolicyProvider policy : policies) {
          policy.setUseCompoundFile(compoundFormat);
        }
      }

      fixSettingsIfNeeded();
    }
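    /*
     * A minimal sketch (not part of the provider above): every block in
     * onRefreshSettings follows the same compare-log-update shape -- read the
     * candidate with the current value as the default, and only on a real
     * change log the transition and push the new value to each policy. The
     * generic helper below is hypothetical; the provider inlines the pattern
     * because each setting has a different setter.
     */
    static final class SettingsRefreshSketch {
      static <T> T updateIfChanged(
          String name, T current, T candidate, java.util.function.Consumer<T> applyToPolicies) {
        if (candidate.equals(current)) {
          return current; // unchanged: no log entry, no update
        }
        System.out.printf("updating [%s] from [%s] to [%s]%n", name, current, candidate);
        applyToPolicies.accept(candidate); // e.g. policy.setSegmentsPerTier(candidate)
        return candidate;
      }
    }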