@Override
 ScalingExecutorSettings getSettings(Settings settings) {
   // Snapshot the scaling-pool configuration (node name, core/max pool sizes, keep-alive)
   // from the given settings into an immutable settings holder. Arguments are read in the
   // same order the original locals were assigned.
   return new ScalingExecutorSettings(
       Node.NODE_NAME_SETTING.get(settings),
       coreSetting.get(settings),
       maxSetting.get(settings),
       keepAliveSetting.get(settings));
 }
 /** Cluster-scope TCP socket options shared by the network transports (legacy two-arg scope API). */
 public static final class TcpSettings {
   // TCP_NODELAY on by default: disables Nagle's algorithm, favoring latency for small messages.
   public static final Setting<Boolean> TCP_NO_DELAY =
       Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER);
   // SO_KEEPALIVE probes enabled by default.
   public static final Setting<Boolean> TCP_KEEP_ALIVE =
       Setting.boolSetting("network.tcp.keep_alive", true, false, Setting.Scope.CLUSTER);
   // SO_REUSEADDR; default is platform-dependent (delegated to NetworkUtils).
   public static final Setting<Boolean> TCP_REUSE_ADDRESS =
       Setting.boolSetting(
           "network.tcp.reuse_address",
           NetworkUtils.defaultReuseAddress(),
           false,
           Setting.Scope.CLUSTER);
   // Socket buffer sizes; -1 presumably leaves the OS default in place — confirm at use site.
   public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE =
       Setting.byteSizeSetting(
           "network.tcp.send_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
   public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE =
       Setting.byteSizeSetting(
           "network.tcp.receive_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
   // Blocking vs non-blocking socket I/O; server/client variants default to the shared value.
   public static final Setting<Boolean> TCP_BLOCKING =
       Setting.boolSetting("network.tcp.blocking", false, false, Setting.Scope.CLUSTER);
   public static final Setting<Boolean> TCP_BLOCKING_SERVER =
       Setting.boolSetting(
           "network.tcp.blocking_server", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
   public static final Setting<Boolean> TCP_BLOCKING_CLIENT =
       Setting.boolSetting(
           "network.tcp.blocking_client", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
   // Connection establishment timeout, 30 seconds by default.
   public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT =
       Setting.timeSetting(
           "network.tcp.connect_timeout",
           new TimeValue(30, TimeUnit.SECONDS),
           false,
           Setting.Scope.CLUSTER);
 }
 /**
  * Sets up cluster-level shard allocation filtering: seeds the require/exclude/include filters
  * from the node's startup settings and subscribes to dynamic updates of those same settings.
  */
 public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
   super(settings);
   // Initial filter values from startup settings.
   setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.get(settings));
   setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.get(settings));
   setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.get(settings));
   // Keep each filter set in sync with dynamic cluster-settings updates.
   clusterSettings.addSettingsUpdateConsumer(
       CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters);
   clusterSettings.addSettingsUpdateConsumer(
       CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters);
   clusterSettings.addSettingsUpdateConsumer(
       CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters);
 }
 /**
  * Looks up the registered {@link Transport} factory for the configured transport type.
  *
  * @return the factory registered under the effective {@code transport.type}
  * @throws IllegalStateException if no factory was registered under that type
  */
 public Supplier<Transport> getTransportSupplier() {
   // An explicit "transport.type" wins; otherwise fall back to the default type setting.
   final String type =
       TRANSPORT_TYPE_SETTING.exists(settings)
           ? TRANSPORT_TYPE_SETTING.get(settings)
           : TRANSPORT_DEFAULT_TYPE_SETTING.get(settings);
   final Supplier<Transport> supplier = transportFactories.get(type);
   if (supplier != null) {
     return supplier;
   }
   throw new IllegalStateException("Unsupported transport.type [" + type + "]");
 }
 /**
  * Looks up the registered {@link HttpServerTransport} factory for the configured HTTP type.
  *
  * @return the factory registered under the effective {@code http.type}
  * @throws IllegalStateException if no factory was registered under that type
  */
 public Supplier<HttpServerTransport> getHttpServerTransportSupplier() {
   // An explicit "http.type" wins; otherwise fall back to the default type setting.
   final String type =
       HTTP_TYPE_SETTING.exists(settings)
           ? HTTP_TYPE_SETTING.get(settings)
           : HTTP_DEFAULT_TYPE_SETTING.get(settings);
   final Supplier<HttpServerTransport> supplier = transportHttpFactories.get(type);
   if (supplier != null) {
     return supplier;
   }
   throw new IllegalStateException("Unsupported http.type [" + type + "]");
 }
 /**
  * Construct a scaling executor builder; the settings will have the specified key prefix.
  *
  * @param name the name of the executor
  * @param core the minimum number of threads in the pool
  * @param max the maximum number of threads in the pool
  * @param keepAlive the time that spare threads above {@code core} threads will be kept alive
  * @param prefix the prefix for the settings keys
  */
 public ScalingExecutorBuilder(
     final String name,
     final int core,
     final int max,
     final TimeValue keepAlive,
     final String prefix) {
   super(name);
   // Expose each pool parameter as its own node-scope setting (keys built via settingsKey with
   // the given prefix); the constructor arguments act as the defaults.
   this.coreSetting =
       Setting.intSetting(settingsKey(prefix, "core"), core, Setting.Property.NodeScope);
   this.maxSetting =
       Setting.intSetting(settingsKey(prefix, "max"), max, Setting.Property.NodeScope);
   this.keepAliveSetting =
       Setting.timeSetting(
           settingsKey(prefix, "keep_alive"), keepAlive, Setting.Property.NodeScope);
 }
 /** Reads the minimum-master-nodes configuration and derives the minimum electable version. */
 @Inject
 public ElectMasterService(Settings settings, Version version) {
   super(settings);
   // minimumCompatibilityVersion() — presumably the oldest master version this node can
   // cooperate with; confirm against Version semantics.
   this.minMasterVersion = version.minimumCompatibilityVersion();
   this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings);
   logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes);
 }
/**
 * A base class for read operations that needs to be performed on the master node. The operation
 * may instead run on the local node when the request asks for it, or when the node-wide
 * {@code action.master.force_local} setting is enabled.
 */
public abstract class TransportMasterNodeReadAction<
        Request extends MasterNodeReadRequest<Request>, Response extends ActionResponse>
    extends TransportMasterNodeAction<Request, Response> {

  /** Node-scope escape hatch that forces every read of this type to execute locally. */
  public static final Setting<Boolean> FORCE_LOCAL_SETTING =
      Setting.boolSetting("action.master.force_local", false, Property.NodeScope);

  private final boolean forceLocal;

  protected TransportMasterNodeReadAction(
      Settings settings,
      String actionName,
      TransportService transportService,
      ClusterService clusterService,
      ThreadPool threadPool,
      ActionFilters actionFilters,
      IndexNameExpressionResolver indexNameExpressionResolver,
      Supplier<Request> request) {
    super(
        settings,
        actionName,
        transportService,
        clusterService,
        threadPool,
        actionFilters,
        indexNameExpressionResolver,
        request);
    // Read once at construction: the setting is node-scope only (not declared dynamic).
    this.forceLocal = FORCE_LOCAL_SETTING.get(settings);
  }

  @Override
  protected final boolean localExecute(Request request) {
    // The node-wide override wins; otherwise defer to the per-request flag.
    if (forceLocal) {
      return true;
    }
    return request.local();
  }
}
 /**
  * Reads the initial concurrent-rebalance limit and registers for dynamic updates of
  * {@code cluster.routing.allocation.cluster_concurrent_rebalance}.
  */
 public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
   super(settings);
   this.clusterConcurrentRebalance =
       CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings);
   logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance);
   // Apply later dynamic changes to the limit without a node restart.
   clusterSettings.addSettingsUpdateConsumer(
       CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
       this::setClusterConcurrentRebalance);
 }
Esempio n. 10
0
  /**
   * Builds the process monitoring service: captures static process info once and seeds the
   * process-stats cache, which refreshes at the configured interval.
   */
  public ProcessService(Settings settings) {
    super(settings);
    this.probe = ProcessProbe.getInstance();

    final TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
    // Seed the cache with an initial stats sample from the probe.
    processStatsCache = new ProcessStatsCache(refreshInterval, probe.processStats());
    this.info = probe.processInfo();
    // Record the effective refresh interval on the info object (exposed in millis).
    this.info.refreshInterval = refreshInterval.millis();
    logger.debug("using refresh_interval [{}]", refreshInterval);
  }
  /**
   * Base wiring for node fault detection: reads the ping/retry configuration and optionally
   * registers a transport connection listener.
   */
  public FaultDetection(
      Settings settings,
      ThreadPool threadPool,
      TransportService transportService,
      ClusterName clusterName) {
    super(settings);
    this.threadPool = threadPool;
    this.transportService = transportService;
    this.clusterName = clusterName;

    // Fault-detection knobs, read once at construction time.
    this.connectOnNetworkDisconnect = CONNECT_ON_NETWORK_DISCONNECT_SETTING.get(settings);
    this.pingInterval = PING_INTERVAL_SETTING.get(settings);
    this.pingRetryTimeout = PING_TIMEOUT_SETTING.get(settings);
    this.pingRetryCount = PING_RETRIES_SETTING.get(settings);
    this.registerConnectionListener = REGISTER_CONNECTION_LISTENER_SETTING.get(settings);

    this.connectionListener = new FDConnectionListener();
    // Listening for connection events is optional and controlled by a setting.
    if (registerConnectionListener) {
      transportService.addConnectionListener(connectionListener);
    }
  }
Esempio n. 12
0
 /**
  * Manages hunspell dictionaries: pre-registered dictionaries are kept as-is, while on-disk
  * dictionaries are either loaded eagerly now or lazily via the loading function, depending on
  * the lazy-load setting.
  */
 public HunspellService(
     final Settings settings,
     final Environment env,
     final Map<String, Dictionary> knownDictionaries)
     throws IOException {
   super(settings);
   this.knownDictionaries = Collections.unmodifiableMap(knownDictionaries);
   this.hunspellDir = resolveHunspellDirectory(env);
   this.defaultIgnoreCase = HUNSPELL_IGNORE_CASE.get(settings);
   // On-demand loader; wraps any failure (including Errors — deliberate Throwable catch) in a
   // consistent unchecked exception carrying the locale.
   this.loadingFunction =
       (locale) -> {
         try {
           return loadDictionary(locale, settings, env);
         } catch (Throwable e) {
           throw new IllegalStateException(
               "failed to load hunspell dictionary for locale: " + locale, e);
         }
       };
   // Unless lazy loading is requested, scan the hunspell directory and load everything now.
   if (!HUNSPELL_LAZY_LOAD.get(settings)) {
     scanAndLoadDictionaries();
   }
 }
Esempio n. 13
0
  /**
   * Loads the hunspell dictionary for the given local.
   *
   * @param locale The locale of the hunspell dictionary to be loaded.
   * @param nodeSettings The node level settings
   * @param env The node environment (from which the conf path will be resolved)
   * @return The loaded Hunspell dictionary
   * @throws Exception when loading fails (due to IO errors or malformed dictionary files)
   */
  private Dictionary loadDictionary(String locale, Settings nodeSettings, Environment env)
      throws Exception {
    if (logger.isDebugEnabled()) {
      logger.debug("Loading hunspell dictionary [{}]...", locale);
    }
    // Each locale has its own sub-directory under the hunspell config directory.
    Path dicDir = hunspellDir.resolve(locale);
    if (FileSystemUtils.isAccessibleDirectory(dicDir, logger) == false) {
      throw new ElasticsearchException(
          String.format(Locale.ROOT, "Could not find hunspell dictionary [%s]", locale));
    }

    // merging node settings with hunspell dictionary specific settings
    Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings);
    nodeSettings = loadDictionarySettings(dicDir, dictSettings.getByPrefix(locale));

    boolean ignoreCase = nodeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);

    // Exactly one affix (*.aff) file is required per dictionary directory.
    Path[] affixFiles = FileSystemUtils.files(dicDir, "*.aff");
    if (affixFiles.length == 0) {
      throw new ElasticsearchException(
          String.format(Locale.ROOT, "Missing affix file for hunspell dictionary [%s]", locale));
    }
    if (affixFiles.length != 1) {
      throw new ElasticsearchException(
          String.format(
              Locale.ROOT, "Too many affix files exist for hunspell dictionary [%s]", locale));
    }
    InputStream affixStream = null;

    // Any number of *.dic files may be present; all are fed to the Dictionary together.
    Path[] dicFiles = FileSystemUtils.files(dicDir, "*.dic");
    List<InputStream> dicStreams = new ArrayList<>(dicFiles.length);
    try {

      for (int i = 0; i < dicFiles.length; i++) {
        dicStreams.add(Files.newInputStream(dicFiles[i]));
      }

      affixStream = Files.newInputStream(affixFiles[0]);

      // Temp directory handed to Lucene as scratch space while building the dictionary.
      try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) {
        return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase);
      }

    } catch (Exception e) {
      // NOTE(review): the Throwable is passed before the format argument — presumably the
      // legacy ESLogger (message, cause, params...) overload; confirm against the logger API.
      logger.error("Could not load hunspell dictionary [{}]", e, locale);
      throw e;
    } finally {
      // All opened streams are closed whether loading succeeded or failed.
      IOUtils.close(affixStream);
      IOUtils.close(dicStreams);
    }
  }
/**
 * An {@link AllocationDecider} that, like {@link ClusterRebalanceAllocationDecider}, gates shard
 * re-balancing: it caps the number of relocation operations that may be in flight across the
 * cluster at once. The limit defaults to <tt>2</tt>, can be updated in real-time via the cluster
 * update API using <tt>cluster.routing.allocation.cluster_concurrent_rebalance</tt>, and a value
 * of <tt>-1</tt> removes the cap entirely.
 */
public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {

  public static final String NAME = "concurrent_rebalance";

  public static final Setting<Integer>
      CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING =
          Setting.intSetting(
              "cluster.routing.allocation.cluster_concurrent_rebalance",
              2,
              -1,
              Property.Dynamic,
              Property.NodeScope);

  // Volatile: read on allocation threads, written by the settings-update consumer.
  private volatile int clusterConcurrentRebalance;

  public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
    super(settings);
    clusterConcurrentRebalance =
        CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings);
    logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance);
    // Track dynamic updates so the limit can change without a node restart.
    clusterSettings.addSettingsUpdateConsumer(
        CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
        this::setClusterConcurrentRebalance);
  }

  private void setClusterConcurrentRebalance(int concurrentRebalance) {
    this.clusterConcurrentRebalance = concurrentRebalance;
  }

  @Override
  public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
    // -1 is the sentinel for "no limit".
    if (clusterConcurrentRebalance == -1) {
      return allocation.decision(Decision.YES, NAME, "unlimited concurrent rebalances are allowed");
    }
    final int relocatingShards = allocation.routingNodes().getRelocatingShardCount();
    if (relocatingShards < clusterConcurrentRebalance) {
      return allocation.decision(
          Decision.YES,
          NAME,
          "below threshold [%d] for concurrent rebalances, current rebalance shard count [%d]",
          clusterConcurrentRebalance,
          relocatingShards);
    }
    return allocation.decision(
        Decision.NO,
        NAME,
        "too many shards are concurrently rebalancing [%d], limit: [%d]",
        relocatingShards,
        clusterConcurrentRebalance);
  }
}
Esempio n. 15
0
  /**
   * Wires initial cluster state recovery: resolves the expected/recover-after node thresholds
   * and the recovery delay from settings, and installs the initial "not recovered" cluster block.
   */
  @Inject
  public GatewayService(
      Settings settings,
      AllocationService allocationService,
      ClusterService clusterService,
      ThreadPool threadPool,
      GatewayMetaState metaState,
      TransportNodesListGatewayMetaState listGatewayMetaState,
      Discovery discovery,
      IndicesService indicesService) {
    super(settings);
    this.gateway =
        new Gateway(
            settings, clusterService, metaState, listGatewayMetaState, discovery, indicesService);
    this.allocationService = allocationService;
    this.clusterService = clusterService;
    this.threadPool = threadPool;
    // allow to control a delay of when indices will get created
    this.expectedNodes = EXPECTED_NODES_SETTING.get(this.settings);
    this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(this.settings);
    this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(this.settings);

    // Recovery delay resolution: an explicit recover_after_time wins; otherwise, if any
    // expected-node count is configured, apply the default delay; with no expectations at all
    // there is no delay (null).
    if (RECOVER_AFTER_TIME_SETTING.exists(this.settings)) {
      recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(this.settings);
    } else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
      recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
    } else {
      recoverAfterTime = null;
    }
    this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(this.settings);
    this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(this.settings);
    // default the recover after master nodes to the minimum master nodes in the discovery
    if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(this.settings)) {
      recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(this.settings);
    } else {
      // TODO: change me once the minimum_master_nodes is changed too
      recoverAfterMasterNodes = settings.getAsInt("discovery.zen.minimum_master_nodes", -1);
    }

    // Add the not recovered as initial state block, we don't allow anything until
    this.clusterService.addInitialStateBlock(STATE_NOT_RECOVERED_BLOCK);
  }
 /**
  * Resolve the custom path for a index's shard. Uses the {@code IndexMetaData.SETTING_DATA_PATH}
  * setting to determine the root path for the index.
  *
  * @param indexSettings settings for the index
  * @throws IllegalArgumentException if the index has no custom data path configured
  */
 public Path resolveBaseCustomLocation(IndexSettings indexSettings) {
   final String customDataDir = indexSettings.customDataPath();
   if (customDataDir == null) {
     throw new IllegalArgumentException(
         "no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available");
   }
   // This assert is because this should be caught by MetaDataCreateIndexService
   assert sharedDataPath != null;
   final Path base = sharedDataPath.resolve(customDataDir);
   // Optionally append the node lock id — presumably so multiple nodes can share the same
   // custom data path without colliding; confirm.
   return ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.get(indexSettings.getNodeSettings())
       ? base.resolve(Integer.toString(this.nodeLockId))
       : base;
 }
 /**
  * Builds the percolator field mapper; keeps references to the helper field mappers (query
  * terms, extraction result, serialized query builder — names suggest their roles; confirm at
  * the usage sites) and the shard context used for query parsing.
  */
 public PercolatorFieldMapper(
     String simpleName,
     MappedFieldType fieldType,
     MappedFieldType defaultFieldType,
     Settings indexSettings,
     MultiFields multiFields,
     CopyTo copyTo,
     QueryShardContext queryShardContext,
     KeywordFieldMapper queryTermsField,
     KeywordFieldMapper extractionResultField,
     BinaryFieldMapper queryBuilderField) {
   super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
   this.queryShardContext = queryShardContext;
   this.queryTermsField = queryTermsField;
   this.extractionResultField = extractionResultField;
   this.queryBuilderField = queryBuilderField;
   // Index-level toggle read once from the index settings.
   this.mapUnmappedFieldAsString = INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings);
 }
 /** Sets up the TTL purger thread and its bulk-action dependencies. */
 @Inject
 public IndicesTTLService(
     Settings settings,
     ClusterService clusterService,
     IndicesService indicesService,
     ClusterSettings clusterSettings,
     TransportBulkAction bulkAction) {
   super(settings);
   this.clusterService = clusterService;
   this.indicesService = indicesService;
   TimeValue interval = INDICES_TTL_INTERVAL_SETTING.get(settings);
   this.bulkAction = bulkAction;
   // NOTE(review): read from raw settings rather than a registered Setting constant —
   // presumably legacy; confirm whether this should become a proper Setting.
   this.bulkSize = this.settings.getAsInt("indices.ttl.bulk_size", 10000);
   this.purgerThread =
       new PurgerThread(EsExecutors.threadName(settings, "[ttl_expire]"), interval);
   // The purge interval is dynamically updatable; the purger thread picks up changes.
   clusterSettings.addSettingsUpdateConsumer(
       INDICES_TTL_INTERVAL_SETTING, this.purgerThread::resetInterval);
 }
 /**
  * Creates a network module that custom networking classes can be plugged into.
  *
  * <p>For each plugin this registers (a) its HTTP server transports — only on non-transport-client
  * nodes with HTTP enabled, (b) its node-to-node transports, and (c) its transport interceptors.
  *
  * @param settings The settings for the node
  * @param transportClient True if only transport classes should be allowed to be registered, false
  *     otherwise.
  */
 public NetworkModule(
     Settings settings,
     boolean transportClient,
     List<NetworkPlugin> plugins,
     ThreadPool threadPool,
     BigArrays bigArrays,
     CircuitBreakerService circuitBreakerService,
     NamedWriteableRegistry namedWriteableRegistry,
     NetworkService networkService) {
   this.settings = settings;
   this.transportClient = transportClient;
   for (NetworkPlugin plugin : plugins) {
     // HTTP transports only apply to full nodes with HTTP enabled; transport clients never
     // serve HTTP.
     if (transportClient == false && HTTP_ENABLED.get(settings)) {
       Map<String, Supplier<HttpServerTransport>> pluginHttpTransportFactories =
           plugin.getHttpTransports(
               settings,
               threadPool,
               bigArrays,
               circuitBreakerService,
               namedWriteableRegistry,
               networkService);
       for (Map.Entry<String, Supplier<HttpServerTransport>> entry :
           pluginHttpTransportFactories.entrySet()) {
         registerHttpTransport(entry.getKey(), entry.getValue());
       }
     }
     // Fix: renamed from "httpTransportFactory" — this map holds node-to-node Transport
     // factories, not HTTP ones; the old name was misleading.
     Map<String, Supplier<Transport>> pluginTransportFactories =
         plugin.getTransports(
             settings,
             threadPool,
             bigArrays,
             circuitBreakerService,
             namedWriteableRegistry,
             networkService);
     for (Map.Entry<String, Supplier<Transport>> entry : pluginTransportFactories.entrySet()) {
       registerTransport(entry.getKey(), entry.getValue());
     }
     List<TransportInterceptor> transportInterceptors =
         plugin.getTransportInterceptors(namedWriteableRegistry);
     for (TransportInterceptor interceptor : transportInterceptors) {
       registerTransportInterceptor(interceptor);
     }
   }
 }
Esempio n. 20
0
/** Node-level service exposing static process information and cached process statistics. */
public final class ProcessService extends AbstractComponent {

  private final ProcessProbe probe;
  // Static process information, captured once at construction.
  private final ProcessInfo info;
  // Stats are sampled through the probe and cached between refreshes.
  private final SingleObjectCache<ProcessStats> processStatsCache;

  // How often cached process stats may be refreshed; the second 1s value is presumably the
  // minimum allowed — confirm against Setting.timeSetting's parameter order.
  public static final Setting<TimeValue> REFRESH_INTERVAL_SETTING =
      Setting.timeSetting(
          "monitor.process.refresh_interval",
          TimeValue.timeValueSeconds(1),
          TimeValue.timeValueSeconds(1),
          Property.NodeScope);

  public ProcessService(Settings settings) {
    super(settings);
    this.probe = ProcessProbe.getInstance();

    final TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
    // Seed the cache with an initial sample so the first stats() call has data.
    processStatsCache = new ProcessStatsCache(refreshInterval, probe.processStats());
    this.info = probe.processInfo();
    this.info.refreshInterval = refreshInterval.millis();
    logger.debug("using refresh_interval [{}]", refreshInterval);
  }

  /** Returns the static process information captured at startup. */
  public ProcessInfo info() {
    return this.info;
  }

  /** Returns current process stats; refresh policy is governed by {@link SingleObjectCache}. */
  public ProcessStats stats() {
    return processStatsCache.getOrRefresh();
  }

  /** Cache adapter that re-samples process stats from the probe on refresh. */
  private class ProcessStatsCache extends SingleObjectCache<ProcessStats> {
    public ProcessStatsCache(TimeValue interval, ProcessStats initValue) {
      super(interval, initValue);
    }

    @Override
    protected ProcessStats refresh() {
      return probe.processStats();
    }
  }
}
  /**
   * Creates the cluster service with an empty initial cluster state (replaced on {@code doStart})
   * and registers for dynamic updates of the slow-task logging threshold.
   */
  public ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
    super(settings);
    this.operationRouting = new OperationRouting(settings, clusterSettings);
    this.threadPool = threadPool;
    this.clusterSettings = clusterSettings;
    this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
    // will be replaced on doStart.
    this.clusterState = ClusterState.builder(clusterName).build();

    // Dynamic: operators can tune the slow-task logging threshold at runtime.
    this.clusterSettings.addSettingsUpdateConsumer(
        CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold);

    this.slowTaskLoggingThreshold =
        CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings);

    localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);

    initialBlocks = ClusterBlocks.builder();
  }
 /**
  * Forwards all wiring to the parent master-node action and caches the
  * {@code action.master.force_local} flag used to decide local execution of reads.
  */
 protected TransportMasterNodeReadAction(
     Settings settings,
     String actionName,
     TransportService transportService,
     ClusterService clusterService,
     ThreadPool threadPool,
     ActionFilters actionFilters,
     IndexNameExpressionResolver indexNameExpressionResolver,
     Supplier<Request> request) {
   super(
       settings,
       actionName,
       transportService,
       clusterService,
       threadPool,
       actionFilters,
       indexNameExpressionResolver,
       request);
   // Read once: the force-local override is a node-level startup setting.
   this.forceLocal = FORCE_LOCAL_SETTING.get(settings);
 }
  /**
   * Reads all recovery-related timeouts and the max-bytes-per-second throttle, and subscribes to
   * dynamic updates for each of them.
   */
  @Inject
  public RecoverySettings(Settings settings, ClusterSettings clusterSettings) {
    super(settings);

    this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings);
    // doesn't have to be fast as nodes are reconnected every 10s by default (see
    // InternalClusterService.ReconnectToNodes)
    // and we want to give the master time to remove a faulty node
    this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings);

    this.internalActionTimeout = INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(settings);
    this.internalActionLongTimeout =
        INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.get(settings);

    this.activityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings);
    this.maxBytesPerSec = INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.get(settings);
    // A non-positive throttle disables recovery rate limiting entirely.
    if (maxBytesPerSec.getBytes() <= 0) {
      rateLimiter = null;
    } else {
      rateLimiter = new SimpleRateLimiter(maxBytesPerSec.getMbFrac());
    }

    logger.debug("using max_bytes_per_sec[{}]", maxBytesPerSec);

    // Every knob above is dynamically updatable through the cluster settings API.
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, this::setInternalActionLongTimeout);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout);
  }
Esempio n. 24
0
 /**
  * Resolves the processor count used for sizing thread pools: the value of the "processors"
  * setting when present, otherwise {@link Runtime#availableProcessors()} (the setting's default).
  *
  * @param settings a {@link Settings} instance from which to derive the available processors
  * @return the number of available processors
  */
 public static int numberOfProcessors(final Settings settings) {
   final int processors = PROCESSORS_SETTING.get(settings);
   return processors;
 }
Esempio n. 25
0
/**
 * Factory methods for the thread pools, thread factories and thread names used by the node.
 * Cannot be instantiated.
 */
public class EsExecutors {

  /**
   * Settings key to manually set the number of available processors. This is used to adjust thread
   * pools sizes etc. per node.
   */
  public static final Setting<Integer> PROCESSORS_SETTING =
      Setting.intSetting(
          "processors", Runtime.getRuntime().availableProcessors(), 1, Property.NodeScope);

  /**
   * Returns the number of available processors. Defaults to {@link Runtime#availableProcessors()}
   * but can be overridden by passing a {@link Settings} instance with the key "processors" set to
   * the desired value.
   *
   * @param settings a {@link Settings} instance from which to derive the available processors
   * @return the number of available processors
   */
  public static int numberOfProcessors(final Settings settings) {
    return PROCESSORS_SETTING.get(settings);
  }

  /** Creates a single-threaded, priority-ordered executor. */
  public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(
      String name, ThreadFactory threadFactory, ThreadContext contextHolder) {
    return new PrioritizedEsThreadPoolExecutor(
        name, 1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory, contextHolder);
  }

  /**
   * Creates an executor that scales between {@code min} and {@code max} threads. The
   * {@link ExecutorScalingQueue}/{@link ForceQueuePolicy} pair makes the pool prefer spawning
   * threads up to max before queuing work (see {@link ExecutorScalingQueue#offer}).
   */
  public static EsThreadPoolExecutor newScaling(
      String name,
      int min,
      int max,
      long keepAliveTime,
      TimeUnit unit,
      ThreadFactory threadFactory,
      ThreadContext contextHolder) {
    ExecutorScalingQueue<Runnable> queue = new ExecutorScalingQueue<>();
    EsThreadPoolExecutor executor =
        new EsThreadPoolExecutor(
            name,
            min,
            max,
            keepAliveTime,
            unit,
            queue,
            threadFactory,
            new ForceQueuePolicy(),
            contextHolder);
    // Back-reference: offer() consults the executor's pool sizes to decide whether to queue.
    queue.executor = executor;
    return executor;
  }

  /**
   * Creates a fixed-size executor. A negative {@code queueCapacity} yields an unbounded queue;
   * otherwise the queue is capped with a {@link SizeBlockingQueue} and rejections are handled by
   * {@link EsAbortPolicy}.
   */
  public static EsThreadPoolExecutor newFixed(
      String name,
      int size,
      int queueCapacity,
      ThreadFactory threadFactory,
      ThreadContext contextHolder) {
    BlockingQueue<Runnable> queue;
    if (queueCapacity < 0) {
      queue = ConcurrentCollections.newBlockingQueue();
    } else {
      queue =
          new SizeBlockingQueue<>(
              ConcurrentCollections.<Runnable>newBlockingQueue(), queueCapacity);
    }
    return new EsThreadPoolExecutor(
        name,
        size,
        size,
        0,
        TimeUnit.MILLISECONDS,
        queue,
        threadFactory,
        new EsAbortPolicy(),
        contextHolder);
  }

  /** Joins the non-null name parts as "[a.b.c]" and delegates to the two-arg overload. */
  public static String threadName(Settings settings, String... names) {
    String namePrefix =
        Arrays.stream(names)
            .filter(name -> name != null)
            .collect(Collectors.joining(".", "[", "]"));
    return threadName(settings, namePrefix);
  }

  /** Resolves the node name from settings (empty string when unset) for the thread name. */
  public static String threadName(Settings settings, String namePrefix) {
    if (Node.NODE_NAME_SETTING.exists(settings)) {
      return threadName(Node.NODE_NAME_SETTING.get(settings), namePrefix);
    } else {
      return threadName("", namePrefix);
    }
  }

  /** Builds "elasticsearch[nodeName][namePrefix]", omitting the node-name brackets if empty. */
  public static String threadName(final String nodeName, final String namePrefix) {
    return "elasticsearch"
        + (nodeName.isEmpty() ? "" : "[")
        + nodeName
        + (nodeName.isEmpty() ? "" : "]")
        + "["
        + namePrefix
        + "]";
  }

  /** Daemon thread factory named from the settings-derived thread name. */
  public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) {
    return daemonThreadFactory(threadName(settings, namePrefix));
  }

  /** Daemon thread factory named from the settings-derived thread name (joined name parts). */
  public static ThreadFactory daemonThreadFactory(Settings settings, String... names) {
    return daemonThreadFactory(threadName(settings, names));
  }

  /** Daemon thread factory with the given literal name prefix. */
  public static ThreadFactory daemonThreadFactory(String namePrefix) {
    return new EsThreadFactory(namePrefix);
  }

  /** Produces daemon threads named "&lt;prefix&gt;[T#&lt;n&gt;]". */
  static class EsThreadFactory implements ThreadFactory {

    final ThreadGroup group;
    final AtomicInteger threadNumber = new AtomicInteger(1);
    final String namePrefix;

    public EsThreadFactory(String namePrefix) {
      this.namePrefix = namePrefix;
      SecurityManager s = System.getSecurityManager();
      // Use the security manager's thread group when one is installed.
      group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup();
    }

    @Override
    public Thread newThread(Runnable r) {
      Thread t = new Thread(group, r, namePrefix + "[T#" + threadNumber.getAndIncrement() + "]", 0);
      t.setDaemon(true);
      return t;
    }
  }

  /** Cannot instantiate. */
  private EsExecutors() {}

  /**
   * A {@link LinkedTransferQueue} that declines to queue while the owning executor still has
   * headroom below its maximum pool size, forcing it to spawn new workers first (paired with
   * {@link ForceQueuePolicy}, which then queues the rejected task).
   */
  static class ExecutorScalingQueue<E> extends LinkedTransferQueue<E> {

    // Set by newScaling() immediately after the executor is constructed.
    ThreadPoolExecutor executor;

    public ExecutorScalingQueue() {}

    @Override
    public boolean offer(E e) {
      // first try to transfer to a waiting worker thread
      if (!tryTransfer(e)) {
        // check if there might be spare capacity in the thread
        // pool executor
        int left = executor.getMaximumPoolSize() - executor.getCorePoolSize();
        if (left > 0) {
          // reject queuing the task to force the thread pool
          // executor to add a worker if it can; combined
          // with ForceQueuePolicy, this causes the thread
          // pool to always scale up to max pool size and we
          // only queue when there is no spare capacity
          return false;
        } else {
          return super.offer(e);
        }
      } else {
        return true;
      }
    }
  }

  /**
   * A handler for rejected tasks that adds the specified element to this queue, waiting if
   * necessary for space to become available.
   */
  static class ForceQueuePolicy implements XRejectedExecutionHandler {
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
      try {
        executor.getQueue().put(r);
      } catch (InterruptedException e) {
        // should never happen since we never wait
        throw new EsRejectedExecutionException(e);
      }
    }

    @Override
    public long rejected() {
      // Presumably a rejection counter for stats; always zero here since tasks are re-queued.
      return 0;
    }
  }
}
Esempio n. 26
0
/**
 * Coordinates initial cluster state recovery from the {@link Gateway} on the elected master.
 * Recovery is triggered once the configured {@code gateway.recover_after_*} node thresholds are
 * satisfied, possibly delayed by {@code gateway.recover_after_time} when the
 * {@code gateway.expected_*} counts have not yet been reached; until then the cluster carries
 * the {@link #STATE_NOT_RECOVERED_BLOCK} global block.
 */
public class GatewayService extends AbstractLifecycleComponent implements ClusterStateListener {

  // Node-count thresholds; -1 means "not configured".
  public static final Setting<Integer> EXPECTED_NODES_SETTING =
      Setting.intSetting("gateway.expected_nodes", -1, -1, Property.NodeScope);
  public static final Setting<Integer> EXPECTED_DATA_NODES_SETTING =
      Setting.intSetting("gateway.expected_data_nodes", -1, -1, Property.NodeScope);
  public static final Setting<Integer> EXPECTED_MASTER_NODES_SETTING =
      Setting.intSetting("gateway.expected_master_nodes", -1, -1, Property.NodeScope);
  public static final Setting<TimeValue> RECOVER_AFTER_TIME_SETTING =
      Setting.positiveTimeSetting(
          "gateway.recover_after_time", TimeValue.timeValueMillis(0), Property.NodeScope);
  public static final Setting<Integer> RECOVER_AFTER_NODES_SETTING =
      Setting.intSetting("gateway.recover_after_nodes", -1, -1, Property.NodeScope);
  public static final Setting<Integer> RECOVER_AFTER_DATA_NODES_SETTING =
      Setting.intSetting("gateway.recover_after_data_nodes", -1, -1, Property.NodeScope);
  public static final Setting<Integer> RECOVER_AFTER_MASTER_NODES_SETTING =
      Setting.intSetting("gateway.recover_after_master_nodes", 0, 0, Property.NodeScope);

  // Global block applied at startup and removed once state recovery succeeds.
  public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK =
      new ClusterBlock(
          1,
          "state not recovered / initialized",
          true,
          true,
          RestStatus.SERVICE_UNAVAILABLE,
          ClusterBlockLevel.ALL);

  // Fallback delay used when expected_* is set but recover_after_time is not.
  public static final TimeValue DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET =
      TimeValue.timeValueMinutes(5);

  private final Gateway gateway;

  private final ThreadPool threadPool;

  private final AllocationService allocationService;

  private final ClusterService clusterService;

  private final TimeValue recoverAfterTime;
  private final int recoverAfterNodes;
  private final int expectedNodes;
  private final int recoverAfterDataNodes;
  private final int expectedDataNodes;
  private final int recoverAfterMasterNodes;
  private final int expectedMasterNodes;

  // Guards: recovery runs at most once; a delayed recovery is scheduled at most once.
  // Both are reset in GatewayRecoveryListener.onFailure so recovery can be retried.
  private final AtomicBoolean recovered = new AtomicBoolean();
  private final AtomicBoolean scheduledRecovery = new AtomicBoolean();

  /**
   * Reads the recover_after_* / expected_* settings and installs the
   * {@link #STATE_NOT_RECOVERED_BLOCK} as an initial cluster state block.
   */
  @Inject
  public GatewayService(
      Settings settings,
      AllocationService allocationService,
      ClusterService clusterService,
      ThreadPool threadPool,
      GatewayMetaState metaState,
      TransportNodesListGatewayMetaState listGatewayMetaState,
      Discovery discovery,
      IndicesService indicesService) {
    super(settings);
    this.gateway =
        new Gateway(
            settings, clusterService, metaState, listGatewayMetaState, discovery, indicesService);
    this.allocationService = allocationService;
    this.clusterService = clusterService;
    this.threadPool = threadPool;
    // allow to control a delay of when indices will get created
    this.expectedNodes = EXPECTED_NODES_SETTING.get(this.settings);
    this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(this.settings);
    this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(this.settings);

    // When recover_after_time is not set explicitly but some expected_* threshold is,
    // fall back to a default delay so the cluster does not wait forever for those nodes.
    if (RECOVER_AFTER_TIME_SETTING.exists(this.settings)) {
      recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(this.settings);
    } else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
      recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
    } else {
      recoverAfterTime = null;
    }
    this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(this.settings);
    this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(this.settings);
    // default the recover after master nodes to the minimum master nodes in the discovery
    if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(this.settings)) {
      recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(this.settings);
    } else {
      // TODO: change me once the minimum_master_nodes is changed too
      recoverAfterMasterNodes = settings.getAsInt("discovery.zen.minimum_master_nodes", -1);
    }

    // Add the not recovered as initial state block; we don't allow anything until the state
    // has been recovered (the block is removed in GatewayRecoveryListener.onSuccess).
    this.clusterService.addInitialStateBlock(STATE_NOT_RECOVERED_BLOCK);
  }

  @Override
  protected void doStart() {
    // use post applied so that the state will be visible to the background recovery thread we spawn
    // in performStateRecovery
    clusterService.addListener(this);
  }

  @Override
  protected void doStop() {
    clusterService.removeListener(this);
  }

  @Override
  protected void doClose() {}

  /**
   * On every cluster state change (elected master only, and only while the not-recovered block
   * is still in place), checks the recover_after_* thresholds and, once satisfied, kicks off
   * state recovery — immediately, or delayed when the expected_* counts are not yet reached.
   */
  @Override
  public void clusterChanged(final ClusterChangedEvent event) {
    if (lifecycle.stoppedOrClosed()) {
      return;
    }

    final ClusterState state = event.state();

    if (state.nodes().isLocalNodeElectedMaster() == false) {
      // not our job to recover
      return;
    }
    if (state.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) {
      // already recovered
      return;
    }

    DiscoveryNodes nodes = state.nodes();
    if (state.nodes().getMasterNodeId() == null) {
      logger.debug("not recovering from gateway, no master elected yet");
    } else if (recoverAfterNodes != -1
        && (nodes.getMasterAndDataNodes().size()) < recoverAfterNodes) {
      logger.debug(
          "not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]",
          nodes.getMasterAndDataNodes().size(),
          recoverAfterNodes);
    } else if (recoverAfterDataNodes != -1 && nodes.getDataNodes().size() < recoverAfterDataNodes) {
      logger.debug(
          "not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]",
          nodes.getDataNodes().size(),
          recoverAfterDataNodes);
    } else if (recoverAfterMasterNodes != -1
        && nodes.getMasterNodes().size() < recoverAfterMasterNodes) {
      logger.debug(
          "not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]",
          nodes.getMasterNodes().size(),
          recoverAfterMasterNodes);
    } else {
      boolean enforceRecoverAfterTime;
      String reason;
      if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) {
        // no expected is set, honor the setting if they are there
        enforceRecoverAfterTime = true;
        reason = "recover_after_time was set to [" + recoverAfterTime + "]";
      } else {
        // one of the expected is set, see if all of them meet the need, and ignore the timeout in
        // this case
        enforceRecoverAfterTime = false;
        reason = "";
        if (expectedNodes != -1
            && (nodes.getMasterAndDataNodes().size()
                < expectedNodes)) { // does not meet the expected...
          enforceRecoverAfterTime = true;
          reason =
              "expecting ["
                  + expectedNodes
                  + "] nodes, but only have ["
                  + nodes.getMasterAndDataNodes().size()
                  + "]";
        } else if (expectedDataNodes != -1
            && (nodes.getDataNodes().size() < expectedDataNodes)) { // does not meet the expected...
          enforceRecoverAfterTime = true;
          reason =
              "expecting ["
                  + expectedDataNodes
                  + "] data nodes, but only have ["
                  + nodes.getDataNodes().size()
                  + "]";
        } else if (expectedMasterNodes != -1
            && (nodes.getMasterNodes().size()
                < expectedMasterNodes)) { // does not meet the expected...
          enforceRecoverAfterTime = true;
          reason =
              "expecting ["
                  + expectedMasterNodes
                  + "] master nodes, but only have ["
                  + nodes.getMasterNodes().size()
                  + "]";
        }
      }
      performStateRecovery(enforceRecoverAfterTime, reason);
    }
  }

  /**
   * Runs (or schedules) the actual gateway state recovery. The {@code scheduledRecovery} and
   * {@code recovered} CAS flags ensure at most one recovery attempt is in flight; both are
   * reset by {@link GatewayRecoveryListener#onFailure(String)} so a later cluster state change
   * can retry.
   *
   * @param enforceRecoverAfterTime when true (and a delay is configured), recovery is deferred
   *     by {@code recoverAfterTime} instead of running immediately
   * @param reason human-readable explanation logged with a deferred recovery
   */
  private void performStateRecovery(boolean enforceRecoverAfterTime, String reason) {
    final Gateway.GatewayStateRecoveredListener recoveryListener = new GatewayRecoveryListener();

    if (enforceRecoverAfterTime && recoverAfterTime != null) {
      if (scheduledRecovery.compareAndSet(false, true)) {
        logger.info("delaying initial state recovery for [{}]. {}", recoverAfterTime, reason);
        threadPool.schedule(
            recoverAfterTime,
            ThreadPool.Names.GENERIC,
            () -> {
              if (recovered.compareAndSet(false, true)) {
                logger.info(
                    "recover_after_time [{}] elapsed. performing state recovery...",
                    recoverAfterTime);
                gateway.performStateRecovery(recoveryListener);
              }
            });
      }
    } else {
      if (recovered.compareAndSet(false, true)) {
        threadPool
            .generic()
            .execute(
                new AbstractRunnable() {
                  @Override
                  public void onFailure(Exception e) {
                    logger.warn("Recovery failed", e);
                    // we reset `recovered` in the listener don't reset it here otherwise there
                    // might be a race
                    // that resets it to false while a new recover is already running?
                    recoveryListener.onFailure("state recovery failed: " + e.getMessage());
                  }

                  @Override
                  protected void doRun() throws Exception {
                    gateway.performStateRecovery(recoveryListener);
                  }
                });
      }
    }
  }

  /** Returns the gateway used for state recovery. */
  public Gateway getGateway() {
    return gateway;
  }

  /**
   * Applies a successfully recovered state to the cluster: removes the not-recovered block,
   * imports the recovered metadata, rebuilds the routing table and reroutes. On failure it
   * resets the recovery flags so recovery can be retried on a later cluster state change.
   */
  class GatewayRecoveryListener implements Gateway.GatewayStateRecoveredListener {

    @Override
    public void onSuccess(final ClusterState recoveredState) {
      logger.trace("successful state recovery, importing cluster state...");
      clusterService.submitStateUpdateTask(
          "local-gateway-elected-state",
          new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
              assert currentState.metaData().indices().isEmpty();

              // remove the block, since we recovered from gateway
              ClusterBlocks.Builder blocks =
                  ClusterBlocks.builder()
                      .blocks(currentState.blocks())
                      .blocks(recoveredState.blocks())
                      .removeGlobalBlock(STATE_NOT_RECOVERED_BLOCK);

              MetaData.Builder metaDataBuilder = MetaData.builder(recoveredState.metaData());
              // automatically generate a UID for the metadata if we need to
              metaDataBuilder.generateClusterUuidIfNeeded();

              // preserve the read-only block whether it comes from the recovered or the
              // current state
              if (MetaData.SETTING_READ_ONLY_SETTING.get(recoveredState.metaData().settings())
                  || MetaData.SETTING_READ_ONLY_SETTING.get(currentState.metaData().settings())) {
                blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
              }

              for (IndexMetaData indexMetaData : recoveredState.metaData()) {
                metaDataBuilder.put(indexMetaData, false);
                blocks.addBlocks(indexMetaData);
              }

              // update the state to reflect the new metadata and routing
              ClusterState updatedState =
                  ClusterState.builder(currentState)
                      .blocks(blocks)
                      .metaData(metaDataBuilder)
                      .build();

              // initialize all index routing tables as empty
              RoutingTable.Builder routingTableBuilder =
                  RoutingTable.builder(updatedState.routingTable());
              for (ObjectCursor<IndexMetaData> cursor :
                  updatedState.metaData().indices().values()) {
                routingTableBuilder.addAsRecovery(cursor.value);
              }
              // start with 0 based versions for routing table
              routingTableBuilder.version(0);

              // now, reroute
              updatedState =
                  ClusterState.builder(updatedState)
                      .routingTable(routingTableBuilder.build())
                      .build();
              return allocationService.reroute(updatedState, "state recovered");
            }

            @Override
            public void onFailure(String source, Exception e) {
              logger.error(
                  (Supplier<?>)
                      () -> new ParameterizedMessage("unexpected failure during [{}]", source),
                  e);
              GatewayRecoveryListener.this.onFailure("failed to updated cluster state");
            }

            @Override
            public void clusterStateProcessed(
                String source, ClusterState oldState, ClusterState newState) {
              logger.info(
                  "recovered [{}] indices into cluster_state",
                  newState.metaData().indices().size());
            }
          });
    }

    @Override
    public void onFailure(String message) {
      // allow a future cluster state change to retry recovery
      recovered.set(false);
      scheduledRecovery.set(false);
      // don't remove the block here, we don't want to allow anything in such a case
      logger.info("metadata state not restored, reason: {}", message);
    }
  }

  // used for testing
  public TimeValue recoverAfterTime() {
    return recoverAfterTime;
  }
}
/**
 * A node level service that deletes expired docs on node primary shards. A background
 * {@link PurgerThread} wakes up every {@code indices.ttl.interval}, collects expired documents
 * from local started primary shards whose mappings have TTL enabled, and removes them via
 * (versioned) bulk delete requests.
 */
public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLService> {

  // How often the purger thread wakes up; dynamically updatable.
  public static final Setting<TimeValue> INDICES_TTL_INTERVAL_SETTING =
      Setting.positiveTimeSetting(
          "indices.ttl.interval",
          TimeValue.timeValueSeconds(60),
          Property.Dynamic,
          Property.NodeScope);

  private final ClusterService clusterService;
  private final IndicesService indicesService;
  private final TransportBulkAction bulkAction;

  // Flush threshold for the delete bulk built during a purge pass.
  private final int bulkSize;
  private PurgerThread purgerThread;

  /**
   * Creates the service and its (not yet started) purger thread, and registers a dynamic
   * settings listener so interval changes take effect without a restart.
   */
  @Inject
  public IndicesTTLService(
      Settings settings,
      ClusterService clusterService,
      IndicesService indicesService,
      ClusterSettings clusterSettings,
      TransportBulkAction bulkAction) {
    super(settings);
    this.clusterService = clusterService;
    this.indicesService = indicesService;
    TimeValue interval = INDICES_TTL_INTERVAL_SETTING.get(settings);
    this.bulkAction = bulkAction;
    this.bulkSize = this.settings.getAsInt("indices.ttl.bulk_size", 10000);
    this.purgerThread =
        new PurgerThread(EsExecutors.threadName(settings, "[ttl_expire]"), interval);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_TTL_INTERVAL_SETTING, this.purgerThread::resetInterval);
  }

  @Override
  protected void doStart() {
    this.purgerThread.start();
  }

  @Override
  protected void doStop() {
    try {
      this.purgerThread.shutdown();
    } catch (InterruptedException e) {
      // we intentionally do not want to restore the interruption flag, we're about to shutdown
      // anyway
    }
  }

  @Override
  protected void doClose() {}

  /**
   * Daemon thread that repeatedly purges expired docs and then sleeps for the configured
   * interval. Shutdown is coordinated through the {@code running} flag and a latch so
   * {@link #shutdown()} only returns once the run loop has fully exited.
   */
  private class PurgerThread extends Thread {
    private final AtomicBoolean running = new AtomicBoolean(true);
    private final Notifier notifier;
    private final CountDownLatch shutdownLatch = new CountDownLatch(1);

    public PurgerThread(String name, TimeValue interval) {
      super(name);
      setDaemon(true);
      this.notifier = new Notifier(interval);
    }

    /** Signals the loop to stop, wakes it if sleeping, and waits until it has exited. */
    public void shutdown() throws InterruptedException {
      if (running.compareAndSet(true, false)) {
        notifier.doNotify();
        shutdownLatch.await();
      }
    }

    /** Applies a new purge interval; also wakes the thread so the new value takes effect. */
    public void resetInterval(TimeValue interval) {
      notifier.setTimeout(interval);
    }

    @Override
    public void run() {
      try {
        while (running.get()) {
          try {
            List<IndexShard> shardsToPurge = getShardsToPurge();
            purgeShards(shardsToPurge);
          } catch (Throwable e) {
            // deliberately broad: a failure in one purge pass must not kill the thread
            if (running.get()) {
              logger.warn("failed to execute ttl purge", e);
            }
          }
          if (running.get()) {
            notifier.await();
          }
        }
      } finally {
        shutdownLatch.countDown();
      }
    }

    /**
     * Returns the shards to purge, i.e. the local started primary shards that have ttl enabled and
     * disable_purge to false
     */
    private List<IndexShard> getShardsToPurge() {
      List<IndexShard> shardsToPurge = new ArrayList<>();
      MetaData metaData = clusterService.state().metaData();
      for (IndexService indexService : indicesService) {
        // check the value of disable_purge for this index
        IndexMetaData indexMetaData = metaData.index(indexService.index());
        if (indexMetaData == null) {
          continue;
        }
        if (indexService.getIndexSettings().isTTLPurgeDisabled()) {
          continue;
        }

        // check if ttl is enabled for at least one type of this index
        boolean hasTTLEnabled = false;
        for (String type : indexService.mapperService().types()) {
          DocumentMapper documentType = indexService.mapperService().documentMapper(type);
          if (documentType.TTLFieldMapper().enabled()) {
            hasTTLEnabled = true;
            break;
          }
        }
        if (hasTTLEnabled) {
          for (IndexShard indexShard : indexService) {
            if (indexShard.state() == IndexShardState.STARTED
                && indexShard.routingEntry().primary()
                && indexShard.routingEntry().started()) {
              shardsToPurge.add(indexShard);
            }
          }
        }
      }
      return shardsToPurge;
    }

    public TimeValue getInterval() {
      return notifier.getTimeout();
    }
  }

  /**
   * Deletes expired docs on each given shard: runs a range query over the TTL field up to the
   * current time and issues versioned bulk delete requests for every hit. Failures on a single
   * shard are logged and do not abort the remaining shards.
   */
  private void purgeShards(List<IndexShard> shardsToPurge) {
    for (IndexShard shardToPurge : shardsToPurge) {
      Query query =
          shardToPurge
              .mapperService()
              .fullName(TTLFieldMapper.NAME)
              .rangeQuery(null, System.currentTimeMillis(), false, true);
      Engine.Searcher searcher = shardToPurge.acquireSearcher("indices_ttl");
      try {
        logger.debug(
            "[{}][{}] purging shard",
            shardToPurge.routingEntry().index(),
            shardToPurge.routingEntry().id());
        ExpiredDocsCollector expiredDocsCollector = new ExpiredDocsCollector();
        searcher.searcher().search(query, expiredDocsCollector);
        List<DocToPurge> docsToPurge = expiredDocsCollector.getDocsToPurge();

        BulkRequest bulkRequest = new BulkRequest();
        for (DocToPurge docToPurge : docsToPurge) {

          // versioned delete so a doc updated since collection is not removed
          bulkRequest.add(
              new DeleteRequest()
                  .index(shardToPurge.routingEntry().getIndexName())
                  .type(docToPurge.type)
                  .id(docToPurge.id)
                  .version(docToPurge.version)
                  .routing(docToPurge.routing));
          bulkRequest = processBulkIfNeeded(bulkRequest, false);
        }
        processBulkIfNeeded(bulkRequest, true);
      } catch (Exception e) {
        logger.warn("failed to purge", e);
      } finally {
        searcher.close();
      }
    }
  }

  /** Identifies a single expired document (type/id/version/routing) scheduled for deletion. */
  private static class DocToPurge {
    public final String type;
    public final String id;
    public final long version;
    public final String routing;

    public DocToPurge(String type, String id, long version, String routing) {
      this.type = type;
      this.id = id;
      this.version = version;
      this.routing = routing;
    }
  }

  /** Lucene collector that records a {@link DocToPurge} for every matching (expired) doc. */
  private class ExpiredDocsCollector extends SimpleCollector {
    private LeafReaderContext context;
    private List<DocToPurge> docsToPurge = new ArrayList<>();

    public ExpiredDocsCollector() {}

    @Override
    public void setScorer(Scorer scorer) {}

    @Override
    public boolean needsScores() {
      return false;
    }

    @Override
    public void collect(int doc) {
      try {
        FieldsVisitor fieldsVisitor = new FieldsVisitor(false);
        context.reader().document(doc, fieldsVisitor);
        Uid uid = fieldsVisitor.uid();
        final long version =
            Versions.loadVersion(context.reader(), new Term(UidFieldMapper.NAME, uid.toBytesRef()));
        docsToPurge.add(new DocToPurge(uid.type(), uid.id(), version, fieldsVisitor.routing()));
      } catch (Exception e) {
        // best effort: skip docs we fail to load rather than failing the whole purge
        logger.trace("failed to collect doc", e);
      }
    }

    @Override
    public void doSetNextReader(LeafReaderContext context) throws IOException {
      this.context = context;
    }

    public List<DocToPurge> getDocsToPurge() {
      return this.docsToPurge;
    }
  }

  /**
   * Executes the bulk when it has reached {@code bulkSize} actions, or when {@code force} is set
   * and it is non-empty; returns a fresh request after execution, otherwise the original one.
   * Deletion failures are only logged.
   */
  private BulkRequest processBulkIfNeeded(BulkRequest bulkRequest, boolean force) {
    if ((force && bulkRequest.numberOfActions() > 0) || bulkRequest.numberOfActions() >= bulkSize) {
      try {
        bulkAction.executeBulk(
            bulkRequest,
            new ActionListener<BulkResponse>() {
              @Override
              public void onResponse(BulkResponse bulkResponse) {
                if (bulkResponse.hasFailures()) {
                  int failedItems = 0;
                  for (BulkItemResponse response : bulkResponse) {
                    if (response.isFailed()) failedItems++;
                  }
                  if (logger.isTraceEnabled()) {
                    logger.trace(
                        "bulk deletion failures for [{}]/[{}] items, failure message: [{}]",
                        failedItems,
                        bulkResponse.getItems().length,
                        bulkResponse.buildFailureMessage());
                  } else {
                    logger.error(
                        "bulk deletion failures for [{}]/[{}] items",
                        failedItems,
                        bulkResponse.getItems().length);
                  }
                } else {
                  logger.trace("bulk deletion took {}ms", bulkResponse.getTookInMillis());
                }
              }

              @Override
              public void onFailure(Throwable e) {
                if (logger.isTraceEnabled()) {
                  logger.trace("failed to execute bulk", e);
                } else {
                  logger.warn("failed to execute bulk: ", e);
                }
              }
            });
      } catch (Exception e) {
        logger.warn("failed to process bulk", e);
      }
      bulkRequest = new BulkRequest();
    }
    return bulkRequest;
  }

  /**
   * Lock/condition wrapper that lets the purger thread sleep for the configured interval and be
   * woken early when the interval changes or shutdown is requested.
   */
  private static final class Notifier {

    private final ReentrantLock lock = new ReentrantLock();
    private final Condition condition = lock.newCondition();
    // volatile: written by settings-update/shutdown threads, read by the purger thread
    private volatile TimeValue timeout;

    public Notifier(TimeValue timeout) {
      assert timeout != null;
      this.timeout = timeout;
    }

    /** Sleeps up to the current timeout, returning early on a notify (or spuriously). */
    public void await() {
      lock.lock();
      try {
        condition.await(timeout.millis(), TimeUnit.MILLISECONDS);
      } catch (InterruptedException e) {
        // we intentionally do not want to restore the interruption flag, we're about to shutdown
        // anyway
      } finally {
        lock.unlock();
      }
    }

    public void setTimeout(TimeValue timeout) {
      assert timeout != null;
      this.timeout = timeout;
      doNotify();
    }

    public TimeValue getTimeout() {
      return timeout;
    }

    /** Wakes any thread currently blocked in {@link #await()}. */
    public void doNotify() {
      lock.lock();
      try {
        condition.signalAll();
      } finally {
        lock.unlock();
      }
    }
  }
}
/**
 * Mapper for the {@code percolator} field type: indexes a query (serialized as SMILE into a
 * hidden binary sub-field) together with terms extracted from it, so percolate queries can
 * cheaply pre-select candidate queries before verifying them.
 */
public class PercolatorFieldMapper extends FieldMapper {

  // Content type used to serialize the query builder into the binary doc-values field.
  public static final XContentType QUERY_BUILDER_CONTENT_TYPE = XContentType.SMILE;
  // NOTE(review): presumably makes queries on unmapped fields parse as if the field were a
  // string instead of failing — confirm where mapUnmappedFieldAsString is consumed (toQuery).
  public static final Setting<Boolean> INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING =
      Setting.boolSetting(
          "index.percolator.map_unmapped_fields_as_string", false, Setting.Property.IndexScope);
  public static final String CONTENT_TYPE = "percolator";
  private static final FieldType FIELD_TYPE = new FieldType();

  // Separator between the field name and the term in the encoded extracted-terms values.
  static final byte FIELD_VALUE_SEPARATOR = 0; // nul code point
  // Markers written to the extraction-result field describing how term extraction went.
  static final String EXTRACTION_COMPLETE = "complete";
  static final String EXTRACTION_PARTIAL = "partial";
  static final String EXTRACTION_FAILED = "failed";

  // Names of the hidden sub-fields backing a percolator field.
  public static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms";
  public static final String EXTRACTION_RESULT_FIELD_NAME = "extraction_result";
  public static final String QUERY_BUILDER_FIELD_NAME = "query_builder_field";

  /** Builder for {@link PercolatorFieldMapper} instances. */
  public static class Builder extends FieldMapper.Builder<Builder, PercolatorFieldMapper> {

    private final QueryShardContext queryShardContext;

    public Builder(String fieldName, QueryShardContext queryShardContext) {
      super(fieldName, FIELD_TYPE, FIELD_TYPE);
      this.queryShardContext = queryShardContext;
    }

    /**
     * Builds the percolator mapper together with its three hidden sub-fields (extracted terms,
     * extraction result and the binary query-builder field), wiring their field types into the
     * percolator {@link FieldType}.
     */
    @Override
    public PercolatorFieldMapper build(BuilderContext context) {
      // Sub-fields live under this mapper's name in the mapping path.
      context.path().add(name());
      FieldType percolatorFieldType = (FieldType) this.fieldType;
      KeywordFieldMapper termsMapper =
          createExtractQueryFieldBuilder(EXTRACTED_TERMS_FIELD_NAME, context);
      percolatorFieldType.queryTermsField = termsMapper.fieldType();
      KeywordFieldMapper extractionResultMapper =
          createExtractQueryFieldBuilder(EXTRACTION_RESULT_FIELD_NAME, context);
      percolatorFieldType.extractionResultField = extractionResultMapper.fieldType();
      BinaryFieldMapper queryBuilderMapper = createQueryBuilderFieldBuilder(context);
      percolatorFieldType.queryBuilderField = queryBuilderMapper.fieldType();
      context.path().remove();
      setupFieldType(context);
      return new PercolatorFieldMapper(
          name(),
          percolatorFieldType,
          defaultFieldType,
          context.indexSettings(),
          multiFieldsBuilder.build(this, context),
          copyTo,
          queryShardContext,
          termsMapper,
          extractionResultMapper,
          queryBuilderMapper);
    }

    /** Creates a keyword sub-field: doc-only indexing, no doc values, not stored. */
    static KeywordFieldMapper createExtractQueryFieldBuilder(String name, BuilderContext context) {
      KeywordFieldMapper.Builder fieldBuilder = new KeywordFieldMapper.Builder(name);
      fieldBuilder.docValues(false);
      fieldBuilder.store(false);
      fieldBuilder.indexOptions(IndexOptions.DOCS);
      return fieldBuilder.build(context);
    }

    /** Creates the binary sub-field that holds the serialized query as a doc value. */
    static BinaryFieldMapper createQueryBuilderFieldBuilder(BuilderContext context) {
      BinaryFieldMapper.Builder fieldBuilder =
          new BinaryFieldMapper.Builder(QUERY_BUILDER_FIELD_NAME);
      fieldBuilder.docValues(true);
      fieldBuilder.indexOptions(IndexOptions.NONE);
      fieldBuilder.store(false);
      fieldBuilder.fieldType().setDocValuesType(DocValuesType.BINARY);
      return fieldBuilder.build(context);
    }
  }

  /** Parses the {@code percolator} field type from a mapping definition. */
  public static class TypeParser implements FieldMapper.TypeParser {

    @Override
    public Builder parse(String name, Map<String, Object> node, ParserContext parserContext)
        throws MapperParsingException {
      // The percolator field takes no mapping parameters; only the shard context is needed.
      final QueryShardContext shardContext = parserContext.queryShardContext();
      return new Builder(name, shardContext);
    }
  }

  /**
   * Field type of the percolator field. Holds references to the field types of the three hidden
   * sub-fields and builds the queries used by percolation.
   */
  public static class FieldType extends MappedFieldType {

    MappedFieldType queryTermsField;
    MappedFieldType extractionResultField;
    MappedFieldType queryBuilderField;

    public FieldType() {
      // The percolator field itself is neither indexed, doc-valued nor stored; only its
      // sub-fields carry data.
      setIndexOptions(IndexOptions.NONE);
      setDocValuesType(DocValuesType.NONE);
      setStored(false);
    }

    /** Copy constructor used by {@link #clone()}. */
    public FieldType(FieldType ref) {
      super(ref);
      queryTermsField = ref.queryTermsField;
      extractionResultField = ref.extractionResultField;
      queryBuilderField = ref.queryBuilderField;
    }

    @Override
    public MappedFieldType clone() {
      return new FieldType(this);
    }

    @Override
    public String typeName() {
      return CONTENT_TYPE;
    }

    @Override
    public Query termQuery(Object value, QueryShardContext context) {
      throw new QueryShardException(
          context, "Percolator fields are not searchable directly, use a percolate query instead");
    }

    /**
     * Builds the query that matches indexed percolator queries against the given document.
     * Candidate queries are pre-selected via {@link #createCandidateQuery(IndexReader)} and then
     * verified, except for queries whose extraction was marked "complete" when percolating a
     * single (non-nested) document.
     */
    public Query percolateQuery(
        String documentType,
        PercolateQuery.QueryStore queryStore,
        BytesReference documentSource,
        IndexSearcher searcher)
        throws IOException {
      IndexReader indexReader = searcher.getIndexReader();
      Query candidateMatchesQuery = createCandidateQuery(indexReader);
      // MemoryIndex verification can only be skipped for a single-document percolation: a
      // nested document expands into multiple Lucene docs, and the "extraction complete"
      // marker could then wrongly report a match for non-nested variants of nested queries.
      final Query verifiedMatchesQuery =
          indexReader.maxDoc() == 1
              ? new TermQuery(new Term(extractionResultField.name(), EXTRACTION_COMPLETE))
              : new MatchNoDocsQuery("nested docs, so no verified matches");
      return new PercolateQuery(
          documentType,
          queryStore,
          documentSource,
          candidateMatchesQuery,
          searcher,
          verifiedMatchesQuery);
    }

    /**
     * Creates the candidate query: a terms query over every (field, term) pair present in the
     * percolated document, plus the "extraction failed" marker term so queries without
     * extracted terms are still returned (and verified by the MemoryIndex).
     */
    Query createCandidateQuery(IndexReader indexReader) throws IOException {
      List<Term> extractedTerms = new ArrayList<>();
      // Queries whose term extraction failed carry no extracted terms; include the marker so
      // they always show up as candidates.
      extractedTerms.add(new Term(extractionResultField.name(), EXTRACTION_FAILED));

      LeafReader leafReader = indexReader.leaves().get(0).reader();
      Fields documentFields = leafReader.fields();
      for (String fieldName : documentFields) {
        Terms fieldTerms = documentFields.terms(fieldName);
        if (fieldTerms == null) {
          continue;
        }

        BytesRef fieldBytes = new BytesRef(fieldName);
        TermsEnum termsEnum = fieldTerms.iterator();
        BytesRef termBytes;
        while ((termBytes = termsEnum.next()) != null) {
          // Encode as <field> NUL <term>; presumably mirrors the encoding used when the
          // extracted terms were indexed — confirm against the extraction code.
          BytesRefBuilder encoded = new BytesRefBuilder();
          encoded.append(fieldBytes);
          encoded.append(FIELD_VALUE_SEPARATOR);
          encoded.append(termBytes);
          extractedTerms.add(new Term(queryTermsField.name(), encoded.toBytesRef()));
        }
      }
      return new TermsQuery(extractedTerms);
    }
  }

  private final boolean mapUnmappedFieldAsString;
  private final QueryShardContext queryShardContext;
  // Sub-field mappers; non-final because updateFieldType replaces them on a cloned mapper.
  private KeywordFieldMapper queryTermsField;
  private KeywordFieldMapper extractionResultField;
  private BinaryFieldMapper queryBuilderField;

  /**
   * Creates the percolator mapper with its three sub-field mappers (built by {@link Builder}).
   * The {@code map_unmapped_fields_as_string} index setting is captured once at construction.
   */
  public PercolatorFieldMapper(
      String simpleName,
      MappedFieldType fieldType,
      MappedFieldType defaultFieldType,
      Settings indexSettings,
      MultiFields multiFields,
      CopyTo copyTo,
      QueryShardContext queryShardContext,
      KeywordFieldMapper queryTermsField,
      KeywordFieldMapper extractionResultField,
      BinaryFieldMapper queryBuilderField) {
    super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
    this.mapUnmappedFieldAsString = INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings);
    this.queryShardContext = queryShardContext;
    this.queryTermsField = queryTermsField;
    this.extractionResultField = extractionResultField;
    this.queryBuilderField = queryBuilderField;
  }

  /**
   * Propagates field-type updates to this mapper and its three sub-field mappers, returning
   * {@code this} when nothing changed, and an updated (possibly cloned) copy otherwise.
   */
  @Override
  public FieldMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
    PercolatorFieldMapper updated =
        (PercolatorFieldMapper) super.updateFieldType(fullNameToFieldType);
    KeywordFieldMapper newQueryTerms =
        (KeywordFieldMapper) queryTermsField.updateFieldType(fullNameToFieldType);
    KeywordFieldMapper newExtractionResult =
        (KeywordFieldMapper) extractionResultField.updateFieldType(fullNameToFieldType);
    BinaryFieldMapper newQueryBuilder =
        (BinaryFieldMapper) queryBuilderField.updateFieldType(fullNameToFieldType);

    final boolean unchanged =
        updated == this
            && newQueryTerms == queryTermsField
            && newExtractionResult == extractionResultField
            && newQueryBuilder == queryBuilderField;
    if (unchanged) {
      return this;
    }
    // Never mutate the current mapper in place: clone before applying sub-field updates.
    if (updated == this) {
      updated = (PercolatorFieldMapper) updated.clone();
    }
    updated.queryTermsField = newQueryTerms;
    updated.extractionResultField = newExtractionResult;
    updated.queryBuilderField = newQueryBuilder;
    return updated;
  }

  /**
   * Parses the percolator query from the document source, stores its serialized form in the
   * query-builder sub-field and its extracted terms in the terms sub-field.
   *
   * <p>Returns {@code null}: all fields are added directly to the document here, so no further
   * mapper processing is needed.
   *
   * @throws IllegalArgumentException if the document already contains a percolator query
   */
  @Override
  public Mapper parse(ParseContext context) throws IOException {
    // Work on a copy of the shard context; rewrite() below mutates context state.
    QueryShardContext queryShardContext = new QueryShardContext(this.queryShardContext);
    if (context.doc().getField(queryBuilderField.name()) != null) {
      // If a percolator query has been defined in an array object then multiple percolator queries
      // could be provided. In order to prevent this we fail if we try to parse more than one query
      // for the current document.
      throw new IllegalArgumentException("a document can only contain one percolator query");
    }

    XContentParser parser = context.parser();
    QueryBuilder queryBuilder =
        parseQueryBuilder(queryShardContext.newParseContext(parser), parser.getTokenLocation());
    verifyQuery(queryBuilder);
    // Fetching of terms, shapes and indexed scripts happen during this rewrite:
    queryBuilder = queryBuilder.rewrite(queryShardContext);

    // Persist the (rewritten) query builder so it can be deserialized at percolate time.
    try (XContentBuilder builder = XContentFactory.contentBuilder(QUERY_BUILDER_CONTENT_TYPE)) {
      queryBuilder.toXContent(builder, new MapParams(Collections.emptyMap()));
      builder.flush();
      byte[] queryBuilderAsBytes = BytesReference.toBytes(builder.bytes());
      context
          .doc()
          .add(
              new Field(
                  queryBuilderField.name(), queryBuilderAsBytes, queryBuilderField.fieldType()));
    }

    Query query = toQuery(queryShardContext, mapUnmappedFieldAsString, queryBuilder);
    processQuery(query, context);
    return null;
  }

  /**
   * Analyzes the Lucene query, indexing each extracted term as {@code field\0value} into the
   * terms sub-field, and records the extraction outcome (complete / partial / failed) in the
   * extraction-result sub-field.
   */
  void processQuery(Query query, ParseContext context) {
    ParseContext.Document doc = context.doc();
    FieldType pft = (FieldType) this.fieldType();
    QueryAnalyzer.Result result;
    try {
      result = QueryAnalyzer.analyze(query);
    } catch (QueryAnalyzer.UnsupportedQueryException e) {
      // NOTE(review): this branch reads the field name via pft.extractionResultField while the
      // branches below use this.extractionResultField directly — presumably equivalent; confirm.
      doc.add(
          new Field(
              pft.extractionResultField.name(),
              EXTRACTION_FAILED,
              extractionResultField.fieldType()));
      return;
    }
    // Index each extracted term, prefixed with its field name and a separator byte.
    for (Term term : result.terms) {
      BytesRefBuilder builder = new BytesRefBuilder();
      builder.append(new BytesRef(term.field()));
      builder.append(FIELD_VALUE_SEPARATOR);
      builder.append(term.bytes());
      doc.add(new Field(queryTermsField.name(), builder.toBytesRef(), queryTermsField.fieldType()));
    }
    // "verified" means matching on the extracted terms alone is exact (no false positives).
    if (result.verified) {
      doc.add(
          new Field(
              extractionResultField.name(),
              EXTRACTION_COMPLETE,
              extractionResultField.fieldType()));
    } else {
      doc.add(
          new Field(
              extractionResultField.name(), EXTRACTION_PARTIAL, extractionResultField.fieldType()));
    }
  }

  /**
   * Parses a percolator query from the given parser into a Lucene query. Convenience overload
   * that creates a fresh {@link QueryParseContext} from the parser.
   */
  public static Query parseQuery(
      QueryShardContext context, boolean mapUnmappedFieldsAsString, XContentParser parser)
      throws IOException {
    QueryParseContext parseContext = context.newParseContext(parser);
    return parseQuery(context, mapUnmappedFieldsAsString, parseContext, parser);
  }

  /**
   * Parses a percolator query from the given parse context into a Lucene query.
   *
   * @throws IOException if converting the query builder into a Lucene query fails
   */
  public static Query parseQuery(
      QueryShardContext context,
      boolean mapUnmappedFieldsAsString,
      QueryParseContext queryParseContext,
      XContentParser parser)
      throws IOException {
    QueryBuilder builder = parseQueryBuilder(queryParseContext, parser.getTokenLocation());
    return toQuery(context, mapUnmappedFieldsAsString, builder);
  }

  /**
   * Converts the given query builder into a Lucene query using the given shard context.
   *
   * <p>Unmapped fields are disallowed: fields used in a percolator query must already exist in
   * the mapping before the query is registered. If a field did not exist, the query would fall
   * back to defaults, which may be undesired — and if the field were added to the mapping after
   * the query was registered, the registered percolator query would no longer behave as expected.
   * Query parsing cannot introduce new fields into the mapping (a field type cannot be inferred
   * from a query the way it can from a document), so the safest option is to reject unmapped
   * fields.
   *
   * <p>As an escape hatch, if {@code index.percolator.map_unmapped_fields_as_string} is set to
   * {@code true} the query may reference unmapped fields, which are then mapped as an analyzed
   * string.
   */
  static Query toQuery(
      QueryShardContext context, boolean mapUnmappedFieldsAsString, QueryBuilder queryBuilder)
      throws IOException {
    context.setAllowUnmappedFields(false);
    context.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString);
    return queryBuilder.toQuery(context);
  }

  /**
   * Parses the inner query builder from the parse context.
   *
   * @param location token location used to point error messages at the offending source
   * @throws ParsingException if the inner query is empty or cannot be parsed
   */
  private static QueryBuilder parseQueryBuilder(
      QueryParseContext context, XContentLocation location) {
    try {
      return context
          .parseInnerQueryBuilder()
          .orElseThrow(
              () -> new ParsingException(location, "Failed to parse inner query, was empty"));
    } catch (IOException e) {
      // Wrap the IOException so callers only ever see ParsingException from this method.
      throw new ParsingException(location, "Failed to parse", e);
    }
  }

  /** Iterates over the three sub-field mappers so they participate in mapper traversal. */
  @Override
  public Iterator<Mapper> iterator() {
    return Arrays.<Mapper>asList(queryTermsField, extractionResultField, queryBuilderField)
        .iterator();
  }

  /** Never called: {@link #parse(ParseContext)} is overridden and handles field creation itself. */
  @Override
  protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    throw new UnsupportedOperationException("should not be invoked");
  }

  /** Returns the mapper type name used in mappings for this field. */
  @Override
  protected String contentType() {
    return CONTENT_TYPE;
  }

  /**
   * Fails if a percolator query contains an unsupported clause. The following queries are not
   * supported:
   *
   * <ol>
   *   <li>a range query with a date bound based on the current time ({@code now})
   *   <li>a {@code has_child} query
   *   <li>a {@code has_parent} query
   * </ol>
   *
   * <p>Recurses into compound queries (bool, constant_score, function_score, boosting) so nested
   * clauses are verified too.
   *
   * @throws IllegalArgumentException if an unsupported query is encountered
   */
  static void verifyQuery(QueryBuilder queryBuilder) {
    if (queryBuilder instanceof RangeQueryBuilder) {
      RangeQueryBuilder rangeQueryBuilder = (RangeQueryBuilder) queryBuilder;
      // Either bound may independently be null (open-ended range) or a non-String value
      // (e.g. a numeric bound). The previous implementation cast to() to String and called
      // contains() on it whenever from() was a String, which threw a NullPointerException for
      // open-ended ranges and a ClassCastException for non-String bounds — and it never checked
      // a "now"-based to() bound when from() was not a String.
      if (boundReferencesNow(rangeQueryBuilder.from())
          || boundReferencesNow(rangeQueryBuilder.to())) {
        throw new IllegalArgumentException(
            "percolator queries containing time range queries based on the "
                + "current time is unsupported");
      }
    } else if (queryBuilder instanceof HasChildQueryBuilder) {
      throw new IllegalArgumentException(
          "the [has_child] query is unsupported inside a percolator query");
    } else if (queryBuilder instanceof HasParentQueryBuilder) {
      throw new IllegalArgumentException(
          "the [has_parent] query is unsupported inside a percolator query");
    } else if (queryBuilder instanceof BoolQueryBuilder) {
      BoolQueryBuilder boolQueryBuilder = (BoolQueryBuilder) queryBuilder;
      List<QueryBuilder> clauses = new ArrayList<>();
      clauses.addAll(boolQueryBuilder.filter());
      clauses.addAll(boolQueryBuilder.must());
      clauses.addAll(boolQueryBuilder.mustNot());
      clauses.addAll(boolQueryBuilder.should());
      for (QueryBuilder clause : clauses) {
        verifyQuery(clause);
      }
    } else if (queryBuilder instanceof ConstantScoreQueryBuilder) {
      verifyQuery(((ConstantScoreQueryBuilder) queryBuilder).innerQuery());
    } else if (queryBuilder instanceof FunctionScoreQueryBuilder) {
      verifyQuery(((FunctionScoreQueryBuilder) queryBuilder).query());
    } else if (queryBuilder instanceof BoostingQueryBuilder) {
      verifyQuery(((BoostingQueryBuilder) queryBuilder).negativeQuery());
      verifyQuery(((BoostingQueryBuilder) queryBuilder).positiveQuery());
    }
  }

  /** Returns true if the given range bound is a string that refers to the current time. */
  private static boolean boundReferencesNow(Object bound) {
    return bound instanceof String && ((String) bound).contains("now");
  }
}
/**
 * The merge scheduler (<code>ConcurrentMergeScheduler</code>) controls the execution of merge
 * operations once they are needed (according to the merge policy). Merges run in separate threads,
 * and when the maximum number of threads is reached, further merges will wait until a merge thread
 * becomes available.
 *
 * <p>The merge scheduler supports the following <b>dynamic</b> settings:
 *
 * <ul>
 *   <li><code>index.merge.scheduler.max_thread_count</code>:
 *       <p>The maximum number of threads that may be merging at once. Defaults to <code>
 *       Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))</code> which works
 *       well for a good solid-state-disk (SSD). If your index is on spinning platter drives
 *       instead, decrease this to 1.
 *   <li><code>index.merge.scheduler.auto_throttle</code>:
 *       <p>If this is true (the default), then the merge scheduler will rate-limit IO (writes) for
 *       merges to an adaptive value depending on how many merges are requested over time. An
 *       application with a low indexing rate that unluckily suddenly requires a large merge will
 *       see that merge aggressively throttled, while an application doing heavy indexing will see
 *       the throttle move higher to allow merges to keep up with ongoing indexing.
 * </ul>
 */
public final class MergeSchedulerConfig {

  /**
   * Maximum number of threads that may be merging at once. Defaults to
   * {@code max(1, min(4, processors / 2))}. Dynamic, index-scoped, minimum 1.
   */
  public static final Setting<Integer> MAX_THREAD_COUNT_SETTING =
      new Setting<>(
          "index.merge.scheduler.max_thread_count",
          (s) ->
              Integer.toString(
                  Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))),
          (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"),
          Property.Dynamic,
          Property.IndexScope);

  /**
   * Maximum number of simultaneous merges. Defaults to {@code max_thread_count + 5}. Dynamic,
   * index-scoped, minimum 1.
   */
  public static final Setting<Integer> MAX_MERGE_COUNT_SETTING =
      new Setting<>(
          "index.merge.scheduler.max_merge_count",
          (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5),
          (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"),
          Property.Dynamic,
          Property.IndexScope);

  /** Whether the merge scheduler adaptively rate-limits merge IO. Dynamic, index-scoped. */
  public static final Setting<Boolean> AUTO_THROTTLE_SETTING =
      Setting.boolSetting(
          "index.merge.scheduler.auto_throttle", true, Property.Dynamic, Property.IndexScope);

  // All three values can change at runtime through the package-private setters; volatile so that
  // concurrent readers observe updates without extra locking.
  private volatile int maxThreadCount;
  private volatile int maxMergeCount;
  private volatile boolean autoThrottle;

  MergeSchedulerConfig(IndexSettings indexSettings) {
    this.maxThreadCount = indexSettings.getValue(MAX_THREAD_COUNT_SETTING);
    this.maxMergeCount = indexSettings.getValue(MAX_MERGE_COUNT_SETTING);
    this.autoThrottle = indexSettings.getValue(AUTO_THROTTLE_SETTING);
  }

  /** Returns {@code maxThreadCount}. */
  public int getMaxThreadCount() {
    return maxThreadCount;
  }

  /** Expert: directly set the maximum number of merge threads and simultaneous merges allowed. */
  void setMaxThreadCount(int maxThreadCount) {
    this.maxThreadCount = maxThreadCount;
  }

  /** Returns {@code maxMergeCount}. */
  public int getMaxMergeCount() {
    return maxMergeCount;
  }

  /** Expert: set the maximum number of simultaneous merges allowed. */
  void setMaxMergeCount(int maxMergeCount) {
    this.maxMergeCount = maxMergeCount;
  }

  /**
   * Returns <code>true</code> iff auto throttle is enabled.
   *
   * @see ConcurrentMergeScheduler#enableAutoIOThrottle()
   */
  public boolean isAutoThrottle() {
    return autoThrottle;
  }

  /** Enables / disables auto throttling on the {@link ConcurrentMergeScheduler} */
  void setAutoThrottle(boolean autoThrottle) {
    this.autoThrottle = autoThrottle;
  }
}
// NOTE(review): extraction artifact from the source listing ("Esempio n. 30" / "0") — not Java
// code; preserved here as a comment so the file stays parseable.
public class MapperService extends AbstractIndexComponent {

  /**
   * The reason why a mapping is being merged. Note that {@code MAPPING_UPDATE} triggers the
   * field-count, depth and nested-field limit checks in {@code merge}, while
   * {@code MAPPING_RECOVERY} skips them.
   */
  public enum MergeReason {
    /** Create or update a mapping. */
    MAPPING_UPDATE,
    /**
     * Recovery of an existing mapping, for instance because of a restart, if a shard was moved to a
     * different node or for administrative purposes.
     */
    MAPPING_RECOVERY;
  }

  // Name of the special type whose mapping is applied as a template to newly created types.
  public static final String DEFAULT_MAPPING = "_default_";
  // Maximum number of nested object fields per index (dynamic, index-scoped).
  public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING =
      Setting.longSetting(
          "index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope);
  // Maximum total number of fields (object + leaf) per index (dynamic, index-scoped).
  public static final Setting<Long> INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING =
      Setting.longSetting(
          "index.mapping.total_fields.limit", 1000L, 0, Property.Dynamic, Property.IndexScope);
  // Maximum mapping depth (nesting of object fields) per index (dynamic, index-scoped).
  public static final Setting<Long> INDEX_MAPPING_DEPTH_LIMIT_SETTING =
      Setting.longSetting(
          "index.mapping.depth.limit", 20L, 1, Property.Dynamic, Property.IndexScope);
  public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true;
  // Whether types are created automatically when they do not exist yet (see documentMapperWithAutoCreate).
  public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING =
      Setting.boolSetting(
          "index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.IndexScope);
  // Names of the built-in metadata fields. Declared final: the set is built once here and never
  // reassigned, and the previous non-final declaration left a mutable static reference.
  private static final ObjectHashSet<String> META_FIELDS =
      ObjectHashSet.from(
          "_uid",
          "_id",
          "_type",
          "_all",
          "_parent",
          "_routing",
          "_index",
          "_size",
          "_timestamp",
          "_ttl");

  private final AnalysisService analysisService;

  /** Will create types automatically if they do not exists in the mapping definition yet */
  private final boolean dynamic;

  // Source applied as the default mapping for new types; replaced when _default_ is merged.
  private volatile String defaultMappingSource;

  // Copy-on-write: merge() builds a new map and republishes it, so reads need no locking.
  private volatile Map<String, DocumentMapper> mappers = emptyMap();

  // Lookup structures mirrored from the current mappers; also republished copy-on-write.
  private volatile FieldTypeLookup fieldTypes;
  private volatile Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>();
  private boolean hasNested = false; // updated dynamically to true when a nested object is added

  private final DocumentMapperParser documentParser;

  // Per-field analyzer wrappers resolved through the current field mappers.
  private final MapperAnalyzerWrapper indexAnalyzer;
  private final MapperAnalyzerWrapper searchAnalyzer;
  private final MapperAnalyzerWrapper searchQuoteAnalyzer;

  // Cache of anonymous field types built by unmappedFieldType(); copy-on-write on insert.
  private volatile Map<String, MappedFieldType> unmappedFieldTypes = emptyMap();

  private volatile Set<String> parentTypes = emptySet();

  final MapperRegistry mapperRegistry;

  /**
   * Creates the mapper service for an index.
   *
   * <p>Builds the document parser and analyzer wrappers, reads the {@code index.mapper.dynamic}
   * setting, and installs a default mapping source — a restricted one for the scripts index,
   * otherwise an empty {@code _default_} mapping.
   */
  public MapperService(
      IndexSettings indexSettings,
      AnalysisService analysisService,
      SimilarityService similarityService,
      MapperRegistry mapperRegistry,
      Supplier<QueryShardContext> queryShardContextSupplier) {
    super(indexSettings);
    this.analysisService = analysisService;
    this.fieldTypes = new FieldTypeLookup();
    this.documentParser =
        new DocumentMapperParser(
            indexSettings,
            this,
            analysisService,
            similarityService,
            mapperRegistry,
            queryShardContextSupplier);
    this.indexAnalyzer =
        new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer());
    this.searchAnalyzer =
        new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer());
    this.searchQuoteAnalyzer =
        new MapperAnalyzerWrapper(
            analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
    this.mapperRegistry = mapperRegistry;

    this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING);
    // The scripts index gets a restricted default mapping that disables indexing of the
    // script/template source fields.
    if (index().getName().equals(ScriptService.SCRIPT_INDEX)) {
      defaultMappingSource =
          "{"
              + "\"_default_\": {"
              + "\"properties\": {"
              + "\"script\": { \"enabled\": false },"
              + "\"template\": { \"enabled\": false }"
              + "}"
              + "}"
              + "}";
    } else {
      defaultMappingSource = "{\"_default_\":{}}";
    }

    if (logger.isTraceEnabled()) {
      logger.trace("using dynamic[{}], default mapping source[{}]", dynamic, defaultMappingSource);
    } else if (logger.isDebugEnabled()) {
      logger.debug("using dynamic[{}]", dynamic);
    }
  }

  /** Returns true if any mapped type in this index contains a nested object field. */
  public boolean hasNested() {
    return this.hasNested;
  }

  /**
   * Returns an immutable iterable over the current document mappers.
   *
   * @param includingDefaultMapping whether the result should contain the {@link #DEFAULT_MAPPING}
   *     document mapper; since the default mapping is not really an active type, callers would
   *     typically pass {@code false}
   */
  public Iterable<DocumentMapper> docMappers(final boolean includingDefaultMapping) {
    return () -> {
      final Collection<DocumentMapper> documentMappers;
      if (includingDefaultMapping) {
        documentMappers = mappers.values();
      } else {
        List<DocumentMapper> withoutDefault = new ArrayList<>();
        for (DocumentMapper documentMapper : mappers.values()) {
          if (DEFAULT_MAPPING.equals(documentMapper.type()) == false) {
            withoutDefault.add(documentMapper);
          }
        }
        documentMappers = withoutDefault;
      }
      return Collections.unmodifiableCollection(documentMappers).iterator();
    };
  }

  /** Returns the analysis service backing this index's analyzers. */
  public AnalysisService analysisService() {
    return this.analysisService;
  }

  /** Returns the parser used to turn mapping sources into {@link DocumentMapper}s. */
  public DocumentMapperParser documentMapperParser() {
    return this.documentParser;
  }

  /**
   * Parses and merges the given mapping source into this service's mappings.
   *
   * <p>The {@link #DEFAULT_MAPPING} type is special-cased: it is parsed for validation, registered
   * as a document mapper, and its source becomes the new default applied to future types — it is
   * never merged with an existing mapping.
   *
   * @param reason why the merge happens; recovery skips applying the default mapping (it was
   *     already applied) and, in the private merge below, skips the limit checks
   * @param updateAllTypes whether field-type conflicts across types may be updated in place
   */
  public DocumentMapper merge(
      String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) {
    if (DEFAULT_MAPPING.equals(type)) {
      // verify we can parse it
      // NOTE: never apply the default here
      DocumentMapper mapper = documentParser.parse(type, mappingSource);
      // still add it as a document mapper so we have it registered and, for example, persisted back
      // into
      // the cluster meta data if needed, or checked for existence
      synchronized (this) {
        mappers = newMapBuilder(mappers).put(type, mapper).map();
      }
      try {
        defaultMappingSource = mappingSource.string();
      } catch (IOException e) {
        throw new ElasticsearchGenerationException("failed to un-compress", e);
      }
      return mapper;
    } else {
      synchronized (this) {
        final boolean applyDefault =
            // the default was already applied if we are recovering
            reason != MergeReason.MAPPING_RECOVERY
                // only apply the default mapping if we don't have the type yet
                && mappers.containsKey(type) == false;
        DocumentMapper mergeWith = parse(type, mappingSource, applyDefault);
        return merge(mergeWith, reason, updateAllTypes);
      }
    }
  }

  /**
   * Merges a parsed {@link DocumentMapper} into the service's lookup structures.
   *
   * <p>Validates the type name, merges with any existing mapper for the type, sanity-checks the
   * result, rebuilds the copy-on-write lookup structures, and only then commits them to the
   * volatile fields — so concurrent readers always see a consistent snapshot.
   *
   * @throws InvalidTypeNameException if the type name is empty, too long, or contains illegal
   *     characters
   * @throws IllegalArgumentException if basic sanity or limit checks fail
   */
  private synchronized DocumentMapper merge(
      DocumentMapper mapper, MergeReason reason, boolean updateAllTypes) {
    // Validate the type name before touching any state.
    if (mapper.type().length() == 0) {
      throw new InvalidTypeNameException("mapping type name is empty");
    }
    if (mapper.type().length() > 255) {
      throw new InvalidTypeNameException(
          "mapping type name ["
              + mapper.type()
              + "] is too long; limit is length 255 but was ["
              + mapper.type().length()
              + "]");
    }
    if (mapper.type().charAt(0) == '_') {
      throw new InvalidTypeNameException(
          "mapping type name [" + mapper.type() + "] can't start with '_'");
    }
    if (mapper.type().contains("#")) {
      throw new InvalidTypeNameException(
          "mapping type name [" + mapper.type() + "] should not include '#' in it");
    }
    if (mapper.type().contains(",")) {
      throw new InvalidTypeNameException(
          "mapping type name [" + mapper.type() + "] should not include ',' in it");
    }
    if (mapper.type().equals(mapper.parentFieldMapper().type())) {
      throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
    }
    if (typeNameStartsWithIllegalDot(mapper)) {
      throw new IllegalArgumentException(
          "mapping type name [" + mapper.type() + "] must not start with a '.'");
    }

    // 1. compute the merged DocumentMapper
    DocumentMapper oldMapper = mappers.get(mapper.type());
    DocumentMapper newMapper;
    if (oldMapper != null) {
      newMapper = oldMapper.merge(mapper.mapping(), updateAllTypes);
    } else {
      newMapper = mapper;
    }

    // 2. check basic sanity of the new mapping
    List<ObjectMapper> objectMappers = new ArrayList<>();
    List<FieldMapper> fieldMappers = new ArrayList<>();
    Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers);
    MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers);
    checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers);
    checkObjectsCompatibility(newMapper.type(), objectMappers, fieldMappers, updateAllTypes);

    // 3. update lookup data-structures
    // this will in particular make sure that the merged fields are compatible with other types
    FieldTypeLookup fieldTypes =
        this.fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes);

    boolean hasNested = this.hasNested;
    Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
    for (ObjectMapper objectMapper : objectMappers) {
      fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
      if (objectMapper.nested().isNested()) {
        hasNested = true;
      }
    }
    fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);

    if (reason == MergeReason.MAPPING_UPDATE) {
      // this check will only be performed on the master node when there is
      // a call to the update mapping API. For all other cases like
      // the master node restoring mappings from disk or data nodes
      // deserializing cluster state that was sent by the master node,
      // this check will be skipped.
      checkNestedFieldsLimit(fullPathObjectMappers);
      checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size());
      checkDepthLimit(fullPathObjectMappers.keySet());
      checkPercolatorFieldLimit(fieldTypes);
    }

    // Track new parent types so _parent queries can resolve them.
    Set<String> parentTypes = this.parentTypes;
    if (oldMapper == null && newMapper.parentFieldMapper().active()) {
      parentTypes = new HashSet<>(parentTypes.size() + 1);
      parentTypes.addAll(this.parentTypes);
      parentTypes.add(mapper.parentFieldMapper().type());
      parentTypes = Collections.unmodifiableSet(parentTypes);
    }

    Map<String, DocumentMapper> mappers = new HashMap<>(this.mappers);
    mappers.put(newMapper.type(), newMapper);
    for (Map.Entry<String, DocumentMapper> entry : mappers.entrySet()) {
      if (entry.getKey().equals(DEFAULT_MAPPING)) {
        continue;
      }
      DocumentMapper m = entry.getValue();
      // apply changes to the field types back
      m = m.updateFieldType(fieldTypes.fullNameToFieldType);
      entry.setValue(m);
    }
    mappers = Collections.unmodifiableMap(mappers);

    // 4. commit the change
    // Publish all rebuilt structures; done last so nothing above can leave partial state visible.
    this.mappers = mappers;
    this.fieldTypes = fieldTypes;
    this.hasNested = hasNested;
    this.fullPathObjectMappers = fullPathObjectMappers;
    this.parentTypes = parentTypes;

    assert assertSerialization(newMapper);
    assert assertMappersShareSameFieldType();

    return newMapper;
  }

  // Assertion helper: verifies that every field mapper of every (non-default) document mapper
  // holds the exact MappedFieldType instance stored in the fieldTypes lookup.
  private boolean assertMappersShareSameFieldType() {
    for (DocumentMapper documentMapper : docMappers(false)) {
      List<FieldMapper> allFieldMappers = new ArrayList<>();
      Collections.addAll(allFieldMappers, documentMapper.mapping().metadataMappers);
      MapperUtils.collect(documentMapper.root(), new ArrayList<ObjectMapper>(), allFieldMappers);
      for (FieldMapper fieldMapper : allFieldMappers) {
        assert fieldMapper.fieldType() == fieldTypes.get(fieldMapper.name()) : fieldMapper.name();
      }
    }
    return true;
  }

  /**
   * Whether the mapper's type name starts with a disallowed leading dot. On legacy indices
   * (created before 5.0.0-alpha1) the percolator legacy type name is exempt.
   */
  private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) {
    String typeName = mapper.type();
    if (typeName.startsWith(".") == false) {
      return false;
    }
    boolean legacyIndex =
        getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_alpha1);
    if (legacyIndex) {
      return PercolatorFieldMapper.LEGACY_TYPE_NAME.equals(typeName) == false;
    }
    return true;
  }

  /**
   * Assertion helper: re-parses the mapper's own serialized source and checks the round trip
   * reproduces the same source, i.e. serialization is stable.
   */
  private boolean assertSerialization(DocumentMapper mapper) {
    // capture the source now, it may change due to concurrent parsing
    final CompressedXContent mappingSource = mapper.mappingSource();
    DocumentMapper newMapper = parse(mapper.type(), mappingSource, false);

    if (newMapper.mappingSource().equals(mappingSource) == false) {
      throw new IllegalStateException(
          "DocumentMapper serialization result is different from source. \n--> Source ["
              + mappingSource
              + "]\n--> Result ["
              + newMapper.mappingSource()
              + "]");
    }
    return true;
  }

  /**
   * Verifies that no name is used twice within the new mapping (object vs object, field vs field,
   * object vs field), and that no name collides with an object/field already defined by another
   * type of this index.
   *
   * @throws IllegalArgumentException on any duplicate or cross-type collision
   */
  private void checkFieldUniqueness(
      String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
    assert Thread.holdsLock(this);

    // first check within mapping
    final Set<String> objectFullNames = new HashSet<>();
    for (ObjectMapper objectMapper : objectMappers) {
      final String fullPath = objectMapper.fullPath();
      if (objectFullNames.add(fullPath) == false) {
        throw new IllegalArgumentException(
            "Object mapper [" + fullPath + "] is defined twice in mapping for type [" + type + "]");
      }
    }

    final Set<String> fieldNames = new HashSet<>();
    for (FieldMapper fieldMapper : fieldMappers) {
      final String name = fieldMapper.name();
      if (objectFullNames.contains(name)) {
        throw new IllegalArgumentException(
            "Field [" + name + "] is defined both as an object and a field in [" + type + "]");
      } else if (fieldNames.add(name) == false) {
        throw new IllegalArgumentException(
            "Field [" + name + "] is defined twice in [" + type + "]");
      }
    }

    // then check other types
    for (String fieldName : fieldNames) {
      if (fullPathObjectMappers.containsKey(fieldName)) {
        throw new IllegalArgumentException(
            "["
                + fieldName
                + "] is defined as a field in mapping ["
                + type
                + "] but this name is already used for an object in other types");
      }
    }

    for (String objectPath : objectFullNames) {
      if (fieldTypes.get(objectPath) != null) {
        throw new IllegalArgumentException(
            "["
                + objectPath
                + "] is defined as an object in mapping ["
                + type
                + "] but this name is already used for a field in other types");
      }
    }
  }

  /**
   * Verifies that each new object mapper can be merged with any existing object mapper at the same
   * full path, by simulating the merge and relying on it to throw on incompatibility.
   *
   * <p>NOTE(review): the {@code fieldMappers} parameter is not used in this method's body;
   * presumably kept for signature symmetry with {@code checkFieldUniqueness} — confirm before
   * removing.
   */
  private void checkObjectsCompatibility(
      String type,
      Collection<ObjectMapper> objectMappers,
      Collection<FieldMapper> fieldMappers,
      boolean updateAllTypes) {
    assert Thread.holdsLock(this);

    for (ObjectMapper newObjectMapper : objectMappers) {
      ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
      if (existingObjectMapper != null) {
        // simulate a merge and ignore the result, we are just interested
        // in exceptions here
        existingObjectMapper.merge(newObjectMapper, updateAllTypes);
      }
    }
  }

  /** Enforces {@code index.mapping.nested_fields.limit} against the given object mappers. */
  private void checkNestedFieldsLimit(Map<String, ObjectMapper> fullPathObjectMappers) {
    final long allowedNestedFields =
        indexSettings.getValue(INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING);
    final long actualNestedFields =
        fullPathObjectMappers
            .values()
            .stream()
            .filter(objectMapper -> objectMapper.nested().isNested())
            .count();
    if (actualNestedFields > allowedNestedFields) {
      throw new IllegalArgumentException(
          "Limit of nested fields ["
              + allowedNestedFields
              + "] in index ["
              + index().getName()
              + "] has been exceeded");
    }
  }

  /** Enforces {@code index.mapping.total_fields.limit} against the given mapper count. */
  private void checkTotalFieldsLimit(long totalMappers) {
    final long allowedTotalFields =
        indexSettings.getValue(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING);
    if (totalMappers > allowedTotalFields) {
      throw new IllegalArgumentException(
          "Limit of total fields ["
              + allowedTotalFields
              + "] in index ["
              + index().getName()
              + "] has been exceeded");
    }
  }

  /** Enforces {@code index.mapping.depth.limit} against every given object path. */
  private void checkDepthLimit(Collection<String> objectPaths) {
    final long maxDepth = indexSettings.getValue(INDEX_MAPPING_DEPTH_LIMIT_SETTING);
    objectPaths.forEach(objectPath -> checkDepthLimit(objectPath, maxDepth));
  }

  /**
   * Checks one object path against the depth limit. The depth of an object path is its number of
   * dot-separated segments (dots + 1), plus one more to account for leaf fields nested under it.
   */
  private void checkDepthLimit(String objectPath, long maxDepth) {
    final long numDots = objectPath.chars().filter(c -> c == '.').count();
    final long depth = numDots + 2;
    if (depth > maxDepth) {
      throw new IllegalArgumentException(
          "Limit of mapping depth ["
              + maxDepth
              + "] in index ["
              + index().getName()
              + "] has been exceeded due to object field ["
              + objectPath
              + "]");
    }
  }

  /**
   * We only allow up to 1 percolator field per index.
   *
   * <p>Reasoning here is that the PercolatorQueryCache only supports a single document having a
   * percolator query. Also specifying multiple queries per document feels like an anti pattern.
   */
  private void checkPercolatorFieldLimit(Iterable<MappedFieldType> fieldTypes) {
    List<String> percolatorFieldNames = new ArrayList<>();
    for (MappedFieldType fieldType : fieldTypes) {
      boolean isPercolator = fieldType instanceof PercolatorFieldMapper.PercolatorFieldType;
      if (isPercolator) {
        percolatorFieldNames.add(fieldType.name());
      }
    }
    if (percolatorFieldNames.size() > 1) {
      throw new IllegalArgumentException(
          "Up to one percolator field type is allowed per index, "
              + "found the following percolator fields ["
              + percolatorFieldNames
              + "]");
    }
  }

  /**
   * Parses a mapping source into a {@link DocumentMapper}, optionally applying the current
   * default mapping source as a template.
   */
  public DocumentMapper parse(
      String mappingType, CompressedXContent mappingSource, boolean applyDefault)
      throws MapperParsingException {
    return documentParser.parse(
        mappingType, mappingSource, applyDefault ? defaultMappingSource : null);
  }

  /** Returns true if a mapping is registered for the given type (including the default mapping). */
  public boolean hasMapping(String mappingType) {
    return mappers.containsKey(mappingType);
  }

  /**
   * Return the set of concrete types that have a mapping. NOTE: this does not return the default
   * mapping.
   */
  public Collection<String> types() {
    final Set<String> concreteTypes = new HashSet<>(mappers.keySet());
    concreteTypes.remove(DEFAULT_MAPPING);
    return Collections.unmodifiableSet(concreteTypes);
  }

  /**
   * Return the {@link DocumentMapper} for the given type, or {@code null} if the type has no
   * mapping. By using the special {@value #DEFAULT_MAPPING} type, you can get a {@link
   * DocumentMapper} for the default mapping.
   */
  public DocumentMapper documentMapper(String type) {
    return mappers.get(type);
  }

  /**
   * Returns the document mapper for the given type, including a mapping update if the type had to
   * be dynamically created.
   *
   * @throws TypeMissingException if the type does not exist and dynamic mapping is disabled
   */
  public DocumentMapperForType documentMapperWithAutoCreate(String type) {
    DocumentMapper existing = mappers.get(type);
    if (existing != null) {
      return new DocumentMapperForType(existing, null);
    }
    if (dynamic == false) {
      throw new TypeMissingException(
          index(), type, "trying to auto create mapping, but dynamic mapping is disabled");
    }
    DocumentMapper created = parse(type, null, true);
    return new DocumentMapperForType(created, created.mapping());
  }

  /**
   * Returns the {@link MappedFieldType} for the give fullName.
   *
   * <p>If multiple types have fields with the same full name, the first is returned.
   */
  public MappedFieldType fullName(String fullName) {
    return fieldTypes.get(fullName);
  }

  /**
   * Returns all the fields that match the given pattern. If the pattern is prefixed with a type
   * then the fields will be returned with a type prefix.
   */
  public Collection<String> simpleMatchToIndexNames(String pattern) {
    if (Regex.isSimpleMatchPattern(pattern)) {
      return fieldTypes.simpleMatchToFullName(pattern);
    }
    // no wildcard characters: the pattern is a literal field name
    return Collections.singletonList(pattern);
  }

  /** Returns the {@link ObjectMapper} registered under the given full path, or {@code null}. */
  public ObjectMapper getObjectMapper(String name) {
    return fullPathObjectMappers.get(name);
  }

  /**
   * Given a type (eg. long, string, ...), return an anonymous field mapper that can be used for
   * search operations.
   *
   * @throws IllegalArgumentException if no mapper is registered for {@code type}
   */
  public MappedFieldType unmappedFieldType(String type) {
    if (type.equals("string")) {
      deprecationLogger.deprecated(
          "[unmapped_type:string] should be replaced with [unmapped_type:keyword]");
      type = "keyword";
    }
    final MappedFieldType cached = unmappedFieldTypes.get(type);
    if (cached != null) {
      return cached;
    }
    final Mapper.TypeParser.ParserContext parserContext =
        documentMapperParser().parserContext(type);
    final Mapper.TypeParser typeParser = parserContext.typeParser(type);
    if (typeParser == null) {
      throw new IllegalArgumentException("No mapper found for type [" + type + "]");
    }
    final Mapper.Builder<?, ?> anonymousBuilder =
        typeParser.parse("__anonymous_" + type, emptyMap(), parserContext);
    final BuilderContext builderContext =
        new BuilderContext(indexSettings.getSettings(), new ContentPath(1));
    final MappedFieldType fieldType =
        ((FieldMapper) anonymousBuilder.build(builderContext)).fieldType();

    // Copy-on-write cache update. There is no need to synchronize writes here: in the case of
    // concurrent access we could just compute some mappers several times, which is not a big deal.
    final Map<String, MappedFieldType> updatedCache = new HashMap<>(unmappedFieldTypes);
    updatedCache.put(type, fieldType);
    unmappedFieldTypes = unmodifiableMap(updatedCache);
    return fieldType;
  }

  /** Returns the analyzer used at index time. */
  public Analyzer indexAnalyzer() {
    return indexAnalyzer;
  }

  /** Returns the analyzer used at search time. */
  public Analyzer searchAnalyzer() {
    return searchAnalyzer;
  }

  /** Returns the analyzer used for quoted (phrase) search text. */
  public Analyzer searchQuoteAnalyzer() {
    return searchQuoteAnalyzer;
  }

  /** Returns the set of types that act as a parent in a parent/child relation. */
  public Set<String> getParentTypes() {
    return parentTypes;
  }

  /** Returns whether {@code fieldName} names one of the built-in metadata fields. */
  public static boolean isMetadataField(String fieldName) {
    return META_FIELDS.contains(fieldName);
  }

  /**
   * Returns the names of all metadata fields as an array.
   *
   * <p>NOTE(review): {@code toArray(String.class)} is not the {@code java.util} collection
   * signature (which takes an array, not a {@code Class}) — META_FIELDS is presumably an
   * HPPC-style set declared elsewhere in this file; confirm before changing.
   */
  public static String[] getAllMetaFields() {
    return META_FIELDS.toArray(String.class);
  }

  /** An analyzer wrapper that can lookup fields within the index mappings */
  final class MapperAnalyzerWrapper extends DelegatingAnalyzerWrapper {

    // fallback when the field is unmapped or provides no analyzer
    private final Analyzer defaultAnalyzer;
    // extracts the relevant analyzer (index/search/search-quote) from a field type
    private final Function<MappedFieldType, Analyzer> extractAnalyzer;

    MapperAnalyzerWrapper(
        Analyzer defaultAnalyzer, Function<MappedFieldType, Analyzer> extractAnalyzer) {
      super(Analyzer.PER_FIELD_REUSE_STRATEGY);
      this.defaultAnalyzer = defaultAnalyzer;
      this.extractAnalyzer = extractAnalyzer;
    }

    @Override
    protected Analyzer getWrappedAnalyzer(String fieldName) {
      final MappedFieldType fieldType = fullName(fieldName);
      if (fieldType == null) {
        return defaultAnalyzer;
      }
      final Analyzer fieldAnalyzer = extractAnalyzer.apply(fieldType);
      return fieldAnalyzer != null ? fieldAnalyzer : defaultAnalyzer;
    }
  }
}