@Inject
  public DiskThresholdDecider(
      Settings settings,
      NodeSettingsService nodeSettingsService,
      ClusterInfoService infoService,
      Client client) {
    super(settings);
    String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "85%");
    String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "90%");

    if (!validWatermarkSetting(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) {
      throw new ElasticsearchParseException("unable to parse low watermark [{}]", lowWatermark);
    }
    if (!validWatermarkSetting(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) {
      throw new ElasticsearchParseException("unable to parse high watermark [{}]", highWatermark);
    }
    // Watermark is expressed in terms of used data, but we need "free" data watermark
    this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
    this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);

    this.freeBytesThresholdLow =
        thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
    this.freeBytesThresholdHigh =
        thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
    // when true, disk space of shards currently relocating to/from a node is counted as well
    this.includeRelocations =
        settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true);
    this.rerouteInterval =
        settings.getAsTime(
            CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, TimeValue.timeValueSeconds(60));

    this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true);
    nodeSettingsService.addListener(new ApplySettings());
    infoService.addListener(new DiskListener(client));
  }
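
A minimal usage sketch, not part of the original source: it builds a Settings object (via org.elasticsearch.common.settings.ImmutableSettings) the way a caller of this constructor might. The string keys are assumed to be what the CLUSTER_ROUTING_ALLOCATION_* constants resolve to; the constants themselves are not shown in this excerpt.

// Hypothetical configuration for the constructor above; setting keys are assumed
Settings diskSettings = ImmutableSettings.settingsBuilder()
    .put("cluster.routing.allocation.disk.threshold_enabled", true)
    .put("cluster.routing.allocation.disk.watermark.low", "80%")   // used-disk percentage
    .put("cluster.routing.allocation.disk.watermark.high", "90%")  // a byte value such as "10gb" (free space) also parses
    .build();
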
  @Inject
  public DiskThresholdDecider(Settings settings, NodeSettingsService nodeSettingsService) {
    super(settings);
    String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "0.7");
    String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "0.85");

    if (!validWatermarkSetting(lowWatermark)) {
      throw new ElasticsearchParseException(
          "Unable to parse low watermark: [" + lowWatermark + "]");
    }
    if (!validWatermarkSetting(highWatermark)) {
      throw new ElasticsearchParseException(
          "Unable to parse high watermark: [" + highWatermark + "]");
    }
    // Watermark is expressed in terms of used data, but we need "free" data watermark
    this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
    this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);

    this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark);
    this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark);

    this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, false);
    nodeSettingsService.addListener(new ApplySettings());
  }
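
The same sketch for this older variant, again with assumed keys: the defaults here are ratio-style ("0.7" / "0.85"), and threshold checking defaults to off, so it has to be enabled explicitly.

// Hypothetical configuration for the older constructor; setting keys are assumed
Settings legacyDiskSettings = ImmutableSettings.settingsBuilder()
    .put("cluster.routing.allocation.disk.threshold_enabled", true) // defaults to false in this variant
    .put("cluster.routing.allocation.disk.watermark.low", "0.80")   // ratio of used disk
    .put("cluster.routing.allocation.disk.watermark.high", "0.90")
    .build();
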
Example #3
  @Inject
  public ZenDiscovery(
      Settings settings,
      ClusterName clusterName,
      ThreadPool threadPool,
      TransportService transportService,
      ClusterService clusterService,
      NodeSettingsService nodeSettingsService,
      DiscoveryNodeService discoveryNodeService,
      ZenPingService pingService,
      Version version) {
    super(settings);
    this.clusterName = clusterName;
    this.threadPool = threadPool;
    this.clusterService = clusterService;
    this.transportService = transportService;
    this.discoveryNodeService = discoveryNodeService;
    this.pingService = pingService;
    this.version = version;

    // also support direct discovery.zen settings, for cases when it gets extended
    this.pingTimeout =
        settings.getAsTime(
            "discovery.zen.ping.timeout",
            settings.getAsTime(
                "discovery.zen.ping_timeout",
                componentSettings.getAsTime(
                    "ping_timeout",
                    componentSettings.getAsTime("initial_ping_timeout", timeValueSeconds(3)))));
    this.sendLeaveRequest = componentSettings.getAsBoolean("send_leave_request", true);

    this.masterElectionFilterClientNodes =
        settings.getAsBoolean("discovery.zen.master_election.filter_client", true);
    this.masterElectionFilterDataNodes =
        settings.getAsBoolean("discovery.zen.master_election.filter_data", false);

    logger.debug(
        "using ping.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]",
        pingTimeout,
        masterElectionFilterClientNodes,
        masterElectionFilterDataNodes);

    this.electMaster = new ElectMasterService(settings);
    nodeSettingsService.addListener(new ApplySettings());

    // masterFD detects failure of the elected master; nodesFD lets an elected master detect failed nodes
    this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, this);
    this.masterFD.addListener(new MasterNodeFailureListener());

    this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService);
    this.nodesFD.addListener(new NodeFailureListener());

    this.publishClusterState =
        new PublishClusterStateAction(
            settings, transportService, this, new NewClusterStateListener());
    this.pingService.setNodesProvider(this);
    this.membership =
        new MembershipAction(settings, transportService, this, new MembershipListener());

    transportService.registerHandler(
        RejoinClusterRequestHandler.ACTION, new RejoinClusterRequestHandler());
  }
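
A sketch of the node settings this constructor reads, using the keys visible in the code above; the minimum_master_nodes key is consumed by the ElectMasterService created here and is included only as an assumption.

// Hypothetical discovery settings exercising the constructor above
Settings zenSettings = ImmutableSettings.settingsBuilder()
    .put("discovery.zen.ping.timeout", "5s")                   // overrides the 3s default read above
    .put("discovery.zen.master_election.filter_client", true)  // ignore client nodes during elections
    .put("discovery.zen.master_election.filter_data", false)
    .put("discovery.zen.minimum_master_nodes", 2)               // read by ElectMasterService (assumed key)
    .build();
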
Example #4
  @Inject
  public ThreadPool(Settings settings, @Nullable NodeSettingsService nodeSettingsService) {
    super(settings);

    Map<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP);

    int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
    // scaling pools use at most half the processors, capped at 5 or 10 threads
    int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5);
    int halfProcMaxAt10 = Math.min(((availableProcessors + 1) / 2), 10);
    // built-in type/size defaults for every named pool; user-supplied group settings take precedence
    defaultExecutorTypeSettings =
        ImmutableMap.<String, Settings>builder()
            .put(
                Names.GENERIC,
                settingsBuilder().put("type", "cached").put("keep_alive", "30s").build())
            .put(
                Names.INDEX,
                settingsBuilder().put("type", "fixed").put("size", availableProcessors).build())
            .put(
                Names.BULK,
                settingsBuilder().put("type", "fixed").put("size", availableProcessors).build())
            .put(
                Names.GET,
                settingsBuilder().put("type", "fixed").put("size", availableProcessors).build())
            .put(
                Names.SEARCH,
                settingsBuilder()
                    .put("type", "fixed")
                    .put("size", availableProcessors * 3)
                    .put("queue_size", 1000)
                    .build())
            .put(
                Names.SUGGEST,
                settingsBuilder()
                    .put("type", "fixed")
                    .put("size", availableProcessors)
                    .put("queue_size", 1000)
                    .build())
            .put(
                Names.PERCOLATE,
                settingsBuilder()
                    .put("type", "fixed")
                    .put("size", availableProcessors)
                    .put("queue_size", 1000)
                    .build())
            .put(
                Names.MANAGEMENT,
                settingsBuilder()
                    .put("type", "scaling")
                    .put("keep_alive", "5m")
                    .put("size", 5)
                    .build())
            .put(
                Names.FLUSH,
                settingsBuilder()
                    .put("type", "scaling")
                    .put("keep_alive", "5m")
                    .put("size", halfProcMaxAt5)
                    .build())
            .put(
                Names.MERGE,
                settingsBuilder()
                    .put("type", "scaling")
                    .put("keep_alive", "5m")
                    .put("size", halfProcMaxAt5)
                    .build())
            .put(
                Names.REFRESH,
                settingsBuilder()
                    .put("type", "scaling")
                    .put("keep_alive", "5m")
                    .put("size", halfProcMaxAt10)
                    .build())
            .put(
                Names.WARMER,
                settingsBuilder()
                    .put("type", "scaling")
                    .put("keep_alive", "5m")
                    .put("size", halfProcMaxAt5)
                    .build())
            .put(
                Names.SNAPSHOT,
                settingsBuilder()
                    .put("type", "scaling")
                    .put("keep_alive", "5m")
                    .put("size", halfProcMaxAt5)
                    .build())
            .put(Names.OPTIMIZE, settingsBuilder().put("type", "fixed").put("size", 1).build())
            .build();

    Map<String, ExecutorHolder> executors = Maps.newHashMap();
    for (Map.Entry<String, Settings> executor : defaultExecutorTypeSettings.entrySet()) {
      executors.put(
          executor.getKey(),
          build(executor.getKey(), groupSettings.get(executor.getKey()), executor.getValue()));
    }
    // the "same" executor runs tasks directly on the calling thread and takes no configuration
    executors.put(
        Names.SAME,
        new ExecutorHolder(MoreExecutors.sameThreadExecutor(), new Info(Names.SAME, "same")));
    this.executors = ImmutableMap.copyOf(executors);
    this.scheduler =
        new ScheduledThreadPoolExecutor(
            1, EsExecutors.daemonThreadFactory(settings, "scheduler"), new EsAbortPolicy());
    this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
    if (nodeSettingsService != null) {
      nodeSettingsService.addListener(new ApplySettings());
    }

    TimeValue estimatedTimeInterval =
        componentSettings.getAsTime("estimated_time_interval", TimeValue.timeValueMillis(200));
    this.estimatedTimeThread =
        new EstimatedTimeThread(
            EsExecutors.threadName(settings, "[timer]"), estimatedTimeInterval.millis());
    this.estimatedTimeThread.start();
  }
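
A sketch of per-pool overrides, assuming THREADPOOL_GROUP is the "threadpool." prefix: any group found under that prefix is passed alongside the matching entry of defaultExecutorTypeSettings to build(...) in the loop above.

// Hypothetical overrides picked up through settings.getGroups(THREADPOOL_GROUP); prefix assumed
Settings poolSettings = ImmutableSettings.settingsBuilder()
    .put("threadpool.search.size", 24)        // replaces the availableProcessors * 3 default
    .put("threadpool.search.queue_size", 500)
    .put("threadpool.bulk.queue_size", 100)
    .build();
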