/**
  * Attempts to parse the watermark into a {@link ByteSizeValue}, returning a ByteSizeValue of 0
  * bytes if the value cannot be parsed.
  */
 public ByteSizeValue thresholdBytesFromWatermark(String watermark) {
   try {
     return ByteSizeValue.parseBytesSizeValue(watermark);
   } catch (ElasticsearchParseException ex) {
     return ByteSizeValue.parseBytesSizeValue("0b");
   }
 }
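 // A minimal usage sketch of the helper above (hypothetical call site; assumes
 // the single-argument parseBytesSizeValue API used in this snippet): percentage
 // watermarks such as "85%" are not byte sizes, so they fall back to zero bytes.
 public void watermarkFallbackExample() {
   ByteSizeValue explicit = thresholdBytesFromWatermark("500mb"); // parses to 500mb
   ByteSizeValue fallback = thresholdBytesFromWatermark("85%");   // unparseable -> 0b
   assert explicit.bytes() == 500L * 1024 * 1024;
   assert fallback.bytes() == 0;
 }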
 public XContentBuilder byteSizeField(
     String rawFieldName, String readableFieldName, ByteSizeValue byteSizeValue)
     throws IOException {
   if (humanReadable) {
     field(readableFieldName, byteSizeValue.toString());
   }
   field(rawFieldName, byteSizeValue.bytes());
   return this;
 }
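 // A hedged usage sketch of byteSizeField (field names are illustrative; assumes
 // an XContentBuilder with human-readable output toggled via humanReadable(true)):
 XContentBuilder builder = XContentFactory.jsonBuilder().humanReadable(true);
 builder.startObject();
 builder.byteSizeField("size_in_bytes", "size", new ByteSizeValue(2, ByteSizeUnit.MB));
 builder.endObject();
 // humanReadable == true  -> {"size":"2mb","size_in_bytes":2097152}
 // humanReadable == false -> {"size_in_bytes":2097152}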
 /**
  * Attempts to parse the watermark into a {@link ByteSizeValue}, returning a ByteSizeValue of 0
  * bytes if the value cannot be parsed.
  */
 public ByteSizeValue thresholdBytesFromWatermark(String watermark, String settingName) {
   try {
     return ByteSizeValue.parseBytesSizeValue(watermark, settingName);
   } catch (ElasticsearchParseException ex) {
     // NOTE: this is not end-user leniency, since up above we check that it's a valid byte or
     // percentage, and then store the two cases separately
     return ByteSizeValue.parseBytesSizeValue("0b", settingName);
   }
 }
 private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) {
   this.maxBytesPerSec = maxBytesPerSec;
   if (maxBytesPerSec.getBytes() <= 0) {
     rateLimiter = null;
   } else if (rateLimiter != null) {
     rateLimiter.setMBPerSec(maxBytesPerSec.getMbFrac());
   } else {
     rateLimiter = new SimpleRateLimiter(maxBytesPerSec.getMbFrac());
   }
 }
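 // The update logic above is a small state machine: a non-positive rate disables
 // throttling, an existing limiter is mutated in place, and otherwise a new one
 // is created. A hedged standalone sketch of the same pattern around Lucene's
 // RateLimiter.SimpleRateLimiter:
 static final class RateLimitHolder {
   private volatile SimpleRateLimiter rateLimiter; // null means unthrottled

   void update(double mbPerSec) {
     if (mbPerSec <= 0) {
       rateLimiter = null; // throttling off
     } else if (rateLimiter != null) {
       rateLimiter.setMBPerSec(mbPerSec); // adjust the live limiter in place
     } else {
       rateLimiter = new SimpleRateLimiter(mbPerSec);
     }
   }
 }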
 public void setMaxRate(ByteSizeValue rate) {
   if (rate.bytes() <= 0) {
     actualRateLimiter = null;
   } else if (actualRateLimiter == null) {
     actualRateLimiter = rateLimiter;
     actualRateLimiter.setMBPerSec(rate.mbFrac());
   } else {
     assert rateLimiter == actualRateLimiter;
     rateLimiter.setMBPerSec(rate.mbFrac());
   }
 }
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    ByteSizeValue sizeValue = (ByteSizeValue) o;

    return getBytes() == sizeValue.getBytes();
  }
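  // equals() above compares only the byte count, so hashCode() must agree with
  // it; a minimal sketch of the matching override (assumes the same getBytes()
  // accessor):
  @Override
  public int hashCode() {
    long bytes = getBytes();
    return (int) (bytes ^ (bytes >>> 32)); // same recipe as Long.hashCode(bytes)
  }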
 private static String renderValue(RestRequest request, Object value) {
   if (value == null) {
     return null;
   }
   if (value instanceof ByteSizeValue) {
     ByteSizeValue v = (ByteSizeValue) value;
     String resolution = request.param("bytes");
     if ("b".equals(resolution)) {
       return Long.toString(v.bytes());
     } else if ("k".equals(resolution)) {
       return Long.toString(v.kb());
     } else if ("m".equals(resolution)) {
       return Long.toString(v.mb());
     } else if ("g".equals(resolution)) {
       return Long.toString(v.gb());
     } else if ("t".equals(resolution)) {
       return Long.toString(v.tb());
     } else if ("p".equals(resolution)) {
       return Long.toString(v.pb());
     } else {
       return v.toString();
     }
   }
   if (value instanceof SizeValue) {
     SizeValue v = (SizeValue) value;
     String resolution = request.param("size");
     if ("b".equals(resolution)) {
       return Long.toString(v.singles());
     } else if ("k".equals(resolution)) {
       return Long.toString(v.kilo());
     } else if ("m".equals(resolution)) {
       return Long.toString(v.mega());
     } else if ("g".equals(resolution)) {
       return Long.toString(v.giga());
     } else if ("t".equals(resolution)) {
       return Long.toString(v.tera());
     } else if ("p".equals(resolution)) {
       return Long.toString(v.peta());
     } else {
       return v.toString();
     }
   }
   if (value instanceof TimeValue) {
     TimeValue v = (TimeValue) value;
     String resolution = request.param("time");
     if ("ms".equals(resolution)) {
       return Long.toString(v.millis());
     } else if ("s".equals(resolution)) {
       return Long.toString(v.seconds());
     } else if ("m".equals(resolution)) {
       return Long.toString(v.minutes());
     } else if ("h".equals(resolution)) {
       return Long.toString(v.hours());
     } else {
       return v.toString();
     }
   }
   // Add additional built in data points we can render based on request parameters?
   return value.toString();
 }
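 // A hedged illustration of what the dispatch above yields for one ByteSizeValue
 // (the accessors use integer division; other branches behave analogously):
 public void renderValueResolutionExample() {
   ByteSizeValue v = new ByteSizeValue(3, ByteSizeUnit.GB);
   assert Long.toString(v.bytes()).equals("3221225472"); // ?bytes=b
   assert Long.toString(v.mb()).equals("3072");          // ?bytes=m
   assert Long.toString(v.gb()).equals("3");             // ?bytes=g
   assert v.toString().equals("3gb");                    // no/unknown resolution
 }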
 private IndexWriter createWriter() throws IOException {
   IndexWriter indexWriter = null;
   try {
     // release locks when started
     if (IndexWriter.isLocked(store.directory())) {
       logger.warn("shard is locked, releasing lock");
       IndexWriter.unlock(store.directory());
     }
     boolean create = !IndexReader.indexExists(store.directory());
     indexWriter =
         new IndexWriter(
             store.directory(),
             analysisService.defaultIndexAnalyzer(),
             create,
             deletionPolicy,
             IndexWriter.MaxFieldLength.UNLIMITED);
     indexWriter.setMergeScheduler(mergeScheduler.newMergeScheduler());
     indexWriter.setMergePolicy(mergePolicyProvider.newMergePolicy(indexWriter));
     indexWriter.setSimilarity(similarityService.defaultIndexSimilarity());
     indexWriter.setRAMBufferSizeMB(indexingBufferSize.mbFrac());
     indexWriter.setTermIndexInterval(termIndexInterval);
   } catch (IOException e) {
     safeClose(indexWriter);
     throw e;
   }
   return indexWriter;
 }
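 // safeClose is referenced above but not shown; a plausible minimal sketch
 // (hypothetical helper, named as in the snippet) that swallows close failures
 // so the original IOException from createWriter() still propagates:
 private static void safeClose(IndexWriter indexWriter) {
   if (indexWriter == null) {
     return; // construction failed before the writer existed
   }
   try {
     indexWriter.close();
   } catch (IOException e) {
     // best-effort cleanup; deliberately ignored
   }
 }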
 @Override
 public HttpInfo info() {
   BoundTransportAddress boundTransportAddress = boundAddress();
   if (boundTransportAddress == null) {
     return null;
   }
   return new HttpInfo(boundTransportAddress, maxContentLength.bytes());
 }
 @Override
 public TieredMergePolicy newMergePolicy() {
   CustomTieredMergePolicyProvider mergePolicy;
   if (asyncMerge) {
     mergePolicy = new EnableMergeTieredMergePolicyProvider(this);
   } else {
     mergePolicy = new CustomTieredMergePolicyProvider(this);
   }
   mergePolicy.setUseCompoundFile(compoundFormat);
   mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed);
   mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
   mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
   mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
   mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
   mergePolicy.setSegmentsPerTier(segmentsPerTier);
   mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
   return mergePolicy;
 }
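 // For orientation, a hedged sketch of the index settings these setters commonly
 // correspond to (keys follow the tiered merge policy provider; values are
 // examples only):
 //   index.merge.policy.floor_segment:       2mb  -> setFloorSegmentMB
 //   index.merge.policy.max_merge_at_once:   10   -> setMaxMergeAtOnce
 //   index.merge.policy.max_merged_segment:  5gb  -> setMaxMergedSegmentMB
 //   index.merge.policy.segments_per_tier:   10   -> setSegmentsPerTier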
  @Override
  public LogByteSizeMergePolicy newMergePolicy() {
    CustomLogByteSizeMergePolicy mergePolicy;
    if (asyncMerge) {
      mergePolicy = new EnableMergeLogByteSizeMergePolicy(this);
    } else {
      mergePolicy = new CustomLogByteSizeMergePolicy(this);
    }
    mergePolicy.setMinMergeMB(minMergeSize.mbFrac());
    mergePolicy.setMaxMergeMB(maxMergeSize.mbFrac());
    mergePolicy.setMergeFactor(mergeFactor);
    mergePolicy.setMaxMergeDocs(maxMergeDocs);
    mergePolicy.setCalibrateSizeByDeletes(calibrateSizeByDeletes);
    mergePolicy.setUseCompoundFile(compoundFormat);

    policies.add(mergePolicy);
    return mergePolicy;
  }
  @Override
  public Decision canRemain(
      ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
    if (shardRouting.currentNodeId().equals(node.nodeId()) == false) {
      throw new IllegalArgumentException(
          "Shard [" + shardRouting + "] is not allocated on node: [" + node.nodeId() + "]");
    }
    final Decision decision = earlyTerminate(allocation);
    if (decision != null) {
      return decision;
    }
    ClusterInfo clusterInfo = allocation.clusterInfo();
    Map<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();
    DiskUsage usage = getDiskUsage(node, allocation, usages);
    // If this node is already above the high threshold, the shard cannot remain (get it off!)
    double freeDiskPercentage = usage.getFreeDiskAsPercentage();
    long freeBytes = usage.getFreeBytes();
    if (logger.isDebugEnabled()) {
      logger.debug(
          "node [{}] has {}% free disk ({} bytes)", node.nodeId(), freeDiskPercentage, freeBytes);
    }
    if (freeBytes < freeBytesThresholdHigh.bytes()) {
      if (logger.isDebugEnabled()) {
        logger.debug(
            "less than the required {} free bytes threshold ({} bytes free) on node {}, shard cannot remain",
            freeBytesThresholdHigh,
            freeBytes,
            node.nodeId());
      }
      return allocation.decision(
          Decision.NO,
          NAME,
          "after allocation less than required [%s] free on node, free: [%s]",
          freeBytesThresholdHigh,
          new ByteSizeValue(freeBytes));
    }
    if (freeDiskPercentage < freeDiskThresholdHigh) {
      if (logger.isDebugEnabled()) {
        logger.debug(
            "less than the required {}% free disk threshold ({}% free) on node {}, shard cannot remain",
            freeDiskThresholdHigh, freeDiskPercentage, node.nodeId());
      }
      return allocation.decision(
          Decision.NO,
          NAME,
          "after allocation less than required [%s%%] free disk on node, free: [%s%%]",
          freeDiskThresholdHigh,
          freeDiskPercentage);
    }

    return allocation.decision(
        Decision.YES,
        NAME,
        "enough disk for shard to remain on node, free: [%s]",
        new ByteSizeValue(freeBytes));
  }
 /**
  * Checks if a watermark string is a valid percentage or byte size value, returning true if valid,
  * false if invalid.
  */
 public boolean validWatermarkSetting(String watermark, String settingName) {
   try {
     RatioValue.parseRatioValue(watermark);
     return true;
   } catch (ElasticsearchParseException e) {
     try {
       ByteSizeValue.parseBytesSizeValue(watermark, settingName);
       return true;
     } catch (ElasticsearchParseException ex) {
       return false;
     }
   }
 }
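 // Hedged examples of the two accepted watermark forms (the setting name is
 // shown for illustration only):
 public void watermarkValidationExample() {
   String key = "cluster.routing.allocation.disk.watermark.low";
   assert validWatermarkSetting("85%", key);   // parses as a ratio
   assert validWatermarkSetting("500mb", key); // parses as a byte size
   assert !validWatermarkSetting("oops", key); // neither -> invalid
 }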
  @Inject
  public RecoverySettings(Settings settings, ClusterSettings clusterSettings) {
    super(settings);

    this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings);
    // doesn't have to be fast as nodes are reconnected every 10s by default (see
    // InternalClusterService.ReconnectToNodes)
    // and we want to give the master time to remove a faulty node
    this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings);

    this.internalActionTimeout = INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(settings);
    this.internalActionLongTimeout =
        INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.get(settings);

    this.activityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings);
    this.maxBytesPerSec = INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.get(settings);
    if (maxBytesPerSec.getBytes() <= 0) {
      rateLimiter = null;
    } else {
      rateLimiter = new SimpleRateLimiter(maxBytesPerSec.getMbFrac());
    }

    logger.debug("using max_bytes_per_sec[{}]", maxBytesPerSec);

    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, this::setInternalActionLongTimeout);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout);
  }
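  // For illustration, the consumer wiring above is what lets a transient
  // cluster-settings update take effect at runtime; a hedged client-side sketch
  // (assumes a connected Client named client):
  //
  //   client.admin().cluster().prepareUpdateSettings()
  //       .setTransientSettings(Settings.builder()
  //           .put("indices.recovery.max_bytes_per_sec", "80mb"))
  //       .get(); // routed to setMaxBytesPerSec via the registered consumer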
 /**
  * Checks if a watermark string is a valid percentage or byte size value, returning true if valid,
  * false if invalid.
  */
 public boolean validWatermarkSetting(String watermark) {
   try {
     double w = Double.parseDouble(watermark);
     if (w < 0 || w > 1.0) {
       return false;
     }
     return true;
   } catch (NumberFormatException e) {
     try {
       ByteSizeValue.parseBytesSizeValue(watermark);
       return true;
     } catch (ElasticsearchParseException ex) {
       return false;
     }
   }
 }
 @Override
 public void updateIndexingBufferSize(ByteSizeValue indexingBufferSize) {
   rwl.readLock().lock();
   try {
     // LUCENE MONITOR - If this restriction is removed from Lucene, remove it from here
     if (indexingBufferSize.mbFrac() > 2048.0) {
       this.indexingBufferSize = new ByteSizeValue(2048, ByteSizeUnit.MB);
     } else {
       this.indexingBufferSize = indexingBufferSize;
     }
     IndexWriter indexWriter = this.indexWriter;
     if (indexWriter != null) {
       indexWriter.setRAMBufferSizeMB(this.indexingBufferSize.mbFrac());
     }
   } finally {
     rwl.readLock().unlock();
   }
 }
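 // The clamp above mirrors Lucene's historical 2048 MB ceiling for the
 // IndexWriter RAM buffer; a hedged illustration of the effect:
 //   updateIndexingBufferSize(new ByteSizeValue(4, ByteSizeUnit.GB))   -> stored as 2048mb
 //   updateIndexingBufferSize(new ByteSizeValue(256, ByteSizeUnit.MB)) -> stored unchanged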
 private void slowDownRecovery(ByteSizeValue shardSize) {
   long chunkSize = Math.max(1, shardSize.getBytes() / 10);
   assertTrue(
       client()
           .admin()
           .cluster()
           .prepareUpdateSettings()
           .setTransientSettings(
               Settings.builder()
                   // one chunk per sec..
                   .put(
                       RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(),
                       chunkSize,
                       ByteSizeUnit.BYTES)
                   // small chunks
                   .put(
                       CHUNK_SIZE_SETTING.getKey(),
                       new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES)))
           .get()
           .isAcknowledged());
 }
 @Override
 public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext)
     throws MapperParsingException {
   BinaryFieldMapper.Builder builder = binaryField(name);
   parseField(builder, name, node, parserContext);
   for (Map.Entry<String, Object> entry : node.entrySet()) {
     String fieldName = Strings.toUnderscoreCase(entry.getKey());
     Object fieldNode = entry.getValue();
     if (fieldName.equals("compress") && fieldNode != null) {
       builder.compress(nodeBooleanValue(fieldNode));
     } else if (fieldName.equals("compress_threshold") && fieldNode != null) {
       if (fieldNode instanceof Number) {
         builder.compressThreshold(((Number) fieldNode).longValue());
         builder.compress(true);
       } else {
         builder.compressThreshold(
             ByteSizeValue.parseBytesSizeValue(fieldNode.toString()).bytes());
         builder.compress(true);
       }
     }
   }
   return builder;
 }
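 // A hedged example of the mapping fragment this parser accepts (the field name
 // "blob" is illustrative); a non-numeric compress_threshold string goes through
 // ByteSizeValue.parseBytesSizeValue:
 //   "blob": {
 //     "type": "binary",
 //     "compress": true,
 //     "compress_threshold": "5kb"
 //   }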
  @Override
  public Decision canAllocate(
      ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
    final Decision decision = earlyTerminate(allocation);
    if (decision != null) {
      return decision;
    }

    final double usedDiskThresholdLow = 100.0 - DiskThresholdDecider.this.freeDiskThresholdLow;
    final double usedDiskThresholdHigh = 100.0 - DiskThresholdDecider.this.freeDiskThresholdHigh;
    ClusterInfo clusterInfo = allocation.clusterInfo();
    Map<String, DiskUsage> usages = clusterInfo.getNodeMostAvailableDiskUsages();
    DiskUsage usage = getDiskUsage(node, allocation, usages);
    // First, check whether the node is currently over the low watermark
    double freeDiskPercentage = usage.getFreeDiskAsPercentage();
    // Cache the used disk percentage for displaying disk percentages consistent with documentation
    double usedDiskPercentage = usage.getUsedDiskAsPercentage();
    long freeBytes = usage.getFreeBytes();
    if (logger.isTraceEnabled()) {
      logger.trace("node [{}] has {}% used disk", node.nodeId(), usedDiskPercentage);
    }

    // a flag for whether the primary shard has been previously allocated
    boolean primaryHasBeenAllocated =
        shardRouting.primary() && shardRouting.allocatedPostIndexCreate();

    // checks for exact byte comparisons
    if (freeBytes < freeBytesThresholdLow.bytes()) {
      // If the shard is a replica or has a primary that has already been allocated before, check
      // the low threshold
      if (!shardRouting.primary() || (shardRouting.primary() && primaryHasBeenAllocated)) {
        if (logger.isDebugEnabled()) {
          logger.debug(
              "less than the required {} free bytes threshold ({} bytes free) on node {}, preventing allocation",
              freeBytesThresholdLow,
              freeBytes,
              node.nodeId());
        }
        return allocation.decision(
            Decision.NO,
            NAME,
            "less than required [%s] free on node, free: [%s]",
            freeBytesThresholdLow,
            new ByteSizeValue(freeBytes));
      } else if (freeBytes > freeBytesThresholdHigh.bytes()) {
        // Allow the shard to be allocated because it is a primary that has
        // never been allocated, as long as the node is under the high watermark
        if (logger.isDebugEnabled()) {
          logger.debug(
              "less than the required {} free bytes threshold ({} bytes free) on node {}, "
                  + "but allowing allocation because primary has never been allocated",
              freeBytesThresholdLow,
              freeBytes,
              node.nodeId());
        }
        return allocation.decision(Decision.YES, NAME, "primary has never been allocated before");
      } else {
        // Even though the primary has never been allocated, the node is
        // above the high watermark, so don't allow allocating the shard
        if (logger.isDebugEnabled()) {
          logger.debug(
              "less than the required {} free bytes threshold ({} bytes free) on node {}, "
                  + "preventing allocation even though primary has never been allocated",
              freeBytesThresholdHigh,
              freeBytes,
              node.nodeId());
        }
        return allocation.decision(
            Decision.NO,
            NAME,
            "less than required [%s] free on node, free: [%s]",
            freeBytesThresholdHigh,
            new ByteSizeValue(freeBytes));
      }
    }

    // checks for percentage comparisons
    if (freeDiskPercentage < freeDiskThresholdLow) {
      // If the shard is a replica or has a primary that has already been allocated before, check
      // the low threshold
      if (!shardRouting.primary() || (shardRouting.primary() && primaryHasBeenAllocated)) {
        if (logger.isDebugEnabled()) {
          logger.debug(
              "more than the allowed {} used disk threshold ({} used) on node [{}], preventing allocation",
              Strings.format1Decimals(usedDiskThresholdLow, "%"),
              Strings.format1Decimals(usedDiskPercentage, "%"),
              node.nodeId());
        }
        return allocation.decision(
            Decision.NO,
            NAME,
            "more than allowed [%s%%] used disk on node, free: [%s%%]",
            usedDiskThresholdLow,
            freeDiskPercentage);
      } else if (freeDiskPercentage > freeDiskThresholdHigh) {
        // Allow the shard to be allocated because it is a primary that has
        // never been allocated, as long as the node is under the high watermark
        if (logger.isDebugEnabled()) {
          logger.debug(
              "more than the allowed {} used disk threshold ({} used) on node [{}], "
                  + "but allowing allocation because primary has never been allocated",
              Strings.format1Decimals(usedDiskThresholdLow, "%"),
              Strings.format1Decimals(usedDiskPercentage, "%"),
              node.nodeId());
        }
        return allocation.decision(Decision.YES, NAME, "primary has never been allocated before");
      } else {
        // Even though the primary has never been allocated, the node is
        // above the high watermark, so don't allow allocating the shard
        if (logger.isDebugEnabled()) {
          logger.debug(
              "less than the required {} free bytes threshold ({} bytes free) on node {}, "
                  + "preventing allocation even though primary has never been allocated",
              Strings.format1Decimals(freeDiskThresholdHigh, "%"),
              Strings.format1Decimals(freeDiskPercentage, "%"),
              node.nodeId());
        }
        return allocation.decision(
            Decision.NO,
            NAME,
            "more than allowed [%s%%] used disk on node, free: [%s%%]",
            usedDiskThresholdHigh,
            freeDiskPercentage);
      }
    }

    // Secondly, check that allocating the shard to this node doesn't put it above the high
    // watermark
    final long shardSize = getShardSize(shardRouting, allocation.clusterInfo());
    double freeSpaceAfterShard = freeDiskPercentageAfterShardAssigned(usage, shardSize);
    long freeBytesAfterShard = freeBytes - shardSize;
    if (freeBytesAfterShard < freeBytesThresholdHigh.bytes()) {
      logger.warn(
          "after allocating, node [{}] would have less than the required {} free bytes threshold ({} bytes free), preventing allocation",
          node.nodeId(),
          freeBytesThresholdHigh,
          freeBytesAfterShard);
      return allocation.decision(
          Decision.NO,
          NAME,
          "after allocation less than required [%s] free on node, free: [%s]",
          freeBytesThresholdHigh,
          new ByteSizeValue(freeBytesAfterShard));
    }
    if (freeSpaceAfterShard < freeDiskThresholdHigh) {
      logger.warn(
          "after allocating, node [{}] would have more than the allowed {} free disk threshold ({} free), preventing allocation",
          node.nodeId(),
          Strings.format1Decimals(freeDiskThresholdHigh, "%"),
          Strings.format1Decimals(freeSpaceAfterShard, "%"));
      return allocation.decision(
          Decision.NO,
          NAME,
          "after allocation more than allowed [%s%%] used disk on node, free: [%s%%]",
          usedDiskThresholdHigh,
          freeSpaceAfterShard);
    }

    return allocation.decision(
        Decision.YES,
        NAME,
        "enough disk for shard on node, free: [%s]",
        new ByteSizeValue(freeBytes));
  }
/**
 * Holds all the configuration that is used to create an {@link Engine}.
 * Once {@link Engine} has been created with this object, changes to this
 * object will affect the {@link Engine} instance.
 */
public final class EngineConfig {
  private final ShardId shardId;
  private volatile boolean failOnMergeFailure = true;
  private volatile boolean failEngineOnCorruption = true;
  private volatile ByteSizeValue indexingBufferSize;
  private volatile int indexConcurrency = IndexWriterConfig.DEFAULT_MAX_THREAD_STATES;
  private volatile boolean compoundOnFlush = true;
  private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis();
  private volatile boolean enableGcDeletes = true;
  private volatile String codecName = DEFAULT_CODEC_NAME;
  private final boolean optimizeAutoGenerateId;
  private final ThreadPool threadPool;
  private final ShardIndexingService indexingService;
  private final IndexSettingsService indexSettingsService;
  @Nullable private final IndicesWarmer warmer;
  private final Store store;
  private final SnapshotDeletionPolicy deletionPolicy;
  private final Translog translog;
  private final MergePolicyProvider mergePolicyProvider;
  private final MergeSchedulerProvider mergeScheduler;
  private final Analyzer analyzer;
  private final Similarity similarity;
  private final CodecService codecService;
  private final Engine.FailedEngineListener failedEngineListener;

  /**
   * Index setting for index concurrency / number of thread states in the IndexWriter. The default
   * depends on the number of CPUs in the system: we use 0.65 times the number of CPUs, or at least
   * {@value org.apache.lucene.index.IndexWriterConfig#DEFAULT_MAX_THREAD_STATES}. This setting is
   * realtime updateable.
   */
  public static final String INDEX_CONCURRENCY_SETTING = "index.index_concurrency";

  /** Index setting for compound file on flush. This setting is realtime updateable. */
  public static final String INDEX_COMPOUND_ON_FLUSH = "index.compound_on_flush";

  /**
   * Setting to control auto generated ID optimizations. Default is <code>true</code> if not
   * present. This setting is <b>not</b> realtime updateable.
   */
  public static final String INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING =
      "index.optimize_auto_generated_id";

  /**
   * Index setting to enable / disable deletes garbage collection. This setting is realtime
   * updateable
   */
  public static final String INDEX_GC_DELETES_SETTING = "index.gc_deletes";

  /**
   * Index setting to enable / disable engine failures on merge exceptions. Default is <code>true
   * </code> / <tt>enabled</tt>. This setting is realtime updateable.
   */
  public static final String INDEX_FAIL_ON_MERGE_FAILURE_SETTING = "index.fail_on_merge_failure";

  /**
   * Index setting to enable / disable engine failures on detected index corruptions. Default is
   * <code>true</code> / <tt>enabled</tt>. This setting is realtime updateable.
   */
  public static final String INDEX_FAIL_ON_CORRUPTION_SETTING = "index.fail_on_corruption";

  /**
   * Index setting to control the initial index buffer size. This setting is <b>not</b> realtime
   * updateable.
   */
  public static final String INDEX_BUFFER_SIZE_SETTING = "index.buffer_size";

  /**
   * Index setting to change the low level lucene codec used for writing new segments. This setting
   * is realtime updateable.
   */
  public static final String INDEX_CODEC_SETTING = "index.codec";

  public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
  public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
  public static final ByteSizeValue DEFAULT_INDEX_BUFFER_SIZE =
      new ByteSizeValue(64, ByteSizeUnit.MB);
  public static final ByteSizeValue INACTIVE_SHARD_INDEXING_BUFFER =
      ByteSizeValue.parseBytesSizeValue("500kb");

  private static final String DEFAULT_CODEC_NAME = "default";

  /** Creates a new {@link org.elasticsearch.index.engine.EngineConfig} */
  public EngineConfig(
      ShardId shardId,
      boolean optimizeAutoGenerateId,
      ThreadPool threadPool,
      ShardIndexingService indexingService,
      IndexSettingsService indexSettingsService,
      IndicesWarmer warmer,
      Store store,
      SnapshotDeletionPolicy deletionPolicy,
      Translog translog,
      MergePolicyProvider mergePolicyProvider,
      MergeSchedulerProvider mergeScheduler,
      Analyzer analyzer,
      Similarity similarity,
      CodecService codecService,
      Engine.FailedEngineListener failedEngineListener) {
    this.shardId = shardId;
    this.optimizeAutoGenerateId = optimizeAutoGenerateId;
    this.threadPool = threadPool;
    this.indexingService = indexingService;
    this.indexSettingsService = indexSettingsService;
    this.warmer = warmer;
    this.store = store;
    this.deletionPolicy = deletionPolicy;
    this.translog = translog;
    this.mergePolicyProvider = mergePolicyProvider;
    this.mergeScheduler = mergeScheduler;
    this.analyzer = analyzer;
    this.similarity = similarity;
    this.codecService = codecService;
    this.failedEngineListener = failedEngineListener;
    Settings indexSettings = indexSettingsService.getSettings();
    this.compoundOnFlush =
        indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
    this.indexConcurrency =
        indexSettings.getAsInt(
            EngineConfig.INDEX_CONCURRENCY_SETTING,
            Math.max(
                IndexWriterConfig.DEFAULT_MAX_THREAD_STATES,
                (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65)));
    codecName =
        indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
    indexingBufferSize =
        indexSettings.getAsBytesSize(INDEX_BUFFER_SIZE_SETTING, DEFAULT_INDEX_BUFFER_SIZE);
    failEngineOnCorruption = indexSettings.getAsBoolean(INDEX_FAIL_ON_CORRUPTION_SETTING, true);
    failOnMergeFailure = indexSettings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE_SETTING, true);
    gcDeletesInMillis =
        indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis();
  }

  /** Sets the indexing buffer */
  public void setIndexingBufferSize(ByteSizeValue indexingBufferSize) {
    this.indexingBufferSize = indexingBufferSize;
  }

  /**
   * Sets the index concurrency
   *
   * @see #getIndexConcurrency()
   */
  public void setIndexConcurrency(int indexConcurrency) {
    this.indexConcurrency = indexConcurrency;
  }

  /**
   * Enables / disables gc deletes
   *
   * @see #isEnableGcDeletes()
   */
  public void setEnableGcDeletes(boolean enableGcDeletes) {
    this.enableGcDeletes = enableGcDeletes;
  }

  /**
   * Returns <code>true</code> iff the engine should be failed if a merge error is hit. Defaults to
   * <code>true</code>
   */
  public boolean isFailOnMergeFailure() {
    return failOnMergeFailure;
  }

  /**
   * Returns <code>true</code> if the engine should be failed in the case of a corrupted index.
   * Defaults to <code>true</code>
   */
  public boolean isFailEngineOnCorruption() {
    return failEngineOnCorruption;
  }

  /**
   * Returns the initial index buffer size. This setting is only read on startup and otherwise
   * controlled by {@link org.elasticsearch.indices.memory.IndexingMemoryController}
   */
  public ByteSizeValue getIndexingBufferSize() {
    return indexingBufferSize;
  }

  /**
   * Returns the index concurrency that directly translates into the number of thread states used
   * in the engine's {@code IndexWriter}.
   *
   * @see org.apache.lucene.index.IndexWriterConfig#getMaxThreadStates()
   */
  public int getIndexConcurrency() {
    return indexConcurrency;
  }

  /**
   * Returns <code>true</code> iff flushed segments should be written as compound file system.
   * Defaults to <code>true</code>
   */
  public boolean isCompoundOnFlush() {
    return compoundOnFlush;
  }

  /** Returns the GC deletes cycle in milliseconds. */
  public long getGcDeletesInMillis() {
    return gcDeletesInMillis;
  }

  /**
   * Returns <code>true</code> iff delete garbage collection in the engine should be enabled. This
   * setting is updateable in realtime and forces a volatile read. Consumers can safely read this
   * value directly to fetch its latest value. The default is <code>true</code>.
   *
   * <p>Engine GC deletion, if enabled, collects deleted documents from in-memory realtime data
   * structures after a certain amount of time ({@link #getGcDeletesInMillis()}). Until deletes are
   * GCed, re-adding a document that was deleted will fail.
   */
  public boolean isEnableGcDeletes() {
    return enableGcDeletes;
  }

  /**
   * Returns the {@link Codec} used in the engine's {@link org.apache.lucene.index.IndexWriter}
   *
   * <p>Note: this setting is only read on startup and if a new writer is created. This happens
   * either due to a settings change in the {@link
   * org.elasticsearch.index.engine.EngineConfig.EngineSettingsListener} or if {@link
   * Engine#flush(org.elasticsearch.index.engine.Engine.FlushType, boolean, boolean)} with {@link
   * org.elasticsearch.index.engine.Engine.FlushType#NEW_WRITER} is executed.
   */
  public Codec getCodec() {
    return codecService.codec(codecName);
  }

  /**
   * Returns <code>true</code> iff documents with auto-generated IDs are optimized if possible. This
   * mainly means that they are simply appended to the index if no update call is necessary.
   */
  public boolean isOptimizeAutoGenerateId() {
    return optimizeAutoGenerateId;
  }

  /**
   * Returns a thread-pool mainly used to get estimated time stamps from {@link
   * org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule async force
   * merge calls on the {@link org.elasticsearch.threadpool.ThreadPool.Names#OPTIMIZE} thread-pool
   */
  public ThreadPool getThreadPool() {
    return threadPool;
  }

  /**
   * Returns a {@link org.elasticsearch.index.indexing.ShardIndexingService} used inside the engine
   * to inform about pre and post index and create operations. The operations are used for statistic
   * purposes etc.
   *
   * @see
   *     org.elasticsearch.index.indexing.ShardIndexingService#postCreate(org.elasticsearch.index.engine.Engine.Create)
   * @see
   *     org.elasticsearch.index.indexing.ShardIndexingService#preCreate(org.elasticsearch.index.engine.Engine.Create)
   */
  public ShardIndexingService getIndexingService() {
    return indexingService;
  }

  /**
   * Returns an {@link org.elasticsearch.index.settings.IndexSettingsService} used to register a
   * {@link org.elasticsearch.index.engine.EngineConfig.EngineSettingsListener} instance in order to
   * get notification for realtime changeable settings exposed in this {@link
   * org.elasticsearch.index.engine.EngineConfig}.
   */
  public IndexSettingsService getIndexSettingsService() {
    return indexSettingsService;
  }

  /**
   * Returns an {@link org.elasticsearch.indices.IndicesWarmer} used to warm new searchers before
   * they are used for searching. Note: This method might return <code>null</code>.
   */
  @Nullable
  public IndicesWarmer getWarmer() {
    return warmer;
  }

  /**
   * Returns the {@link org.elasticsearch.index.store.Store} instance that provides access to the
   * {@link org.apache.lucene.store.Directory} used by the engine's {@link
   * org.apache.lucene.index.IndexWriter} to write its index files to.
   *
   * <p>Note: In order to use this instance the consumer needs to increment the store's reference
   * count before first use and hold the reference until it is no longer needed.
   */
  public Store getStore() {
    return store;
  }

  /**
   * Returns a {@link org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy} used in the
   * engine's {@link org.apache.lucene.index.IndexWriter}.
   */
  public SnapshotDeletionPolicy getDeletionPolicy() {
    return deletionPolicy;
  }

  /** Returns a {@link Translog} instance. */
  public Translog getTranslog() {
    return translog;
  }

  /**
   * Returns the {@link org.elasticsearch.index.merge.policy.MergePolicyProvider} used to obtain a
   * {@link org.apache.lucene.index.MergePolicy} for the engine's {@link
   * org.apache.lucene.index.IndexWriter}
   */
  public MergePolicyProvider getMergePolicyProvider() {
    return mergePolicyProvider;
  }

  /**
   * Returns the {@link org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider} used to
   * obtain a {@link org.apache.lucene.index.MergeScheduler} for the engine's {@link
   * org.apache.lucene.index.IndexWriter}
   */
  public MergeSchedulerProvider getMergeScheduler() {
    return mergeScheduler;
  }

  /** Returns a listener that should be called on engine failure */
  public Engine.FailedEngineListener getFailedEngineListener() {
    return failedEngineListener;
  }

  /** Returns the latest index settings directly from the index settings service. */
  public Settings getIndexSettings() {
    return indexSettingsService.getSettings();
  }

  /** Returns the engine's shard ID. */
  public ShardId getShardId() {
    return shardId;
  }

  /**
   * Returns the analyzer used as the default analyzer in the engine's {@link
   * org.apache.lucene.index.IndexWriter}
   */
  public Analyzer getAnalyzer() {
    return analyzer;
  }

  /**
   * Returns the {@link org.apache.lucene.search.similarities.Similarity} used for indexing and
   * searching.
   */
  public Similarity getSimilarity() {
    return similarity;
  }

  /**
   * Basic realtime updateable settings listener that can be used to receive notifications when an
   * index setting changes.
   */
  public abstract static class EngineSettingsListener implements IndexSettingsService.Listener {

    private final ESLogger logger;
    private final EngineConfig config;

    public EngineSettingsListener(ESLogger logger, EngineConfig config) {
      this.logger = logger;
      this.config = config;
    }

    @Override
    public final void onRefreshSettings(Settings settings) {
      boolean change = false;
      long gcDeletesInMillis =
          settings
              .getAsTime(
                  EngineConfig.INDEX_GC_DELETES_SETTING,
                  TimeValue.timeValueMillis(config.getGcDeletesInMillis()))
              .millis();
      if (gcDeletesInMillis != config.getGcDeletesInMillis()) {
        logger.info(
            "updating {} from [{}] to [{}]",
            EngineConfig.INDEX_GC_DELETES_SETTING,
            TimeValue.timeValueMillis(config.getGcDeletesInMillis()),
            TimeValue.timeValueMillis(gcDeletesInMillis));
        config.gcDeletesInMillis = gcDeletesInMillis;
        change = true;
      }

      final boolean compoundOnFlush =
          settings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, config.isCompoundOnFlush());
      if (compoundOnFlush != config.isCompoundOnFlush()) {
        logger.info(
            "updating {} from [{}] to [{}]",
            EngineConfig.INDEX_COMPOUND_ON_FLUSH,
            config.isCompoundOnFlush(),
            compoundOnFlush);
        config.compoundOnFlush = compoundOnFlush;
        change = true;
      }

      final boolean failEngineOnCorruption =
          settings.getAsBoolean(
              EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, config.isFailEngineOnCorruption());
      if (failEngineOnCorruption != config.isFailEngineOnCorruption()) {
        logger.info(
            "updating {} from [{}] to [{}]",
            EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING,
            config.isFailEngineOnCorruption(),
            failEngineOnCorruption);
        config.failEngineOnCorruption = failEngineOnCorruption;
        change = true;
      }
      int indexConcurrency =
          settings.getAsInt(EngineConfig.INDEX_CONCURRENCY_SETTING, config.getIndexConcurrency());
      if (indexConcurrency != config.getIndexConcurrency()) {
        logger.info(
            "updating index.index_concurrency from [{}] to [{}]",
            config.getIndexConcurrency(),
            indexConcurrency);
        config.setIndexConcurrency(indexConcurrency);
        // we have to flush in this case, since it only applies on a new index writer
        change = true;
      }
      final String codecName = settings.get(EngineConfig.INDEX_CODEC_SETTING, config.codecName);
      if (!codecName.equals(config.codecName)) {
        logger.info(
            "updating {} from [{}] to [{}]",
            EngineConfig.INDEX_CODEC_SETTING,
            config.codecName,
            codecName);
        config.codecName = codecName;
        // we want to flush in this case, so the new codec will be reflected right away...
        change = true;
      }
      final boolean failOnMergeFailure =
          settings.getAsBoolean(
              EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING, config.isFailOnMergeFailure());
      if (failOnMergeFailure != config.isFailOnMergeFailure()) {
        logger.info(
            "updating {} from [{}] to [{}]",
            EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING,
            config.isFailOnMergeFailure(),
            failOnMergeFailure);
        config.failOnMergeFailure = failOnMergeFailure;
        change = true;
      }

      if (change) {
        onChange();
      }
    }

    /**
     * This method is called if any of the settings that are exposed as realtime updateable
     * settings has changed. Subclasses should override this method to react to settings changes.
     */
    protected abstract void onChange();
  }
}
 @Test(expected = ElasticsearchParseException.class)
 public void testFailOnEmptyNumberParsing() {
   // a bare unit with no number must fail to parse; the assertion body is never reached
   assertThat(ByteSizeValue.parseBytesSizeValue("g").toString(), is("23b"));
 }
 @Test
 public void testParsing() {
   assertThat(ByteSizeValue.parseBytesSizeValue("42pb").toString(), is("42pb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("42P").toString(), is("42pb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("42PB").toString(), is("42pb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("54tb").toString(), is("54tb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("54T").toString(), is("54tb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("54TB").toString(), is("54tb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("12gb").toString(), is("12gb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("12G").toString(), is("12gb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("12GB").toString(), is("12gb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("12M").toString(), is("12mb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("1b").toString(), is("1b"));
   assertThat(ByteSizeValue.parseBytesSizeValue("23kb").toString(), is("23kb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("23k").toString(), is("23kb"));
   assertThat(ByteSizeValue.parseBytesSizeValue("23").toString(), is("23b"));
 }
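 // The suffixes accepted above map to 1024-based multiples and are
 // case-insensitive; bare numbers are bytes. A hedged summary:
 //   "23"  -> 23 bytes          "23k" -> 23 * 1024 bytes
 //   "12gb" / "12G" / "12GB" -> 12 * 1024^3 bytes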
  public Decision canAllocate(
      ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
    if (!enabled) {
      return allocation.decision(Decision.YES, "disk threshold decider disabled");
    }
    // Allow allocation regardless if only a single node is available
    if (allocation.nodes().size() <= 1) {
      return allocation.decision(Decision.YES, "only a single node is present");
    }

    ClusterInfo clusterInfo = allocation.clusterInfo();
    if (clusterInfo == null) {
      if (logger.isTraceEnabled()) {
        logger.trace("Cluster info unavailable for disk threshold decider, allowing allocation.");
      }
      return allocation.decision(Decision.YES, "cluster info unavailable");
    }

    Map<String, DiskUsage> usages = clusterInfo.getNodeDiskUsages();
    Map<String, Long> shardSizes = clusterInfo.getShardSizes();
    if (usages.isEmpty()) {
      if (logger.isTraceEnabled()) {
        logger.trace(
            "Unable to determine disk usages for disk-aware allocation, allowing allocation");
      }
      return allocation.decision(Decision.YES, "disk usages unavailable");
    }

    DiskUsage usage = usages.get(node.nodeId());
    if (usage == null) {
      // If there is no usage, and we have other nodes in the cluster,
      // use the average usage for all nodes as the usage for this node
      usage = averageUsage(node, usages);
      if (logger.isDebugEnabled()) {
        logger.debug(
            "Unable to determine disk usage for [{}], defaulting to average across nodes [{} total] [{} free] [{}% free]",
            node.nodeId(),
            usage.getTotalBytes(),
            usage.getFreeBytes(),
            usage.getFreeDiskAsPercentage());
      }
    }

    // First, check whether the node is currently over the low watermark
    double freeDiskPercentage = usage.getFreeDiskAsPercentage();
    long freeBytes = usage.getFreeBytes();
    if (logger.isDebugEnabled()) {
      logger.debug("Node [{}] has {}% free disk", node.nodeId(), freeDiskPercentage);
    }
    if (freeBytes < freeBytesThresholdLow.bytes()) {
      if (logger.isDebugEnabled()) {
        logger.debug(
            "Less than the required {} free bytes threshold ({} bytes free) on node {}, preventing allocation",
            freeBytesThresholdLow,
            freeBytes,
            node.nodeId());
      }
      return allocation.decision(
          Decision.NO,
          "less than required [%s] free on node, free: [%s]",
          freeBytesThresholdLow,
          new ByteSizeValue(freeBytes));
    }
    if (freeDiskPercentage < freeDiskThresholdLow) {
      if (logger.isDebugEnabled()) {
        logger.debug(
            "Less than the required {}% free disk threshold ({}% free) on node [{}], preventing allocation",
            freeDiskThresholdLow, freeDiskPercentage, node.nodeId());
      }
      return allocation.decision(
          Decision.NO,
          "less than required [%d%%] free disk on node, free: [%d%%]",
          freeDiskThresholdLow,
          freeDiskThresholdLow);
    }

    // Secondly, check that allocating the shard to this node doesn't put it above the high
    // watermark
    Long shardSize = shardSizes.get(shardIdentifierFromRouting(shardRouting));
    shardSize = shardSize == null ? 0 : shardSize;
    double freeSpaceAfterShard = this.freeDiskPercentageAfterShardAssigned(usage, shardSize);
    long freeBytesAfterShard = freeBytes - shardSize;
    if (freeBytesAfterShard < freeBytesThresholdHigh.bytes()) {
      logger.warn(
          "After allocating, node [{}] would have less than the required {} free bytes threshold ({} bytes free), preventing allocation",
          node.nodeId(),
          freeBytesThresholdHigh,
          freeBytesAfterShard);
      return allocation.decision(
          Decision.NO,
          "after allocation less than required [%s] free on node, free: [%s]",
          freeBytesThresholdHigh,
          new ByteSizeValue(freeBytesAfterShard));
    }
    if (freeSpaceAfterShard < freeDiskThresholdHigh) {
      logger.warn(
          "After allocating, node [{}] would have less than the required {}% free disk threshold ({}% free), preventing allocation",
          node.nodeId(), freeDiskThresholdHigh, freeSpaceAfterShard);
      return allocation.decision(
          Decision.NO,
          "after allocation less than required [%d%%] free disk on node, free: [%d%%]",
          freeDiskThresholdLow,
          freeSpaceAfterShard);
    }

    return allocation.decision(
        Decision.YES, "enough disk for shard on node, free: [%s]", new ByteSizeValue(freeBytes));
  }
    @Override
    public void onRefreshSettings(Settings settings) {
      ByteSizeValue minMergeSize =
          settings.getAsBytesSize(
              "index.merge.policy.min_merge_size",
              LogByteSizeMergePolicyProvider.this.minMergeSize);
      if (!minMergeSize.equals(LogByteSizeMergePolicyProvider.this.minMergeSize)) {
        logger.info(
            "updating min_merge_size from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.minMergeSize,
            minMergeSize);
        LogByteSizeMergePolicyProvider.this.minMergeSize = minMergeSize;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMinMergeMB(minMergeSize.mbFrac());
        }
      }

      ByteSizeValue maxMergeSize =
          settings.getAsBytesSize(
              "index.merge.policy.max_merge_size",
              LogByteSizeMergePolicyProvider.this.maxMergeSize);
      if (!maxMergeSize.equals(LogByteSizeMergePolicyProvider.this.maxMergeSize)) {
        logger.info(
            "updating max_merge_size from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.maxMergeSize,
            maxMergeSize);
        LogByteSizeMergePolicyProvider.this.maxMergeSize = maxMergeSize;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMaxMergeMB(maxMergeSize.mbFrac());
        }
      }

      int maxMergeDocs =
          settings.getAsInt(
              "index.merge.policy.max_merge_docs",
              LogByteSizeMergePolicyProvider.this.maxMergeDocs);
      if (maxMergeDocs != LogByteSizeMergePolicyProvider.this.maxMergeDocs) {
        logger.info(
            "updating max_merge_docs from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.maxMergeDocs,
            maxMergeDocs);
        LogByteSizeMergePolicyProvider.this.maxMergeDocs = maxMergeDocs;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMaxMergeDocs(maxMergeDocs);
        }
      }

      int mergeFactor =
          settings.getAsInt(
              "index.merge.policy.merge_factor", LogByteSizeMergePolicyProvider.this.mergeFactor);
      if (mergeFactor != LogByteSizeMergePolicyProvider.this.mergeFactor) {
        logger.info(
            "updating merge_factor from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.mergeFactor,
            mergeFactor);
        LogByteSizeMergePolicyProvider.this.mergeFactor = mergeFactor;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setMergeFactor(mergeFactor);
        }
      }

      boolean compoundFormat =
          settings.getAsBoolean(
              "index.compound_format", LogByteSizeMergePolicyProvider.this.compoundFormat);
      if (compoundFormat != LogByteSizeMergePolicyProvider.this.compoundFormat) {
        logger.info(
            "updating index.compound_format from [{}] to [{}]",
            LogByteSizeMergePolicyProvider.this.compoundFormat,
            compoundFormat);
        LogByteSizeMergePolicyProvider.this.compoundFormat = compoundFormat;
        for (CustomLogByteSizeMergePolicy policy : policies) {
          policy.setUseCompoundFile(compoundFormat);
        }
      }
    }
  public void testLimitsRequestSize() throws Exception {
    ByteSizeValue inFlightRequestsLimit = new ByteSizeValue(8, ByteSizeUnit.KB);
    if (noopBreakerUsed()) {
      logger.info("--> noop breakers used, skipping test");
      return;
    }

    internalCluster().ensureAtLeastNumDataNodes(2);

    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
      if (stat.getNode().isDataNode()) {
        dataNodeStats.add(stat);
      }
    }

    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());

    // send bulk request from source node to target node later. The sole shard is bound to the
    // target node.
    NodeStats targetNode = dataNodeStats.get(0);
    NodeStats sourceNode = dataNodeStats.get(1);

    assertAcked(
        prepareCreate("index")
            .setSettings(
                Settings.builder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                    .put("index.routing.allocation.include._name", targetNode.getNode().getName())
                    .put(
                        EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
                        EnableAllocationDecider.Rebalance.NONE)));

    Client client = client(sourceNode.getNode().getName());

    // we use the limit size as a (very) rough indication of how many requests we should send to
    // hit the limit
    int numRequests = inFlightRequestsLimit.bytesAsInt();
    BulkRequest bulkRequest = new BulkRequest();
    for (int i = 0; i < numRequests; i++) {
      IndexRequest indexRequest = new IndexRequest("index", "type", Integer.toString(i));
      indexRequest.source("field", "value", "num", i);
      bulkRequest.add(indexRequest);
    }

    Settings limitSettings =
        Settings.builder()
            .put(
                HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING
                    .getKey(),
                inFlightRequestsLimit)
            .build();

    assertAcked(
        client().admin().cluster().prepareUpdateSettings().setTransientSettings(limitSettings));

    // can either fail directly with an exception or the response contains exceptions (depending on
    // client)
    try {
      BulkResponse response = client.bulk(bulkRequest).actionGet();
      if (!response.hasFailures()) {
        fail("Should have thrown CircuitBreakingException");
      } else {
        // each item must have failed with CircuitBreakingException
        for (BulkItemResponse bulkItemResponse : response) {
          Throwable cause = ExceptionsHelper.unwrapCause(bulkItemResponse.getFailure().getCause());
          assertThat(cause, instanceOf(CircuitBreakingException.class));
          assertEquals(
              ((CircuitBreakingException) cause).getByteLimit(), inFlightRequestsLimit.bytes());
        }
      }
    } catch (CircuitBreakingException ex) {
      assertEquals(ex.getByteLimit(), inFlightRequestsLimit.bytes());
    }
  }
  public Decision canRemain(
      ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
    if (!enabled) {
      return allocation.decision(Decision.YES, "disk threshold decider disabled");
    }
    // Allow allocation regardless if only a single node is available
    if (allocation.nodes().size() <= 1) {
      return allocation.decision(Decision.YES, "only a single node is present");
    }

    ClusterInfo clusterInfo = allocation.clusterInfo();
    if (clusterInfo == null) {
      if (logger.isTraceEnabled()) {
        logger.trace("Cluster info unavailable for disk threshold decider, allowing allocation.");
      }
      return allocation.decision(Decision.YES, "cluster info unavailable");
    }

    Map<String, DiskUsage> usages = clusterInfo.getNodeDiskUsages();
    if (usages.isEmpty()) {
      if (logger.isTraceEnabled()) {
        logger.trace(
            "Unable to determine disk usages for disk-aware allocation, allowing allocation");
      }
      return allocation.decision(Decision.YES, "disk usages unavailable");
    }

    DiskUsage usage = usages.get(node.nodeId());
    if (usage == null) {
      // If there is no usage, and we have other nodes in the cluster,
      // use the average usage for all nodes as the usage for this node
      usage = averageUsage(node, usages);
      if (logger.isDebugEnabled()) {
        logger.debug(
            "Unable to determine disk usage for {}, defaulting to average across nodes [{} total] [{} free] [{}% free]",
            node.nodeId(),
            usage.getTotalBytes(),
            usage.getFreeBytes(),
            usage.getFreeDiskAsPercentage());
      }
    }

    // If this node is already above the high threshold, the shard cannot remain (get it off!)
    double freeDiskPercentage = usage.getFreeDiskAsPercentage();
    long freeBytes = usage.getFreeBytes();
    if (logger.isDebugEnabled()) {
      logger.debug(
          "Node [{}] has {}% free disk ({} bytes)", node.nodeId(), freeDiskPercentage, freeBytes);
    }
    if (freeBytes < freeBytesThresholdHigh.bytes()) {
      if (logger.isDebugEnabled()) {
        logger.debug(
            "Less than the required {} free bytes threshold ({} bytes free) on node {}, shard cannot remain",
            freeBytesThresholdHigh,
            freeBytes,
            node.nodeId());
      }
      return allocation.decision(
          Decision.NO,
          "after allocation less than required [%s] free on node, free: [%s]",
          freeBytesThresholdHigh,
          new ByteSizeValue(freeBytes));
    }
    if (freeDiskPercentage < freeDiskThresholdHigh) {
      if (logger.isDebugEnabled()) {
        logger.debug(
            "Less than the required {}% free disk threshold ({}% free) on node {}, shard cannot remain",
            freeDiskThresholdHigh, freeDiskPercentage, node.nodeId());
      }
      return allocation.decision(
          Decision.NO,
          "after allocation less than required [%d%%] free disk on node, free: [%d%%]",
          freeDiskThresholdHigh,
          freeDiskPercentage);
    }

    return allocation.decision(
        Decision.YES,
        "enough disk for shard to remain on node, free: [%s]",
        new ByteSizeValue(freeBytes));
  }
  public static void main(String[] args) throws InterruptedException {
    final ByteSizeValue payloadSize = new ByteSizeValue(10, ByteSizeUnit.MB);
    final int NUMBER_OF_ITERATIONS = 100000;
    final int NUMBER_OF_CLIENTS = 5;
    final byte[] payload = new byte[(int) payloadSize.bytes()];

    Settings settings = ImmutableSettings.settingsBuilder().build();

    NetworkService networkService = new NetworkService(settings);

    final ThreadPool threadPool = new ThreadPool();
    final TransportService transportServiceServer =
        new TransportService(
                new NettyTransport(settings, threadPool, networkService, Version.CURRENT),
                threadPool)
            .start();
    final TransportService transportServiceClient =
        new TransportService(
                new NettyTransport(settings, threadPool, networkService, Version.CURRENT),
                threadPool)
            .start();

    final DiscoveryNode bigNode =
        new DiscoveryNode(
            "big", new InetSocketTransportAddress("localhost", 9300), Version.CURRENT);
    //        final DiscoveryNode smallNode = new DiscoveryNode("small", new
    // InetSocketTransportAddress("localhost", 9300));
    final DiscoveryNode smallNode = bigNode;

    transportServiceClient.connectToNode(bigNode);
    transportServiceClient.connectToNode(smallNode);

    transportServiceServer.registerHandler(
        "benchmark",
        new BaseTransportRequestHandler<BenchmarkMessageRequest>() {
          @Override
          public BenchmarkMessageRequest newInstance() {
            return new BenchmarkMessageRequest();
          }

          @Override
          public String executor() {
            return ThreadPool.Names.GENERIC;
          }

          @Override
          public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel)
              throws Exception {
            channel.sendResponse(new BenchmarkMessageResponse(request));
          }
        });

    final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS);
    for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
      new Thread(
              new Runnable() {
                @Override
                public void run() {
                  for (int i = 0; i < NUMBER_OF_ITERATIONS; i++) {
                    BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
                    transportServiceClient
                        .submitRequest(
                            bigNode,
                            "benchmark",
                            message,
                            options().withType(TransportRequestOptions.Type.BULK),
                            new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
                              @Override
                              public BenchmarkMessageResponse newInstance() {
                                return new BenchmarkMessageResponse();
                              }

                              @Override
                              public String executor() {
                                return ThreadPool.Names.SAME;
                              }

                              @Override
                              public void handleResponse(BenchmarkMessageResponse response) {}

                              @Override
                              public void handleException(TransportException exp) {
                                exp.printStackTrace();
                              }
                            })
                        .txGet();
                  }
                  latch.countDown();
                }
              })
          .start();
    }

    new Thread(
            new Runnable() {
              @Override
              public void run() {
                for (int i = 0; i < 1; i++) { // a single latency probe over the STATE channel
                  BenchmarkMessageRequest message =
                      new BenchmarkMessageRequest(2, BytesRef.EMPTY_BYTES);
                  long start = System.currentTimeMillis();
                  transportServiceClient
                      .submitRequest(
                          smallNode,
                          "benchmark",
                          message,
                          options().withType(TransportRequestOptions.Type.STATE),
                          new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
                            @Override
                            public BenchmarkMessageResponse newInstance() {
                              return new BenchmarkMessageResponse();
                            }

                            @Override
                            public String executor() {
                              return ThreadPool.Names.SAME;
                            }

                            @Override
                            public void handleResponse(BenchmarkMessageResponse response) {}

                            @Override
                            public void handleException(TransportException exp) {
                              exp.printStackTrace();
                            }
                          })
                      .txGet();
                  long took = System.currentTimeMillis() - start;
                  System.out.println("Took " + took + "ms");
                }
              }
            })
        .start();

    latch.await();
  }
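  // Scale check for the benchmark above: 5 clients x 100000 iterations x 10 MB
  // payload is roughly 5,000,000 MB (~4.8 TB) pushed through the BULK channel,
  // while the single-iteration thread measures the round-trip latency of an
  // empty STATE message competing with that bulk traffic.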
  void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests
    if (chunkSize.bytesAsInt() <= 0) {
      throw new IllegalArgumentException("chunkSize must be > 0");
    }
    this.chunkSize = chunkSize;
  }
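  // Hypothetical usage of the test-only setter above, assuming the pre-2.x
  // ByteSizeValue/ByteSizeUnit API from org.elasticsearch.common.unit:
  //
  //   setChunkSize(new ByteSizeValue(512, ByteSizeUnit.KB));  // accepted
  //   setChunkSize(new ByteSizeValue(0, ByteSizeUnit.BYTES)); // throws IllegalArgumentException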
  public static void main(String[] args) {
    final String executor = ThreadPool.Names.GENERIC;
    final boolean waitForRequest = true;
    final ByteSizeValue payloadSize = new ByteSizeValue(100, ByteSizeUnit.BYTES);
    final int NUMBER_OF_CLIENTS = 10;
    final int NUMBER_OF_ITERATIONS = 100000;
    final byte[] payload = new byte[(int) payloadSize.bytes()];
    final AtomicLong idGenerator = new AtomicLong();
    final Type type = Type.NETTY;

    Settings settings = ImmutableSettings.settingsBuilder().build();

    final ThreadPool serverThreadPool = new ThreadPool();
    final TransportService serverTransportService =
        new TransportService(type.newTransport(settings, serverThreadPool), serverThreadPool)
            .start();

    final ThreadPool clientThreadPool = new ThreadPool();
    final TransportService clientTransportService =
        new TransportService(type.newTransport(settings, clientThreadPool), clientThreadPool)
            .start();

    final DiscoveryNode node =
        new DiscoveryNode("server", serverTransportService.boundAddress().publishAddress());

    serverTransportService.registerHandler(
        "benchmark",
        new BaseTransportRequestHandler<BenchmarkMessage>() {
          @Override
          public BenchmarkMessage newInstance() {
            return new BenchmarkMessage();
          }

          @Override
          public String executor() {
            return executor;
          }

          @Override
          public void messageReceived(BenchmarkMessage request, TransportChannel channel)
              throws Exception {
            channel.sendResponse(request);
          }
        });

    clientTransportService.connectToNode(node);

    for (int i = 0; i < 10000; i++) {
      BenchmarkMessage message = new BenchmarkMessage(1, payload);
      clientTransportService
          .submitRequest(
              node,
              "benchmark",
              message,
              new BaseTransportResponseHandler<BenchmarkMessage>() {
                @Override
                public BenchmarkMessage newInstance() {
                  return new BenchmarkMessage();
                }

                @Override
                public String executor() {
                  return ThreadPool.Names.SAME;
                }

                @Override
                public void handleResponse(BenchmarkMessage response) {}

                @Override
                public void handleException(TransportException exp) {
                  exp.printStackTrace();
                }
              })
          .txGet();
    }
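    // The 10000 synchronous requests above are a warmup; only the
    // NUMBER_OF_CLIENTS x NUMBER_OF_ITERATIONS run below is timed by the StopWatch.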

    Thread[] clients = new Thread[NUMBER_OF_CLIENTS];
    final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS);
    for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
      clients[i] =
          new Thread(
              new Runnable() {
                @Override
                public void run() {
                  for (int j = 0; j < NUMBER_OF_ITERATIONS; j++) {
                    final long id = idGenerator.incrementAndGet();
                    BenchmarkMessage message = new BenchmarkMessage(id, payload);
                    BaseTransportResponseHandler<BenchmarkMessage> handler =
                        new BaseTransportResponseHandler<BenchmarkMessage>() {
                          @Override
                          public BenchmarkMessage newInstance() {
                            return new BenchmarkMessage();
                          }

                          @Override
                          public String executor() {
                            return executor;
                          }

                          @Override
                          public void handleResponse(BenchmarkMessage response) {
                            if (response.id != id) {
                              System.out.println(
                                  "NO ID MATCH [" + response.id + "] and [" + id + "]");
                            }
                            latch.countDown();
                          }

                          @Override
                          public void handleException(TransportException exp) {
                            exp.printStackTrace();
                            latch.countDown();
                          }
                        };

                    if (waitForRequest) {
                      clientTransportService
                          .submitRequest(node, "benchmark", message, handler)
                          .txGet();
                    } else {
                      clientTransportService.sendRequest(node, "benchmark", message, handler);
                    }
                  }
                }
              });
    }

    StopWatch stopWatch = new StopWatch().start();
    for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
      clients[i].start();
    }

    try {
      latch.await();
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
    stopWatch.stop();

    System.out.println(
        "Ran ["
            + NUMBER_OF_CLIENTS
            + "], each with ["
            + NUMBER_OF_ITERATIONS
            + "] iterations, payload ["
            + payloadSize
            + "]: took ["
            + stopWatch.totalTime()
            + "], TPS: "
            + (NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS) / stopWatch.totalTime().secondsFrac());

    clientTransportService.close();
    clientThreadPool.shutdownNow();

    serverTransportService.close();
    serverThreadPool.shutdownNow();
  }
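  // The TPS figure printed above is total round-trips over wall-clock seconds:
  // with the defaults, 10 clients x 100000 iterations = 1,000,000 requests, so a
  // 50-second run would report 1,000,000 / 50 = 20,000 TPS. A standalone sketch
  // of the same arithmetic (the method name is illustrative):
  static double transactionsPerSecond(int clients, int iterationsPerClient, double elapsedSeconds) {
    return (clients * (long) iterationsPerClient) / elapsedSeconds;
  }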
  @SuppressWarnings("unchecked")
  public static synchronized MongoDBRiverDefinition parseSettings(
      String riverName,
      String riverIndexName,
      RiverSettings settings,
      ScriptService scriptService) {

    logger.trace("Parse river settings for {}", riverName);
    Preconditions.checkNotNull(riverName, "No riverName specified");
    Preconditions.checkNotNull(riverIndexName, "No riverIndexName specified");
    Preconditions.checkNotNull(settings, "No settings specified");

    Builder builder = new Builder();
    builder.riverName(riverName);
    builder.riverIndexName(riverIndexName);

    List<ServerAddress> mongoServers = new ArrayList<ServerAddress>();
    String mongoHost;
    int mongoPort;

    if (settings.settings().containsKey(MongoDBRiver.TYPE)) {
      Map<String, Object> mongoSettings =
          (Map<String, Object>) settings.settings().get(MongoDBRiver.TYPE);
      if (mongoSettings.containsKey(SERVERS_FIELD)) {
        Object mongoServersSettings = mongoSettings.get(SERVERS_FIELD);
        logger.trace("mongoServersSettings: {}", mongoServersSettings);
        boolean array = XContentMapValues.isArray(mongoServersSettings);

        if (array) {
          ArrayList<Map<String, Object>> feeds =
              (ArrayList<Map<String, Object>>) mongoServersSettings;
          for (Map<String, Object> feed : feeds) {
            mongoHost = XContentMapValues.nodeStringValue(feed.get(HOST_FIELD), null);
            mongoPort = XContentMapValues.nodeIntegerValue(feed.get(PORT_FIELD), DEFAULT_DB_PORT);
            logger.trace("Server: {} - {}", mongoHost, mongoPort);
            try {
              mongoServers.add(new ServerAddress(mongoHost, mongoPort));
            } catch (UnknownHostException uhEx) {
              logger.warn("Cannot add mongo server {}:{}", uhEx, mongoHost, mongoPort);
            }
          }
        }
      } else {
        mongoHost =
            XContentMapValues.nodeStringValue(mongoSettings.get(HOST_FIELD), DEFAULT_DB_HOST);
        mongoPort =
            XContentMapValues.nodeIntegerValue(mongoSettings.get(PORT_FIELD), DEFAULT_DB_PORT);
        try {
          mongoServers.add(new ServerAddress(mongoHost, mongoPort));
        } catch (UnknownHostException uhEx) {
          logger.warn("Cannot add mongo server {}:{}", uhEx, mongoHost, mongoPort);
        }
      }
      builder.mongoServers(mongoServers);

      MongoClientOptions.Builder mongoClientOptionsBuilder =
          MongoClientOptions.builder().socketKeepAlive(true);

      // MongoDB options
      if (mongoSettings.containsKey(OPTIONS_FIELD)) {
        Map<String, Object> mongoOptionsSettings =
            (Map<String, Object>) mongoSettings.get(OPTIONS_FIELD);
        logger.trace("mongoOptionsSettings: {}", mongoOptionsSettings);
        builder.mongoSecondaryReadPreference(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(SECONDARY_READ_PREFERENCE_FIELD), false));
        builder.connectTimeout(
            XContentMapValues.nodeIntegerValue(
                mongoOptionsSettings.get(CONNECT_TIMEOUT), DEFAULT_CONNECT_TIMEOUT));
        builder.socketTimeout(
            XContentMapValues.nodeIntegerValue(
                mongoOptionsSettings.get(SOCKET_TIMEOUT), DEFAULT_SOCKET_TIMEOUT));
        builder.dropCollection(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(DROP_COLLECTION_FIELD), false));
        String isMongos =
            XContentMapValues.nodeStringValue(mongoOptionsSettings.get(IS_MONGOS_FIELD), null);
        if (isMongos != null) {
          builder.isMongos(Boolean.valueOf(isMongos));
        }
        builder.mongoUseSSL(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(SSL_CONNECTION_FIELD), false));
        builder.mongoSSLVerifyCertificate(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(SSL_VERIFY_CERT_FIELD), true));
        builder.advancedTransformation(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(ADVANCED_TRANSFORMATION_FIELD), false));
        builder.skipInitialImport(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(SKIP_INITIAL_IMPORT_FIELD), false));
        builder.connectionsPerHost(
            XContentMapValues.nodeIntegerValue(
                mongoOptionsSettings.get(CONNECTIONS_PER_HOST), DEFAULT_CONNECTIONS_PER_HOST));
        builder.threadsAllowedToBlockForConnectionMultiplier(
            XContentMapValues.nodeIntegerValue(
                mongoOptionsSettings.get(THREADS_ALLOWED_TO_BLOCK_FOR_CONNECTION_MULTIPLIER),
                DEFAULT_THREADS_ALLOWED_TO_BLOCK_FOR_CONNECTION_MULTIPLIER));

        mongoClientOptionsBuilder
            .connectTimeout(builder.connectTimeout)
            .socketTimeout(builder.socketTimeout)
            .connectionsPerHost(builder.connectionsPerHost)
            .threadsAllowedToBlockForConnectionMultiplier(
                builder.threadsAllowedToBlockForConnectionMultiplier);

        if (builder.mongoSecondaryReadPreference) {
          mongoClientOptionsBuilder.readPreference(ReadPreference.secondaryPreferred());
        }

        if (builder.mongoUseSSL) {
          mongoClientOptionsBuilder.socketFactory(getSSLSocketFactory());
        }

        if (mongoOptionsSettings.containsKey(PARENT_TYPES_FIELD)) {
          Set<String> parentTypes = new HashSet<String>();
          Object parentTypesSettings = mongoOptionsSettings.get(PARENT_TYPES_FIELD);
          logger.trace("parentTypesSettings: {}", parentTypesSettings);
          boolean array = XContentMapValues.isArray(parentTypesSettings);

          if (array) {
            ArrayList<String> fields = (ArrayList<String>) parentTypesSettings;
            for (String field : fields) {
              logger.trace("Field: {}", field);
              parentTypes.add(field);
            }
          }

          builder.parentTypes(parentTypes);
        }

        if (mongoOptionsSettings.containsKey(STORE_STATISTICS_FIELD)) {
          Object storeStatistics = mongoOptionsSettings.get(STORE_STATISTICS_FIELD);
          boolean object = XContentMapValues.isObject(storeStatistics);
          if (object) {
            Map<String, Object> storeStatisticsSettings = (Map<String, Object>) storeStatistics;
            builder.storeStatistics(true);
            builder.statisticsIndexName(
                XContentMapValues.nodeStringValue(
                    storeStatisticsSettings.get(INDEX_OBJECT), riverName + "-stats"));
            builder.statisticsTypeName(
                XContentMapValues.nodeStringValue(
                    storeStatisticsSettings.get(TYPE_FIELD), "stats"));
          } else {
            builder.storeStatistics(XContentMapValues.nodeBooleanValue(storeStatistics, false));
            if (builder.storeStatistics) {
              builder.statisticsIndexName(riverName + "-stats");
              builder.statisticsTypeName("stats");
            }
          }
        }
        builder.importAllCollections(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(IMPORT_ALL_COLLECTIONS_FIELD), false));
        builder.disableIndexRefresh(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(DISABLE_INDEX_REFRESH_FIELD), false));
        builder.includeCollection(
            XContentMapValues.nodeStringValue(
                mongoOptionsSettings.get(INCLUDE_COLLECTION_FIELD), ""));

        if (mongoOptionsSettings.containsKey(INCLUDE_FIELDS_FIELD)) {
          Set<String> includeFields = new HashSet<String>();
          Object includeFieldsSettings = mongoOptionsSettings.get(INCLUDE_FIELDS_FIELD);
          logger.trace("includeFieldsSettings: {}", includeFieldsSettings);
          boolean array = XContentMapValues.isArray(includeFieldsSettings);

          if (array) {
            ArrayList<String> fields = (ArrayList<String>) includeFieldsSettings;
            for (String field : fields) {
              logger.trace("Field: {}", field);
              includeFields.add(field);
            }
          }

          if (!includeFields.contains(MongoDBRiver.MONGODB_ID_FIELD)) {
            includeFields.add(MongoDBRiver.MONGODB_ID_FIELD);
          }
          builder.includeFields(includeFields);
        } else if (mongoOptionsSettings.containsKey(EXCLUDE_FIELDS_FIELD)) {
          Set<String> excludeFields = new HashSet<String>();
          Object excludeFieldsSettings = mongoOptionsSettings.get(EXCLUDE_FIELDS_FIELD);
          logger.trace("excludeFieldsSettings: {}", excludeFieldsSettings);
          boolean array = XContentMapValues.isArray(excludeFieldsSettings);

          if (array) {
            ArrayList<String> fields = (ArrayList<String>) excludeFieldsSettings;
            for (String field : fields) {
              logger.trace("Field: {}", field);
              excludeFields.add(field);
            }
          }

          builder.excludeFields(excludeFields);
        }

        if (mongoOptionsSettings.containsKey(INITIAL_TIMESTAMP_FIELD)) {
          BSONTimestamp timeStamp = null;
          try {
            Map<String, Object> initialTimestampSettings =
                (Map<String, Object>) mongoOptionsSettings.get(INITIAL_TIMESTAMP_FIELD);
            String scriptType = "js";
            if (initialTimestampSettings.containsKey(INITIAL_TIMESTAMP_SCRIPT_TYPE_FIELD)) {
              scriptType =
                  initialTimestampSettings.get(INITIAL_TIMESTAMP_SCRIPT_TYPE_FIELD).toString();
            }
            if (initialTimestampSettings.containsKey(INITIAL_TIMESTAMP_SCRIPT_FIELD)) {
              ExecutableScript scriptExecutable =
                  scriptService.executable(
                      scriptType,
                      initialTimestampSettings.get(INITIAL_TIMESTAMP_SCRIPT_FIELD).toString(),
                      ScriptService.ScriptType.INLINE,
                      Maps.newHashMap());
              Object ctx = scriptExecutable.run();
              logger.trace("initialTimestamp script returned: {}", ctx);
              if (ctx != null) {
                long timestamp = Long.parseLong(ctx.toString());
                timeStamp = new BSONTimestamp((int) (timestamp / 1000), 1); // seconds since epoch
              }
            }
          } catch (Throwable t) {
            logger.error("Could not set initial timestamp", t);
          } finally {
            builder.initialTimestamp(timeStamp);
          }
        }
      }
      builder.mongoClientOptions(mongoClientOptionsBuilder.build());

      // Credentials
      if (mongoSettings.containsKey(CREDENTIALS_FIELD)) {
        String dbCredential;
        String mau = ""; // mongo admin user
        String map = ""; // mongo admin password
        String maad = ""; // mongo admin auth database
        String mlu = ""; // mongo local user
        String mlp = ""; // mongo local password
        String mlad = ""; // mongo local auth database
        // String mdu = "";
        // String mdp = "";
        Object mongoCredentialsSettings = mongoSettings.get(CREDENTIALS_FIELD);
        boolean array = XContentMapValues.isArray(mongoCredentialsSettings);

        if (array) {
          ArrayList<Map<String, Object>> credentials =
              (ArrayList<Map<String, Object>>) mongoCredentialsSettings;
          for (Map<String, Object> credential : credentials) {
            dbCredential = XContentMapValues.nodeStringValue(credential.get(DB_FIELD), null);
            if (ADMIN_DB_FIELD.equals(dbCredential)) {
              mau = XContentMapValues.nodeStringValue(credential.get(USER_FIELD), null);
              map = XContentMapValues.nodeStringValue(credential.get(PASSWORD_FIELD), null);
              maad = XContentMapValues.nodeStringValue(credential.get(AUTH_FIELD), null);
            } else if (LOCAL_DB_FIELD.equals(dbCredential)) {
              mlu = XContentMapValues.nodeStringValue(credential.get(USER_FIELD), null);
              mlp = XContentMapValues.nodeStringValue(credential.get(PASSWORD_FIELD), null);
              mlad = XContentMapValues.nodeStringValue(credential.get(AUTH_FIELD), null);
              // } else {
              // mdu = XContentMapValues.nodeStringValue(
              // credential.get(USER_FIELD), null);
              // mdp = XContentMapValues.nodeStringValue(
              // credential.get(PASSWORD_FIELD), null);
            }
          }
        }
        builder.mongoAdminUser(mau);
        builder.mongoAdminPassword(map);
        builder.mongoAdminAuthDatabase(maad);
        builder.mongoLocalUser(mlu);
        builder.mongoLocalPassword(mlp);
        builder.mongoLocalAuthDatabase(mlad);
        // mongoDbUser = mdu;
        // mongoDbPassword = mdp;
      }

      builder.mongoDb(XContentMapValues.nodeStringValue(mongoSettings.get(DB_FIELD), riverName));
      builder.mongoCollection(
          XContentMapValues.nodeStringValue(mongoSettings.get(COLLECTION_FIELD), riverName));
      builder.mongoGridFS(
          XContentMapValues.nodeBooleanValue(mongoSettings.get(GRIDFS_FIELD), false));
      if (mongoSettings.containsKey(FILTER_FIELD)) {
        String filter = XContentMapValues.nodeStringValue(mongoSettings.get(FILTER_FIELD), "");
        filter = removePrefix("o.", filter);
        builder.mongoCollectionFilter(convertToBasicDBObject(filter));
        // DBObject bsonObject = (DBObject) JSON.parse(filter);
        // builder.mongoOplogFilter(convertToBasicDBObject(addPrefix("o.",
        // filter)));
        builder.mongoOplogFilter(convertToBasicDBObject(removePrefix("o.", filter)));
        // } else {
        // builder.mongoOplogFilter("");
      }

      if (mongoSettings.containsKey(SCRIPT_FIELD)) {
        String scriptType = "js";
        builder.script(mongoSettings.get(SCRIPT_FIELD).toString());
        if (mongoSettings.containsKey("scriptType")) {
          scriptType = mongoSettings.get("scriptType").toString();
        } else if (mongoSettings.containsKey(SCRIPT_TYPE_FIELD)) {
          scriptType = mongoSettings.get(SCRIPT_TYPE_FIELD).toString();
        }
        builder.scriptType(scriptType);
      }
    } else {
      mongoHost = DEFAULT_DB_HOST;
      mongoPort = DEFAULT_DB_PORT;
      try {
        mongoServers.add(new ServerAddress(mongoHost, mongoPort));
        builder.mongoServers(mongoServers);
      } catch (UnknownHostException e) {
        e.printStackTrace();
      }
      builder.mongoDb(riverName);
      builder.mongoCollection(riverName);
    }

    if (settings.settings().containsKey(INDEX_OBJECT)) {
      Map<String, Object> indexSettings =
          (Map<String, Object>) settings.settings().get(INDEX_OBJECT);
      builder.indexName(
          XContentMapValues.nodeStringValue(indexSettings.get(NAME_FIELD), builder.mongoDb));
      builder.typeName(
          XContentMapValues.nodeStringValue(indexSettings.get(TYPE_FIELD), builder.mongoDb));

      Bulk.Builder bulkBuilder = new Bulk.Builder();
      if (indexSettings.containsKey(BULK_FIELD)) {
        Map<String, Object> bulkSettings = (Map<String, Object>) indexSettings.get(BULK_FIELD);
        int bulkActions =
            XContentMapValues.nodeIntegerValue(
                bulkSettings.get(ACTIONS_FIELD), DEFAULT_BULK_ACTIONS);
        bulkBuilder.bulkActions(bulkActions);
        String size =
            XContentMapValues.nodeStringValue(
                bulkSettings.get(SIZE_FIELD), DEFAULT_BULK_SIZE.toString());
        bulkBuilder.bulkSize(ByteSizeValue.parseBytesSizeValue(size));
        bulkBuilder.concurrentRequests(
            XContentMapValues.nodeIntegerValue(
                bulkSettings.get(CONCURRENT_REQUESTS_FIELD),
                EsExecutors.boundedNumberOfProcessors(ImmutableSettings.EMPTY)));
        bulkBuilder.flushInterval(
            XContentMapValues.nodeTimeValue(
                bulkSettings.get(FLUSH_INTERVAL_FIELD), DEFAULT_FLUSH_INTERVAL));
        builder.throttleSize(
            XContentMapValues.nodeIntegerValue(
                indexSettings.get(THROTTLE_SIZE_FIELD), bulkActions * 5));
      } else {
        int bulkActions =
            XContentMapValues.nodeIntegerValue(
                indexSettings.get(BULK_SIZE_FIELD), DEFAULT_BULK_ACTIONS);
        bulkBuilder.bulkActions(bulkActions);
        bulkBuilder.bulkSize(DEFAULT_BULK_SIZE);
        bulkBuilder.flushInterval(
            XContentMapValues.nodeTimeValue(
                indexSettings.get(BULK_TIMEOUT_FIELD), DEFAULT_FLUSH_INTERVAL));
        bulkBuilder.concurrentRequests(
            XContentMapValues.nodeIntegerValue(
                indexSettings.get(CONCURRENT_BULK_REQUESTS_FIELD),
                EsExecutors.boundedNumberOfProcessors(ImmutableSettings.EMPTY)));
        builder.throttleSize(
            XContentMapValues.nodeIntegerValue(
                indexSettings.get(THROTTLE_SIZE_FIELD), bulkActions * 5));
      }
      builder.bulk(bulkBuilder.build());
    } else {
      builder.indexName(builder.mongoDb);
      builder.typeName(builder.mongoDb);
      builder.bulk(new Bulk.Builder().build());
    }
    return builder.build();
  }
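  // For orientation, a hypothetical river settings document that the parser
  // above would accept. Field names are assumed to match the *_FIELD constants
  // (e.g. SERVERS_FIELD = "servers"); values are illustrative only:
  //
  // {
  //   "type": "mongodb",
  //   "mongodb": {
  //     "servers": [ { "host": "localhost", "port": 27017 } ],
  //     "options": { "secondary_read_preference": true, "drop_collection": false },
  //     "credentials": [ { "db": "admin", "user": "u", "password": "p", "auth": "admin" } ],
  //     "db": "mydb",
  //     "collection": "mycollection",
  //     "gridfs": false
  //   },
  //   "index": {
  //     "name": "mydb",
  //     "type": "mydb",
  //     "bulk": { "actions": 1000, "size": "5mb", "concurrent_requests": 4 }
  //   }
  // }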