Example #1
  public void addDeletion(SearchRequestBuilder searchRequest) {
    searchRequest
        .addSort("_doc", SortOrder.ASC)
        .setScroll(TimeValue.timeValueMinutes(5))
        .setSize(100)
        // load only doc ids, not _source fields
        .setFetchSource(false);

    // this search is synchronous. An optimization would be to make it non-blocking,
    // but that requires tracking pending requests in close().
    // The same semaphore can't be reused because of a potential deadlock (it would
    // require acquiring two locks).
    SearchResponse searchResponse = searchRequest.get();

    while (true) {
      SearchHit[] hits = searchResponse.getHits().getHits();
      if (hits.length == 0) {
        // the scroll is exhausted: release its server-side context and stop
        client.nativeClient().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get();
        break;
      }

      for (SearchHit hit : hits) {
        DeleteRequestBuilder deleteRequestBuilder =
            client.prepareDelete(hit.index(), hit.type(), hit.getId());
        SearchHitField routing = hit.field("_routing");
        if (routing != null) {
          deleteRequestBuilder.setRouting(routing.getValue());
        }
        add(deleteRequestBuilder.request());
      }

      searchResponse =
          client
              .prepareSearchScroll(searchResponse.getScrollId())
              .setScroll(TimeValue.timeValueMinutes(5))
              .get();
    }
  }
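For context, a usage sketch (the index name and the surrounding bulkIndexer instance are assumptions, not part of the original): the caller supplies only the index and query, since addDeletion configures sorting, scrolling, size, and fetch-source itself.

  // hypothetical caller: queue deletion of every document matching a query
  SearchRequestBuilder search =
      client
          .prepareSearch("my_index") // assumed index name
          .setQuery(QueryBuilders.matchAllQuery());
  bulkIndexer.addDeletion(search);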
  public static void main(String[] args) throws Exception {
    NodesStressTest test =
        new NodesStressTest()
            .numberOfNodes(2)
            .indexThreads(5)
            .indexIterations(10 * 1000)
            .searcherThreads(5)
            .searchIterations(10 * 1000)
            .sleepBeforeClose(TimeValue.timeValueMinutes(10))
            .sleepAfterDone(TimeValue.timeValueMinutes(10))
            .build(EMPTY_SETTINGS);

    test.start();
  }
  @SuppressWarnings({"unchecked"})
  @Inject
  public SimpleRiver(
      RiverName riverName, RiverSettings settings, Client client, ThreadPool threadPool) {
    super(riverName, settings);
    this.client = client;

    if (settings.settings().containsKey("simple")) {
      Map<String, Object> simpleSettings = (Map<String, Object>) settings.settings().get("simple");
      simpleNumber = XContentMapValues.nodeIntegerValue(simpleSettings.get("number"), 100);
      fieldName = XContentMapValues.nodeStringValue(simpleSettings.get("field"), "test");
      poll =
          XContentMapValues.nodeTimeValue(
              simpleSettings.get("poll"), TimeValue.timeValueMinutes(60));
    }

    logger.info(
        "creating simple stream river for [{} numbers] with field [{}]", simpleNumber, fieldName);

    if (settings.settings().containsKey("index")) {
      Map<String, Object> indexSettings = (Map<String, Object>) settings.settings().get("index");
      indexName = XContentMapValues.nodeStringValue(indexSettings.get("index"), riverName.name());
      typeName = XContentMapValues.nodeStringValue(indexSettings.get("type"), "simple_type");
      bulkSize = XContentMapValues.nodeIntegerValue(indexSettings.get("bulk_size"), 100);
      bulkThreshold = XContentMapValues.nodeIntegerValue(indexSettings.get("bulk_threshold"), 10);
    } else {
      indexName = riverName.name();
      typeName = "simple_type";
      bulkSize = 100;
      bulkThreshold = 10;
    }
  }
 public void testCloseAndReopenOrDeleteWithActiveScroll() throws IOException {
   createIndex("test");
   for (int i = 0; i < 100; i++) {
     client()
         .prepareIndex("test", "type1", Integer.toString(i))
         .setSource(jsonBuilder().startObject().field("field", i).endObject())
         .execute()
         .actionGet();
   }
   refresh();
   SearchResponse searchResponse =
       client()
           .prepareSearch()
           .setQuery(matchAllQuery())
           .setSize(35)
           .setScroll(TimeValue.timeValueMinutes(2))
           .addSort("field", SortOrder.ASC)
           .execute()
           .actionGet();
   long counter = 0;
   assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
   assertThat(searchResponse.getHits().hits().length, equalTo(35));
   for (SearchHit hit : searchResponse.getHits()) {
     assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
   }
   if (randomBoolean()) {
     client().admin().indices().prepareClose("test").get();
     client().admin().indices().prepareOpen("test").get();
     ensureGreen("test");
   } else {
     client().admin().indices().prepareDelete("test").get();
   }
 }
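The test as written stops right after the close/reopen or delete. A minimal sketch of a follow-up assertion, assuming the scroll context does not survive either operation (the exact exception type is not shown in the original, so a broad catch is used):

   try {
     client()
         .prepareSearchScroll(searchResponse.getScrollId())
         .setScroll(TimeValue.timeValueMinutes(2))
         .get();
     fail("expected the scroll to fail once its index was closed or deleted");
   } catch (Exception e) {
     // expected: the scroll context is gone together with the index
   }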
  public ScrollChunk nextChunk() {

    final SearchResponse search;
    // make sure to return the initial hits, see
    // https://github.com/Graylog2/graylog2-server/issues/2126
    if (firstResponse == null) {
      search =
          client
              .prepareSearchScroll(scrollId)
              .setScroll(TimeValue.timeValueMinutes(1))
              .execute()
              .actionGet();
    } else {
      search = firstResponse;
      firstResponse = null;
    }

    final SearchHits hits = search.getHits();
    if (hits.getHits().length == 0) {
      // scroll exhausted
      LOG.debug("[{}] Reached end of scroll results.", queryHash, getOriginalQuery());
      return null;
    }
    LOG.debug(
        "[{}][{}] New scroll id {}, number of hits in chunk: {}",
        queryHash,
        chunkId,
        search.getScrollId(),
        hits.getHits().length);
    scrollId = search.getScrollId(); // save the id for the next request.

    return new ScrollChunk(hits, fields, chunkId++);
  }
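Since nextChunk() signals exhaustion by returning null, a consuming loop reduces to the sketch below (process is a hypothetical per-chunk handler):

  ScrollChunk chunk;
  while ((chunk = nextChunk()) != null) {
    process(chunk); // hypothetical handler for one page of hits
  }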
  public static void main(String[] args) throws Exception {
    System.setProperty("es.logger.prefix", "");

    Settings settings =
        settingsBuilder()
            .put("index.shard.check_index", true)
            .put("gateway.type", "none")
            .put("path.data", "data/data1,data/data2")
            .build();

    RollingRestartStressTest test =
        new RollingRestartStressTest()
            .settings(settings)
            .numberOfNodes(4)
            .numberOfShards(5)
            .numberOfReplicas(1)
            .initialNumberOfDocs(1000)
            .textTokens(150)
            .numberOfFields(10)
            .cleanNodeData(false)
            .indexers(5)
            .indexerThrottle(TimeValue.timeValueMillis(50))
            .period(TimeValue.timeValueMinutes(3));

    test.run();
  }
 @Test
 public void get_with_time_value_timeout_is_not_yet_implemented() {
   try {
     esTester.client().prepareSearchScroll("scrollId").get(TimeValue.timeValueMinutes(1));
     fail();
   } catch (Exception e) {
     assertThat(e).isInstanceOf(IllegalStateException.class).hasMessage("Not yet implemented");
   }
 }
Example #8
 @Override
 public boolean hasNext() {
   if (hits.isEmpty()) {
     SearchScrollRequestBuilder esRequest =
         esClient
             .prepareSearchScroll(scrollId)
             .setScroll(TimeValue.timeValueMinutes(SCROLL_TIME_IN_MINUTES));
     Collections.addAll(hits, esRequest.get().getHits().getHits());
   }
   return !hits.isEmpty();
 }
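A sketch of the next() that would pair with this hasNext(), assuming hits is a Queue&lt;SearchHit&gt; drained one element per call:

 @Override
 public SearchHit next() {
   if (!hasNext()) {
     throw new NoSuchElementException();
   }
   return hits.poll(); // hand out the buffered hits one at a time
 }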
  @Test
  @TestLogging(value = "cluster.service:TRACE")
  public void testDeleteCreateInOneBulk() throws Exception {
    internalCluster()
        .startNodesAsync(
            2, Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_KEY, "zen").build())
        .get();
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
    prepareCreate("test")
        .setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true)
        .addMapping("type")
        .get();
    ensureGreen("test");

    // now that the cluster is stable, remove publishing timeout
    assertAcked(
        client()
            .admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0")));

    Set<String> nodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames()));
    nodes.remove(internalCluster().getMasterName());

    // block a non-master node.
    BlockClusterStateProcessing disruption =
        new BlockClusterStateProcessing(nodes.iterator().next(), getRandom());
    internalCluster().setDisruptionScheme(disruption);
    logger.info("--> indexing a doc");
    index("test", "type", "1");
    refresh();
    disruption.startDisrupting();
    logger.info("--> delete index and recreate it");
    assertFalse(
        client()
            .admin()
            .indices()
            .prepareDelete("test")
            .setTimeout("200ms")
            .get()
            .isAcknowledged());
    assertFalse(
        prepareCreate("test")
            .setTimeout("200ms")
            .setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true)
            .get()
            .isAcknowledged());
    logger.info("--> letting cluster proceed");
    disruption.stopDisrupting();
    ensureGreen(TimeValue.timeValueMinutes(30), "test");
    assertHitCount(client().prepareSearch("test").get(), 0);
  }
  public void scanThis(
      AnyExecutor<T> any, QueryBuilder query, long keepTimeInMinutes, int pageSize) {
    SearchRequestBuilder srb =
        client
            .prepareSearch(getIndexName())
            .setQuery(query)
            .setSize(pageSize)
            .setSearchType(SearchType.SCAN)
            .setScroll(TimeValue.timeValueMinutes(keepTimeInMinutes));
    SearchResponse rsp = srb.execute().actionGet();

    try {
      int counter = 0;
      while (true) {
        rsp =
            client
                .prepareSearchScroll(rsp.scrollId())
                .setScroll(TimeValue.timeValueMinutes(keepTimeInMinutes))
                .execute()
                .actionGet();
        long currentResults = rsp.hits().hits().length;
        logger.info(
            "({}) scanquery with {} page size and {} hits", counter++, pageSize, currentResults);
        if (currentResults == 0) break;

        for (T t : collectObjects(rsp)) {
          any.execute(t);
        }
      }
    } catch (Exception ex) {
      logger.error("Cannot run scanThis", ex);
    }
  }
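A usage sketch, assuming AnyExecutor&lt;T&gt; is a single-method callback (MyType and handle are placeholders):

  scanThis(
      new AnyExecutor<MyType>() {
        @Override
        public void execute(MyType obj) {
          handle(obj); // hypothetical per-document callback
        }
      },
      QueryBuilders.matchAllQuery(),
      10, // keep the scroll alive for 10 minutes between pages
      500); // page size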
  @Test
  public void no_trace_logs() {
    logTester.setLevel(LoggerLevel.DEBUG);

    SearchResponse response =
        esTester
            .client()
            .prepareSearch(FakeIndexDefinition.INDEX)
            .setSearchType(SearchType.SCAN)
            .setScroll(TimeValue.timeValueMinutes(1))
            .get();
    logTester.clear();
    esTester.client().prepareSearchScroll(response.getScrollId()).get();
    assertThat(logTester.logs()).isEmpty();
  }
 public static long parseHumanDTToMills(String humanDateTime) {
   long datetime = 0;
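   // note: the order of these checks matters: "ms" must be handled before
   // "m" and "s", since indexOf would otherwise match those shorter suffixes
   // inside "ms"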
   if (humanDateTime.indexOf("ms") != -1) {
     datetime =
         TimeValue.timeValueMillis(NumberUtils.toLong(humanDateTime.replaceAll("ms", "")))
             .getMillis();
   } else if (humanDateTime.indexOf("h") != -1) {
     datetime =
         TimeValue.timeValueHours(NumberUtils.toLong(humanDateTime.replaceAll("h", "")))
             .getMillis();
   } else if (humanDateTime.indexOf("m") != -1) {
     datetime =
         TimeValue.timeValueMinutes(NumberUtils.toLong(humanDateTime.replaceAll("m", "")))
             .getMillis();
   } else if (humanDateTime.indexOf("s") != -1) {
     datetime =
         TimeValue.timeValueSeconds(NumberUtils.toLong(humanDateTime.replaceAll("s", "")))
             .getMillis();
   }
   return datetime;
 }
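For illustration, a few inputs and the millisecond values they yield:

 parseHumanDTToMills("500ms"); // 500
 parseHumanDTToMills("2h");    // 7200000
 parseHumanDTToMills("5m");    // 300000
 parseHumanDTToMills("30s");   // 30000
 parseHumanDTToMills("123");   // 0 (no recognized unit suffix)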
 private void cleanFailedShards(final ClusterChangedEvent event) {
   RoutingTable routingTable = event.state().routingTable();
   RoutingNodes.RoutingNodeIterator routingNode =
       event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
   if (routingNode == null) {
     failedShards.clear();
     return;
   }
   DiscoveryNodes nodes = event.state().nodes();
   long now = System.currentTimeMillis();
   String localNodeId = nodes.localNodeId();
   Iterator<Map.Entry<ShardId, FailedShard>> iterator = failedShards.entrySet().iterator();
   shards:
   while (iterator.hasNext()) {
     Map.Entry<ShardId, FailedShard> entry = iterator.next();
     FailedShard failedShard = entry.getValue();
     IndexRoutingTable indexRoutingTable = routingTable.index(entry.getKey().getIndex());
     if (indexRoutingTable != null) {
       IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(entry.getKey().id());
       if (shardRoutingTable != null) {
         for (ShardRouting shardRouting : shardRoutingTable.assignedShards()) {
           if (localNodeId.equals(shardRouting.currentNodeId())) {
            // we have a timeout here just to make sure we don't keep dangling failed
            // shards around; it's just another safety layer
             if (shardRouting.version() == failedShard.version
                 && ((now - failedShard.timestamp) < TimeValue.timeValueMinutes(60).millis())) {
               // It's the same failed shard - keep it if it hasn't timed out
               continue shards;
             } else {
               // Different version or expired, remove it
               break;
             }
           }
         }
       }
     }
     iterator.remove();
   }
 }
  void assertNewReplicasWork(String indexName) throws Exception {
    final int numReplicas = 1;
    final long startTime = System.currentTimeMillis();
    logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, indexName);
    assertAcked(
        client()
            .admin()
            .indices()
            .prepareUpdateSettings(indexName)
            .setSettings(Settings.builder().put("number_of_replicas", numReplicas))
            .execute()
            .actionGet());
    ensureGreen(TimeValue.timeValueMinutes(2), indexName);
    logger.debug(
        "--> index [{}] is green, took [{}]",
        indexName,
        TimeValue.timeValueMillis(System.currentTimeMillis() - startTime));
    logger.debug(
        "--> recovery status:\n{}",
        XContentHelper.toString(client().admin().indices().prepareRecoveries(indexName).get()));

    // TODO: do something with the replicas! query? index?
  }
  @Override
  public void start() {
    logger.info("starting simple stream");
    bulkProcessor =
        BulkProcessor.builder(
                client,
                new BulkProcessor.Listener() {

                  @Override
                  public void beforeBulk(long executionId, BulkRequest request) {
                    logger.info(
                        "Going to execute new bulk composed of {} actions",
                        request.numberOfActions());
                  }

                  @Override
                  public void afterBulk(
                      long executionId, BulkRequest request, BulkResponse response) {
                    logger.info("Executed bulk composed of {} actions", request.numberOfActions());
                  }

                  @Override
                  public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                    logger.warn("Error executing bulk", failure);
                  }
                })
            .setBulkActions(bulkSize)
            .setFlushInterval(TimeValue.timeValueMinutes(5))
            .setConcurrentRequests(bulkThreshold)
            .build();

    thread =
        EsExecutors.daemonThreadFactory(settings.globalSettings(), "Simple processor")
            .newThread(new SimpleConnector());
    thread.start();
  }
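A sketch of what the SimpleConnector thread presumably feeds the processor (this loop is an assumption; only the index, type, field, and count come from the constructor defaults above): hand index requests to bulkProcessor and let the size and flush-interval triggers batch them.

  // hypothetical body of SimpleConnector.run()
  for (int i = 0; i < simpleNumber; i++) {
    bulkProcessor.add(
        client.prepareIndex(indexName, typeName).setSource(fieldName, i).request());
  }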
  private void waitForClusterState(long clusterStateVersion) {
    ClusterStateObserver observer =
        new ClusterStateObserver(
            clusterService, TimeValue.timeValueMinutes(5), logger, threadPool.getThreadContext());
    final ClusterState clusterState = observer.observedState();
    if (clusterState.getVersion() >= clusterStateVersion) {
      logger.trace(
          "node has cluster state with version higher than {} (current: {})",
          clusterStateVersion,
          clusterState.getVersion());
      return;
    } else {
      logger.trace(
          "waiting for cluster state version {} (current: {})",
          clusterStateVersion,
          clusterState.getVersion());
      final PlainActionFuture<Void> future = new PlainActionFuture<>();
      observer.waitForNextChange(
          new ClusterStateObserver.Listener() {

            @Override
            public void onNewClusterState(ClusterState state) {
              future.onResponse(null);
            }

            @Override
            public void onClusterServiceClose() {
              future.onFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
              future.onFailure(
                  new IllegalStateException(
                      "cluster state never updated to version " + clusterStateVersion));
            }
          },
          new ClusterStateObserver.ValidationPredicate() {

            @Override
            protected boolean validate(ClusterState newState) {
              return newState.getVersion() >= clusterStateVersion;
            }
          });
      try {
        future.get();
        logger.trace(
            "successfully waited for cluster state with version {} (current: {})",
            clusterStateVersion,
            observer.observedState().getVersion());
      } catch (Exception e) {
        logger.debug(
            (Supplier<?>)
                () ->
                    new ParameterizedMessage(
                        "failed waiting for cluster state with version {} (current: {})",
                        clusterStateVersion,
                        observer.observedState()),
            e);
        throw ExceptionsHelper.convertToRuntime(e);
      }
    }
  }
  @SuppressWarnings({"unchecked"})
  @Inject
  public RabbitmqRiver(
      RiverName riverName, RiverSettings settings, Client client, ScriptService scriptService) {
    super(riverName, settings);
    this.client = client;

    if (settings.settings().containsKey("rabbitmq")) {
      Map<String, Object> rabbitSettings =
          (Map<String, Object>) settings.settings().get("rabbitmq");

      if (rabbitSettings.containsKey("addresses")) {
        List<Address> addresses = new ArrayList<Address>();
        for (Map<String, Object> address :
            (List<Map<String, Object>>) rabbitSettings.get("addresses")) {
          addresses.add(
              new Address(
                  XContentMapValues.nodeStringValue(address.get("host"), "localhost"),
                  XContentMapValues.nodeIntegerValue(address.get("port"), AMQP.PROTOCOL.PORT)));
        }
        rabbitAddresses = addresses.toArray(new Address[addresses.size()]);
      } else {
        String rabbitHost =
            XContentMapValues.nodeStringValue(rabbitSettings.get("host"), "localhost");
        int rabbitPort =
            XContentMapValues.nodeIntegerValue(rabbitSettings.get("port"), AMQP.PROTOCOL.PORT);
        rabbitAddresses = new Address[] {new Address(rabbitHost, rabbitPort)};
      }

      rabbitUser = XContentMapValues.nodeStringValue(rabbitSettings.get("user"), "guest");
      rabbitPassword = XContentMapValues.nodeStringValue(rabbitSettings.get("pass"), "guest");
      rabbitVhost = XContentMapValues.nodeStringValue(rabbitSettings.get("vhost"), "/");

      rabbitQueue = XContentMapValues.nodeStringValue(rabbitSettings.get("queue"), "elasticsearch");
      rabbitExchange =
          XContentMapValues.nodeStringValue(rabbitSettings.get("exchange"), "elasticsearch");
      rabbitRoutingKey =
          XContentMapValues.nodeStringValue(rabbitSettings.get("routing_key"), "elasticsearch");

      rabbitExchangeDeclare =
          XContentMapValues.nodeBooleanValue(rabbitSettings.get("exchange_declare"), true);
      if (rabbitExchangeDeclare) {

        rabbitExchangeType =
            XContentMapValues.nodeStringValue(rabbitSettings.get("exchange_type"), "direct");
        rabbitExchangeDurable =
            XContentMapValues.nodeBooleanValue(rabbitSettings.get("exchange_durable"), true);
      } else {
        rabbitExchangeType = "direct";
        rabbitExchangeDurable = true;
      }

      rabbitQueueDeclare =
          XContentMapValues.nodeBooleanValue(rabbitSettings.get("queue_declare"), true);
      if (rabbitQueueDeclare) {
        rabbitQueueDurable =
            XContentMapValues.nodeBooleanValue(rabbitSettings.get("queue_durable"), true);
        rabbitQueueAutoDelete =
            XContentMapValues.nodeBooleanValue(rabbitSettings.get("queue_auto_delete"), false);
        if (rabbitSettings.containsKey("args")) {
          rabbitQueueArgs = (Map<String, Object>) rabbitSettings.get("args");
        }
      } else {
        rabbitQueueDurable = true;
        rabbitQueueAutoDelete = false;
      }
      rabbitQueueBind = XContentMapValues.nodeBooleanValue(rabbitSettings.get("queue_bind"), true);

      rabbitQosPrefetchSize =
          XContentMapValues.nodeIntegerValue(rabbitSettings.get("qos_prefetch_size"), 0);
      rabbitQosPrefetchCount =
          XContentMapValues.nodeIntegerValue(rabbitSettings.get("qos_prefetch_count"), 10);

      rabbitHeartbeat =
          TimeValue.parseTimeValue(
              XContentMapValues.nodeStringValue(rabbitSettings.get("heartbeat"), "30m"),
              TimeValue.timeValueMinutes(30));

    } else {
      rabbitAddresses = new Address[] {new Address("localhost", AMQP.PROTOCOL.PORT)};
      rabbitUser = "******";
      rabbitPassword = "******";
      rabbitVhost = "/";

      rabbitQueue = "elasticsearch";
      rabbitQueueAutoDelete = false;
      rabbitQueueDurable = true;
      rabbitExchange = "elasticsearch";
      rabbitExchangeType = "direct";
      rabbitExchangeDurable = true;
      rabbitRoutingKey = "elasticsearch";

      rabbitExchangeDeclare = true;
      rabbitQueueDeclare = true;
      rabbitQueueBind = true;

      rabbitQosPrefetchSize = 0;
      rabbitQosPrefetchCount = 10;

      rabbitHeartbeat = TimeValue.timeValueMinutes(30);
    }

    if (settings.settings().containsKey("index")) {
      Map<String, Object> indexSettings = (Map<String, Object>) settings.settings().get("index");
      bulkSize = XContentMapValues.nodeIntegerValue(indexSettings.get("bulk_size"), 100);
      if (indexSettings.containsKey("bulk_timeout")) {
        bulkTimeout =
            TimeValue.parseTimeValue(
                XContentMapValues.nodeStringValue(indexSettings.get("bulk_timeout"), "10ms"),
                TimeValue.timeValueMillis(10));
      } else {
        bulkTimeout = TimeValue.timeValueMillis(10);
      }
      ordered = XContentMapValues.nodeBooleanValue(indexSettings.get("ordered"), false);
    } else {
      bulkSize = 100;
      bulkTimeout = TimeValue.timeValueMillis(10);
      ordered = false;
    }

    if (settings.settings().containsKey("bulk_script_filter")) {
      Map<String, Object> scriptSettings =
          (Map<String, Object>) settings.settings().get("bulk_script_filter");
      if (scriptSettings.containsKey("script")) {
        String scriptLang = "native";
        if (scriptSettings.containsKey("script_lang")) {
          scriptLang = scriptSettings.get("script_lang").toString();
        }
        Map<String, Object> scriptParams = null;
        if (scriptSettings.containsKey("script_params")) {
          scriptParams = (Map<String, Object>) scriptSettings.get("script_params");
        } else {
          scriptParams = Maps.newHashMap();
        }
        bulkScript =
            scriptService.executable(
                scriptLang, scriptSettings.get("script").toString(), scriptParams);
      } else {
        bulkScript = null;
      }
    } else {
      bulkScript = null;
    }

    if (settings.settings().containsKey("script_filter")) {
      Map<String, Object> scriptSettings =
          (Map<String, Object>) settings.settings().get("script_filter");
      if (scriptSettings.containsKey("script")) {
        String scriptLang = "mvel";
        if (scriptSettings.containsKey("script_lang")) {
          scriptLang = scriptSettings.get("script_lang").toString();
        }
        Map<String, Object> scriptParams = null;
        if (scriptSettings.containsKey("script_params")) {
          scriptParams = (Map<String, Object>) scriptSettings.get("script_params");
        } else {
          scriptParams = Maps.newHashMap();
        }
        script =
            scriptService.executable(
                scriptLang, scriptSettings.get("script").toString(), scriptParams);
      } else {
        script = null;
      }
    } else {
      script = null;
    }
  }
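For reference, a sketch of a river _meta document this constructor can parse; the keys mirror the lookups above, and the values are illustrative defaults:

  XContentBuilder meta =
      jsonBuilder()
          .startObject()
          .field("type", "rabbitmq")
          .startObject("rabbitmq")
          .field("host", "localhost")
          .field("port", 5672)
          .field("user", "guest")
          .field("pass", "guest")
          .field("vhost", "/")
          .field("queue", "elasticsearch")
          .field("exchange", "elasticsearch")
          .field("routing_key", "elasticsearch")
          .field("heartbeat", "30m")
          .endObject()
          .startObject("index")
          .field("bulk_size", 100)
          .field("bulk_timeout", "10ms")
          .field("ordered", false)
          .endObject()
          .endObject();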
public class GatewayService extends AbstractLifecycleComponent implements ClusterStateListener {

  public static final Setting<Integer> EXPECTED_NODES_SETTING =
      Setting.intSetting("gateway.expected_nodes", -1, -1, Property.NodeScope);
  public static final Setting<Integer> EXPECTED_DATA_NODES_SETTING =
      Setting.intSetting("gateway.expected_data_nodes", -1, -1, Property.NodeScope);
  public static final Setting<Integer> EXPECTED_MASTER_NODES_SETTING =
      Setting.intSetting("gateway.expected_master_nodes", -1, -1, Property.NodeScope);
  public static final Setting<TimeValue> RECOVER_AFTER_TIME_SETTING =
      Setting.positiveTimeSetting(
          "gateway.recover_after_time", TimeValue.timeValueMillis(0), Property.NodeScope);
  public static final Setting<Integer> RECOVER_AFTER_NODES_SETTING =
      Setting.intSetting("gateway.recover_after_nodes", -1, -1, Property.NodeScope);
  public static final Setting<Integer> RECOVER_AFTER_DATA_NODES_SETTING =
      Setting.intSetting("gateway.recover_after_data_nodes", -1, -1, Property.NodeScope);
  public static final Setting<Integer> RECOVER_AFTER_MASTER_NODES_SETTING =
      Setting.intSetting("gateway.recover_after_master_nodes", 0, 0, Property.NodeScope);

  public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK =
      new ClusterBlock(
          1,
          "state not recovered / initialized",
          true,
          true,
          RestStatus.SERVICE_UNAVAILABLE,
          ClusterBlockLevel.ALL);

  public static final TimeValue DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET =
      TimeValue.timeValueMinutes(5);

  private final Gateway gateway;

  private final ThreadPool threadPool;

  private final AllocationService allocationService;

  private final ClusterService clusterService;

  private final TimeValue recoverAfterTime;
  private final int recoverAfterNodes;
  private final int expectedNodes;
  private final int recoverAfterDataNodes;
  private final int expectedDataNodes;
  private final int recoverAfterMasterNodes;
  private final int expectedMasterNodes;

  private final AtomicBoolean recovered = new AtomicBoolean();
  private final AtomicBoolean scheduledRecovery = new AtomicBoolean();

  @Inject
  public GatewayService(
      Settings settings,
      AllocationService allocationService,
      ClusterService clusterService,
      ThreadPool threadPool,
      GatewayMetaState metaState,
      TransportNodesListGatewayMetaState listGatewayMetaState,
      Discovery discovery,
      IndicesService indicesService) {
    super(settings);
    this.gateway =
        new Gateway(
            settings, clusterService, metaState, listGatewayMetaState, discovery, indicesService);
    this.allocationService = allocationService;
    this.clusterService = clusterService;
    this.threadPool = threadPool;
    // these settings control how long to delay state recovery, and hence when indices get created
    this.expectedNodes = EXPECTED_NODES_SETTING.get(this.settings);
    this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(this.settings);
    this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(this.settings);

    if (RECOVER_AFTER_TIME_SETTING.exists(this.settings)) {
      recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(this.settings);
    } else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
      recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
    } else {
      recoverAfterTime = null;
    }
    this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(this.settings);
    this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(this.settings);
    // default the recover after master nodes to the minimum master nodes in the discovery
    if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(this.settings)) {
      recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(this.settings);
    } else {
      // TODO: change me once the minimum_master_nodes is changed too
      recoverAfterMasterNodes = settings.getAsInt("discovery.zen.minimum_master_nodes", -1);
    }

    // add the "state not recovered" block as an initial state block; we don't allow
    // anything until the state has been recovered
    this.clusterService.addInitialStateBlock(STATE_NOT_RECOVERED_BLOCK);
  }

  @Override
  protected void doStart() {
    // use post applied so that the state will be visible to the background recovery thread we spawn
    // in performStateRecovery
    clusterService.addListener(this);
  }

  @Override
  protected void doStop() {
    clusterService.removeListener(this);
  }

  @Override
  protected void doClose() {}

  @Override
  public void clusterChanged(final ClusterChangedEvent event) {
    if (lifecycle.stoppedOrClosed()) {
      return;
    }

    final ClusterState state = event.state();

    if (state.nodes().isLocalNodeElectedMaster() == false) {
      // not our job to recover
      return;
    }
    if (state.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) {
      // already recovered
      return;
    }

    DiscoveryNodes nodes = state.nodes();
    if (state.nodes().getMasterNodeId() == null) {
      logger.debug("not recovering from gateway, no master elected yet");
    } else if (recoverAfterNodes != -1
        && (nodes.getMasterAndDataNodes().size()) < recoverAfterNodes) {
      logger.debug(
          "not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]",
          nodes.getMasterAndDataNodes().size(),
          recoverAfterNodes);
    } else if (recoverAfterDataNodes != -1 && nodes.getDataNodes().size() < recoverAfterDataNodes) {
      logger.debug(
          "not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]",
          nodes.getDataNodes().size(),
          recoverAfterDataNodes);
    } else if (recoverAfterMasterNodes != -1
        && nodes.getMasterNodes().size() < recoverAfterMasterNodes) {
      logger.debug(
          "not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]",
          nodes.getMasterNodes().size(),
          recoverAfterMasterNodes);
    } else {
      boolean enforceRecoverAfterTime;
      String reason;
      if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) {
        // no expected_* settings are configured, so honor recover_after_time if it is set
        enforceRecoverAfterTime = true;
        reason = "recover_after_time was set to [" + recoverAfterTime + "]";
      } else {
        // at least one expected_* setting is configured; check whether they are all
        // satisfied, and if so ignore the recover_after_time timeout
        enforceRecoverAfterTime = false;
        reason = "";
        if (expectedNodes != -1
            && (nodes.getMasterAndDataNodes().size()
                < expectedNodes)) { // does not meet the expected...
          enforceRecoverAfterTime = true;
          reason =
              "expecting ["
                  + expectedNodes
                  + "] nodes, but only have ["
                  + nodes.getMasterAndDataNodes().size()
                  + "]";
        } else if (expectedDataNodes != -1
            && (nodes.getDataNodes().size() < expectedDataNodes)) { // does not meet the expected...
          enforceRecoverAfterTime = true;
          reason =
              "expecting ["
                  + expectedDataNodes
                  + "] data nodes, but only have ["
                  + nodes.getDataNodes().size()
                  + "]";
        } else if (expectedMasterNodes != -1
            && (nodes.getMasterNodes().size()
                < expectedMasterNodes)) { // does not meet the expected...
          enforceRecoverAfterTime = true;
          reason =
              "expecting ["
                  + expectedMasterNodes
                  + "] master nodes, but only have ["
                  + nodes.getMasterNodes().size()
                  + "]";
        }
      }
      performStateRecovery(enforceRecoverAfterTime, reason);
    }
  }

  private void performStateRecovery(boolean enforceRecoverAfterTime, String reason) {
    final Gateway.GatewayStateRecoveredListener recoveryListener = new GatewayRecoveryListener();

    if (enforceRecoverAfterTime && recoverAfterTime != null) {
      if (scheduledRecovery.compareAndSet(false, true)) {
        logger.info("delaying initial state recovery for [{}]. {}", recoverAfterTime, reason);
        threadPool.schedule(
            recoverAfterTime,
            ThreadPool.Names.GENERIC,
            () -> {
              if (recovered.compareAndSet(false, true)) {
                logger.info(
                    "recover_after_time [{}] elapsed. performing state recovery...",
                    recoverAfterTime);
                gateway.performStateRecovery(recoveryListener);
              }
            });
      }
    } else {
      if (recovered.compareAndSet(false, true)) {
        threadPool
            .generic()
            .execute(
                new AbstractRunnable() {
                  @Override
                  public void onFailure(Exception e) {
                    logger.warn("Recovery failed", e);
                    // `recovered` is reset in the listener; don't reset it here, otherwise
                    // it might race and flip back to false while a new recovery is already
                    // running
                    recoveryListener.onFailure("state recovery failed: " + e.getMessage());
                  }

                  @Override
                  protected void doRun() throws Exception {
                    gateway.performStateRecovery(recoveryListener);
                  }
                });
      }
    }
  }

  public Gateway getGateway() {
    return gateway;
  }

  class GatewayRecoveryListener implements Gateway.GatewayStateRecoveredListener {

    @Override
    public void onSuccess(final ClusterState recoveredState) {
      logger.trace("successful state recovery, importing cluster state...");
      clusterService.submitStateUpdateTask(
          "local-gateway-elected-state",
          new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
              assert currentState.metaData().indices().isEmpty();

              // remove the block, since we recovered from gateway
              ClusterBlocks.Builder blocks =
                  ClusterBlocks.builder()
                      .blocks(currentState.blocks())
                      .blocks(recoveredState.blocks())
                      .removeGlobalBlock(STATE_NOT_RECOVERED_BLOCK);

              MetaData.Builder metaDataBuilder = MetaData.builder(recoveredState.metaData());
              // automatically generate a UID for the metadata if we need to
              metaDataBuilder.generateClusterUuidIfNeeded();

              if (MetaData.SETTING_READ_ONLY_SETTING.get(recoveredState.metaData().settings())
                  || MetaData.SETTING_READ_ONLY_SETTING.get(currentState.metaData().settings())) {
                blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
              }

              for (IndexMetaData indexMetaData : recoveredState.metaData()) {
                metaDataBuilder.put(indexMetaData, false);
                blocks.addBlocks(indexMetaData);
              }

              // update the state to reflect the new metadata and routing
              ClusterState updatedState =
                  ClusterState.builder(currentState)
                      .blocks(blocks)
                      .metaData(metaDataBuilder)
                      .build();

              // initialize all index routing tables as empty
              RoutingTable.Builder routingTableBuilder =
                  RoutingTable.builder(updatedState.routingTable());
              for (ObjectCursor<IndexMetaData> cursor :
                  updatedState.metaData().indices().values()) {
                routingTableBuilder.addAsRecovery(cursor.value);
              }
              // start with 0 based versions for routing table
              routingTableBuilder.version(0);

              // now, reroute
              updatedState =
                  ClusterState.builder(updatedState)
                      .routingTable(routingTableBuilder.build())
                      .build();
              return allocationService.reroute(updatedState, "state recovered");
            }

            @Override
            public void onFailure(String source, Exception e) {
              logger.error(
                  (Supplier<?>)
                      () -> new ParameterizedMessage("unexpected failure during [{}]", source),
                  e);
              GatewayRecoveryListener.this.onFailure("failed to updated cluster state");
            }

            @Override
            public void clusterStateProcessed(
                String source, ClusterState oldState, ClusterState newState) {
              logger.info(
                  "recovered [{}] indices into cluster_state",
                  newState.metaData().indices().size());
            }
          });
    }

    @Override
    public void onFailure(String message) {
      recovered.set(false);
      scheduledRecovery.set(false);
      // don't remove the block here, we don't want to allow anything in such a case
      logger.info("metadata state not restored, reason: {}", message);
    }
  }

  // used for testing
  public TimeValue recoverAfterTime() {
    return recoverAfterTime;
  }
}
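To make the recovery gating concrete, a hedged settings sketch: with the values below, an elected master refuses to recover state until at least 2 data+master nodes have joined, and if fewer than the 3 expected nodes are present it waits up to recover_after_time before recovering anyway.

Settings gatewaySettings =
    Settings.builder()
        .put("gateway.expected_nodes", 3)
        .put("gateway.recover_after_nodes", 2)
        .put("gateway.recover_after_time", "5m")
        .build();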
  @Test
  public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws Exception {
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(Settings.settingsBuilder().put("index.number_of_shards", 3))
        .execute()
        .actionGet();
    client()
        .admin()
        .cluster()
        .prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForGreenStatus()
        .execute()
        .actionGet();

    for (int i = 0; i < 100; i++) {
      String routing = "0";
      if (i > 90) {
        routing = "1";
      } else if (i > 60) {
        routing = "2";
      }
      client()
          .prepareIndex("test", "type1", Integer.toString(i))
          .setSource("field", i)
          .setRouting(routing)
          .execute()
          .actionGet();
    }

    client().admin().indices().prepareRefresh().execute().actionGet();

    SearchResponse searchResponse =
        client()
            .prepareSearch()
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setQuery(matchAllQuery())
            .setSize(3)
            .setScroll(TimeValue.timeValueMinutes(2))
            .addSort("field", SortOrder.ASC)
            .execute()
            .actionGet();
    try {
      long counter = 0;

      assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
      assertThat(searchResponse.getHits().hits().length, equalTo(3));
      for (SearchHit hit : searchResponse.getHits()) {
        assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
      }

      for (int i = 0; i < 32; i++) {
        searchResponse =
            client()
                .prepareSearchScroll(searchResponse.getScrollId())
                .setScroll(TimeValue.timeValueMinutes(2))
                .execute()
                .actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
        assertThat(searchResponse.getHits().hits().length, equalTo(3));
        for (SearchHit hit : searchResponse.getHits()) {
          assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
        }
      }

      // and now the last page contains a single hit
      searchResponse =
          client()
              .prepareSearchScroll(searchResponse.getScrollId())
              .setScroll(TimeValue.timeValueMinutes(2))
              .execute()
              .actionGet();

      assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
      assertThat(searchResponse.getHits().hits().length, equalTo(1));
      for (SearchHit hit : searchResponse.getHits()) {
        assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
      }

      // and the final page is empty
      searchResponse =
          client()
              .prepareSearchScroll(searchResponse.getScrollId())
              .setScroll(TimeValue.timeValueMinutes(2))
              .execute()
              .actionGet();

      assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
      assertThat(searchResponse.getHits().hits().length, equalTo(0));
      for (SearchHit hit : searchResponse.getHits()) {
        assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
      }

    } finally {
      clearScroll(searchResponse.getScrollId());
    }
  }
/** @author kimchy (shay.banon) */
public class RollingRestartStressTest {

  private final ESLogger logger = Loggers.getLogger(getClass());

  private int numberOfShards = 5;
  private int numberOfReplicas = 1;
  private int numberOfNodes = 4;

  private int textTokens = 150;
  private int numberOfFields = 10;
  private long initialNumberOfDocs = 100000;

  private int indexers = 0;

  private TimeValue indexerThrottle = TimeValue.timeValueMillis(100);

  private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;

  private TimeValue period = TimeValue.timeValueMinutes(20);

  private boolean clearNodeData = true;

  private Node client;

  private AtomicLong indexCounter = new AtomicLong();
  private AtomicLong idCounter = new AtomicLong();

  public RollingRestartStressTest numberOfNodes(int numberOfNodes) {
    this.numberOfNodes = numberOfNodes;
    return this;
  }

  public RollingRestartStressTest numberOfShards(int numberOfShards) {
    this.numberOfShards = numberOfShards;
    return this;
  }

  public RollingRestartStressTest numberOfReplicas(int numberOfReplicas) {
    this.numberOfReplicas = numberOfReplicas;
    return this;
  }

  public RollingRestartStressTest initialNumberOfDocs(long initialNumberOfDocs) {
    this.initialNumberOfDocs = initialNumberOfDocs;
    return this;
  }

  public RollingRestartStressTest textTokens(int textTokens) {
    this.textTokens = textTokens;
    return this;
  }

  public RollingRestartStressTest numberOfFields(int numberOfFields) {
    this.numberOfFields = numberOfFields;
    return this;
  }

  public RollingRestartStressTest indexers(int indexers) {
    this.indexers = indexers;
    return this;
  }

  public RollingRestartStressTest indexerThrottle(TimeValue indexerThrottle) {
    this.indexerThrottle = indexerThrottle;
    return this;
  }

  public RollingRestartStressTest period(TimeValue period) {
    this.period = period;
    return this;
  }

  public RollingRestartStressTest cleanNodeData(boolean clearNodeData) {
    this.clearNodeData = clearNodeData;
    return this;
  }

  public RollingRestartStressTest settings(Settings settings) {
    this.settings = settings;
    return this;
  }

  public void run() throws Exception {
    Node[] nodes = new Node[numberOfNodes];
    for (int i = 0; i < nodes.length; i++) {
      nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
    }
    client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();

    client
        .client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            settingsBuilder()
                .put("index.number_of_shards", numberOfShards)
                .put("index.number_of_replicas", numberOfReplicas))
        .execute()
        .actionGet();

    logger.info("********** [START] INDEXING INITIAL DOCS");
    for (long i = 0; i < initialNumberOfDocs; i++) {
      indexDoc();
    }
    logger.info("********** [DONE ] INDEXING INITIAL DOCS");

    Indexer[] indexerThreads = new Indexer[indexers];
    for (int i = 0; i < indexerThreads.length; i++) {
      indexerThreads[i] = new Indexer();
    }
    for (int i = 0; i < indexerThreads.length; i++) {
      indexerThreads[i].start();
    }

    long testStart = System.currentTimeMillis();

    // start doing the rolling restart
    int nodeIndex = 0;
    while (true) {
      File[] nodeData =
          ((InternalNode) nodes[nodeIndex])
              .injector()
              .getInstance(NodeEnvironment.class)
              .nodeDataLocations();
      nodes[nodeIndex].close();
      if (clearNodeData) {
        FileSystemUtils.deleteRecursively(nodeData);
      }

      try {
        ClusterHealthResponse clusterHealth =
            client
                .client()
                .admin()
                .cluster()
                .prepareHealth()
                .setWaitForGreenStatus()
                .setWaitForNodes(Integer.toString(numberOfNodes - 1 + 1 /* client node */))
                .setWaitForRelocatingShards(0)
                .setTimeout("10m")
                .execute()
                .actionGet();
        if (clusterHealth.timedOut()) {
          logger.warn("timed out waiting for green status....");
        }
      } catch (Exception e) {
        logger.warn("failed to execute cluster health....");
      }

      nodes[nodeIndex] = NodeBuilder.nodeBuilder().settings(settings).node();

      Thread.sleep(1000);

      try {
        ClusterHealthResponse clusterHealth =
            client
                .client()
                .admin()
                .cluster()
                .prepareHealth()
                .setWaitForGreenStatus()
                .setWaitForNodes(Integer.toString(numberOfNodes + 1 /* client node*/))
                .setWaitForRelocatingShards(0)
                .setTimeout("10m")
                .execute()
                .actionGet();
        if (clusterHealth.timedOut()) {
          logger.warn("timed out waiting for green status....");
        }
      } catch (Exception e) {
        logger.warn("failed to execute cluster health....");
      }

      if (++nodeIndex == nodes.length) {
        nodeIndex = 0;
      }

      if ((System.currentTimeMillis() - testStart) > period.millis()) {
        logger.info("test finished");
        break;
      }
    }

    for (int i = 0; i < indexerThreads.length; i++) {
      indexerThreads[i].close = true;
    }

    Thread.sleep(indexerThrottle.millis() + 10000);

    for (int i = 0; i < indexerThreads.length; i++) {
      if (!indexerThreads[i].closed) {
        logger.warn("thread not closed!");
      }
    }

    client.client().admin().indices().prepareRefresh().execute().actionGet();

    // check the status
    IndicesStatusResponse status =
        client.client().admin().indices().prepareStatus("test").execute().actionGet();
    for (IndexShardStatus shardStatus : status.index("test")) {
      ShardStatus shard = shardStatus.shards()[0];
      logger.info("shard [{}], docs [{}]", shard.shardId(), shard.getDocs().numDocs());
      for (ShardStatus replicaStatus : shardStatus) {
        if (shard.docs().numDocs() != replicaStatus.docs().numDocs()) {
          logger.warn(
              "shard doc number does not match, got {} and {}",
              shard.docs().numDocs(),
              replicaStatus.docs().numDocs());
        }
      }
    }

    // check the count
    for (int i = 0; i < (nodes.length * 5); i++) {
      CountResponse count =
          client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
      logger.info(
          "count [{}], indexed [{}], [{}]",
          count.count(),
          indexCounter.get(),
          count.count() == indexCounter.get() ? "OK" : "FAIL");
      if (count.count() != indexCounter.get()) {
        logger.warn("count does not match!");
      }
    }

    // scan all the docs, verify all have the same version based on the number of replicas
    SearchResponse searchResponse =
        client
            .client()
            .prepareSearch()
            .setSearchType(SearchType.SCAN)
            .setQuery(matchAllQuery())
            .setSize(50)
            .setScroll(TimeValue.timeValueMinutes(2))
            .execute()
            .actionGet();
    logger.info("Verifying versions for {} hits...", searchResponse.hits().totalHits());

    while (true) {
      searchResponse =
          client
              .client()
              .prepareSearchScroll(searchResponse.scrollId())
              .setScroll(TimeValue.timeValueMinutes(2))
              .execute()
              .actionGet();
      if (searchResponse.failedShards() > 0) {
        logger.warn("Search Failures " + Arrays.toString(searchResponse.shardFailures()));
      }
      for (SearchHit hit : searchResponse.hits()) {
        long version = -1;
        for (int i = 0; i < (numberOfReplicas + 1); i++) {
          GetResponse getResponse =
              client.client().prepareGet(hit.index(), hit.type(), hit.id()).execute().actionGet();
          if (version == -1) {
            version = getResponse.version();
          } else {
            if (version != getResponse.version()) {
              logger.warn(
                  "Doc {} has different version numbers {} and {}",
                  hit.id(),
                  version,
                  getResponse.version());
            }
          }
        }
      }
      if (searchResponse.hits().hits().length == 0) {
        break;
      }
    }
    logger.info("Done verifying versions");

    client.close();
    for (Node node : nodes) {
      node.close();
    }
  }

  private class Indexer extends Thread {

    volatile boolean close = false;

    volatile boolean closed = false;

    @Override
    public void run() {
      while (true) {
        if (close) {
          closed = true;
          return;
        }
        try {
          indexDoc();
          Thread.sleep(indexerThrottle.millis());
        } catch (Exception e) {
          logger.warn("failed to index / sleep", e);
        }
      }
    }
  }

  private void indexDoc() throws Exception {
    StringBuilder sb = new StringBuilder();
    XContentBuilder json =
        XContentFactory.jsonBuilder()
            .startObject()
            .field("field", "value" + ThreadLocalRandom.current().nextInt());

    int fields = ThreadLocalRandom.current().nextInt(numberOfFields); // uniform in [0, numberOfFields)
    for (int i = 0; i < fields; i++) {
      json.field("num_" + i, ThreadLocalRandom.current().nextDouble());
      int tokens = ThreadLocalRandom.current().nextInt(textTokens); // non-negative, < textTokens
      sb.setLength(0);
      for (int j = 0; j < tokens; j++) {
        sb.append(UUID.randomBase64UUID()).append(' ');
      }
      json.field("text_" + i, sb.toString());
    }

    json.endObject();

    String id = Long.toString(idCounter.incrementAndGet());
    client
        .client()
        .prepareIndex("test", "type1", id)
        .setCreate(true)
        .setSource(json)
        .execute()
        .actionGet();
    indexCounter.incrementAndGet();
  }

  public static void main(String[] args) throws Exception {
    System.setProperty("es.logger.prefix", "");

    Settings settings =
        settingsBuilder()
            .put("index.shard.check_index", true)
            .put("gateway.type", "none")
            .put("path.data", "data/data1,data/data2")
            .build();

    RollingRestartStressTest test =
        new RollingRestartStressTest()
            .settings(settings)
            .numberOfNodes(4)
            .numberOfShards(5)
            .numberOfReplicas(1)
            .initialNumberOfDocs(1000)
            .textTokens(150)
            .numberOfFields(10)
            .cleanNodeData(false)
            .indexers(5)
            .indexerThrottle(TimeValue.timeValueMillis(50))
            .period(TimeValue.timeValueMinutes(3));

    test.run();
  }
}
  @Test
  public void testSimpleScrollQueryThenFetch() throws Exception {
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(Settings.settingsBuilder().put("index.number_of_shards", 3))
        .execute()
        .actionGet();
    client()
        .admin()
        .cluster()
        .prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForGreenStatus()
        .execute()
        .actionGet();

    for (int i = 0; i < 100; i++) {
      client()
          .prepareIndex("test", "type1", Integer.toString(i))
          .setSource(jsonBuilder().startObject().field("field", i).endObject())
          .execute()
          .actionGet();
    }

    client().admin().indices().prepareRefresh().execute().actionGet();

    SearchResponse searchResponse =
        client()
            .prepareSearch()
            .setQuery(matchAllQuery())
            .setSize(35)
            .setScroll(TimeValue.timeValueMinutes(2))
            .addSort("field", SortOrder.ASC)
            .execute()
            .actionGet();
    try {
      long counter = 0;

      assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
      assertThat(searchResponse.getHits().hits().length, equalTo(35));
      for (SearchHit hit : searchResponse.getHits()) {
        assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
      }

      searchResponse =
          client()
              .prepareSearchScroll(searchResponse.getScrollId())
              .setScroll(TimeValue.timeValueMinutes(2))
              .execute()
              .actionGet();

      assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
      assertThat(searchResponse.getHits().hits().length, equalTo(35));
      for (SearchHit hit : searchResponse.getHits()) {
        assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
      }

      searchResponse =
          client()
              .prepareSearchScroll(searchResponse.getScrollId())
              .setScroll(TimeValue.timeValueMinutes(2))
              .execute()
              .actionGet();

      assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
      assertThat(searchResponse.getHits().hits().length, equalTo(30));
      for (SearchHit hit : searchResponse.getHits()) {
        assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
      }
    } finally {
      clearScroll(searchResponse.getScrollId());
    }
  }
  @Test
  public void testScrollAndUpdateIndex() throws Exception {
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(Settings.settingsBuilder().put("index.number_of_shards", 5))
        .execute()
        .actionGet();
    client()
        .admin()
        .cluster()
        .prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForGreenStatus()
        .execute()
        .actionGet();

    for (int i = 0; i < 500; i++) {
      client()
          .prepareIndex("test", "tweet", Integer.toString(i))
          .setSource(
              jsonBuilder()
                  .startObject()
                  .field("user", "kimchy")
                  .field("postDate", System.currentTimeMillis())
                  .field("message", "test")
                  .endObject())
          .execute()
          .actionGet();
    }

    client().admin().indices().prepareRefresh().execute().actionGet();

    assertThat(
        client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(),
        equalTo(500L));
    assertThat(
        client()
            .prepareCount()
            .setQuery(termQuery("message", "test"))
            .execute()
            .actionGet()
            .getCount(),
        equalTo(500L));
    assertThat(
        client()
            .prepareCount()
            .setQuery(termQuery("message", "update"))
            .execute()
            .actionGet()
            .getCount(),
        equalTo(0L));
    assertThat(
        client()
            .prepareCount()
            .setQuery(termQuery("message", "update"))
            .execute()
            .actionGet()
            .getCount(),
        equalTo(0L));

    SearchResponse searchResponse =
        client()
            .prepareSearch()
            .setQuery(queryStringQuery("user:kimchy"))
            .setSize(35)
            .setScroll(TimeValue.timeValueMinutes(2))
            .addSort("postDate", SortOrder.ASC)
            .execute()
            .actionGet();
    try {
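      // the scroll iterates over the point-in-time view created by the initial search,
      // so the documents re-indexed below do not reappear in later pages and the loop terminates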
      do {
        for (SearchHit searchHit : searchResponse.getHits().hits()) {
          Map<String, Object> map = searchHit.sourceAsMap();
          map.put("message", "update");
          client()
              .prepareIndex("test", "tweet", searchHit.id())
              .setSource(map)
              .execute()
              .actionGet();
        }
        searchResponse =
            client()
                .prepareSearchScroll(searchResponse.getScrollId())
                .setScroll(TimeValue.timeValueMinutes(2))
                .execute()
                .actionGet();
      } while (searchResponse.getHits().hits().length > 0);

      client().admin().indices().prepareRefresh().execute().actionGet();
      assertThat(
          client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(),
          equalTo(500L));
      assertThat(
          client()
              .prepareCount()
              .setQuery(termQuery("message", "test"))
              .execute()
              .actionGet()
              .getCount(),
          equalTo(0L));
      assertThat(
          client()
              .prepareCount()
              .setQuery(termQuery("message", "test"))
              .execute()
              .actionGet()
              .getCount(),
          equalTo(0L));
      assertThat(
          client()
              .prepareCount()
              .setQuery(termQuery("message", "update"))
              .execute()
              .actionGet()
              .getCount(),
          equalTo(500L));
      assertThat(
          client()
              .prepareCount()
              .setQuery(termQuery("message", "update"))
              .execute()
              .actionGet()
              .getCount(),
          equalTo(500L));
    } finally {
      clearScroll(searchResponse.getScrollId());
    }
  }
  /** All indices have to be created beforehand! */
  public void mergeIndices(
      Collection<String> indexList,
      String intoIndex,
      int hitsPerPage,
      boolean forceRefresh,
      CreateObjectsInterface<T> createObj,
      FilterBuilder additionalFilter) {
    if (forceRefresh) {
      refresh(indexList);
      refresh(intoIndex);
    }

    int keepTime = 100;
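    // note: keepTime is passed to TimeValue.timeValueMinutes(...) below, so the scroll
    // cursor is kept alive for a generous 100 minutes between pages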
    for (String fromIndex : indexList) {
      SearchRequestBuilder srb =
          client
              .prepareSearch(fromIndex)
              .setVersion(true)
              .setQuery(QueryBuilders.matchAllQuery())
              .setSize(hitsPerPage)
              .setSearchType(SearchType.SCAN)
              .setScroll(TimeValue.timeValueMinutes(keepTime));
      if (additionalFilter != null) srb.setFilter(additionalFilter);
      SearchResponse rsp = srb.execute().actionGet();

      try {
        long total = rsp.hits().totalHits();
        int collectedResults = 0;
        while (true) {
          StopWatch queryWatch = new StopWatch().start();
          rsp =
              client
                  .prepareSearchScroll(rsp.scrollId())
                  .setScroll(TimeValue.timeValueMinutes(keepTime))
                  .execute()
                  .actionGet();
          long currentResults = rsp.hits().hits().length;
          if (currentResults == 0) break;

          queryWatch.stop();
          Collection<T> objs = createObj.collectObjects(rsp);
          StopWatch updateWatch = new StopWatch().start();
          int failed = bulkUpdate(objs, intoIndex, false, false).size();
          // flush after every bulk, attempting to avoid memory pressure on the server side
          flush(intoIndex);
          updateWatch.stop();
          collectedResults += currentResults;
          logger.info(
              "Progress "
                  + collectedResults
                  + "/"
                  + total
                  + " fromIndex="
                  + fromIndex
                  + " update:"
                  + updateWatch.totalTime().getSeconds()
                  + " query:"
                  + queryWatch.totalTime().getSeconds()
                  + " failed:"
                  + failed);
        }
        logger.info(
            "Finished copying of index:"
                + fromIndex
                + ". Total:"
                + total
                + " collected:"
                + collectedResults);
      } catch (Exception ex) {
        // log and continue with the next source index instead of aborting the whole merge
        logger.error(
            "Failed to copy data from index " + fromIndex + " into " + intoIndex + ".", ex);
      }
    }

    if (forceRefresh) refresh(intoIndex);
  }
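
With SearchType.SCAN the initial response carries only the total hit count and a scroll id, never hits; that is why the loop above issues a scroll request before processing anything. A hedged usage sketch of mergeIndices (the MyDoc type, its fromSource factory, and the shape of CreateObjectsInterface beyond its collectObjects method are assumptions):

  // copy two existing indices into a pre-created target index, 512 hits per scroll page
  mergeIndices(
      Arrays.asList("users_v1", "users_v2"),
      "users_merged",
      512,
      true, // refresh source and target indices first
      new CreateObjectsInterface<MyDoc>() {
        @Override
        public Collection<MyDoc> collectObjects(SearchResponse rsp) {
          Collection<MyDoc> docs = new ArrayList<MyDoc>();
          for (SearchHit hit : rsp.getHits()) {
            docs.add(MyDoc.fromSource(hit.sourceAsMap())); // assumed factory method
          }
          return docs;
        }
      },
      null); // no additional filter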
  @Test
  public void testSimpleScrollQueryThenFetch_clearAllScrollIds() throws Exception {
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(Settings.settingsBuilder().put("index.number_of_shards", 3))
        .execute()
        .actionGet();
    client()
        .admin()
        .cluster()
        .prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForGreenStatus()
        .execute()
        .actionGet();

    for (int i = 0; i < 100; i++) {
      client()
          .prepareIndex("test", "type1", Integer.toString(i))
          .setSource(jsonBuilder().startObject().field("field", i).endObject())
          .execute()
          .actionGet();
    }

    client().admin().indices().prepareRefresh().execute().actionGet();

    SearchResponse searchResponse1 =
        client()
            .prepareSearch()
            .setQuery(matchAllQuery())
            .setSize(35)
            .setScroll(TimeValue.timeValueMinutes(2))
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .addSort("field", SortOrder.ASC)
            .execute()
            .actionGet();

    SearchResponse searchResponse2 =
        client()
            .prepareSearch()
            .setQuery(matchAllQuery())
            .setSize(35)
            .setScroll(TimeValue.timeValueMinutes(2))
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .addSort("field", SortOrder.ASC)
            .execute()
            .actionGet();

    long counter1 = 0;
    long counter2 = 0;

    assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100L));
    assertThat(searchResponse1.getHits().hits().length, equalTo(35));
    for (SearchHit hit : searchResponse1.getHits()) {
      assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
    }

    assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100L));
    assertThat(searchResponse2.getHits().hits().length, equalTo(35));
    for (SearchHit hit : searchResponse2.getHits()) {
      assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
    }

    searchResponse1 =
        client()
            .prepareSearchScroll(searchResponse1.getScrollId())
            .setScroll(TimeValue.timeValueMinutes(2))
            .execute()
            .actionGet();

    searchResponse2 =
        client()
            .prepareSearchScroll(searchResponse2.getScrollId())
            .setScroll(TimeValue.timeValueMinutes(2))
            .execute()
            .actionGet();

    assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100L));
    assertThat(searchResponse1.getHits().hits().length, equalTo(35));
    for (SearchHit hit : searchResponse1.getHits()) {
      assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
    }

    assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100L));
    assertThat(searchResponse2.getHits().hits().length, equalTo(35));
    for (SearchHit hit : searchResponse2.getHits()) {
      assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
    }

    ClearScrollResponse clearResponse =
        client().prepareClearScroll().addScrollId("_all").execute().actionGet();
    assertThat(clearResponse.isSucceeded(), is(true));
    assertThat(clearResponse.getNumFreed(), greaterThan(0));
    assertThat(clearResponse.status(), equalTo(RestStatus.OK));
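
    // once cleared, both scroll ids are invalid cluster-wide; the follow-up requests must fail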

    assertThrows(
        internalCluster()
            .transportClient()
            .prepareSearchScroll(searchResponse1.getScrollId())
            .setScroll(TimeValue.timeValueMinutes(2)),
        RestStatus.NOT_FOUND);
    assertThrows(
        internalCluster()
            .transportClient()
            .prepareSearchScroll(searchResponse2.getScrollId())
            .setScroll(TimeValue.timeValueMinutes(2)),
        RestStatus.NOT_FOUND);
  }
public class RecoverySettings extends AbstractComponent {

  public static final Setting<ByteSizeValue> INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING =
      Setting.byteSizeSetting(
          "indices.recovery.max_bytes_per_sec",
          new ByteSizeValue(40, ByteSizeUnit.MB),
          Property.Dynamic,
          Property.NodeScope);
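  // a value of 0 or less disables the recovery rate limiter (see setMaxBytesPerSec below)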

  /**
   * how long to wait before retrying after issues caused by cluster state syncing between nodes,
   * i.e., the local node is not yet known on the remote node, the remote shard is not yet
   * started, etc.
   */
  public static final Setting<TimeValue> INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING =
      Setting.positiveTimeSetting(
          "indices.recovery.retry_delay_state_sync",
          TimeValue.timeValueMillis(500),
          Property.Dynamic,
          Property.NodeScope);

  /** how long to wait before retrying after network-related issues */
  public static final Setting<TimeValue> INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING =
      Setting.positiveTimeSetting(
          "indices.recovery.retry_delay_network",
          TimeValue.timeValueSeconds(5),
          Property.Dynamic,
          Property.NodeScope);

  /** timeout value to use for requests made as part of the recovery process */
  public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING =
      Setting.positiveTimeSetting(
          "indices.recovery.internal_action_timeout",
          TimeValue.timeValueMinutes(15),
          Property.Dynamic,
          Property.NodeScope);

  /**
   * timeout value to use for requests made as part of the recovery process that are expected to
   * take a long time. Defaults to twice `indices.recovery.internal_action_timeout`.
   */
  public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING =
      Setting.timeSetting(
          "indices.recovery.internal_action_long_timeout",
          (s) ->
              TimeValue.timeValueMillis(
                  INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2),
          TimeValue.timeValueSeconds(0),
          Property.Dynamic,
          Property.NodeScope);

  /**
   * recoveries that don't show any activity for more than this interval will be failed. Defaults
   * to `indices.recovery.internal_action_long_timeout`.
   */
  public static final Setting<TimeValue> INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING =
      Setting.timeSetting(
          "indices.recovery.recovery_activity_timeout",
          INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING::get,
          TimeValue.timeValueSeconds(0),
          Property.Dynamic,
          Property.NodeScope);

  public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB);

  private volatile ByteSizeValue maxBytesPerSec;
  private volatile SimpleRateLimiter rateLimiter;
  private volatile TimeValue retryDelayStateSync;
  private volatile TimeValue retryDelayNetwork;
  private volatile TimeValue activityTimeout;
  private volatile TimeValue internalActionTimeout;
  private volatile TimeValue internalActionLongTimeout;

  private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE;

  @Inject
  public RecoverySettings(Settings settings, ClusterSettings clusterSettings) {
    super(settings);

    this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings);
    // doesn't have to be fast as nodes are reconnected every 10s by default (see
    // InternalClusterService.ReconnectToNodes)
    // and we want to give the master time to remove a faulty node
    this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings);

    this.internalActionTimeout = INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(settings);
    this.internalActionLongTimeout =
        INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.get(settings);

    this.activityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings);
    this.maxBytesPerSec = INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.get(settings);
    if (maxBytesPerSec.getBytes() <= 0) {
      rateLimiter = null;
    } else {
      rateLimiter = new SimpleRateLimiter(maxBytesPerSec.getMbFrac());
    }

    logger.debug("using max_bytes_per_sec[{}]", maxBytesPerSec);

    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, this::setInternalActionLongTimeout);
    clusterSettings.addSettingsUpdateConsumer(
        INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout);
  }

  public RateLimiter rateLimiter() {
    return rateLimiter;
  }

  public TimeValue retryDelayNetwork() {
    return retryDelayNetwork;
  }

  public TimeValue retryDelayStateSync() {
    return retryDelayStateSync;
  }

  public TimeValue activityTimeout() {
    return activityTimeout;
  }

  public TimeValue internalActionTimeout() {
    return internalActionTimeout;
  }

  public TimeValue internalActionLongTimeout() {
    return internalActionLongTimeout;
  }

  public ByteSizeValue getChunkSize() {
    return chunkSize;
  }

  void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests
    if (chunkSize.bytesAsInt() <= 0) {
      throw new IllegalArgumentException("chunkSize must be > 0");
    }
    this.chunkSize = chunkSize;
  }

  public void setRetryDelayStateSync(TimeValue retryDelayStateSync) {
    this.retryDelayStateSync = retryDelayStateSync;
  }

  public void setRetryDelayNetwork(TimeValue retryDelayNetwork) {
    this.retryDelayNetwork = retryDelayNetwork;
  }

  public void setActivityTimeout(TimeValue activityTimeout) {
    this.activityTimeout = activityTimeout;
  }

  public void setInternalActionTimeout(TimeValue internalActionTimeout) {
    this.internalActionTimeout = internalActionTimeout;
  }

  public void setInternalActionLongTimeout(TimeValue internalActionLongTimeout) {
    this.internalActionLongTimeout = internalActionLongTimeout;
  }

  private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) {
    this.maxBytesPerSec = maxBytesPerSec;
    if (maxBytesPerSec.getBytes() <= 0) {
      rateLimiter = null;
    } else if (rateLimiter != null) {
      rateLimiter.setMBPerSec(maxBytesPerSec.getMbFrac());
    } else {
      rateLimiter = new SimpleRateLimiter(maxBytesPerSec.getMbFrac());
    }
  }
}
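
All of the settings above are registered with Property.Dynamic, so they can be changed on a live cluster through the cluster update-settings API. A minimal sketch, assuming a Client instance named client and Settings.builder() for this client version (the transient scope and the 80mb value are arbitrary choices):

  // raise the recovery throttle at runtime; picked up via the registered update consumers
  client
      .admin()
      .cluster()
      .prepareUpdateSettings()
      .setTransientSettings(
          Settings.builder().put("indices.recovery.max_bytes_per_sec", "80mb"))
      .execute()
      .actionGet();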
  public void run() throws Exception {
    Node[] nodes = new Node[numberOfNodes];
    for (int i = 0; i < nodes.length; i++) {
      nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
    }
    client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();

    client
        .client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            settingsBuilder()
                .put("index.number_of_shards", numberOfShards)
                .put("index.number_of_replicas", numberOfReplicas))
        .execute()
        .actionGet();

    logger.info("********** [START] INDEXING INITIAL DOCS");
    for (long i = 0; i < initialNumberOfDocs; i++) {
      indexDoc();
    }
    logger.info("********** [DONE ] INDEXING INITIAL DOCS");

    Indexer[] indexerThreads = new Indexer[indexers];
    for (int i = 0; i < indexerThreads.length; i++) {
      indexerThreads[i] = new Indexer();
    }
    for (int i = 0; i < indexerThreads.length; i++) {
      indexerThreads[i].start();
    }

    long testStart = System.currentTimeMillis();

    // start doing the rolling restart
    int nodeIndex = 0;
    while (true) {
      File[] nodeData =
          ((InternalNode) nodes[nodeIndex])
              .injector()
              .getInstance(NodeEnvironment.class)
              .nodeDataLocations();
      nodes[nodeIndex].close();
      if (clearNodeData) {
        FileSystemUtils.deleteRecursively(nodeData);
      }
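
      // with one data node down, the cluster has (numberOfNodes - 1) data nodes plus the
      // client node, which is why the first health check waits for numberOfNodes in total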

      try {
        ClusterHealthResponse clusterHealth =
            client
                .client()
                .admin()
                .cluster()
                .prepareHealth()
                .setWaitForGreenStatus()
                .setWaitForNodes(Integer.toString(numberOfNodes + 0 /* client node*/))
                .setWaitForRelocatingShards(0)
                .setTimeout("10m")
                .execute()
                .actionGet();
        if (clusterHealth.timedOut()) {
          logger.warn("timed out waiting for green status....");
        }
      } catch (Exception e) {
        logger.warn("failed to execute cluster health....");
      }

      nodes[nodeIndex] = NodeBuilder.nodeBuilder().settings(settings).node();

      Thread.sleep(1000);

      try {
        ClusterHealthResponse clusterHealth =
            client
                .client()
                .admin()
                .cluster()
                .prepareHealth()
                .setWaitForGreenStatus()
                .setWaitForNodes(Integer.toString(numberOfNodes + 1 /* client node*/))
                .setWaitForRelocatingShards(0)
                .setTimeout("10m")
                .execute()
                .actionGet();
        if (clusterHealth.timedOut()) {
          logger.warn("timed out waiting for green status....");
        }
      } catch (Exception e) {
        logger.warn("failed to execute cluster health....");
      }

      if (++nodeIndex == nodes.length) {
        nodeIndex = 0;
      }

      if ((System.currentTimeMillis() - testStart) > period.millis()) {
        logger.info("test finished");
        break;
      }
    }

    for (int i = 0; i < indexerThreads.length; i++) {
      indexerThreads[i].close = true;
    }

    Thread.sleep(indexerThrottle.millis() + 10000);

    for (int i = 0; i < indexerThreads.length; i++) {
      if (!indexerThreads[i].closed) {
        logger.warn("thread not closed!");
      }
    }

    client.client().admin().indices().prepareRefresh().execute().actionGet();

    // check the status
    IndicesStatusResponse status =
        client.client().admin().indices().prepareStatus("test").execute().actionGet();
    for (IndexShardStatus shardStatus : status.index("test")) {
      ShardStatus shard = shardStatus.shards()[0];
      logger.info("shard [{}], docs [{}]", shard.shardId(), shard.getDocs().numDocs());
      // compare the first copy's doc count against every other copy of the shard
      for (ShardStatus replica : shardStatus) {
        if (shard.docs().numDocs() != replica.docs().numDocs()) {
          logger.warn(
              "shard doc number does not match! got {} and {}",
              shard.docs().numDocs(),
              replica.docs().numDocs());
        }
      }
    }

    // check the count
    for (int i = 0; i < (nodes.length * 5); i++) {
      CountResponse count =
          client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
      logger.info(
          "indexed [{}], count [{}], [{}]",
          indexCounter.get(),
          count.count(),
          count.count() == indexCounter.get() ? "OK" : "FAIL");
      if (count.count() != indexCounter.get()) {
        logger.warn("count does not match!");
      }
    }

    // scan all the docs, verify all have the same version based on the number of replicas
    SearchResponse searchResponse =
        client
            .client()
            .prepareSearch()
            .setSearchType(SearchType.SCAN)
            .setQuery(matchAllQuery())
            .setSize(50)
            .setScroll(TimeValue.timeValueMinutes(2))
            .execute()
            .actionGet();
    logger.info("Verifying versions for {} hits...", searchResponse.hits().totalHits());

    while (true) {
      searchResponse =
          client
              .client()
              .prepareSearchScroll(searchResponse.scrollId())
              .setScroll(TimeValue.timeValueMinutes(2))
              .execute()
              .actionGet();
      if (searchResponse.failedShards() > 0) {
        logger.warn("Search Failures " + Arrays.toString(searchResponse.shardFailures()));
      }
      for (SearchHit hit : searchResponse.hits()) {
        long version = -1;
        for (int i = 0; i < (numberOfReplicas + 1); i++) {
          GetResponse getResponse =
              client.client().prepareGet(hit.index(), hit.type(), hit.id()).execute().actionGet();
          if (version == -1) {
            version = getResponse.version();
          } else {
            if (version != getResponse.version()) {
              logger.warn(
                  "Doc {} has different version numbers {} and {}",
                  hit.id(),
                  version,
                  getResponse.version());
            }
          }
        }
      }
      if (searchResponse.hits().hits().length == 0) {
        break;
      }
    }
    logger.info("Done verifying versions");

    client.close();
    for (Node node : nodes) {
      node.close();
    }
  }
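
The Indexer class started and stopped by run() is not included in this snippet. A hypothetical reconstruction based on the close/closed flags and indexerThrottle used above (every detail here is an assumption):

  class Indexer extends Thread {
    volatile boolean close = false;
    volatile boolean closed = false;

    @Override
    public void run() {
      while (!close) {
        try {
          indexDoc(); // assumed to also increment indexCounter
          Thread.sleep(indexerThrottle.millis());
        } catch (Exception e) {
          logger.warn("indexer failed", e);
        }
      }
      closed = true;
    }
  }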
Example #27
0
  @Override
  public TimeValue defaultValue() {
    return TimeValue.timeValueMinutes(30);
  }