  @Override
  protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
    IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    IndexShard indexShard = indexService.getShard(request.shardId().id());
    final QueryShardContext queryShardContext = indexService.newQueryShardContext();
    queryShardContext.setTypes(request.types());

    boolean valid;
    String explanation = null;
    String error = null;
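    // Acquire a searcher and set up a throw-away search context; it is only used to parse
    // the query against this shard and, if requested, rewrite or explain it.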
    Engine.Searcher searcher = indexShard.acquireSearcher("validate_query");

    DefaultSearchContext searchContext =
        new DefaultSearchContext(
            0,
            new ShardSearchLocalRequest(
                request.types(), request.nowInMillis(), request.filteringAliases()),
            null,
            searcher,
            indexService,
            indexShard,
            scriptService,
            pageCacheRecycler,
            bigArrays,
            threadPool.estimatedTimeInMillisCounter(),
            parseFieldMatcher,
            SearchService.NO_TIMEOUT);
    SearchContext.setCurrent(searchContext);
    try {
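      // Parsing and pre-processing the query is the actual validation step; a
      // QueryShardException or ParsingException here marks the query as invalid.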
      searchContext.parsedQuery(queryShardContext.toQuery(request.query()));
      searchContext.preProcess();

      valid = true;
      if (request.rewrite()) {
        explanation = getRewrittenQuery(searcher.searcher(), searchContext.query());
      } else if (request.explain()) {
        explanation = searchContext.filteredQuery().query().toString();
      }
    } catch (QueryShardException | ParsingException e) {
      valid = false;
      error = e.getDetailedMessage();
    } catch (AssertionError | IOException e) {
      valid = false;
      error = e.getMessage();
    } finally {
      searchContext.close();
      SearchContext.removeCurrent();
    }

    return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
  }
  private RecoveryResponse recover(final StartRecoveryRequest request) throws IOException {
    final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    final IndexShard shard = indexService.getShard(request.shardId().id());

    // Recovery can only start once our (the source node's) cluster state also marks the
    // target shard as recovering; otherwise index operations will not be routed to it
    // properly.
    RoutingNode node = clusterService.state().getRoutingNodes().node(request.targetNode().getId());
    if (node == null) {
      logger.debug(
          "delaying recovery of {} as source node {} is unknown",
          request.shardId(),
          request.targetNode());
      throw new DelayRecoveryException(
          "source node does not have the node [" + request.targetNode() + "] in its state yet..");
    }

    ShardRouting routingEntry = shard.routingEntry();
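    // A primary relocation may only start once the source shard routing is already marked
    // as relocating to the requested target node.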
    if (request.isPrimaryRelocation()
        && (routingEntry.relocating() == false
            || routingEntry.relocatingNodeId().equals(request.targetNode().getId()) == false)) {
      logger.debug(
          "delaying recovery of {} as source shard is not marked yet as relocating to {}",
          request.shardId(),
          request.targetNode());
      throw new DelayRecoveryException(
          "source shard is not marked yet as relocating to [" + request.targetNode() + "]");
    }

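    // The shard must be assigned to the target node and still be initializing there before
    // the recovery can proceed.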
    ShardRouting targetShardRouting = node.getByShardId(request.shardId());
    if (targetShardRouting == null) {
      logger.debug(
          "delaying recovery of {} as it is not listed as assigned to target node {}",
          request.shardId(),
          request.targetNode());
      throw new DelayRecoveryException(
          "source node does not have the shard listed in its state as allocated on the node");
    }
    if (!targetShardRouting.initializing()) {
      logger.debug(
          "delaying recovery of {} as it is not listed as initializing on the target node {}. known shard state is [{}]",
          request.shardId(),
          request.targetNode(),
          targetShardRouting.state());
      throw new DelayRecoveryException(
          "source node sees the state of the target shard as ["
              + targetShardRouting.state()
              + "], expecting it to be [initializing]");
    }

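    // Register the recovery as ongoing, run the transfer, and deregister it again once
    // recoverToTarget() returns or fails.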
    RecoverySourceHandler handler =
        ongoingRecoveries.addNewRecovery(request, targetShardRouting.allocationId().getId(), shard);
    logger.trace(
        "[{}][{}] starting recovery to {}",
        request.shardId().getIndex().getName(),
        request.shardId().id(),
        request.targetNode());
    try {
      return handler.recoverToTarget();
    } finally {
      ongoingRecoveries.remove(shard, handler);
    }
  }
  private RecoveryResponse recover(final StartRecoveryRequest request) {
    final IndexService indexService =
        indicesService.indexServiceSafe(request.shardId().index().name());
    final IndexShard shard = indexService.getShard(request.shardId().id());

    // Recovery can only start once our (the source node's) cluster state also marks the
    // target shard as recovering; otherwise index operations will not be routed to it
    // properly.
    RoutingNode node = clusterService.state().getRoutingNodes().node(request.targetNode().id());
    if (node == null) {
      logger.debug(
          "delaying recovery of {} as source node {} is unknown",
          request.shardId(),
          request.targetNode());
      throw new DelayRecoveryException(
          "source node does not have the node [" + request.targetNode() + "] in its state yet..");
    }
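    // Find the routing entry for the recovering shard on the target node.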
    ShardRouting targetShardRouting = null;
    for (ShardRouting shardRouting : node) {
      if (shardRouting.shardId().equals(request.shardId())) {
        targetShardRouting = shardRouting;
        break;
      }
    }
    if (targetShardRouting == null) {
      logger.debug(
          "delaying recovery of {} as it is not listed as assigned to target node {}",
          request.shardId(),
          request.targetNode());
      throw new DelayRecoveryException(
          "source node does not have the shard listed in its state as allocated on the node");
    }
    if (!targetShardRouting.initializing()) {
      logger.debug(
          "delaying recovery of {} as it is not listed as initializing on the target node {}. known shard state is [{}]",
          request.shardId(),
          request.targetNode(),
          targetShardRouting.state());
      throw new DelayRecoveryException(
          "source node sees the state of the target shard as ["
              + targetShardRouting.state()
              + "], expecting it to be [initializing]");
    }

    logger.trace(
        "[{}][{}] starting recovery to {}, mark_as_relocated {}",
        request.shardId().index().name(),
        request.shardId().id(),
        request.targetNode(),
        request.markAsRelocated());
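    // On a shared filesystem the target shard sees the same files as the source, so a
    // specialized source handler is used; otherwise the regular file-copying handler applies.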
    final RecoverySourceHandler handler;
    if (IndexMetaData.isOnSharedFilesystem(shard.indexSettings())) {
      handler =
          new SharedFSRecoverySourceHandler(
              shard, request, recoverySettings, transportService, logger);
    } else {
      handler =
          new RecoverySourceHandler(shard, request, recoverySettings, transportService, logger);
    }
    ongoingRecoveries.add(shard, handler);
    try {
      return handler.recoverToTarget();
    } finally {
      ongoingRecoveries.remove(shard, handler);
    }
  }
  @TestLogging("org.elasticsearch.gateway:TRACE")
  public void testIndexWithFewDocuments() throws Exception {
    final Path dataPath = createTempDir();
    Settings nodeSettings = nodeSettings(dataPath);

    internalCluster().startNodesAsync(3, nodeSettings).get();
    final String IDX = "test";

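    // One primary with two shadow replicas on a shared data path; the translog flush
    // threshold is set absurdly high (1 PB) so operations stay in the translog for replay.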
    Settings idxSettings =
        Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
            .put(
                IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
                new ByteSizeValue(1, ByteSizeUnit.PB))
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();

    prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
    ensureGreen(IDX);

    // So basically, the primary should fail and the replica will need to
    // replay the translog; that is what this test exercises.
    client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
    client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();

    IndicesStatsResponse indicesStatsResponse =
        client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get();
    assertEquals(
        2,
        indicesStatsResponse
            .getIndex(IDX)
            .getPrimaries()
            .getTranslog()
            .estimatedNumberOfOperations());
    assertEquals(
        2,
        indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations());
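    // Shadow replica shards may not expose translog stats (hence the null check); every
    // shard that does report stats must see both indexed operations.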
    Index index = resolveIndex(IDX);
    for (IndicesService service : internalCluster().getInstances(IndicesService.class)) {
      IndexService indexService = service.indexService(index);
      if (indexService != null) {
        IndexShard shard = indexService.getShard(0);
        TranslogStats translogStats = shard.translogStats();
        assertTrue(translogStats != null || shard instanceof ShadowIndexShard);
        if (translogStats != null) {
          assertEquals(2, translogStats.estimatedNumberOfOperations());
        }
      }
    }

    // Check that we can get doc 1 and 2, because we are doing realtime
    // gets and getting from the primary
    GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
    GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
    assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
    assertThat(gResp2.getSource().get("foo"), equalTo("bar"));

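    // Flush the first two docs, then index two more that remain unflushed, so the restart
    // below covers both committed segments and translog replay.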
    flushAndRefresh(IDX);
    client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
    client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get();
    refresh();

    // Check that we can get doc 1 and 2 without realtime
    gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(false).get();
    gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(false).get();
    assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
    assertThat(gResp2.getSource().get("foo"), equalTo("bar"));

    logger.info("--> restarting all nodes");
    if (randomBoolean()) {
      logger.info("--> rolling restart");
      internalCluster().rollingRestart();
    } else {
      logger.info("--> full restart");
      internalCluster().fullRestart();
    }

    client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
    ensureGreen(IDX);
    flushAndRefresh(IDX);

    logger.info("--> performing query");
    SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
    assertHitCount(resp, 4);

    logger.info("--> deleting index");
    assertAcked(client().admin().indices().prepareDelete(IDX));
  }
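  // Resolves the shard for the given id and wraps it in an IndexShardReference that is
  // released when the caller closes the returned Releasable.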
  protected Releasable getIndexShardOperationsCounter(ShardId shardId) {
    IndexService indexService = indicesService.indexServiceSafe(shardId.index().getName());
    IndexShard indexShard = indexService.getShard(shardId.id());
    return new IndexShardReference(indexShard);
  }