public class SearchService extends AbstractLifecycleComponent<SearchService> {

  public static final String NORMS_LOADING_KEY = "index.norms.loading";
  public static final String DEFAULT_KEEPALIVE_KEY = "search.default_keep_alive";
  public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval";

  private final ThreadPool threadPool;

  private final ClusterService clusterService;

  private final IndicesService indicesService;

  private final IndicesWarmer indicesWarmer;

  private final ScriptService scriptService;

  private final PageCacheRecycler pageCacheRecycler;

  private final BigArrays bigArrays;

  private final DfsPhase dfsPhase;

  private final QueryPhase queryPhase;

  private final FetchPhase fetchPhase;

  private final IndicesQueryCache indicesQueryCache;

  private final long defaultKeepAlive;

  private final ScheduledFuture<?> keepAliveReaper;

  private final AtomicLong idGenerator = new AtomicLong();

  private final ConcurrentMapLong<SearchContext> activeContexts =
      ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();

  private final ImmutableMap<String, SearchParseElement> elementParsers;

  @Inject
  public SearchService(
      Settings settings,
      ClusterService clusterService,
      IndicesService indicesService,
      IndicesWarmer indicesWarmer,
      ThreadPool threadPool,
      ScriptService scriptService,
      PageCacheRecycler pageCacheRecycler,
      BigArrays bigArrays,
      DfsPhase dfsPhase,
      QueryPhase queryPhase,
      FetchPhase fetchPhase,
      IndicesQueryCache indicesQueryCache) {
    super(settings);
    this.threadPool = threadPool;
    this.clusterService = clusterService;
    this.indicesService = indicesService;
    indicesService
        .indicesLifecycle()
        .addListener(
            new IndicesLifecycle.Listener() {

              @Override
              public void afterIndexDeleted(Index index, @IndexSettings Settings indexSettings) {
                // once an index is deleted we can just clean up all the pending search context
                // information to release memory and let references to the filesystem go etc.
                freeAllContextForIndex(index);
              }
            });
    this.indicesWarmer = indicesWarmer;
    this.scriptService = scriptService;
    this.pageCacheRecycler = pageCacheRecycler;
    this.bigArrays = bigArrays;
    this.dfsPhase = dfsPhase;
    this.queryPhase = queryPhase;
    this.fetchPhase = fetchPhase;
    this.indicesQueryCache = indicesQueryCache;

    TimeValue keepAliveInterval = settings.getAsTime(KEEPALIVE_INTERVAL_KEY, timeValueMinutes(1));
    // 5 minutes is enough here, since we make sure to clean up on search requests and when the
    // shard/index closes
    this.defaultKeepAlive = settings.getAsTime(DEFAULT_KEEPALIVE_KEY, timeValueMinutes(5)).millis();

    Map<String, SearchParseElement> elementParsers = new HashMap<>();
    elementParsers.putAll(dfsPhase.parseElements());
    elementParsers.putAll(queryPhase.parseElements());
    elementParsers.putAll(fetchPhase.parseElements());
    elementParsers.put("stats", new StatsGroupsParseElement());
    this.elementParsers = ImmutableMap.copyOf(elementParsers);

    this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval);

    this.indicesWarmer.addListener(new NormsWarmer());
    this.indicesWarmer.addListener(new FieldDataWarmer());
    this.indicesWarmer.addListener(new SearchWarmer());
  }

  protected void putContext(SearchContext context) {
    final SearchContext previous = activeContexts.put(context.id(), context);
    assert previous == null;
  }

  protected SearchContext removeContext(long id) {
    return activeContexts.remove(id);
  }

  @Override
  protected void doStart() {}

  @Override
  protected void doStop() {
    for (final SearchContext context : activeContexts.values()) {
      freeContext(context.id());
    }
  }

  @Override
  protected void doClose() {
    doStop();
    FutureUtils.cancel(keepAliveReaper);
  }

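  /**
   * Executes the DFS phase for the given shard-level request and returns the collected term and
   * document frequencies, which the coordinating node aggregates to compute globally consistent
   * scores in the subsequent query phase.
   */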
  public DfsSearchResult executeDfsPhase(ShardSearchRequest request) {
    final SearchContext context = createAndPutContext(request);
    try {
      contextProcessing(context);
      dfsPhase.execute(context);
      contextProcessedSuccessfully(context);
      return context.dfsResult();
    } catch (Throwable e) {
      logger.trace("Dfs phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }

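  /**
   * Starts a scan: requires a scroll to be set and rejects aggregations, then runs the query
   * phase with size temporarily set to 0 so that this first round-trip only counts matches. The
   * context is kept alive for the follow-up scroll requests.
   */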
  public QuerySearchResult executeScan(ShardSearchRequest request) {
    final SearchContext context = createAndPutContext(request);
    final int originalSize = context.size();
    try {
      if (context.aggregations() != null) {
        throw new IllegalArgumentException("aggregations are not supported with search_type=scan");
      }

      if (context.scroll() == null) {
        throw new ElasticsearchException("Scroll must be provided when scanning...");
      }

      assert context.searchType() == SearchType.SCAN;
      // move to QUERY_THEN_FETCH for the initial count; when scrolling starts, move back to SCAN
      context.searchType(SearchType.QUERY_THEN_FETCH);
      context.size(0); // set size to 0 so that we only count matches
      assert context.searchType() == SearchType.QUERY_THEN_FETCH;

      contextProcessing(context);
      queryPhase.execute(context);
      contextProcessedSuccessfully(context);
      return context.queryResult();
    } catch (Throwable e) {
      logger.trace("Scan phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      context.size(originalSize);
      cleanContext(context);
    }
  }

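  /**
   * Continues a scan for an existing context: runs the query phase, marks every hit of the
   * current page for loading, and fetches the documents. The context is freed once the scroll is
   * exhausted, i.e. when fewer hits than the page size are returned.
   */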
  public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) {
    final SearchContext context = findContext(request.id());
    contextProcessing(context);
    try {
      processScroll(request, context);
      if (context.searchType() == SearchType.QUERY_THEN_FETCH) {
        // first scanning, reset the from to 0
        context.searchType(SearchType.SCAN);
        context.from(0);
      }
      queryPhase.execute(context);
      shortcutDocIdsToLoadForScanning(context);
      fetchPhase.execute(context);
      if (context.scroll() == null || context.fetchResult().hits().hits().length < context.size()) {
        freeContext(request.id());
      } else {
        contextProcessedSuccessfully(context);
      }
      return new ScrollQueryFetchSearchResult(
          new QueryFetchSearchResult(context.queryResult(), context.fetchResult()),
          context.shardTarget());
    } catch (Throwable e) {
      logger.trace("Scan phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }

  /**
   * Try to load the query results from the cache or execute the query phase directly if the cache
   * cannot be used.
   */
  private void loadOrExecuteQueryPhase(
      final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase)
      throws Exception {
    final boolean canCache = indicesQueryCache.canCache(request, context);
    if (canCache) {
      indicesQueryCache.loadIntoContext(request, context, queryPhase);
    } else {
      queryPhase.execute(context);
    }
  }

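  /**
   * Executes the query phase of a two-phase (query then fetch) search, possibly serving the
   * result from the query cache. If there are no hits and no scroll is requested, the context is
   * freed immediately since no fetch phase will follow.
   */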
  public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) {
    final SearchContext context = createAndPutContext(request);
    final ShardSearchStats shardSearchStats = context.indexShard().searchService();
    try {
      shardSearchStats.onPreQueryPhase(context);
      long time = System.nanoTime();
      contextProcessing(context);

      loadOrExecuteQueryPhase(request, context, queryPhase);

      if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scroll() == null) {
        freeContext(context.id());
      } else {
        contextProcessedSuccessfully(context);
      }
      shardSearchStats.onQueryPhase(context, System.nanoTime() - time);

      return context.queryResult();
    } catch (Throwable e) {
      // execution exception can happen while loading the cache, strip it
      if (e instanceof ExecutionException) {
        e = e.getCause();
      }
      shardSearchStats.onFailedQueryPhase(context);
      logger.trace("Query phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }

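  /**
   * Executes the query phase for a scroll continuation: advances the context by its page size and
   * runs the query again against the existing context.
   */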
  public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) {
    final SearchContext context = findContext(request.id());
    ShardSearchStats shardSearchStats = context.indexShard().searchService();
    try {
      shardSearchStats.onPreQueryPhase(context);
      long time = System.nanoTime();
      contextProcessing(context);
      processScroll(request, context);
      queryPhase.execute(context);
      contextProcessedSuccessfully(context);
      shardSearchStats.onQueryPhase(context, System.nanoTime() - time);
      return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget());
    } catch (Throwable e) {
      shardSearchStats.onFailedQueryPhase(context);
      logger.trace("Query phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }

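  /**
   * Executes the second step of DFS_QUERY_THEN_FETCH: installs the aggregated document
   * frequencies from the DFS phase as a {@link CachedDfSource} on the searcher, then runs the
   * query phase with globally consistent scoring.
   */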
  public QuerySearchResult executeQueryPhase(QuerySearchRequest request) {
    final SearchContext context = findContext(request.id());
    contextProcessing(context);
    try {
      final IndexCache indexCache = context.indexShard().indexService().cache();
      context
          .searcher()
          .dfSource(
              new CachedDfSource(
                  context.searcher().getIndexReader(),
                  request.dfs(),
                  context.similarityService().similarity(),
                  indexCache.filter(),
                  indexCache.filterPolicy()));
    } catch (Throwable e) {
      processFailure(context, e);
      cleanContext(context);
      throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
    }
    ShardSearchStats shardSearchStats = context.indexShard().searchService();
    try {
      shardSearchStats.onPreQueryPhase(context);
      long time = System.nanoTime();
      queryPhase.execute(context);
      if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scroll() == null) {
        // no hits, we can release the context since there will be no fetch phase
        freeContext(context.id());
      } else {
        contextProcessedSuccessfully(context);
      }
      shardSearchStats.onQueryPhase(context, System.nanoTime() - time);
      return context.queryResult();
    } catch (Throwable e) {
      shardSearchStats.onFailedQueryPhase(context);
      logger.trace("Query phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }

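  /**
   * Executes the query and fetch phases in a single round-trip (QUERY_AND_FETCH), returning both
   * the query result and the fetched documents.
   */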
  public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) {
    final SearchContext context = createAndPutContext(request);
    contextProcessing(context);
    try {
      ShardSearchStats shardSearchStats = context.indexShard().searchService();
      shardSearchStats.onPreQueryPhase(context);
      long time = System.nanoTime();
      try {
        loadOrExecuteQueryPhase(request, context, queryPhase);
      } catch (Throwable e) {
        shardSearchStats.onFailedQueryPhase(context);
        throw ExceptionsHelper.convertToRuntime(e);
      }
      long time2 = System.nanoTime();
      shardSearchStats.onQueryPhase(context, time2 - time);
      shardSearchStats.onPreFetchPhase(context);
      try {
        shortcutDocIdsToLoad(context);
        fetchPhase.execute(context);
        if (context.scroll() == null) {
          freeContext(context.id());
        } else {
          contextProcessedSuccessfully(context);
        }
      } catch (Throwable e) {
        shardSearchStats.onFailedFetchPhase(context);
        throw ExceptionsHelper.convertToRuntime(e);
      }
      shardSearchStats.onFetchPhase(context, System.nanoTime() - time2);
      return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
    } catch (Throwable e) {
      logger.trace("Fetch phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }

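  /**
   * Executes the combined query and fetch phases for DFS_QUERY_AND_FETCH: installs the aggregated
   * document frequencies on the searcher, then runs the query and fetch phases in one pass.
   */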
  public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) {
    final SearchContext context = findContext(request.id());
    contextProcessing(context);
    try {
      final IndexCache indexCache = context.indexShard().indexService().cache();
      context
          .searcher()
          .dfSource(
              new CachedDfSource(
                  context.searcher().getIndexReader(),
                  request.dfs(),
                  context.similarityService().similarity(),
                  indexCache.filter(),
                  indexCache.filterPolicy()));
    } catch (Throwable e) {
      freeContext(context.id());
      cleanContext(context);
      throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
    }
    try {
      ShardSearchStats shardSearchStats = context.indexShard().searchService();
      shardSearchStats.onPreQueryPhase(context);
      long time = System.nanoTime();
      try {
        queryPhase.execute(context);
      } catch (Throwable e) {
        shardSearchStats.onFailedQueryPhase(context);
        throw ExceptionsHelper.convertToRuntime(e);
      }
      long time2 = System.nanoTime();
      shardSearchStats.onQueryPhase(context, time2 - time);
      shardSearchStats.onPreFetchPhase(context);
      try {
        shortcutDocIdsToLoad(context);
        fetchPhase.execute(context);
        if (context.scroll() == null) {
          freeContext(request.id());
        } else {
          contextProcessedSuccessfully(context);
        }
      } catch (Throwable e) {
        shardSearchStats.onFailedFetchPhase(context);
        throw ExceptionsHelper.convertToRuntime(e);
      }
      shardSearchStats.onFetchPhase(context, System.nanoTime() - time2);
      return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
    } catch (Throwable e) {
      logger.trace("Fetch phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }

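  /**
   * Executes the combined query and fetch phases for a scroll continuation, returning the next
   * page of results for the existing context.
   */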
  public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) {
    final SearchContext context = findContext(request.id());
    contextProcessing(context);
    try {
      ShardSearchStats shardSearchStats = context.indexShard().searchService();
      processScroll(request, context);
      shardSearchStats.onPreQueryPhase(context);
      long time = System.nanoTime();
      try {
        queryPhase.execute(context);
      } catch (Throwable e) {
        shardSearchStats.onFailedQueryPhase(context);
        throw ExceptionsHelper.convertToRuntime(e);
      }
      long time2 = System.nanoTime();
      shardSearchStats.onQueryPhase(context, time2 - time);
      shardSearchStats.onPreFetchPhase(context);
      try {
        shortcutDocIdsToLoad(context);
        fetchPhase.execute(context);
        if (context.scroll() == null) {
          freeContext(request.id());
        } else {
          contextProcessedSuccessfully(context);
        }
      } catch (Throwable e) {
        shardSearchStats.onFailedFetchPhase(context);
        throw ExceptionsHelper.convertToRuntime(e);
      }
      shardSearchStats.onFetchPhase(context, System.nanoTime() - time2);
      return new ScrollQueryFetchSearchResult(
          new QueryFetchSearchResult(context.queryResult(), context.fetchResult()),
          context.shardTarget());
    } catch (Throwable e) {
      logger.trace("Fetch phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }

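  /**
   * Executes the fetch phase of a two-phase search: loads the documents for the doc ids selected
   * by the coordinating node and frees the context unless it belongs to a scroll.
   */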
  public FetchSearchResult executeFetchPhase(ShardFetchRequest request) {
    final SearchContext context = findContext(request.id());
    contextProcessing(context);
    final ShardSearchStats shardSearchStats = context.indexShard().searchService();
    try {
      if (request.lastEmittedDoc() != null) {
        context.lastEmittedDoc(request.lastEmittedDoc());
      }
      context.docIdsToLoad(request.docIds(), 0, request.docIdsSize());
      shardSearchStats.onPreFetchPhase(context);
      long time = System.nanoTime();
      fetchPhase.execute(context);
      if (context.scroll() == null) {
        freeContext(request.id());
      } else {
        contextProcessedSuccessfully(context);
      }
      shardSearchStats.onFetchPhase(context, System.nanoTime() - time);
      return context.fetchResult();
    } catch (Throwable e) {
      shardSearchStats.onFailedFetchPhase(context);
      logger.trace("Fetch phase failed", e);
      processFailure(context, e);
      throw ExceptionsHelper.convertToRuntime(e);
    } finally {
      cleanContext(context);
    }
  }

  private SearchContext findContext(long id) throws SearchContextMissingException {
    SearchContext context = activeContexts.get(id);
    if (context == null) {
      throw new SearchContextMissingException(id);
    }
    SearchContext.setCurrent(context);
    return context;
  }

  final SearchContext createAndPutContext(ShardSearchRequest request) {
    SearchContext context = createContext(request, null);
    boolean success = false;
    try {
      putContext(context);
      context.indexShard().searchService().onNewContext(context);
      success = true;
      return context;
    } finally {
      if (!success) {
        freeContext(context.id());
      }
    }
  }

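  /**
   * Creates a new {@link SearchContext} for the request: acquires an engine searcher unless one
   * is provided, parses the template and the source, applies the default from/size, lets each
   * phase pre-process the context, and computes the keep-alive. The context is closed again if
   * any of these steps fails.
   */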
  final SearchContext createContext(
      ShardSearchRequest request, @Nullable Engine.Searcher searcher) {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(request.shardId());

    SearchShardTarget shardTarget =
        new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());

    Engine.Searcher engineSearcher =
        searcher == null ? indexShard.acquireSearcher("search") : searcher;
    SearchContext context =
        new DefaultSearchContext(
            idGenerator.incrementAndGet(),
            request,
            shardTarget,
            engineSearcher,
            indexService,
            indexShard,
            scriptService,
            pageCacheRecycler,
            bigArrays,
            threadPool.estimatedTimeInMillisCounter());
    SearchContext.setCurrent(context);
    try {
      context.scroll(request.scroll());

      parseTemplate(request);
      parseSource(context, request.source());
      parseSource(context, request.extraSource());

      // if the from and size are still not set, default them
      if (context.from() == -1) {
        context.from(0);
      }
      if (context.searchType() == SearchType.COUNT) {
        // so that the optimizations we apply to size=0 also apply to search_type=COUNT
        // and that we close contexts when done with the query phase
        context.searchType(SearchType.QUERY_THEN_FETCH);
        context.size(0);
      } else if (context.size() == -1) {
        context.size(10);
      }

      // pre process
      dfsPhase.preProcess(context);
      queryPhase.preProcess(context);
      fetchPhase.preProcess(context);

      // compute the context keep alive
      long keepAlive = defaultKeepAlive;
      if (request.scroll() != null && request.scroll().keepAlive() != null) {
        keepAlive = request.scroll().keepAlive().millis();
      }
      context.keepAlive(keepAlive);
    } catch (Throwable e) {
      context.close();
      throw ExceptionsHelper.convertToRuntime(e);
    }

    return context;
  }

  private void freeAllContextForIndex(Index index) {
    assert index != null;
    for (SearchContext ctx : activeContexts.values()) {
      if (index.equals(ctx.indexShard().shardId().index())) {
        freeContext(ctx.id());
      }
    }
  }

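  /**
   * Removes and closes the context with the given id. Returns true if a context was found, false
   * otherwise.
   */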
  public boolean freeContext(long id) {
    final SearchContext context = removeContext(id);
    if (context != null) {
      try {
        context.indexShard().searchService().onFreeContext(context);
      } finally {
        context.close();
      }
      return true;
    }
    return false;
  }

  public void freeAllScrollContexts() {
    for (SearchContext searchContext : activeContexts.values()) {
      if (searchContext.scroll() != null) {
        freeContext(searchContext.id());
      }
    }
  }

  private void contextProcessing(SearchContext context) {
    // disable timeout while executing a search
    context.accessed(-1);
  }

  private void contextProcessedSuccessfully(SearchContext context) {
    context.accessed(threadPool.estimatedTimeInMillis());
  }

  private void cleanContext(SearchContext context) {
    assert context == SearchContext.current();
    context.clearReleasables(Lifetime.PHASE);
    SearchContext.removeCurrent();
  }

  private void processFailure(SearchContext context, Throwable t) {
    freeContext(context.id());
    try {
      if (Lucene.isCorruptionException(t)) {
        context.indexShard().failShard("search execution corruption failure", t);
      }
    } catch (Throwable e) {
      logger.warn(
          "failed to process shard failure to (potentially) send back shard failure on corruption",
          e);
    }
  }

  private void parseTemplate(ShardSearchRequest request) {

    BytesReference processedQuery;
    if (request.template() != null) {
      ExecutableScript executable =
          this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH);
      processedQuery = (BytesReference) executable.run();
    } else {
      if (!hasLength(request.templateSource())) {
        return;
      }
      XContentParser parser = null;
      Template template = null;

      try {
        parser =
            XContentFactory.xContent(request.templateSource())
                .createParser(request.templateSource());
        template = TemplateQueryParser.parse(parser, "params", "template");

        if (template.getType() == ScriptService.ScriptType.INLINE) {
          // Try to double parse for nested template id/file
          parser = null;
          try {
            ExecutableScript executable =
                this.scriptService.executable(template, ScriptContext.Standard.SEARCH);
            processedQuery = (BytesReference) executable.run();
            parser = XContentFactory.xContent(processedQuery).createParser(processedQuery);
          } catch (ElasticsearchParseException epe) {
            // This was a non-nested template, and the parse failure was due to that, so for
            // backwards compatibility it is safe to assume the script refers to a file and
            // keep going
            template =
                new Template(
                    template.getScript(),
                    ScriptService.ScriptType.FILE,
                    MustacheScriptEngineService.NAME,
                    null,
                    template.getParams());
            ExecutableScript executable =
                this.scriptService.executable(template, ScriptContext.Standard.SEARCH);
            processedQuery = (BytesReference) executable.run();
          }
          if (parser != null) {
            try {
              Template innerTemplate = TemplateQueryParser.parse(parser);
              if (hasLength(innerTemplate.getScript())
                  && !innerTemplate.getType().equals(ScriptService.ScriptType.INLINE)) {
                // An inner template referring to a filename or id
                template =
                    new Template(
                        innerTemplate.getScript(),
                        innerTemplate.getType(),
                        MustacheScriptEngineService.NAME,
                        null,
                        template.getParams());
                ExecutableScript executable =
                    this.scriptService.executable(template, ScriptContext.Standard.SEARCH);
                processedQuery = (BytesReference) executable.run();
              }
            } catch (ScriptParseException e) {
              // No inner template found, use original template from above
            }
          }
        } else {
          ExecutableScript executable =
              this.scriptService.executable(template, ScriptContext.Standard.SEARCH);
          processedQuery = (BytesReference) executable.run();
        }
      } catch (IOException e) {
        throw new ElasticsearchParseException("Failed to parse template", e);
      } finally {
        Releasables.closeWhileHandlingException(parser);
      }

      if (!hasLength(template.getScript())) {
        throw new ElasticsearchParseException("Template must have [template] field configured");
      }
    }
    request.source(processedQuery);
  }

  private void parseSource(SearchContext context, BytesReference source)
      throws SearchParseException {
    // nothing to parse...
    if (source == null || source.length() == 0) {
      return;
    }
    XContentParser parser = null;
    try {
      parser = XContentFactory.xContent(source).createParser(source);
      XContentParser.Token token;
      token = parser.nextToken();
      if (token != XContentParser.Token.START_OBJECT) {
        throw new ElasticsearchParseException(
            "Expected START_OBJECT but got " + token.name() + " " + parser.currentName());
      }
      while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
          String fieldName = parser.currentName();
          parser.nextToken();
          SearchParseElement element = elementParsers.get(fieldName);
          if (element == null) {
            throw new SearchParseException(
                context, "No parser for element [" + fieldName + "]", parser.getTokenLocation());
          }
          element.parse(parser, context);
        } else {
          if (token == null) {
            throw new ElasticsearchParseException(
                "End of query source reached but query is not complete.");
          } else {
            throw new ElasticsearchParseException(
                "Expected field name but got "
                    + token.name()
                    + " \""
                    + parser.currentName()
                    + "\"");
          }
        }
      }
    } catch (Throwable e) {
      String sSource = "_na_";
      try {
        sSource = XContentHelper.convertToJson(source, false);
      } catch (Throwable e1) {
        // ignore
      }
      throw new SearchParseException(
          context,
          "Failed to parse source [" + sSource + "]",
          parser != null ? parser.getTokenLocation() : null,
          e);
    } finally {
      if (parser != null) {
        parser.close();
      }
    }
  }

  private static final int[] EMPTY_DOC_IDS = new int[0];

  /**
   * Shortcut the doc ids to load: we load only from "from" and up to "size". The phase controller
   * handles this as well, since the result is always size * shards for QUERY_AND_FETCH.
   */
  private void shortcutDocIdsToLoad(SearchContext context) {
    if (context.request().scroll() != null) {
      TopDocs topDocs = context.queryResult().topDocs();
      int[] docIdsToLoad = new int[topDocs.scoreDocs.length];
      for (int i = 0; i < topDocs.scoreDocs.length; i++) {
        docIdsToLoad[i] = topDocs.scoreDocs[i].doc;
      }
      context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
    } else {
      TopDocs topDocs = context.queryResult().topDocs();
      if (topDocs.scoreDocs.length < context.from()) {
        // no more docs...
        context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0);
        return;
      }
      int totalSize = context.from() + context.size();
      int[] docIdsToLoad =
          new int[Math.min(topDocs.scoreDocs.length - context.from(), context.size())];
      int counter = 0;
      for (int i = context.from(); i < totalSize; i++) {
        if (i < topDocs.scoreDocs.length) {
          docIdsToLoad[counter] = topDocs.scoreDocs[i].doc;
        } else {
          break;
        }
        counter++;
      }
      context.docIdsToLoad(docIdsToLoad, 0, counter);
    }
  }

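  /**
   * For scanning, every hit of the current page is fetched, so all doc ids from the query result
   * are marked for loading.
   */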
  private void shortcutDocIdsToLoadForScanning(SearchContext context) {
    TopDocs topDocs = context.queryResult().topDocs();
    if (topDocs.scoreDocs.length == 0) {
      // no more docs...
      context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0);
      return;
    }
    int[] docIdsToLoad = new int[topDocs.scoreDocs.length];
    for (int i = 0; i < docIdsToLoad.length; i++) {
      docIdsToLoad[i] = topDocs.scoreDocs[i].doc;
    }
    context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
  }

  private void processScroll(InternalScrollSearchRequest request, SearchContext context) {
    // process scroll
    context.from(context.from() + context.size());
    context.scroll(request.scroll());
    // update the context keep alive based on the new scroll value
    if (request.scroll() != null && request.scroll().keepAlive() != null) {
      context.keepAlive(request.scroll().keepAlive().millis());
    }
  }

  /** Returns the number of active contexts in this SearchService */
  public int getActiveContexts() {
    return this.activeContexts.size();
  }

  static class NormsWarmer extends IndicesWarmer.Listener {

    @Override
    public TerminationHandle warmNewReaders(
        final IndexShard indexShard,
        IndexMetaData indexMetaData,
        final WarmerContext context,
        ThreadPool threadPool) {
      final Loading defaultLoading =
          Loading.parse(indexMetaData.settings().get(NORMS_LOADING_KEY), Loading.LAZY);
      final MapperService mapperService = indexShard.mapperService();
      final ObjectSet<String> warmUp = new ObjectHashSet<>();
      for (DocumentMapper docMapper : mapperService.docMappers(false)) {
        for (FieldMapper fieldMapper : docMapper.mappers()) {
          final String indexName = fieldMapper.fieldType().names().indexName();
          Loading normsLoading = fieldMapper.fieldType().normsLoading();
          if (normsLoading == null) {
            normsLoading = defaultLoading;
          }
          if (fieldMapper.fieldType().indexOptions() != IndexOptions.NONE
              && !fieldMapper.fieldType().omitNorms()
              && normsLoading == Loading.EAGER) {
            warmUp.add(indexName);
          }
        }
      }

      final CountDownLatch latch = new CountDownLatch(1);
      // Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single
      // task
      threadPool
          .executor(executor())
          .execute(
              new Runnable() {
                @Override
                public void run() {
                  try {
                    for (ObjectCursor<String> stringObjectCursor : warmUp) {
                      final String indexName = stringObjectCursor.value;
                      final long start = System.nanoTime();
                      for (final LeafReaderContext ctx : context.searcher().reader().leaves()) {
                        final NumericDocValues values = ctx.reader().getNormValues(indexName);
                        if (values != null) {
                          values.get(0);
                        }
                      }
                      if (indexShard.warmerService().logger().isTraceEnabled()) {
                        indexShard
                            .warmerService()
                            .logger()
                            .trace(
                                "warmed norms for [{}], took [{}]",
                                indexName,
                                TimeValue.timeValueNanos(System.nanoTime() - start));
                      }
                    }
                  } catch (Throwable t) {
                    indexShard.warmerService().logger().warn("failed to warm-up norms", t);
                  } finally {
                    latch.countDown();
                  }
                }
              });

      return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
          latch.await();
        }
      };
    }

    @Override
    public TerminationHandle warmTopReader(
        IndexShard indexShard,
        IndexMetaData indexMetaData,
        WarmerContext context,
        ThreadPool threadPool) {
      return TerminationHandle.NO_WAIT;
    }
  }

  static class FieldDataWarmer extends IndicesWarmer.Listener {

    @Override
    public TerminationHandle warmNewReaders(
        final IndexShard indexShard,
        IndexMetaData indexMetaData,
        final WarmerContext context,
        ThreadPool threadPool) {
      final MapperService mapperService = indexShard.mapperService();
      final Map<String, MappedFieldType> warmUp = new HashMap<>();
      for (DocumentMapper docMapper : mapperService.docMappers(false)) {
        for (FieldMapper fieldMapper : docMapper.mappers()) {
          final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
          if (fieldDataType == null) {
            continue;
          }
          if (fieldDataType.getLoading() == Loading.LAZY) {
            continue;
          }

          final String indexName = fieldMapper.fieldType().names().indexName();
          if (warmUp.containsKey(indexName)) {
            continue;
          }
          warmUp.put(indexName, fieldMapper.fieldType());
        }
      }
      final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
      final Executor executor = threadPool.executor(executor());
      final CountDownLatch latch =
          new CountDownLatch(context.searcher().reader().leaves().size() * warmUp.size());
      for (final LeafReaderContext ctx : context.searcher().reader().leaves()) {
        for (final MappedFieldType fieldType : warmUp.values()) {
          executor.execute(
              new Runnable() {

                @Override
                public void run() {
                  try {
                    final long start = System.nanoTime();
                    indexFieldDataService.getForField(fieldType).load(ctx);
                    if (indexShard.warmerService().logger().isTraceEnabled()) {
                      indexShard
                          .warmerService()
                          .logger()
                          .trace(
                              "warmed fielddata for [{}], took [{}]",
                              fieldType.names().fullName(),
                              TimeValue.timeValueNanos(System.nanoTime() - start));
                    }
                  } catch (Throwable t) {
                    indexShard
                        .warmerService()
                        .logger()
                        .warn(
                            "failed to warm-up fielddata for [{}]",
                            t,
                            fieldType.names().fullName());
                  } finally {
                    latch.countDown();
                  }
                }
              });
        }
      }
      return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
          latch.await();
        }
      };
    }

    @Override
    public TerminationHandle warmTopReader(
        final IndexShard indexShard,
        IndexMetaData indexMetaData,
        final WarmerContext context,
        ThreadPool threadPool) {
      final MapperService mapperService = indexShard.mapperService();
      final Map<String, MappedFieldType> warmUpGlobalOrdinals = new HashMap<>();
      for (DocumentMapper docMapper : mapperService.docMappers(false)) {
        for (FieldMapper fieldMapper : docMapper.mappers()) {
          final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
          if (fieldDataType == null) {
            continue;
          }
          if (fieldDataType.getLoading() != Loading.EAGER_GLOBAL_ORDINALS) {
            continue;
          }
          final String indexName = fieldMapper.fieldType().names().indexName();
          if (warmUpGlobalOrdinals.containsKey(indexName)) {
            continue;
          }
          warmUpGlobalOrdinals.put(indexName, fieldMapper.fieldType());
        }
      }
      final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
      final Executor executor = threadPool.executor(executor());
      final CountDownLatch latch = new CountDownLatch(warmUpGlobalOrdinals.size());
      for (final MappedFieldType fieldType : warmUpGlobalOrdinals.values()) {
        executor.execute(
            new Runnable() {
              @Override
              public void run() {
                try {
                  final long start = System.nanoTime();
                  IndexFieldData.Global ifd = indexFieldDataService.getForField(fieldType);
                  ifd.loadGlobal(context.reader());
                  if (indexShard.warmerService().logger().isTraceEnabled()) {
                    indexShard
                        .warmerService()
                        .logger()
                        .trace(
                            "warmed global ordinals for [{}], took [{}]",
                            fieldType.names().fullName(),
                            TimeValue.timeValueNanos(System.nanoTime() - start));
                  }
                } catch (Throwable t) {
                  indexShard
                      .warmerService()
                      .logger()
                      .warn(
                          "failed to warm-up global ordinals for [{}]",
                          t,
                          fieldType.names().fullName());
                } finally {
                  latch.countDown();
                }
              }
            });
      }
      return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
          latch.await();
        }
      };
    }
  }

  class SearchWarmer extends IndicesWarmer.Listener {

    @Override
    public TerminationHandle warmNewReaders(
        IndexShard indexShard,
        IndexMetaData indexMetaData,
        WarmerContext context,
        ThreadPool threadPool) {
      return internalWarm(indexShard, indexMetaData, context, threadPool, false);
    }

    @Override
    public TerminationHandle warmTopReader(
        IndexShard indexShard,
        IndexMetaData indexMetaData,
        WarmerContext context,
        ThreadPool threadPool) {
      return internalWarm(indexShard, indexMetaData, context, threadPool, true);
    }

    public TerminationHandle internalWarm(
        final IndexShard indexShard,
        final IndexMetaData indexMetaData,
        final IndicesWarmer.WarmerContext warmerContext,
        ThreadPool threadPool,
        final boolean top) {
      IndexWarmersMetaData custom = indexMetaData.custom(IndexWarmersMetaData.TYPE);
      if (custom == null) {
        return TerminationHandle.NO_WAIT;
      }
      final Executor executor = threadPool.executor(executor());
      final CountDownLatch latch = new CountDownLatch(custom.entries().size());
      for (final IndexWarmersMetaData.Entry entry : custom.entries()) {
        executor.execute(
            new Runnable() {

              @Override
              public void run() {
                SearchContext context = null;
                try {
                  long now = System.nanoTime();
                  ShardSearchRequest request =
                      new ShardSearchLocalRequest(
                          indexShard.shardId(),
                          indexMetaData.numberOfShards(),
                          SearchType.QUERY_THEN_FETCH,
                          entry.source(),
                          entry.types(),
                          entry.queryCache());
                  context = createContext(request, warmerContext.searcher());
                  // if we use sort, we need to do query to sort on it and load relevant field data
                  // if not, we might as well set size=0 (and cache if needed)
                  if (context.sort() == null) {
                    context.size(0);
                  }
                  boolean canCache = indicesQueryCache.canCache(request, context);
                  // terminate early when cacheability and reader level don't match: proper
                  // caching only works on the top-level searcher, and if we can't cache and this
                  // is the top-level reader, we already executed the query on the new readers
                  if (canCache != top) {
                    return;
                  }
                  loadOrExecuteQueryPhase(request, context, queryPhase);
                  long took = System.nanoTime() - now;
                  if (indexShard.warmerService().logger().isTraceEnabled()) {
                    indexShard
                        .warmerService()
                        .logger()
                        .trace(
                            "warmed [{}], took [{}]", entry.name(), TimeValue.timeValueNanos(took));
                  }
                } catch (Throwable t) {
                  indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name());
                } finally {
                  try {
                    if (context != null) {
                      freeContext(context.id());
                      cleanContext(context);
                    }
                  } finally {
                    latch.countDown();
                  }
                }
              }
            });
      }
      return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
          latch.await();
        }
      };
    }
  }

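  /**
   * Runs periodically (every keep_alive_interval) and frees every context whose keep-alive has
   * elapsed since it was last accessed.
   */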
  class Reaper implements Runnable {
    @Override
    public void run() {
      final long time = threadPool.estimatedTimeInMillis();
      for (SearchContext context : activeContexts.values()) {
        // Use the same value for both checks since lastAccessTime can
        // be modified by another thread between checks!
        final long lastAccessTime = context.lastAccessTime();
        if (lastAccessTime == -1L) { // it's being processed or the timeout is disabled
          continue;
        }
        if (time - lastAccessTime > context.keepAlive()) {
          logger.debug(
              "freeing search context [{}], time [{}], lastAccessTime [{}], keepAlive [{}]",
              context.id(),
              time,
              lastAccessTime,
              context.keepAlive());
          freeContext(context.id());
        }
      }
    }
  }
}
public class TransportService extends AbstractLifecycleComponent<TransportService> {

  private final AtomicBoolean started = new AtomicBoolean(false);
  protected final Transport transport;
  protected final ThreadPool threadPool;

  volatile ImmutableMap<String, TransportRequestHandler> serverHandlers = ImmutableMap.of();
  final Object serverHandlersMutex = new Object();

  final ConcurrentMapLong<RequestHolder> clientHandlers =
      ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();

  final AtomicLong requestIds = new AtomicLong();

  final CopyOnWriteArrayList<TransportConnectionListener> connectionListeners =
      new CopyOnWriteArrayList<>();

  // An LRU map (we don't really care about concurrency here) that holds the latest timed-out
  // requests, so if a response for one does show up late, we can print more descriptive
  // information about it
  final Map<Long, TimeoutInfoHolder> timeoutInfoHandlers =
      Collections.synchronizedMap(
          new LinkedHashMap<Long, TimeoutInfoHolder>(100, .75F, true) {
            @Override
            protected boolean removeEldestEntry(Map.Entry<Long, TimeoutInfoHolder> eldest) {
              return size() > 100;
            }
          });

  private final TransportService.Adapter adapter = new Adapter();

  public TransportService(Transport transport, ThreadPool threadPool) {
    this(EMPTY_SETTINGS, transport, threadPool);
  }

  @Inject
  public TransportService(Settings settings, Transport transport, ThreadPool threadPool) {
    super(settings);
    this.transport = transport;
    this.threadPool = threadPool;
  }

  @Override
  protected void doStart() throws ElasticsearchException {
    adapter.rxMetric.clear();
    adapter.txMetric.clear();
    transport.transportServiceAdapter(adapter);
    transport.start();
    if (transport.boundAddress() != null && logger.isInfoEnabled()) {
      logger.info("{}", transport.boundAddress());
    }
    boolean setStarted = started.compareAndSet(false, true);
    assert setStarted : "service was already started";
  }

  @Override
  protected void doStop() throws ElasticsearchException {
    final boolean setStopped = started.compareAndSet(true, false);
    assert setStopped : "service has already been stopped";
    try {
      transport.stop();
    } finally {
      // in case the transport is not connected to our local node (and thus not cleaned on node
      // disconnect), make sure to clean up any leftover ongoing handlers
      for (Map.Entry<Long, RequestHolder> entry : clientHandlers.entrySet()) {
        final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey());
        if (holderToNotify != null) {
          // callback that an exception happened, but on a different thread since we don't
          // want handlers to worry about stack overflows
          threadPool
              .generic()
              .execute(
                  new Runnable() {
                    @Override
                    public void run() {
                      holderToNotify
                          .handler()
                          .handleException(
                              new TransportException(
                                  "transport stopped, action: " + holderToNotify.action()));
                    }
                  });
        }
      }
    }
  }

  @Override
  protected void doClose() throws ElasticsearchException {
    transport.close();
  }

  public boolean addressSupported(Class<? extends TransportAddress> address) {
    return transport.addressSupported(address);
  }

  public TransportInfo info() {
    BoundTransportAddress boundTransportAddress = boundAddress();
    if (boundTransportAddress == null) {
      return null;
    }
    return new TransportInfo(boundTransportAddress);
  }

  public TransportStats stats() {
    return new TransportStats(
        transport.serverOpen(),
        adapter.rxMetric.count(),
        adapter.rxMetric.sum(),
        adapter.txMetric.count(),
        adapter.txMetric.sum());
  }

  public BoundTransportAddress boundAddress() {
    return transport.boundAddress();
  }

  public boolean nodeConnected(DiscoveryNode node) {
    return transport.nodeConnected(node);
  }

  public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
    transport.connectToNode(node);
  }

  public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
    transport.connectToNodeLight(node);
  }

  public void disconnectFromNode(DiscoveryNode node) {
    transport.disconnectFromNode(node);
  }

  public void addConnectionListener(TransportConnectionListener listener) {
    connectionListeners.add(listener);
  }

  public void removeConnectionListener(TransportConnectionListener listener) {
    connectionListeners.remove(listener);
  }

  public <T extends TransportResponse> TransportFuture<T> submitRequest(
      DiscoveryNode node,
      String action,
      TransportRequest request,
      TransportResponseHandler<T> handler)
      throws TransportException {
    return submitRequest(node, action, request, TransportRequestOptions.EMPTY, handler);
  }

  public <T extends TransportResponse> TransportFuture<T> submitRequest(
      DiscoveryNode node,
      String action,
      TransportRequest request,
      TransportRequestOptions options,
      TransportResponseHandler<T> handler)
      throws TransportException {
    PlainTransportFuture<T> futureHandler = new PlainTransportFuture<>(handler);
    sendRequest(node, action, request, options, futureHandler);
    return futureHandler;
  }

  public <T extends TransportResponse> void sendRequest(
      final DiscoveryNode node,
      final String action,
      final TransportRequest request,
      final TransportResponseHandler<T> handler) {
    sendRequest(node, action, request, TransportRequestOptions.EMPTY, handler);
  }

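  /**
   * Sends a request to the given node: registers a holder for the response handler under a fresh
   * request id, schedules a timeout if one is configured in the options, and hands the request to
   * the transport. On failure, the holder is removed and the handler is notified on a generic
   * thread pool thread to avoid deep stacks.
   */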
  public <T extends TransportResponse> void sendRequest(
      final DiscoveryNode node,
      final String action,
      final TransportRequest request,
      final TransportRequestOptions options,
      TransportResponseHandler<T> handler) {
    if (node == null) {
      throw new ElasticsearchIllegalStateException("can't send request to a null node");
    }
    final long requestId = newRequestId();
    TimeoutHandler timeoutHandler = null;
    try {
      if (options.timeout() != null) {
        // create the timeout handler before registering the holder, so that the holder can
        // cancel the scheduled timeout once the response arrives
        timeoutHandler = new TimeoutHandler(requestId);
      }
      clientHandlers.put(requestId, new RequestHolder<>(handler, node, action, timeoutHandler));
      if (started.get() == false) {
        // if we are not started, the exception handling below will remove the RequestHolder
        // again and call the handler to notify the caller.
        // it will only notify if the doStop code hasn't done the work yet.
        throw new TransportException("TransportService is stopped, can't send request");
      }
      if (timeoutHandler != null) {
        timeoutHandler.future =
            threadPool.schedule(options.timeout(), ThreadPool.Names.GENERIC, timeoutHandler);
      }
      transport.sendRequest(node, requestId, action, request, options);
    } catch (final Throwable e) {
      // usually happen either because we failed to connect to the node
      // or because we failed serializing the message
      final RequestHolder holderToNotify = clientHandlers.remove(requestId);
      // if the scheduler raised an EsRejectedExecutionException (due to shutdown), we may have a
      // timeout handler, but no future
      if (timeoutHandler != null) {
        FutureUtils.cancel(timeoutHandler.future);
      }

      // If holderToNotify == null then handler has already been taken care of.
      if (holderToNotify != null) {
        // callback that an exception happened, but on a different thread since we don't
        // want handlers to worry about stack overflows
        final SendRequestTransportException sendRequestException =
            new SendRequestTransportException(node, action, e);
        threadPool
            .executor(ThreadPool.Names.GENERIC)
            .execute(
                new Runnable() {
                  @Override
                  public void run() {
                    holderToNotify.handler().handleException(sendRequestException);
                  }
                });
      }
    }
  }

  private long newRequestId() {
    return requestIds.getAndIncrement();
  }

  public TransportAddress[] addressesFromString(String address) throws Exception {
    return transport.addressesFromString(address);
  }

  public void registerHandler(String action, TransportRequestHandler handler) {
    synchronized (serverHandlersMutex) {
      TransportRequestHandler handlerReplaced = serverHandlers.get(action);
      serverHandlers = MapBuilder.newMapBuilder(serverHandlers).put(action, handler).immutableMap();
      if (handlerReplaced != null) {
        logger.warn(
            "Registered two transport handlers for action {}, handlers: {}, {}",
            action,
            handler,
            handlerReplaced);
      }
    }
  }

  public void removeHandler(String action) {
    synchronized (serverHandlersMutex) {
      serverHandlers = MapBuilder.newMapBuilder(serverHandlers).remove(action).immutableMap();
    }
  }

  protected TransportRequestHandler getHandler(String action) {
    return serverHandlers.get(action);
  }

  class Adapter implements TransportServiceAdapter {

    final MeanMetric rxMetric = new MeanMetric();
    final MeanMetric txMetric = new MeanMetric();

    @Override
    public void received(long size) {
      rxMetric.inc(size);
    }

    @Override
    public void sent(long size) {
      txMetric.inc(size);
    }

    @Override
    public TransportRequestHandler handler(String action, Version version) {
      return serverHandlers.get(ActionNames.incomingAction(action, version));
    }

    @Override
    public TransportResponseHandler remove(long requestId) {
      RequestHolder holder = clientHandlers.remove(requestId);
      if (holder == null) {
        // let's see if it's in the timeout holders
        TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId);
        if (timeoutInfoHolder != null) {
          long time = System.currentTimeMillis();
          logger.warn(
              "Received response for a request that has timed out, sent [{}ms] ago, timed out [{}ms] ago, action [{}], node [{}], id [{}]",
              time - timeoutInfoHolder.sentTime(),
              time - timeoutInfoHolder.timeoutTime(),
              timeoutInfoHolder.action(),
              timeoutInfoHolder.node(),
              requestId);
        } else {
          logger.warn("Transport response handler not found of id [{}]", requestId);
        }
        return null;
      }
      holder.cancel();
      return holder.handler();
    }

    @Override
    public void raiseNodeConnected(final DiscoveryNode node) {
      threadPool
          .generic()
          .execute(
              new Runnable() {
                @Override
                public void run() {
                  for (TransportConnectionListener connectionListener : connectionListeners) {
                    connectionListener.onNodeConnected(node);
                  }
                }
              });
    }

    @Override
    public void raiseNodeDisconnected(final DiscoveryNode node) {
      try {
        for (final TransportConnectionListener connectionListener : connectionListeners) {
          threadPool
              .generic()
              .execute(
                  new Runnable() {
                    @Override
                    public void run() {
                      connectionListener.onNodeDisconnected(node);
                    }
                  });
        }
        for (Map.Entry<Long, RequestHolder> entry : clientHandlers.entrySet()) {
          RequestHolder holder = entry.getValue();
          if (holder.node().equals(node)) {
            final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey());
            if (holderToNotify != null) {
              // callback that an exception happened, but on a different thread since we don't
              // want handlers to worry about stack overflows
              threadPool
                  .generic()
                  .execute(
                      new Runnable() {
                        @Override
                        public void run() {
                          holderToNotify
                              .handler()
                              .handleException(
                                  new NodeDisconnectedException(node, holderToNotify.action()));
                        }
                      });
            }
          }
        }
      } catch (EsRejectedExecutionException ex) {
        logger.debug("Rejected execution on NodeDisconnected", ex);
      }
    }

    @Override
    public String action(String action, Version version) {
      return ActionNames.outgoingAction(action, version);
    }
  }

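  /**
   * Scheduled when a request carries a timeout: if the request is still pending when this fires,
   * the holder is removed, the timeout information is recorded so a late response can still be
   * reported, and the handler is notified with a {@link ReceiveTimeoutTransportException}.
   */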
  class TimeoutHandler implements Runnable {

    private final long requestId;

    private final long sentTime = System.currentTimeMillis();

    ScheduledFuture<?> future;

    TimeoutHandler(long requestId) {
      this.requestId = requestId;
    }

    public long sentTime() {
      return sentTime;
    }

    @Override
    public void run() {
      if (future.isCancelled()) {
        return;
      }
      final RequestHolder holder = clientHandlers.remove(requestId);
      if (holder != null) {
        // add it to the timeout information holder, in case we are going to get a response later
        long timeoutTime = System.currentTimeMillis();
        timeoutInfoHandlers.put(
            requestId,
            new TimeoutInfoHolder(holder.node(), holder.action(), sentTime, timeoutTime));
        holder
            .handler()
            .handleException(
                new ReceiveTimeoutTransportException(
                    holder.node(),
                    holder.action(),
                    "request_id ["
                        + requestId
                        + "] timed out after ["
                        + (timeoutTime - sentTime)
                        + "ms]"));
      }
    }
  }

  static class TimeoutInfoHolder {

    private final DiscoveryNode node;

    private final String action;

    private final long sentTime;

    private final long timeoutTime;

    TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) {
      this.node = node;
      this.action = action;
      this.sentTime = sentTime;
      this.timeoutTime = timeoutTime;
    }

    public DiscoveryNode node() {
      return node;
    }

    public String action() {
      return action;
    }

    public long sentTime() {
      return sentTime;
    }

    public long timeoutTime() {
      return timeoutTime;
    }
  }

  static class RequestHolder<T extends TransportResponse> {

    private final TransportResponseHandler<T> handler;

    private final DiscoveryNode node;

    private final String action;

    private final TimeoutHandler timeout;

    RequestHolder(
        TransportResponseHandler<T> handler,
        DiscoveryNode node,
        String action,
        TimeoutHandler timeout) {
      this.handler = handler;
      this.node = node;
      this.action = action;
      this.timeout = timeout;
    }

    public TransportResponseHandler<T> handler() {
      return handler;
    }

    public DiscoveryNode node() {
      return this.node;
    }

    public String action() {
      return this.action;
    }

    public void cancel() {
      if (timeout != null) {
        FutureUtils.cancel(timeout.future);
      }
    }
  }
}