protected String handleRequest(
      Channel channel, StreamInput buffer, long requestId, Version version) throws IOException {
    final String action = buffer.readString();

    final NettyTransportChannel transportChannel =
        new NettyTransportChannel(transport, action, channel, requestId, version);
    try {
      final TransportRequestHandler handler = transportServiceAdapter.handler(action);
      if (handler == null) {
        throw new ActionNotFoundTransportException(action);
      }
      final TransportRequest request = handler.newInstance();
      request.remoteAddress(
          new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress()));
      request.readFrom(buffer);
      if (handler.executor() == ThreadPool.Names.SAME) {
        //noinspection unchecked
        handler.messageReceived(request, transportChannel);
      } else {
        threadPool
            .executor(handler.executor())
            .execute(new RequestHandler(handler, request, transportChannel, action));
      }
    } catch (Throwable e) {
      try {
        transportChannel.sendResponse(e);
      } catch (IOException e1) {
        logger.warn("Failed to send error message back to client for action [" + action + "]", e);
        logger.warn("Actual Exception", e1);
      }
    }
    return action;
  }
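 // Deserializes a response off the wire and hands it to the waiting handler,
 // inline when the handler asks for the SAME executor, otherwise on the
 // handler's thread pool; a deserialization failure is reported through
 // handleException as a TransportSerializationException.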
 protected void handleResponse(
     Channel channel, StreamInput buffer, final TransportResponseHandler handler) {
   final TransportResponse response = handler.newInstance();
   response.remoteAddress(
       new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress()));
   try {
     response.readFrom(buffer);
   } catch (Throwable e) {
     handleException(
         handler,
         new TransportSerializationException(
             "Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
     return;
   }
   try {
     if (handler.executor() == ThreadPool.Names.SAME) {
       //noinspection unchecked
       handler.handleResponse(response);
     } else {
       threadPool.executor(handler.executor()).execute(new ResponseHandler(handler, response));
     }
   } catch (Throwable e) {
     handleException(handler, new ResponseHandlerFailureTransportException(e));
   }
 }
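 // Guarantees the handler sees a RemoteTransportException (wrapping any other
 // throwable) and invokes handleException on the handler's executor.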
 private void handleException(final TransportResponseHandler handler, Throwable error) {
   if (!(error instanceof RemoteTransportException)) {
     error = new RemoteTransportException(error.getMessage(), error);
   }
   final RemoteTransportException rtx = (RemoteTransportException) error;
   if (handler.executor() == ThreadPool.Names.SAME) {
     try {
       handler.handleException(rtx);
     } catch (Throwable e) {
       logger.error("failed to handle exception response [{}]", e, handler);
     }
   } else {
     threadPool
         .executor(handler.executor())
         .execute(
             new Runnable() {
               @Override
               public void run() {
                 try {
                   handler.handleException(rtx);
                 } catch (Throwable e) {
                   logger.error("failed to handle exception response [{}]", e, handler);
                 }
               }
             });
   }
 }
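    // Tracks local-node master transitions: when this node gains or loses
    // mastership, every registered listener is notified on its own executor.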
    @Override
    public void clusterChanged(ClusterChangedEvent event) {
      if (!master && event.localNodeMaster()) {
        master = true;
        for (LocalNodeMasterListener listener : listeners) {
          Executor executor = threadPool.executor(listener.executorName());
          executor.execute(new OnMasterRunnable(listener));
        }
        return;
      }

      if (master && !event.localNodeMaster()) {
        master = false;
        for (LocalNodeMasterListener listener : listeners) {
          Executor executor = threadPool.executor(listener.executorName());
          executor.execute(new OffMasterRunnable(listener));
        }
      }
    }
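 // Builds the immutable listener list; a FieldDataWarmer bound to the WARMER
 // executor is always registered ahead of the caller-supplied listeners.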
 IndexWarmer(Settings settings, ThreadPool threadPool, Listener... listeners) {
   super(settings);
   ArrayList<Listener> list = new ArrayList<>();
   final Executor executor = threadPool.executor(ThreadPool.Names.WARMER);
   list.add(new FieldDataWarmer(executor));
    Collections.addAll(list, listeners);
   this.listeners = Collections.unmodifiableList(list);
 }
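  // Checks that the bulk and index pool sizes are clamped to the bounded
  // number of processors, both at construction and on dynamic settings updates.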
  public void testIndexingThreadPoolsMaxSize() throws InterruptedException {
    for (String name : new String[] {ThreadPool.Names.BULK, ThreadPool.Names.INDEX}) {
      ThreadPool threadPool = null;
      try {

        int maxSize = EsExecutors.boundedNumberOfProcessors(Settings.EMPTY);

        // try to create a too-big (maxSize+1) thread pool
        threadPool =
            new ThreadPool(
                Settings.builder()
                    .put("node.name", "testIndexingThreadPoolsMaxSize")
                    .put("threadpool." + name + ".size", maxSize + 1)
                    .build());

        // confirm it clipped us at the maxSize:
        assertEquals(
            maxSize, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize());

        ClusterSettings clusterSettings =
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        threadPool.setClusterSettings(clusterSettings);

        // update it to a tiny size:
        clusterSettings.applySettings(
            Settings.builder().put("threadpool." + name + ".size", 1).build());

        // confirm it worked:
        assertEquals(1, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize());

        // try to update to too-big size:
        clusterSettings.applySettings(
            Settings.builder().put("threadpool." + name + ".size", maxSize + 1).build());

        // confirm it clipped us at the maxSize:
        assertEquals(
            maxSize, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize());
      } finally {
        terminateThreadPoolIfNeeded(threadPool);
      }
    }
  }
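  // Checks that resizing the queue replaces the executor, that the old executor
  // keeps draining in-flight tasks, and that shutdownNow interrupts them.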
  public void testShutdownNowInterrupts() throws Exception {
    String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED);
    ThreadPool threadPool = null;
    try {
      Settings nodeSettings =
          Settings.builder()
              .put("threadpool." + threadPoolName + ".queue_size", 1000)
              .put("node.name", "testShutdownNowInterrupts")
              .build();
      threadPool = new ThreadPool(nodeSettings);
      ClusterSettings clusterSettings =
          new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
      threadPool.setClusterSettings(clusterSettings);
      assertEquals(info(threadPool, threadPoolName).getQueueSize().getSingles(), 1000L);

      final CountDownLatch latch = new CountDownLatch(1);
      ThreadPoolExecutor oldExecutor = (ThreadPoolExecutor) threadPool.executor(threadPoolName);
      threadPool
          .executor(threadPoolName)
          .execute(
              () -> {
                try {
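                  // block on a fresh latch that is never counted down;
                  // only an interrupt can wake this task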
                  new CountDownLatch(1).await();
                } catch (InterruptedException ex) {
                  latch.countDown();
                  Thread.currentThread().interrupt();
                }
              });
      clusterSettings.applySettings(
          Settings.builder().put("threadpool." + threadPoolName + ".queue_size", 2000).build());
      assertThat(threadPool.executor(threadPoolName), not(sameInstance(oldExecutor)));
      assertThat(oldExecutor.isShutdown(), equalTo(true));
      assertThat(oldExecutor.isTerminating(), equalTo(true));
      assertThat(oldExecutor.isTerminated(), equalTo(false));
      threadPool.shutdownNow(); // should interrupt the blocked task
      // the latch only reaches zero if the task was actually interrupted; assert
      // instead of ignoring the await result so a missed interrupt fails the test
      assertTrue(latch.await(3, TimeUnit.SECONDS));
    } finally {
      terminateThreadPoolIfNeeded(threadPool);
    }
  }
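  // Registers a RequestHolder under a fresh request id, optionally schedules a
  // timeout on the GENERIC pool, and sends the request; on failure the holder
  // is removed and the handler notified on a different thread so callers don't
  // have to worry about stack overflows.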
  public <T extends TransportResponse> void sendRequest(
      final DiscoveryNode node,
      final String action,
      final TransportRequest request,
      final TransportRequestOptions options,
      TransportResponseHandler<T> handler) {
    if (node == null) {
      throw new ElasticsearchIllegalStateException("can't send request to a null node");
    }
    final long requestId = newRequestId();
    // create the timeout handler up front so the RequestHolder actually references it
    final TimeoutHandler timeoutHandler =
        options.timeout() != null ? new TimeoutHandler(requestId) : null;
    try {
      clientHandlers.put(requestId, new RequestHolder<>(handler, node, action, timeoutHandler));
      if (started.get() == false) {
        // if we are not started, the exception handling below will remove the RequestHolder
        // again and call the handler to notify the caller.
        // it will only notify if the stop logic hasn't done the work yet.
        throw new TransportException("TransportService is stopped, can't send request");
      }
      if (timeoutHandler != null) {
        timeoutHandler.future =
            threadPool.schedule(options.timeout(), ThreadPool.Names.GENERIC, timeoutHandler);
      }
      transport.sendRequest(node, requestId, action, request, options);
    } catch (final Throwable e) {
      // this usually happens either because we failed to connect to the node
      // or because we failed to serialize the message
      final RequestHolder holderToNotify = clientHandlers.remove(requestId);
      // if the scheduler raises an EsRejectedExecutionException (due to shutdown), we may have a
      // timeout handler, but no future
      if (timeoutHandler != null) {
        FutureUtils.cancel(timeoutHandler.future);
      }

      // If holderToNotify == null then handler has already been taken care of.
      if (holderToNotify != null) {
        // callback that an exception happened, but on a different thread since we don't
        // want handlers to worry about stack overflows
        final SendRequestTransportException sendRequestException =
            new SendRequestTransportException(node, action, e);
        threadPool
            .executor(ThreadPool.Names.GENERIC)
            .execute(
                new Runnable() {
                  @Override
                  public void run() {
                    holderToNotify.handler().handleException(sendRequestException);
                  }
                });
      }
    }
  }
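 // Dispatches an already-deserialized response on the handler's executor,
 // routing any failure inside the handler through handleException.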
 protected void handleParsedResponse(
     final TransportResponse response, final TransportResponseHandler handler) {
   threadPool
       .executor(handler.executor())
       .execute(
           () -> {
             try {
               handler.handleResponse(response);
             } catch (Throwable e) {
               handleException(handler, new ResponseHandlerFailureTransportException(e));
             }
           });
 }
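  // Checks the defaults of a scaling pool and that keep_alive/min/size updates
  // are applied in place without replacing the executor.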
  public void testScalingExecutorType() throws InterruptedException {
    String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
    ThreadPool threadPool = null;
    try {
      Settings nodeSettings =
          Settings.builder()
              .put("threadpool." + threadPoolName + ".size", 10)
              .put("node.name", "testScalingExecutorType")
              .build();
      threadPool = new ThreadPool(nodeSettings);
      ClusterSettings clusterSettings =
          new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
      threadPool.setClusterSettings(clusterSettings);
      final int expectedMinimum = "generic".equals(threadPoolName) ? 4 : 1;
      assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedMinimum));
      assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10));
      final long expectedKeepAlive = "generic".equals(threadPoolName) ? 30 : 300;
      assertThat(
          info(threadPool, threadPoolName).getKeepAlive().seconds(), equalTo(expectedKeepAlive));
      assertEquals(
          info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
      assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));

      // Change settings that don't require replacing the pool
      Executor oldExecutor = threadPool.executor(threadPoolName);
      clusterSettings.applySettings(
          Settings.builder()
              .put("threadpool." + threadPoolName + ".keep_alive", "10m")
              .put("threadpool." + threadPoolName + ".min", "2")
              .put("threadpool." + threadPoolName + ".size", "15")
              .build());
      assertEquals(
          info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
      assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(),
          equalTo(2));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(),
          equalTo(15));
      assertThat(info(threadPool, threadPoolName).getMin(), equalTo(2));
      assertThat(info(threadPool, threadPoolName).getMax(), equalTo(15));
      // Make sure keep alive value changed
      assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(10L));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName))
              .getKeepAliveTime(TimeUnit.MINUTES),
          equalTo(10L));
      assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor));
    } finally {
      terminateThreadPoolIfNeeded(threadPool);
    }
  }
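  // Checks that size updates to a fixed pool are applied in place, that keep
  // alive does not apply, and that resetting to defaults reuses the pool sizes.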
  public void testFixedExecutorType() throws InterruptedException {
    String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED);
    ThreadPool threadPool = null;

    try {
      Settings nodeSettings = Settings.builder().put("node.name", "testFixedExecutorType").build();
      threadPool = new ThreadPool(nodeSettings);
      ClusterSettings clusterSettings =
          new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
      threadPool.setClusterSettings(clusterSettings);
      assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
      Settings settings =
          clusterSettings.applySettings(
              Settings.builder().put("threadpool." + threadPoolName + ".size", "15").build());

      int expectedSize = getExpectedThreadPoolSize(nodeSettings, threadPoolName, 15);
      assertEquals(
          info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
      assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(),
          equalTo(expectedSize));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(),
          equalTo(expectedSize));
      assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize));
      assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize));
      // keep alive does not apply to fixed thread pools
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName))
              .getKeepAliveTime(TimeUnit.MINUTES),
          equalTo(0L));

      // Reset the settings back to the defaults
      settings = clusterSettings.applySettings(Settings.EMPTY);
      assertEquals(
          info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
      // Make sure keep alive value is not used
      assertThat(info(threadPool, threadPoolName).getKeepAlive(), nullValue());
      // Make sure the pool size values were reused
      assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize));
      assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize));
      assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(),
          equalTo(expectedSize));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(),
          equalTo(expectedSize));

      // Change size
      Executor oldExecutor = threadPool.executor(threadPoolName);
      settings =
          clusterSettings.applySettings(
              Settings.builder()
                  .put(settings)
                  .put("threadpool." + threadPoolName + ".size", "10")
                  .build());

      expectedSize = getExpectedThreadPoolSize(nodeSettings, threadPoolName, 10);

      // Make sure size values changed
      assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize));
      assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(),
          equalTo(expectedSize));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(),
          equalTo(expectedSize));
      // Make sure executor didn't change
      assertEquals(
          info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
      assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor));

      // Change queue capacity
      clusterSettings.applySettings(
          Settings.builder()
              .put(settings)
              .put("threadpool." + threadPoolName + ".queue", "500")
              .build());
    } finally {
      terminateThreadPoolIfNeeded(threadPool);
    }
  }
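  // Integration test: updates the search pool through the cluster settings API,
  // verifies a type change replaces the executor while in-flight tasks keep
  // running, and checks that node info reports the new type.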
  @Test(timeout = 20000)
  public void testUpdatingThreadPoolSettings() throws Exception {
    internalCluster().startNodesAsync(2).get();
    ThreadPool threadPool = internalCluster().getDataNodeInstance(ThreadPool.class);
    // Check the default keep_alive, then make sure a settings update changes it
    assertThat(
        ((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES),
        equalTo(5L));
    client()
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(settingsBuilder().put("threadpool.search.keep_alive", "10m").build())
        .execute()
        .actionGet();
    assertThat(
        ((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES),
        equalTo(10L));

    // Make sure that threads continue executing when executor is replaced
    final CyclicBarrier barrier = new CyclicBarrier(2);
    Executor oldExecutor = threadPool.executor(Names.SEARCH);
    threadPool
        .executor(Names.SEARCH)
        .execute(
            new Runnable() {
              @Override
              public void run() {
                try {
                  barrier.await();
                } catch (InterruptedException ex) {
                  Thread.currentThread().interrupt();
                } catch (BrokenBarrierException ex) {
                  //
                }
              }
            });
    client()
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build())
        .execute()
        .actionGet();
    assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor)));
    assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true));
    assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true));
    assertThat(((ThreadPoolExecutor) oldExecutor).isTerminated(), equalTo(false));
    barrier.await();

    // Make sure the new executor is functional
    threadPool
        .executor(Names.SEARCH)
        .execute(
            new Runnable() {
              @Override
              public void run() {
                try {
                  barrier.await();
                } catch (InterruptedException ex) {
                  Thread.currentThread().interrupt();
                } catch (BrokenBarrierException ex) {
                  //
                }
              }
            });
    client()
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build())
        .execute()
        .actionGet();
    barrier.await();
    Thread.sleep(200);

    // Check that node info is correct
    NodesInfoResponse nodesInfoResponse =
        client().admin().cluster().prepareNodesInfo().all().execute().actionGet();
    for (int i = 0; i < 2; i++) {
      NodeInfo nodeInfo = nodesInfoResponse.getNodes()[i];
      boolean found = false;
      for (ThreadPool.Info info : nodeInfo.getThreadPool()) {
        if (info.getName().equals(Names.SEARCH)) {
          assertThat(info.getType(), equalTo("fixed"));
          found = true;
          break;
        }
      }
      assertThat(found, equalTo(true));

      Map<String, Object> poolMap =
          getPoolSettingsThroughJson(nodeInfo.getThreadPool(), Names.SEARCH);
    }
  }
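    // Collects every indexed field whose norms are configured for eager loading
    // and touches their norms on the warmer executor in a single task, returning
    // a TerminationHandle that waits for that task.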
    @Override
    public TerminationHandle warmNewReaders(
        final IndexShard indexShard,
        IndexMetaData indexMetaData,
        final WarmerContext context,
        ThreadPool threadPool) {
      final Loading defaultLoading =
          Loading.parse(indexMetaData.settings().get(NORMS_LOADING_KEY), Loading.LAZY);
      final MapperService mapperService = indexShard.mapperService();
      final ObjectSet<String> warmUp = new ObjectHashSet<>();
      for (DocumentMapper docMapper : mapperService.docMappers(false)) {
        for (FieldMapper fieldMapper : docMapper.mappers()) {
          final String indexName = fieldMapper.fieldType().names().indexName();
          Loading normsLoading = fieldMapper.fieldType().normsLoading();
          if (normsLoading == null) {
            normsLoading = defaultLoading;
          }
          if (fieldMapper.fieldType().indexOptions() != IndexOptions.NONE
              && !fieldMapper.fieldType().omitNorms()
              && normsLoading == Loading.EAGER) {
            warmUp.add(indexName);
          }
        }
      }

      final CountDownLatch latch = new CountDownLatch(1);
      // Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single
      // task
      threadPool
          .executor(executor())
          .execute(
              new Runnable() {
                @Override
                public void run() {
                  try {
                    for (ObjectCursor<String> stringObjectCursor : warmUp) {
                      final String indexName = stringObjectCursor.value;
                      final long start = System.nanoTime();
                      for (final LeafReaderContext ctx : context.searcher().reader().leaves()) {
                        final NumericDocValues values = ctx.reader().getNormValues(indexName);
                        if (values != null) {
                          values.get(0);
                        }
                      }
                      if (indexShard.warmerService().logger().isTraceEnabled()) {
                        indexShard
                            .warmerService()
                            .logger()
                            .trace(
                                "warmed norms for [{}], took [{}]",
                                indexName,
                                TimeValue.timeValueNanos(System.nanoTime() - start));
                      }
                    }
                  } catch (Throwable t) {
                    indexShard.warmerService().logger().warn("failed to warm-up norms", t);
                  } finally {
                    latch.countDown();
                  }
                }
              });

      return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
          latch.await();
        }
      };
    }
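    // Runs each warmer query registered in the index metadata on the warmer
    // executor; each query executes exactly once, on the top-level searcher
    // when its result can be cached, otherwise on the new-reader pass.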
    public TerminationHandle internalWarm(
        final IndexShard indexShard,
        final IndexMetaData indexMetaData,
        final IndicesWarmer.WarmerContext warmerContext,
        ThreadPool threadPool,
        final boolean top) {
      IndexWarmersMetaData custom = indexMetaData.custom(IndexWarmersMetaData.TYPE);
      if (custom == null) {
        return TerminationHandle.NO_WAIT;
      }
      final Executor executor = threadPool.executor(executor());
      final CountDownLatch latch = new CountDownLatch(custom.entries().size());
      for (final IndexWarmersMetaData.Entry entry : custom.entries()) {
        executor.execute(
            new Runnable() {

              @Override
              public void run() {
                SearchContext context = null;
                try {
                  long now = System.nanoTime();
                  ShardSearchRequest request =
                      new ShardSearchLocalRequest(
                          indexShard.shardId(),
                          indexMetaData.numberOfShards(),
                          SearchType.QUERY_THEN_FETCH,
                          entry.source(),
                          entry.types(),
                          entry.queryCache());
                  context = createContext(request, warmerContext.searcher());
                  // if we sort, we need to execute the query to sort on it and load the
                  // relevant field data; if not, we might as well set size=0 (and cache if needed)
                  if (context.sort() == null) {
                    context.size(0);
                  }
                  boolean canCache = indicesQueryCache.canCache(request, context);
                  // only execute the query at the level where its result is useful: when it
                  // can be cached, only the top-level searcher caches properly, and when it
                  // can't be cached, it was already executed on the non-top readers
                  if (canCache != top) {
                    return;
                  }
                  loadOrExecuteQueryPhase(request, context, queryPhase);
                  long took = System.nanoTime() - now;
                  if (indexShard.warmerService().logger().isTraceEnabled()) {
                    indexShard
                        .warmerService()
                        .logger()
                        .trace(
                            "warmed [{}], took [{}]", entry.name(), TimeValue.timeValueNanos(took));
                  }
                } catch (Throwable t) {
                  indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name());
                } finally {
                  try {
                    if (context != null) {
                      freeContext(context.id());
                      cleanContext(context);
                    }
                  } finally {
                    latch.countDown();
                  }
                }
              }
            });
      }
      return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
          latch.await();
        }
      };
    }
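 // Warms global ordinals for every field whose field data is configured for
 // eager global ordinal loading, one task per field on the warmer executor.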
 @Override
 public TerminationHandle warmTopReader(
     final IndexShard indexShard,
     IndexMetaData indexMetaData,
     final WarmerContext context,
     ThreadPool threadPool) {
   final MapperService mapperService = indexShard.mapperService();
   final Map<String, MappedFieldType> warmUpGlobalOrdinals = new HashMap<>();
   for (DocumentMapper docMapper : mapperService.docMappers(false)) {
     for (FieldMapper fieldMapper : docMapper.mappers()) {
       final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
       if (fieldDataType == null) {
         continue;
       }
       if (fieldDataType.getLoading() != Loading.EAGER_GLOBAL_ORDINALS) {
         continue;
       }
       final String indexName = fieldMapper.fieldType().names().indexName();
       if (warmUpGlobalOrdinals.containsKey(indexName)) {
         continue;
       }
       warmUpGlobalOrdinals.put(indexName, fieldMapper.fieldType());
     }
   }
   final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
   final Executor executor = threadPool.executor(executor());
   final CountDownLatch latch = new CountDownLatch(warmUpGlobalOrdinals.size());
   for (final MappedFieldType fieldType : warmUpGlobalOrdinals.values()) {
     executor.execute(
         new Runnable() {
           @Override
           public void run() {
             try {
               final long start = System.nanoTime();
               IndexFieldData.Global ifd = indexFieldDataService.getForField(fieldType);
               ifd.loadGlobal(context.reader());
               if (indexShard.warmerService().logger().isTraceEnabled()) {
                 indexShard
                     .warmerService()
                     .logger()
                     .trace(
                         "warmed global ordinals for [{}], took [{}]",
                         fieldType.names().fullName(),
                         TimeValue.timeValueNanos(System.nanoTime() - start));
               }
             } catch (Throwable t) {
               indexShard
                   .warmerService()
                   .logger()
                   .warn(
                       "failed to warm-up global ordinals for [{}]",
                       t,
                       fieldType.names().fullName());
             } finally {
               latch.countDown();
             }
           }
         });
   }
   return new TerminationHandle() {
     @Override
     public void awaitTermination() throws InterruptedException {
       latch.await();
     }
   };
 }
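  // Local-transport variant: accounts for the request bytes in the in-flight
  // requests breaker, then dispatches to the registered handler, inline for the
  // SAME executor, otherwise via an AbstractRunnable that honors the handler's
  // force-execution flag.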
  private void handleRequest(
      StreamInput stream,
      long requestId,
      int messageLengthBytes,
      LocalTransport sourceTransport,
      Version version)
      throws Exception {
    stream = new NamedWriteableAwareStreamInput(stream, namedWriteableRegistry);
    final String action = stream.readString();
    transportServiceAdapter.onRequestReceived(requestId, action);
    inFlightRequestsBreaker()
        .addEstimateBytesAndMaybeBreak(messageLengthBytes, "<transport_request>");
    final LocalTransportChannel transportChannel =
        new LocalTransportChannel(
            this,
            transportServiceAdapter,
            sourceTransport,
            action,
            requestId,
            version,
            messageLengthBytes);
    try {
      final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action);
      if (reg == null) {
        throw new ActionNotFoundTransportException("Action [" + action + "] not found");
      }
      final TransportRequest request = reg.newRequest();
      request.remoteAddress(sourceTransport.boundAddress.publishAddress());
      request.readFrom(stream);
      if (ThreadPool.Names.SAME.equals(reg.getExecutor())) {
        //noinspection unchecked
        reg.processMessageReceived(request, transportChannel);
      } else {
        threadPool
            .executor(reg.getExecutor())
            .execute(
                new AbstractRunnable() {
                  @Override
                  protected void doRun() throws Exception {
                    //noinspection unchecked
                    reg.processMessageReceived(request, transportChannel);
                  }

                  @Override
                  public boolean isForceExecution() {
                    return reg.isForceExecution();
                  }

                  @Override
                  public void onFailure(Throwable e) {
                    if (lifecycleState() == Lifecycle.State.STARTED) {
                      // we can only send a response if the transport is started
                      try {
                        transportChannel.sendResponse(e);
                      } catch (Throwable e1) {
                        logger.warn(
                            "Failed to send error message back to client for action [{}]",
                            e1,
                            action);
                        logger.warn("Actual Exception", e);
                      }
                    }
                  }
                });
      }
    } catch (Throwable e) {
      try {
        transportChannel.sendResponse(e);
      } catch (Throwable e1) {
        logger.warn("Failed to send error message back to client for action [{}]", e, action);
        logger.warn("Actual Exception", e1);
      }
    }
  }