public void testThreadContextRestored() throws Exception {
   String header = randomAsciiOfLength(5);
   threadPool.getThreadContext().putHeader("test", header);
   AtomicBoolean called = new AtomicBoolean();
   sourceWithMockedRemoteCall("start_ok.json")
       .doStart(
           r -> {
             assertEquals(header, threadPool.getThreadContext().getHeader("test"));
             called.set(true);
           });
   assertTrue(called.get());
 }
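   // Registers the response handler under a new request id before sending; on any failure the
   // holder is removed again and the handler is notified asynchronously on the generic pool.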
  public <T extends TransportResponse> void sendRequest(
      final DiscoveryNode node,
      final String action,
      final TransportRequest request,
      final TransportRequestOptions options,
      TransportResponseHandler<T> handler) {
    if (node == null) {
      throw new ElasticsearchIllegalStateException("can't send request to a null node");
    }
    final long requestId = newRequestId();
    TimeoutHandler timeoutHandler = null;
    try {
      clientHandlers.put(requestId, new RequestHolder<>(handler, node, action, timeoutHandler));
      if (started.get() == false) {
         // if we are not started, the exception handling below will remove the RequestHolder
         // again and call the handler to notify the caller. It will only notify if the stop
         // logic hasn't already done so.
         throw new TransportException("TransportService is stopped, can't send request");
      }
      if (options.timeout() != null) {
        timeoutHandler = new TimeoutHandler(requestId);
        timeoutHandler.future =
            threadPool.schedule(options.timeout(), ThreadPool.Names.GENERIC, timeoutHandler);
      }
      transport.sendRequest(node, requestId, action, request, options);
    } catch (final Throwable e) {
       // this usually happens either because we failed to connect to the node
       // or because we failed to serialize the message
      final RequestHolder holderToNotify = clientHandlers.remove(requestId);
       // if the scheduler raised an EsRejectedExecutionException (due to shutdown), we may have
       // a timeout handler but no future
      if (timeoutHandler != null) {
        FutureUtils.cancel(timeoutHandler.future);
      }

       // If holderToNotify == null, the handler has already been taken care of.
      if (holderToNotify != null) {
         // notify the handler that an exception happened, but on a different thread so that
         // handlers don't have to worry about stack overflows
        final SendRequestTransportException sendRequestException =
            new SendRequestTransportException(node, action, e);
        threadPool
            .executor(ThreadPool.Names.GENERIC)
            .execute(
                new Runnable() {
                  @Override
                  public void run() {
                    holderToNotify.handler().handleException(sendRequestException);
                  }
                });
      }
    }
  }
 @Override
 public BloomFilter filter(IndexReader reader, String fieldName, boolean asyncLoad) {
   int currentNumDocs = reader.numDocs();
   if (currentNumDocs == 0) {
     return BloomFilter.EMPTY;
   }
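    // per-reader cache of field name -> bloom filter entry, created lazily with
    // double-checked locking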
   ConcurrentMap<String, BloomFilterEntry> fieldCache = cache.get(reader.getFieldCacheKey());
   if (fieldCache == null) {
     synchronized (creationMutex) {
       fieldCache = cache.get(reader.getFieldCacheKey());
       if (fieldCache == null) {
         fieldCache = ConcurrentCollections.newConcurrentMap();
         cache.put(reader.getFieldCacheKey(), fieldCache);
       }
     }
   }
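    // per-field entry, also double-checked; the filter itself is loaded either asynchronously on
    // the cached thread pool or inline on the calling thread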
   BloomFilterEntry filter = fieldCache.get(fieldName);
   if (filter == null) {
     synchronized (fieldCache) {
       filter = fieldCache.get(fieldName);
       if (filter == null) {
         filter = new BloomFilterEntry(reader.numDocs(), BloomFilter.NONE);
         filter.loading.set(true);
         fieldCache.put(fieldName, filter);
         // now, do the async load of it...
         BloomFilterLoader loader = new BloomFilterLoader(reader, fieldName);
         if (asyncLoad) {
           threadPool.cached().execute(loader);
         } else {
           loader.run();
           filter = fieldCache.get(fieldName);
         }
       }
     }
   }
    // if there are too many deletes (live docs drop below 60% of the docs the filter was built
    // with), reload the bloom filter so it stays effective
    if (filter.numDocs > 1000 && ((double) currentNumDocs / filter.numDocs) < 0.6) {
     if (filter.loading.compareAndSet(false, true)) {
       // do the async loading
       BloomFilterLoader loader = new BloomFilterLoader(reader, fieldName);
       if (asyncLoad) {
         threadPool.cached().execute(loader);
       } else {
         loader.run();
         filter = fieldCache.get(fieldName);
       }
     }
   }
   return filter.filter;
 }
  public void testScalingExecutorType() throws InterruptedException {
    String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING);
    ThreadPool threadPool = null;
    try {
      Settings nodeSettings =
          Settings.builder()
              .put("threadpool." + threadPoolName + ".size", 10)
              .put("node.name", "testScalingExecutorType")
              .build();
      threadPool = new ThreadPool(nodeSettings);
      ClusterSettings clusterSettings =
          new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
      threadPool.setClusterSettings(clusterSettings);
      final int expectedMinimum = "generic".equals(threadPoolName) ? 4 : 1;
      assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedMinimum));
      assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10));
      final long expectedKeepAlive = "generic".equals(threadPoolName) ? 30 : 300;
      assertThat(
          info(threadPool, threadPoolName).getKeepAlive().seconds(), equalTo(expectedKeepAlive));
      assertEquals(
          info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
      assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));

       // Change settings that don't require pool replacement
      Executor oldExecutor = threadPool.executor(threadPoolName);
      clusterSettings.applySettings(
          Settings.builder()
              .put("threadpool." + threadPoolName + ".keep_alive", "10m")
              .put("threadpool." + threadPoolName + ".min", "2")
              .put("threadpool." + threadPoolName + ".size", "15")
              .build());
      assertEquals(
          info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
      assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(),
          equalTo(2));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(),
          equalTo(15));
      assertThat(info(threadPool, threadPoolName).getMin(), equalTo(2));
      assertThat(info(threadPool, threadPoolName).getMax(), equalTo(15));
       // Make sure the keep-alive value changed
      assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(10L));
      assertThat(
          ((EsThreadPoolExecutor) threadPool.executor(threadPoolName))
              .getKeepAliveTime(TimeUnit.MINUTES),
          equalTo(10L));
      assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor));
    } finally {
      terminateThreadPoolIfNeeded(threadPool);
    }
  }
 private synchronized void scheduleSnapshotIfNeeded() {
   if (!shardGateway.requiresSnapshot()) {
     return;
   }
   if (!shardGateway.requiresSnapshotScheduling()) {
     return;
   }
   if (!indexShard.routingEntry().primary()) {
     // we only do snapshotting on the primary shard
     return;
   }
   if (!indexShard.routingEntry().started()) {
     // we only schedule when the cluster assumes we have started
     return;
   }
   if (snapshotScheduleFuture != null) {
     // we are already scheduling this one, ignore
     return;
   }
   if (snapshotInterval.millis() != -1) {
      // we need to schedule a snapshot
     if (logger.isDebugEnabled()) {
       logger.debug("scheduling snapshot every [{}]", snapshotInterval);
     }
     snapshotScheduleFuture =
         threadPool.schedule(snapshotInterval, ThreadPool.Names.SNAPSHOT, snapshotRunnable);
   }
 }
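  // Stops the transport and fails any still-registered request handlers with a
  // TransportException, dispatched on the generic thread pool.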
 @Override
 protected void doStop() throws ElasticsearchException {
   final boolean setStopped = started.compareAndSet(true, false);
   assert setStopped : "service has already been stopped";
   try {
     transport.stop();
   } finally {
      // in case the transport is not connected to our local node (and thus not cleaned up on
      // node disconnect), make sure to clean up any leftover in-flight handlers
     for (Map.Entry<Long, RequestHolder> entry : clientHandlers.entrySet()) {
       final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey());
       if (holderToNotify != null) {
          // notify the handler that an exception happened, but on a different thread so that
          // handlers don't have to worry about stack overflows
         threadPool
             .generic()
             .execute(
                 new Runnable() {
                   @Override
                   public void run() {
                     holderToNotify
                         .handler()
                         .handleException(
                             new TransportException(
                                 "transport stopped, action: " + holderToNotify.action()));
                   }
                 });
       }
     }
   }
 }
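  // On a transport-level disconnect, either verify the connection by reconnecting and
  // rescheduling the ping, or report the node as failed.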
 private void handleTransportDisconnect(DiscoveryNode node) {
   if (!latestNodes.nodeExists(node.id())) {
     return;
   }
   NodeFD nodeFD = nodesFD.remove(node);
   if (nodeFD == null) {
     return;
   }
   if (!running) {
     return;
   }
   nodeFD.running = false;
   if (connectOnNetworkDisconnect) {
     try {
       transportService.connectToNode(node);
       nodesFD.put(node, new NodeFD());
       threadPool.schedule(pingInterval, ThreadPool.Names.SAME, new SendPingRequest(node));
     } catch (Exception e) {
       logger.trace("[node  ] [{}] transport disconnected (with verified connect)", node);
       notifyNodeFailure(node, "transport disconnected (with verified connect)");
     }
   } else {
     logger.trace("[node  ] [{}] transport disconnected", node);
     notifyNodeFailure(node, "transport disconnected");
   }
 }
 @Inject
 public LocalTransport(
     Settings settings,
     ThreadPool threadPool,
     Version version,
     NamedWriteableRegistry namedWriteableRegistry,
     CircuitBreakerService circuitBreakerService) {
   super(settings);
   this.threadPool = threadPool;
   this.version = version;
   int workerCount =
       this.settings.getAsInt(
           TRANSPORT_LOCAL_WORKERS, EsExecutors.boundedNumberOfProcessors(settings));
   int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1);
   logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize);
   final ThreadFactory threadFactory =
       EsExecutors.daemonThreadFactory(this.settings, LOCAL_TRANSPORT_THREAD_NAME_PREFIX);
   this.workers =
       EsExecutors.newFixed(
           LOCAL_TRANSPORT_THREAD_NAME_PREFIX,
           workerCount,
           queueSize,
           threadFactory,
           threadPool.getThreadContext());
   this.namedWriteableRegistry = namedWriteableRegistry;
   this.circuitBreakerService = circuitBreakerService;
 }
 @AfterClass
 public static void stopThreadPool() {
   if (threadPool != null) {
     threadPool.shutdownNow();
     threadPool = null;
   }
 }
 AckCountDownListener(
     AckedClusterStateTaskListener ackedTaskListener,
     long clusterStateVersion,
     DiscoveryNodes nodes,
     ThreadPool threadPool) {
   this.ackedTaskListener = ackedTaskListener;
   this.clusterStateVersion = clusterStateVersion;
   this.nodes = nodes;
   int countDown = 0;
   for (DiscoveryNode node : nodes) {
     if (ackedTaskListener.mustAck(node)) {
       countDown++;
     }
   }
   // we always wait for at least 1 node (the master)
   countDown = Math.max(1, countDown);
   logger.trace(
       "expecting {} acknowledgements for cluster_state update (version: {})",
       countDown,
       clusterStateVersion);
   this.countDown = new CountDown(countDown);
   this.ackTimeoutCallback =
       threadPool.schedule(
           ackedTaskListener.ackTimeout(),
           ThreadPool.Names.GENERIC,
           new Runnable() {
             @Override
             public void run() {
               onTimeout();
             }
           });
 }
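  // Deserializes the response and hands it to the handler, inline when the handler asks for the
  // SAME executor and on the handler's thread pool otherwise.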
 protected void handleResponse(
     Channel channel, StreamInput buffer, final TransportResponseHandler handler) {
   final TransportResponse response = handler.newInstance();
   response.remoteAddress(
       new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress()));
   try {
     response.readFrom(buffer);
   } catch (Throwable e) {
     handleException(
         handler,
         new TransportSerializationException(
             "Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
     return;
   }
   try {
     if (handler.executor() == ThreadPool.Names.SAME) {
       //noinspection unchecked
       handler.handleResponse(response);
     } else {
       threadPool.executor(handler.executor()).execute(new ResponseHandler(handler, response));
     }
   } catch (Throwable e) {
     handleException(handler, new ResponseHandlerFailureTransportException(e));
   }
 }
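   // Reads the action name and request off the wire and dispatches to the registered handler;
   // any failure is sent back to the client over the transport channel.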
  protected String handleRequest(
      Channel channel, StreamInput buffer, long requestId, Version version) throws IOException {
    final String action = buffer.readString();

    final NettyTransportChannel transportChannel =
        new NettyTransportChannel(transport, action, channel, requestId, version);
    try {
      final TransportRequestHandler handler = transportServiceAdapter.handler(action);
      if (handler == null) {
        throw new ActionNotFoundTransportException(action);
      }
      final TransportRequest request = handler.newInstance();
      request.remoteAddress(
          new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress()));
      request.readFrom(buffer);
      if (handler.executor() == ThreadPool.Names.SAME) {
        //noinspection unchecked
        handler.messageReceived(request, transportChannel);
      } else {
        threadPool
            .executor(handler.executor())
            .execute(new RequestHandler(handler, request, transportChannel, action));
      }
    } catch (Throwable e) {
      try {
        transportChannel.sendResponse(e);
      } catch (IOException e1) {
        logger.warn("Failed to send error message back to client for action [" + action + "]", e);
        logger.warn("Actual Exception", e1);
      }
    }
    return action;
  }
 private static void execute(
     Settings settings,
     BiFunction<Runnable, TimeValue, ScheduledFuture<?>> scheduler,
     Consumer<Throwable> consumer,
     boolean constructionShouldFail,
     Runnable asserts)
     throws InterruptedException {
   assert constructionShouldFail == (consumer != null);
   assert constructionShouldFail == (asserts == null);
   ThreadPool threadPool = null;
   try {
     threadPool =
         new ThreadPool(JvmGcMonitorServiceSettingsTests.class.getCanonicalName()) {
           @Override
           public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, TimeValue interval) {
             return scheduler.apply(command, interval);
           }
         };
     try {
       JvmGcMonitorService service = new JvmGcMonitorService(settings, threadPool);
       if (constructionShouldFail) {
         fail("construction of jvm gc service should have failed");
       }
       service.doStart();
       asserts.run();
       service.doStop();
     } catch (Throwable t) {
       consumer.accept(t);
     }
   } finally {
     ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
   }
 }
  @Inject
  public TransportClientNodesService(
      Settings settings,
      ClusterName clusterName,
      TransportService transportService,
      ThreadPool threadPool,
      Headers headers,
      Version version) {
    super(settings);
    this.clusterName = clusterName;
    this.transportService = transportService;
    this.threadPool = threadPool;
    this.minCompatibilityVersion = version.minimumCompatibilityVersion();
    this.headers = headers;

    this.nodesSamplerInterval =
        this.settings.getAsTime("client.transport.nodes_sampler_interval", timeValueSeconds(5));
    this.pingTimeout =
        this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis();
    this.ignoreClusterName =
        this.settings.getAsBoolean("client.transport.ignore_cluster_name", false);

    if (logger.isDebugEnabled()) {
      logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
    }

    if (this.settings.getAsBoolean("client.transport.sniff", false)) {
      this.nodesSampler = new SniffNodesSampler();
    } else {
      this.nodesSampler = new SimpleNodeSampler();
    }
    this.nodesSamplerFuture =
        threadPool.schedule(
            nodesSamplerInterval, ThreadPool.Names.GENERIC, new ScheduledNodeSampler());
  }
  @AfterMethod
  public void tearDown() {
    serviceA.close();
    serviceB.close();

    threadPool.shutdown();
  }
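  // Wraps the error as a RemoteTransportException if needed and invokes the handler, inline for
  // the SAME executor or on the handler's thread pool otherwise.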
 private void handleException(final TransportResponseHandler handler, Throwable error) {
   if (!(error instanceof RemoteTransportException)) {
     error = new RemoteTransportException(error.getMessage(), error);
   }
   final RemoteTransportException rtx = (RemoteTransportException) error;
   if (handler.executor() == ThreadPool.Names.SAME) {
     try {
       handler.handleException(rtx);
     } catch (Throwable e) {
       logger.error("failed to handle exception response [{}]", e, handler);
     }
   } else {
     threadPool
         .executor(handler.executor())
         .execute(
             new Runnable() {
               @Override
               public void run() {
                 try {
                   handler.handleException(rtx);
                 } catch (Throwable e) {
                   logger.error("failed to handle exception response [{}]", e, handler);
                 }
               }
             });
   }
 }
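  // Closes (and optionally deletes) every shard in parallel on the cached thread pool and waits
  // for all of them to finish before removing the lifecycle listener.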
 @Override
 public void close(final boolean delete) {
   try {
     Set<Integer> shardIds = shardIds();
     final CountDownLatch latch = new CountDownLatch(shardIds.size());
     for (final int shardId : shardIds) {
       threadPool
           .cached()
           .execute(
               new Runnable() {
                 @Override
                 public void run() {
                   try {
                     deleteShard(shardId, delete, !delete, delete);
                   } catch (Exception e) {
                     logger.warn("failed to close shard, delete [{}]", e, delete);
                   } finally {
                     latch.countDown();
                   }
                 }
               });
     }
     try {
       latch.await();
     } catch (InterruptedException e) {
        throw new ElasticSearchInterruptedException(
            "interrupted closing index [" + index().name() + "]", e);
     }
   } finally {
     indicesLifecycle.removeListener(cleanCacheOnIndicesLifecycleListener);
   }
 }
 @After
 public void tearDown() throws Exception {
   super.tearDown();
   serviceA.close();
   serviceB.close();
   threadPool.shutdown();
 }
 @Override
 protected void doStart() throws ElasticSearchException {
   this.clusterState = newClusterStateBuilder().blocks(initialBlocks).build();
   this.updateTasksExecutor =
       newSingleThreadExecutor(daemonThreadFactory(settings, "clusterService#updateTask"));
   this.reconnectToNodes =
       threadPool.scheduleWithFixedDelay(new ReconnectToNodes(), reconnectInterval);
 }
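  // A minimal sketch, not taken from the original class, of the matching stop logic: the
  // fixed-delay reconnect task scheduled above is cancelled with the null-safe
  // FutureUtils.cancel helper used elsewhere in these examples, and the single-threaded
  // update-task executor is shut down. Method name and exception type simply mirror doStart.
  @Override
  protected void doStop() throws ElasticSearchException {
    FutureUtils.cancel(this.reconnectToNodes);
    updateTasksExecutor.shutdown();
  }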
    /** Builds a new instance of the transport client. */
    public TransportClient build() {
      Settings settings = InternalSettingsPreparer.prepareSettings(this.settings);
      settings =
          settingsBuilder()
               .put(
                   NettyTransport.PING_SCHEDULE,
                   "5s") // enable the transport's scheduled ping by default
              .put(settings)
              .put("network.server", false)
              .put("node.client", true)
              .put(CLIENT_TYPE_SETTING, CLIENT_TYPE)
              .build();

      PluginsService pluginsService = new PluginsService(settings, null, null, pluginClasses);
      this.settings = pluginsService.updatedSettings();

      Version version = Version.CURRENT;

      final ThreadPool threadPool = new ThreadPool(settings);
      final NetworkService networkService = new NetworkService(settings);
      final SettingsFilter settingsFilter = new SettingsFilter(settings);
      NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
      boolean success = false;
      try {
        ModulesBuilder modules = new ModulesBuilder();
        modules.add(new Version.Module(version));
         // plugin modules must be added here, before the others, or we can get crazy injection
         // errors...
        for (Module pluginModule : pluginsService.nodeModules()) {
          modules.add(pluginModule);
        }
        modules.add(new PluginsModule(pluginsService));
        modules.add(new SettingsModule(this.settings, settingsFilter));
        modules.add(new NetworkModule(networkService, this.settings, true, namedWriteableRegistry));
        modules.add(new ClusterNameModule(this.settings));
        modules.add(new ThreadPoolModule(threadPool));
        modules.add(
            new SearchModule(settings, namedWriteableRegistry) {
              @Override
              protected void configure() {
                // noop
              }
            });
        modules.add(new ActionModule(true));
        modules.add(new CircuitBreakerModule(this.settings));

        pluginsService.processModules(modules);

        Injector injector = modules.createInjector();
        injector.getInstance(TransportService.class).start();
        TransportClient transportClient = new TransportClient(injector);
        success = true;
        return transportClient;
      } finally {
        if (!success) {
          ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
        }
      }
    }
  @Inject
  public SearchService(
      Settings settings,
      ClusterService clusterService,
      IndicesService indicesService,
      IndicesWarmer indicesWarmer,
      ThreadPool threadPool,
      ScriptService scriptService,
      PageCacheRecycler pageCacheRecycler,
      BigArrays bigArrays,
      DfsPhase dfsPhase,
      QueryPhase queryPhase,
      FetchPhase fetchPhase,
      IndicesQueryCache indicesQueryCache) {
    super(settings);
    this.threadPool = threadPool;
    this.clusterService = clusterService;
    this.indicesService = indicesService;
    indicesService
        .indicesLifecycle()
        .addListener(
            new IndicesLifecycle.Listener() {

              @Override
              public void afterIndexDeleted(Index index, @IndexSettings Settings indexSettings) {
                 // once an index is deleted we can just clean up all of its pending search
                 // context information to release memory and let references to the filesystem go
                freeAllContextForIndex(index);
              }
            });
    this.indicesWarmer = indicesWarmer;
    this.scriptService = scriptService;
    this.pageCacheRecycler = pageCacheRecycler;
    this.bigArrays = bigArrays;
    this.dfsPhase = dfsPhase;
    this.queryPhase = queryPhase;
    this.fetchPhase = fetchPhase;
    this.indicesQueryCache = indicesQueryCache;

    TimeValue keepAliveInterval = settings.getAsTime(KEEPALIVE_INTERVAL_KEY, timeValueMinutes(1));
     // we can use 5 minutes here, since we make sure to clean up with search requests and when a
     // shard/index closes
    this.defaultKeepAlive = settings.getAsTime(DEFAULT_KEEPALIVE_KEY, timeValueMinutes(5)).millis();

    Map<String, SearchParseElement> elementParsers = new HashMap<>();
    elementParsers.putAll(dfsPhase.parseElements());
    elementParsers.putAll(queryPhase.parseElements());
    elementParsers.putAll(fetchPhase.parseElements());
    elementParsers.put("stats", new StatsGroupsParseElement());
    this.elementParsers = ImmutableMap.copyOf(elementParsers);

    this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval);

    this.indicesWarmer.addListener(new NormsWarmer());
    this.indicesWarmer.addListener(new FieldDataWarmer());
    this.indicesWarmer.addListener(new SearchWarmer());
  }
 private synchronized void onTaskCompletion() {
   if (mustReschedule()) {
     indexService.logger.debug("scheduling {} every {}", toString(), interval);
     this.scheduledFuture = threadPool.schedule(interval, getThreadPool(), BaseAsyncTask.this);
   } else {
     indexService.logger.debug("scheduled {} disabled", toString());
     this.scheduledFuture = null;
   }
 }
  public static void main(String[] args) throws Exception {
    ShardId shardId = new ShardId(new Index("index"), 1);
    Settings settings = EMPTY_SETTINGS;

    //        Store store = new RamStore(shardId, settings);
    Store store = new ByteBufferStore(shardId, settings, null, new ByteBufferCache(settings));
    //        Store store = new NioFsStore(shardId, settings);

    store.deleteContent();

    ThreadPool threadPool = new ScalingThreadPool();
    SnapshotDeletionPolicy deletionPolicy =
        new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, settings));
    Engine engine =
        new RobinEngine(
            shardId,
            settings,
            store,
            deletionPolicy,
            new MemoryTranslog(shardId, settings),
            new LogByteSizeMergePolicyProvider(store),
            new ConcurrentMergeSchedulerProvider(shardId, settings),
            new AnalysisService(shardId.index()),
            new SimilarityService(shardId.index()));
    engine.start();

    SimpleEngineBenchmark benchmark =
        new SimpleEngineBenchmark(store, engine)
            .numberOfContentItems(1000)
            .searcherThreads(50)
            .searcherIterations(10000)
            .writerThreads(10)
            .writerIterations(10000)
            .refreshSchedule(new TimeValue(1, TimeUnit.SECONDS))
            .flushSchedule(new TimeValue(1, TimeUnit.MINUTES))
            .create(false)
            .build();

    benchmark.run();

    engine.close();
    store.close();
    threadPool.shutdown();
  }
 IndexWarmer(Settings settings, ThreadPool threadPool, Listener... listeners) {
   super(settings);
   ArrayList<Listener> list = new ArrayList<>();
   final Executor executor = threadPool.executor(ThreadPool.Names.WARMER);
   list.add(new FieldDataWarmer(executor));
   for (Listener listener : listeners) {
     list.add(listener);
   }
   this.listeners = Collections.unmodifiableList(list);
 }
  public void testIndexingThreadPoolsMaxSize() throws InterruptedException {
    for (String name : new String[] {ThreadPool.Names.BULK, ThreadPool.Names.INDEX}) {
      ThreadPool threadPool = null;
      try {

        int maxSize = EsExecutors.boundedNumberOfProcessors(Settings.EMPTY);

        // try to create a too-big (maxSize+1) thread pool
        threadPool =
            new ThreadPool(
                Settings.builder()
                    .put("node.name", "testIndexingThreadPoolsMaxSize")
                    .put("threadpool." + name + ".size", maxSize + 1)
                    .build());

        // confirm it clipped us at the maxSize:
        assertEquals(
            maxSize, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize());

        ClusterSettings clusterSettings =
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        threadPool.setClusterSettings(clusterSettings);

        // update it to a tiny size:
        clusterSettings.applySettings(
            Settings.builder().put("threadpool." + name + ".size", 1).build());

        // confirm it worked:
        assertEquals(1, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize());

        // try to update to too-big size:
        clusterSettings.applySettings(
            Settings.builder().put("threadpool." + name + ".size", maxSize + 1).build());

        // confirm it clipped us at the maxSize:
        assertEquals(
            maxSize, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize());
      } finally {
        terminateThreadPoolIfNeeded(threadPool);
      }
    }
  }
    @Override
    public void clusterChanged(ClusterChangedEvent event) {
      if (!master && event.localNodeMaster()) {
        master = true;
        for (LocalNodeMasterListener listener : listeners) {
          Executor executor = threadPool.executor(listener.executorName());
          executor.execute(new OnMasterRunnable(listener));
        }
        return;
      }

      if (master && !event.localNodeMaster()) {
        master = false;
        for (LocalNodeMasterListener listener : listeners) {
          Executor executor = threadPool.executor(listener.executorName());
          executor.execute(new OffMasterRunnable(listener));
        }
      }
    }
 public void startRecovery(
     final IndexShard indexShard,
     final DiscoveryNode sourceNode,
     final RecoveryListener listener) {
   // create a new recovery status, and process...
   final long recoveryId =
       onGoingRecoveries.startRecovery(
           indexShard, sourceNode, listener, recoverySettings.activityTimeout());
   threadPool.generic().execute(new RecoveryRunner(recoveryId));
 }
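   // Verifies that changing a thread pool's type via cluster settings is rejected with an
   // IllegalArgumentException.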
  public void testUpdateSettingsCanNotChangeThreadPoolType() throws InterruptedException {
    String threadPoolName = randomThreadPoolName();
    ThreadPool.ThreadPoolType invalidThreadPoolType = randomIncorrectThreadPoolType(threadPoolName);
    ThreadPool.ThreadPoolType validThreadPoolType =
        ThreadPool.THREAD_POOL_TYPES.get(threadPoolName);
    ThreadPool threadPool = null;
    try {
      threadPool =
          new ThreadPool(
              Settings.builder()
                  .put("node.name", "testUpdateSettingsCanNotChangeThreadPoolType")
                  .build());
      ClusterSettings clusterSettings =
          new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
      threadPool.setClusterSettings(clusterSettings);

      clusterSettings.applySettings(
          Settings.builder()
              .put("threadpool." + threadPoolName + ".type", invalidThreadPoolType.getType())
              .build());
      fail("expected IllegalArgumentException");
    } catch (IllegalArgumentException e) {
      assertEquals(
          "illegal value can't update [threadpool.] from [{}] to [{"
              + threadPoolName
              + ".type="
              + invalidThreadPoolType.getType()
              + "}]",
          e.getMessage());
      assertThat(
          e.getCause().getMessage(),
          is(
              "setting threadpool."
                  + threadPoolName
                  + ".type to "
                  + invalidThreadPoolType.getType()
                  + " is not permitted; must be "
                  + validThreadPoolType.getType()));
    } finally {
      terminateThreadPoolIfNeeded(threadPool);
    }
  }
  private void performStateRecovery(boolean enforceRecoverAfterTime, String reason) {
    final Gateway.GatewayStateRecoveredListener recoveryListener = new GatewayRecoveryListener();

    if (enforceRecoverAfterTime && recoverAfterTime != null) {
      if (scheduledRecovery.compareAndSet(false, true)) {
        logger.info("delaying initial state recovery for [{}]. {}", recoverAfterTime, reason);
        threadPool.schedule(
            recoverAfterTime,
            ThreadPool.Names.GENERIC,
            () -> {
              if (recovered.compareAndSet(false, true)) {
                logger.info(
                    "recover_after_time [{}] elapsed. performing state recovery...",
                    recoverAfterTime);
                gateway.performStateRecovery(recoveryListener);
              }
            });
      }
    } else {
      if (recovered.compareAndSet(false, true)) {
        threadPool
            .generic()
            .execute(
                new AbstractRunnable() {
                  @Override
                  public void onFailure(Exception e) {
                    logger.warn("Recovery failed", e);
                    // we reset `recovered` in the listener; don't reset it here, otherwise there
                    // might be a race that resets it to false while a new recovery is already
                    // running
                    recoveryListener.onFailure("state recovery failed: " + e.getMessage());
                  }

                  @Override
                  protected void doRun() throws Exception {
                    gateway.performStateRecovery(recoveryListener);
                  }
                });
      }
    }
  }
  @AfterMethod
  public void tearDown() throws Exception {
    replicaEngine.close();
    storeReplica.close();

    engine.close();
    store.close();

    if (threadPool != null) {
      threadPool.shutdownNow();
    }
  }