/**
 * Returns a Thread pool for the RPC's to region replicas. Similar to Connection's thread pool.
 *
 * @param conf configuration supplying pool sizing; a value of 0 for either thread setting
 *     means "derive from available processors"
 * @return a daemon-threaded executor whose idle core threads are allowed to time out
 */
private ExecutorService getDefaultThreadPool(Configuration conf) {
  int maxThreads = conf.getInt("hbase.region.replica.replication.threads.max", 256);
  int coreThreads = conf.getInt("hbase.region.replica.replication.threads.core", 16);
  if (maxThreads == 0) {
    maxThreads = Runtime.getRuntime().availableProcessors() * 8;
  }
  if (coreThreads == 0) {
    coreThreads = Runtime.getRuntime().availableProcessors() * 8;
  }
  // Guard against misconfiguration: ThreadPoolExecutor's constructor throws
  // IllegalArgumentException when corePoolSize > maximumPoolSize (e.g. threads.max
  // configured below the default core size of 16).
  if (coreThreads > maxThreads) {
    coreThreads = maxThreads;
  }
  long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60);
  LinkedBlockingQueue<Runnable> workQueue =
      new LinkedBlockingQueue<Runnable>(
          maxThreads
              * conf.getInt(
                  HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
                  HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
  ThreadPoolExecutor tpe =
      new ThreadPoolExecutor(
          coreThreads,
          maxThreads,
          keepAliveTime,
          TimeUnit.SECONDS,
          workQueue,
          Threads.newDaemonThreadFactory(this.getClass().getSimpleName() + "-rpc-shared-"));
  // Let idle core threads die too, so the pool shrinks to zero when replication is quiet.
  tpe.allowCoreThreadTimeOut(true);
  return tpe;
}
private void addExecutorForVolume(final File volume) { ThreadFactory threadFactory = new ThreadFactory() { @Override public Thread newThread(Runnable r) { Thread t = new Thread(threadGroup, r); t.setName("Async RamDisk lazy persist worker for volume " + volume); return t; } }; ThreadPoolExecutor executor = new ThreadPoolExecutor( CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME, THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory); // This can reduce the number of running threads executor.allowCoreThreadTimeOut(true); executors.put(volume, executor); }
static {
  // Cap the pending-work queue size; Windows gets a much smaller cap.
  if (maxTasks > 120) maxTasks = 120;
  if (OSValidator.isWindows() && maxTasks > 10) maxTasks = 10;
  SDFSLogger.getLog().info("WriteCacheBuffer Pool List Size will be " + maxTasks);
  worksQueue = new LinkedBlockingQueue<Runnable>(maxTasks);
  executor =
      new ThreadPoolExecutor(
          Main.writeThreads,
          Main.writeThreads,
          10,
          TimeUnit.SECONDS,
          worksQueue,
          executionHandler);
  lworksQueue = new LinkedBlockingQueue<Runnable>(maxTasks);
  lexecutor =
      new ThreadPoolExecutor(
          Main.writeThreads,
          Main.writeThreads,
          10,
          TimeUnit.SECONDS,
          lworksQueue,
          lexecutionHandler);
  executor.allowCoreThreadTimeOut(true);
  // Fix: the secondary pool previously never reclaimed its idle core threads,
  // unlike its twin above; apply the same idle-timeout policy to both.
  lexecutor.allowCoreThreadTimeOut(true);
}
private void createBackgroundOperationPool() { int poolSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS); LOG.info("HiveServer2: Background operation thread pool size: " + poolSize); int poolQueueSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE); LOG.info("HiveServer2: Background operation thread wait queue size: " + poolQueueSize); long keepAliveTime = HiveConf.getTimeVar( hiveConf, ConfVars.HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME, TimeUnit.SECONDS); LOG.info( "HiveServer2: Background operation thread keepalive time: " + keepAliveTime + " seconds"); // Create a thread pool with #poolSize threads // Threads terminate when they are idle for more than the keepAliveTime // A bounded blocking queue is used to queue incoming operations, if #operations > poolSize String threadPoolName = "HiveServer2-Background-Pool"; backgroundOperationPool = new ThreadPoolExecutor( poolSize, poolSize, keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(poolQueueSize), new ThreadFactoryWithGarbageCleanup(threadPoolName)); backgroundOperationPool.allowCoreThreadTimeOut(true); checkInterval = HiveConf.getTimeVar( hiveConf, ConfVars.HIVE_SERVER2_SESSION_CHECK_INTERVAL, TimeUnit.MILLISECONDS); sessionTimeout = HiveConf.getTimeVar( hiveConf, ConfVars.HIVE_SERVER2_IDLE_SESSION_TIMEOUT, TimeUnit.MILLISECONDS); }
/**
 * Replaces the shared executor with a fresh fixed-size pool of {@code numThreads}
 * threads. Any previous service is torn down first; idle core threads of the new
 * pool are reclaimed after one second so an unused pool holds no threads.
 */
public static void setNumThreads(int numThreads) {
  cleanupService();
  _numThreads = numThreads;
  final ThreadPoolExecutor fixedPool =
      new ThreadPoolExecutor(
          numThreads, numThreads, 1L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
  fixedPool.allowCoreThreadTimeOut(true);
  _service = fixedPool;
}
/**
 * Base restore task: captures the configuration, backup file system and sleeper,
 * and builds a named download pool whose idle core threads are allowed to time out.
 */
public AbstractRestore(IConfiguration config, IBackupFileSystem fs, String name, Sleeper sleeper) {
  super(config);
  this.sleeper = sleeper;
  this.fs = fs;
  this.config = config;
  executor = new NamedThreadPoolExecutor(config.getMaxBackupDownloadThreads(), name);
  // Let the pool shrink to zero between restores.
  executor.allowCoreThreadTimeOut(true);
}
/**
 * Creates a new {@link ThreadPoolExecutor} ready to carry out work. All pools are pre-started by
 * default and will terminate after not receiving work for the argued timeout value.
 *
 * @param poolName the name of this thread pool.
 * @param poolSize the size of this thread pool.
 * @param poolPriority the priority of this thread pool.
 * @param timeout how long in minutes it takes for threads in this pool to timeout.
 * @return the newly constructed thread pool.
 */
public static ThreadPoolExecutor createThreadPool(
    String poolName, int poolSize, int poolPriority, long timeout) {
  final ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(poolSize);
  // Name and prioritize the workers, and install the rejection hook, before any work arrives.
  pool.setThreadFactory(new ThreadProvider(poolName, poolPriority, true));
  pool.setRejectedExecutionHandler(new ThreadPoolRejectedExecutionHook());
  // Idle threads — core ones included — die after the timeout; the pool is then
  // warmed up immediately so it is ready for work.
  pool.setKeepAliveTime(timeout, TimeUnit.MINUTES);
  pool.allowCoreThreadTimeOut(true);
  pool.prestartAllCoreThreads();
  return pool;
}
/**
 * Initializes the shared blocking-I/O executor with up to {@code aMaxThreads}
 * daemon threads; idle threads (core included) are reclaimed after one second.
 */
public static void initBIO(int aMaxThreads) {
  final ThreadPoolExecutor bioPool =
      new ThreadPoolExecutor(
          aMaxThreads,
          aMaxThreads,
          1L,
          TimeUnit.SECONDS,
          new LinkedBlockingQueue<>(),
          new DeamonThreadFactory("platypus-abio-", false));
  bioPool.allowCoreThreadTimeOut(true);
  bio = bioPool;
}
/**
 * Creates an executor for stream-consumer scan tasks. The pool grows on demand up
 * to 20 threads (the SynchronousQueue hands each task directly to a thread) and
 * shrinks back to zero after 60 idle seconds.
 */
private ExecutorService createScanExecutor(Id.Stream streamId) {
  final String namePrefix =
      String.format("stream-%s-%s-consumer-scanner-", streamId.getNamespaceId(), streamId.getId());
  final ThreadPoolExecutor scanExecutor =
      new ThreadPoolExecutor(
          1,
          20,
          60,
          TimeUnit.SECONDS,
          new SynchronousQueue<Runnable>(),
          Threads.newDaemonThreadFactory(namePrefix));
  scanExecutor.allowCoreThreadTimeOut(true);
  return scanExecutor;
}
/**
 * Creates the pool. A positive {@code poolSize} builds a fixed-size executor fed
 * from {@link #inbox} whose idle core threads time out; otherwise both the inbox
 * and the executor are left null and the pool is inert.
 */
public ActorPool(int poolSize) {
  if (poolSize <= 0) {
    inbox = null;
    executor = null;
    return;
  }
  inbox = new LinkedBlockingQueue<Runnable>();
  final ThreadPoolExecutor workers =
      new ThreadPoolExecutor(poolSize, poolSize, keepAliveTime, TimeUnit.MILLISECONDS, inbox);
  workers.allowCoreThreadTimeOut(true);
  this.executor = workers;
}
static {
  // Shared executor for CLI remoting work: 2 core / 4 max threads, 60 s keep-alive.
  final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>();
  final ThreadFactory threadFactory =
      new JBossThreadFactory(
          new ThreadGroup("cli-remoting"),
          Boolean.FALSE,
          null,
          "%G - %t",
          null,
          null,
          doPrivileged(GetAccessControlContextAction.getInstance()));
  executorService = new ThreadPoolExecutor(2, 4, 60L, TimeUnit.SECONDS, workQueue, threadFactory);
  // Allow the core threads to time out as well
  executorService.allowCoreThreadTimeOut(true);
  try {
    endpoint = Remoting.createEndpoint("cli-client", OptionMap.EMPTY);
    endpoint.addConnectionProvider("remote", new RemoteConnectionProviderFactory(), OptionMap.EMPTY);
    endpoint.addConnectionProvider(
        "http-remoting",
        new HttpUpgradeConnectionProviderFactory(),
        OptionMap.create(Options.SSL_ENABLED, Boolean.FALSE));
    endpoint.addConnectionProvider(
        "https-remoting",
        new HttpUpgradeConnectionProviderFactory(),
        OptionMap.create(Options.SSL_ENABLED, Boolean.TRUE));
  } catch (IOException e) {
    throw new IllegalStateException("Failed to create remoting endpoint", e);
  }
  CliShutdownHook.add(
      new CliShutdownHook.Handler() {
        @Override
        public void shutdown() {
          // Best-effort drain: give in-flight tasks one second, then move on.
          executorService.shutdown();
          try {
            executorService.awaitTermination(1, TimeUnit.SECONDS);
          } catch (InterruptedException e) {
            // Fix: restore the interrupt flag instead of silently swallowing it,
            // so the shutdown thread's interrupted status is preserved.
            Thread.currentThread().interrupt();
          }
          try {
            endpoint.close();
          } catch (IOException ignored) {
            // Best-effort close during shutdown; nothing useful to do on failure.
          }
        }
      });
}
private static ExecutorService buildDownloadExecutor() { final int maxConcurrent = 5; // FIXME transfermanager // final int maxConcurrent = Resources.getSystem().getInteger( // com.android.internal.R.integer.config_MaxConcurrentDownloadsAllowed); // Create a bounded thread pool for executing downloads; it creates // threads as needed (up to maximum) and reclaims them when finished. final ThreadPoolExecutor executor = new ThreadPoolExecutor( maxConcurrent, maxConcurrent, 10, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>()); executor.allowCoreThreadTimeOut(true); return executor; }
/**
 * Builds the token-renewer thread pool: core size min(5, nThreads) growing up to
 * the configured thread count, with idle threads (core included) reclaimed after
 * three seconds.
 */
protected ThreadPoolExecutor createNewThreadPoolService(Configuration conf) {
  final int nThreads =
      conf.getInt(
          YarnConfiguration.RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT,
          YarnConfiguration.DEFAULT_RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT);
  final ThreadFactory tf =
      new ThreadFactoryBuilder().setNameFormat("DelegationTokenRenewer #%d").build();
  final ThreadPoolExecutor pool =
      new ThreadPoolExecutor(
          Math.min(5, nThreads),
          nThreads,
          3L,
          TimeUnit.SECONDS,
          new LinkedBlockingQueue<Runnable>());
  pool.setThreadFactory(tf);
  pool.allowCoreThreadTimeOut(true);
  return pool;
}
/**
 * One-time configuration of the delegate executor: shutdown-hook registration,
 * idle-thread reclamation, and a naming thread factory.
 *
 * @param name prefix used for worker thread names
 */
private void configure(final String name) {
  /*
   * All executors should be shut down on application shutdown.
   */
  ShutdownHookManager.register(shutdownHook);
  /*
   * Idle threads should stop after FIXED_THREAD_KEEPALIVE_TIMEOUT of inactivity.
   * NOTE: this must run before allowCoreThreadTimeOut(true) below, which throws
   * if the keep-alive time is zero.
   */
  delegate.setKeepAliveTime(
      FIXED_THREAD_KEEPALIVE_TIMEOUT.longValue(), FIXED_THREAD_KEEPALIVE_TIMEOUT.getTimeUnit());
  /*
   * Also time out core threads. Works around tasks not starting when the core
   * pool size is 0 and the queue is not yet full (Java Concurrency in Practice,
   * chapter 8.3.1). If that bug could occur, an exception would be thrown here.
   */
  delegate.allowCoreThreadTimeOut(true);
  /*
   * Named threads improve debugging.
   */
  delegate.setThreadFactory(new WrappedThreadFactory(name, delegate.getThreadFactory()));
}
// Copied from ConnectionImplementation.getBatchPool() // We should get rid of this when Connection.processBatchCallback is un-deprecated and provides // an API to manage a batch pool private void createBatchPool(Configuration conf) { // Use the same config for keep alive as in ConnectionImplementation.getBatchPool(); int maxThreads = conf.getInt("hbase.multihconnection.threads.max", 256); if (maxThreads == 0) { maxThreads = Runtime.getRuntime().availableProcessors() * 8; } long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60); LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>( maxThreads * conf.getInt( HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); ThreadPoolExecutor tpe = new ThreadPoolExecutor( maxThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, Threads.newDaemonThreadFactory("MultiHConnection" + "-shared-")); tpe.allowCoreThreadTimeOut(true); this.batchPool = tpe; }
static {
  // Let idle core threads of the shared message-sync pool be reclaimed.
  MESSAGE_SYNC_THREAD_POOL.allowCoreThreadTimeOut(true);
}
static {
  // Allow the shared pool's core threads to time out when idle.
  threadPool.allowCoreThreadTimeOut(true);
}
/**
 * Entry point: parses command-line arguments, validates the application URL, builds
 * the worker thread pool and database clients, and constructs the Platypus server
 * for a file-based application directory.
 *
 * <p>Only {@code file:} URLs pointing at an existing directory are accepted.
 *
 * @param args the command line arguments
 * @throws IOException if application resources cannot be read
 * @throws Exception if the URL uses an unknown protocol
 */
public static void main(String[] args) throws IOException, Exception {
  checkUserHome();
  GeneralResourceProvider.registerDrivers();
  parseArgs(args);
  if (url == null || url.isEmpty()) {
    throw new IllegalArgumentException("Application url (-url parameter) is required.");
  }
  SSLContext sslContext = PlatypusConnection.createSSLContext();
  ScriptedDatabasesClient serverCoreDbClient;
  if (url.toLowerCase().startsWith("file")) {
    File f = new File(new URI(url));
    if (f.exists() && f.isDirectory()) {
      Logger.getLogger(ServerMain.class.getName())
          .log(Level.INFO, "Application is located at: {0}", f.getPath());
      // NOTE(review): drivers were already registered at the top of main — this
      // second call looks redundant; confirm before removing.
      GeneralResourceProvider.registerDrivers();
      ScriptsConfigs scriptsConfigs = new ScriptsConfigs();
      ServerTasksScanner tasksScanner = new ServerTasksScanner(scriptsConfigs);
      ApplicationSourceIndexer indexer = new ApplicationSourceIndexer(f.getPath(), tasksScanner);
      // TODO: add command line argument "watch" after watcher refactoring
      // indexer.watch();
      Scripts.initBIO(threadsConfig.getMaxServicesTreads());
      int maxWorkerThreads = parseNumWorkerThreads();
      // Fixed-size worker pool; idle threads (core included) die after 3 seconds.
      ThreadPoolExecutor serverProcessor =
          new ThreadPoolExecutor(
              maxWorkerThreads,
              maxWorkerThreads,
              3L,
              TimeUnit.SECONDS,
              new LinkedBlockingQueue<>(),
              new DeamonThreadFactory("TSA-", false));
      serverProcessor.allowCoreThreadTimeOut(true);
      // Script tasks are executed on the worker pool above.
      Scripts.initTasks(
          (Runnable aTask) -> {
            serverProcessor.submit(aTask);
          });
      serverCoreDbClient =
          new ScriptedDatabasesClient(
              defDatasource,
              indexer,
              true,
              tasksScanner.getValidators(),
              threadsConfig.getMaxJdbcTreads());
      QueriesProxy<SqlQuery> queries = new LocalQueriesProxy(serverCoreDbClient, indexer);
      serverCoreDbClient.setQueries(queries);
      PlatypusServer server =
          new PlatypusServer(
              indexer,
              new LocalModulesProxy(indexer, new ModelsDocuments(), appElement),
              queries,
              serverCoreDbClient,
              sslContext,
              parseListenAddresses(),
              parsePortsProtocols(),
              parsePortsSessionIdleTimeouts(),
              parsePortsSessionIdleCheckIntervals(),
              serverProcessor,
              scriptsConfigs,
              appElement);
      serverCoreDbClient.setContextHost(server);
      ScriptedResource.init(server, ScriptedResource.lookupPlatypusJs());
      SensorsFactory.init(server.getAcceptorsFactory());
      RetranslateFactory.init(server.getRetranslateFactory());
      // NOTE(review): the server start call is commented out — presumably started
      // elsewhere or intentionally disabled; confirm.
      // server.start(tasksScanner.getResidents(), tasksScanner.getAcceptors());
    } else {
      throw new IllegalArgumentException(
          "applicationUrl: " + url + " doesn't point to existent directory.");
    }
  } else {
    throw new Exception("Unknown protocol in url: " + url);
  }
}
/**
 * Creates a handler with the specified input URL, max thread count, and message backlog support.
 *
 * @param inputUrl - the URL provided by Loggly for sending log messages to
 * @param maxThreads - the max number of concurrent background threads that are allowed to send
 *     data to Loggly
 * @param backlog - the max number of log messages that can be queued up (anything beyond will be
 *     thrown away)
 */
public LogglyHandler(String inputUrl, int maxThreads, int backlog) {
  this.inputUrl = inputUrl;
  // Bounded sender pool: a full backlog discards the OLDEST queued message
  // (DiscardOldestPolicy), and idle core threads die after 60 s.
  pool =
      new ThreadPoolExecutor(
          maxThreads,
          maxThreads,
          60L,
          TimeUnit.SECONDS,
          new LinkedBlockingDeque<Runnable>(backlog),
          new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
              Thread thread = new Thread(r, "Loggly Thread");
              thread.setDaemon(true);
              return thread;
            }
          },
          new ThreadPoolExecutor.DiscardOldestPolicy());
  pool.allowCoreThreadTimeOut(true);
  retryQueue = new LinkedBlockingQueue<LogglySample>(backlog);
  // Background daemon that periodically re-submits failed samples, giving up on a
  // sample after 10 retries.
  Thread retryThread =
      new Thread(
          new Runnable() {
            @Override
            public void run() {
              while (allowRetry) {
                // drain the retry requests
                LogglySample sample = null;
                while ((sample = retryQueue.poll()) != null) {
                  if (sample.retryCount > 10) {
                    // todo: capture statistics about the failure (exception and/or status code)
                    // and then report on it in some sort of thoughtful way to standard err
                  } else {
                    pool.submit(sample);
                  }
                }
                // retry every 10 seconds
                try {
                  Thread.sleep(10000);
                } catch (InterruptedException e) {
                  System.err.println("Retry sleep was interrupted, giving up on retry thread");
                  return;
                }
              }
            }
          },
          "Loggly Retry Thread");
  retryThread.setDaemon(true);
  retryThread.start();
  // HTTP client sized to match the sender pool (one connection per thread).
  HttpParams params = new BasicHttpParams();
  ConnManagerParams.setMaxTotalConnections(params, maxThreads);
  ConnPerRouteBean connPerRoute = new ConnPerRouteBean(maxThreads);
  ConnManagerParams.setMaxConnectionsPerRoute(params, connPerRoute);
  // set 15 second timeouts, since Loggly should return quickly
  params.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 15000);
  params.setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 15000);
  SchemeRegistry registry = new SchemeRegistry();
  try {
    registry.register(new Scheme("https", SSLSocketFactory.getSocketFactory(), 443));
  } catch (Exception e) {
    throw new RuntimeException("Could not register SSL socket factor for Loggly", e);
  }
  ThreadSafeClientConnManager connManager = new ThreadSafeClientConnManager(params, registry);
  httpClient = new DefaultHttpClient(connManager, params);
  // because the threads are daemon threads, we want to give them a chance
  // to finish up before we totally shut down
  Runtime.getRuntime()
      .addShutdownHook(
          new Thread(
              new Runnable() {
                @Override
                public void run() {
                  close();
                }
              }));
}