public void cancel(Runnable runnable) {
  if (threadPoolExecutor != null
      && !threadPoolExecutor.isShutdown()
      && !threadPoolExecutor.isTerminated()) {
    threadPoolExecutor.remove(runnable);
  }
}
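// Sketch only: a minimal, self-contained illustration of ThreadPoolExecutor.remove(Runnable),
// the call the cancel helper above relies on. The single-threaded pool, the blocking first
// task, and the class name are illustrative assumptions, not part of the original code.
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CancelDemo {
  public static void main(String[] args) throws InterruptedException {
    ThreadPoolExecutor pool =
        new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());

    // Occupy the single worker thread so the next task stays queued.
    pool.execute(() -> sleepQuietly(500));

    Runnable queued = () -> System.out.println("should never run");
    pool.execute(queued);

    // remove(...) only affects tasks still waiting in the queue, not running ones.
    boolean removed = pool.remove(queued);
    System.out.println("removed from queue: " + removed);

    pool.shutdown();
    pool.awaitTermination(5, TimeUnit.SECONDS);
  }

  private static void sleepQuietly(long millis) {
    try {
      Thread.sleep(millis);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}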
/**
 * Method that will be executed for each rejected task.
 *
 * @param r Task that has been rejected
 * @param executor Executor that has rejected the task
 */
@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
  System.out.printf("RejectedTaskController: The task %s has been rejected\n", r.toString());
  System.out.printf("RejectedTaskController: %s\n", executor.toString());
  System.out.printf("RejectedTaskController: Terminating: %s\n", executor.isTerminating());
  System.out.printf("RejectedTaskController: Terminated: %s\n", executor.isTerminated());
}
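// Sketch only: shows how a RejectedExecutionHandler like the controller above might be wired
// into a ThreadPoolExecutor. The pool sizes, bounded queue, and class name are illustrative
// assumptions; rejection fires once the worker and the queue are both saturated (or after
// shutdown()).
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class RejectionDemo {
  public static void main(String[] args) {
    RejectedExecutionHandler handler =
        (r, executor) ->
            System.out.printf(
                "Rejected %s (terminating=%s, terminated=%s)%n",
                r, executor.isTerminating(), executor.isTerminated());

    ThreadPoolExecutor pool =
        new ThreadPoolExecutor(
            1, 1, 0L, TimeUnit.MILLISECONDS, new ArrayBlockingQueue<>(1), handler);

    // The third task is rejected: one is running, one is queued, and the queue is full.
    for (int i = 0; i < 3; i++) {
      pool.execute(
          () -> {
            try {
              Thread.sleep(200);
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
            }
          });
    }
    pool.shutdown();
  }
}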
/** @see de.willuhn.jameica.messaging.MessagingQueue#flush() */
public void flush() {
  if (pool.isTerminated()) return;

  try {
    while (messages != null && messages.size() > 0) Thread.sleep(5);
  } catch (Exception e) {
    Logger.error("unable to flush queue", e);
  }
}
/**
 * @see de.willuhn.jameica.messaging.MessagingQueue#sendSyncMessage(de.willuhn.jameica.messaging.Message)
 */
public void sendSyncMessage(Message message) {
  if (message == null || pool.isTerminating() || pool.isTerminated()) return;

  if (this.consumers.size() == 0) {
    // Deliberately logged at debug level because this can legitimately happen.
    Logger.debug("no message consumers found, ignoring message");
    return;
  }
  deliver(message);
}
/**
 * @see de.willuhn.jameica.messaging.MessagingQueue#sendMessage(de.willuhn.jameica.messaging.Message)
 */
public void sendMessage(final Message message) {
  if (message == null || pool.isTerminating() || pool.isTerminated()) return;

  if (this.consumers.size() == 0) {
    // Deliberately logged at debug level because this can legitimately happen.
    Logger.debug("no message consumers found, ignoring message");
    return;
  }

  pool.execute(
      new Runnable() {
        public void run() {
          deliver(message);
        }
      });
}
/**
 * Delivers the message to all registered consumers.
 *
 * @param msg the message to deliver.
 */
private void deliver(Message msg) {
  if (pool.isTerminating() || pool.isTerminated()) {
    Logger.warn("shutdown in progress, no more messages accepted");
    return; // we no longer accept messages.
  }

  // BUGZILLA 1413: Unfortunately we cannot work on a copy of the list, because it can be
  // extended while delivery is in progress. For example, the "AutoRegisterMessageConsumer"
  // receives the SYSTEM_STARTED message and then registers new consumers, among them the
  // DeployMessageConsumer from jameica.webadmin, which also listens for the SYSTEM_STARTED
  // message.
  Logger.debug("deliver message " + msg.toString());

  MessageConsumer consumer = null;
  for (int i = 0; i < this.consumers.size(); ++i) {
    consumer = this.consumers.get(i);
    Class[] expected = consumer.getExpectedMessageTypes();
    boolean send = expected == null;
    if (expected != null) {
      for (int j = 0; j < expected.length; ++j) {
        if (expected[j].isInstance(msg)) {
          send = true;
          break;
        }
      }
    }

    try {
      if (send) consumer.handleMessage(msg);
    } catch (ApplicationException ae) {
      Application.getMessagingFactory()
          .sendSyncMessage(new StatusBarMessage(ae.getMessage(), StatusBarMessage.TYPE_ERROR));
    } catch (OperationCanceledException oce) {
      Logger.debug("consumer " + consumer.getClass().getName() + " cancelled message " + msg);
    } catch (Throwable t) {
      Logger.error(
          "consumer "
              + consumer.getClass().getName()
              + " produced an error ("
              + t.getClass().getName()
              + ": "
              + t
              + ") while consuming message "
              + msg);
      Logger.write(Level.INFO, "error while processing message", t);
    }
  }
}
/**
 * @see de.willuhn.jameica.messaging.MessagingQueue#queueMessage(de.willuhn.jameica.messaging.Message)
 */
public void queueMessage(final Message message) {
  if (message == null || pool.isTerminating() || pool.isTerminated()) return;

  // If consumers are already registered, we can deliver directly.
  if (this.consumers.size() > 0) {
    this.sendMessage(message);
    return;
  }

  // Otherwise queue the message.
  boolean added =
      this.queue.offer(
          new Runnable() {
            public void run() {
              deliver(message);
            }
          });
  if (!added) Logger.debug("queue " + this.name + " full");
}
/**
 * Starts the crawler and writes the results to a file.
 *
 * @return the results array. This consists of the visited hosts and their request times.
 * @throws UnknownHostException
 * @throws IOException
 * @throws URISyntaxException
 */
public String[] startCrawl() throws IOException, URISyntaxException {
  executeCrawl();
  System.out.println("Found " + m_linkCounts + " links.");
  System.out.println(m_results.toString());

  System.out.println("Shutting down crawlers...");
  m_executorPool.shutdownNow();

  System.out.println("Writing results to file.");
  writeResultsToFile();
  System.out.println("Finished writing results to file.");

  while (!m_executorPool.isTerminated()) {
    // Wait till threads in the executor pool are stopped.
  }
  System.out.println("Stopped all crawlers.");

  return m_results.toArray(new String[0]);
}
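// Sketch only: a non-spinning alternative to the busy-wait loop above. The pool, the task,
// the 30-second timeout, and the class name are illustrative assumptions; awaitTermination
// blocks until the executor finishes (or the timeout elapses) instead of looping on
// isTerminated().
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationDemo {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    pool.execute(() -> System.out.println("crawling..."));

    pool.shutdown(); // stop accepting new tasks
    if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
      pool.shutdownNow(); // interrupt anything still running
    }
    System.out.println("terminated: " + pool.isTerminated());
  }
}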
public void testShutdownNowInterrupts() throws Exception {
  String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED);
  ThreadPool threadPool = null;
  try {
    Settings nodeSettings =
        Settings.builder()
            .put("threadpool." + threadPoolName + ".queue_size", 1000)
            .put("node.name", "testShutdownNowInterrupts")
            .build();
    threadPool = new ThreadPool(nodeSettings);
    ClusterSettings clusterSettings =
        new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    threadPool.setClusterSettings(clusterSettings);
    assertEquals(info(threadPool, threadPoolName).getQueueSize().getSingles(), 1000L);

    final CountDownLatch latch = new CountDownLatch(1);
    ThreadPoolExecutor oldExecutor = (ThreadPoolExecutor) threadPool.executor(threadPoolName);
    threadPool
        .executor(threadPoolName)
        .execute(
            () -> {
              try {
                new CountDownLatch(1).await();
              } catch (InterruptedException ex) {
                latch.countDown();
                Thread.currentThread().interrupt();
              }
            });
    clusterSettings.applySettings(
        Settings.builder().put("threadpool." + threadPoolName + ".queue_size", 2000).build());
    assertThat(threadPool.executor(threadPoolName), not(sameInstance(oldExecutor)));
    assertThat(oldExecutor.isShutdown(), equalTo(true));
    assertThat(oldExecutor.isTerminating(), equalTo(true));
    assertThat(oldExecutor.isTerminated(), equalTo(false));
    threadPool.shutdownNow(); // should interrupt the thread
    latch.await(3, TimeUnit.SECONDS); // If this throws then ThreadPool#shutdownNow didn't interrupt
  } finally {
    terminateThreadPoolIfNeeded(threadPool);
  }
}
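// Sketch only: the same interrupt-on-shutdownNow behaviour the test above asserts, reduced to
// the plain JDK API without the Elasticsearch ThreadPool wrapper. The latch, pool, and class
// name are illustrative assumptions.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowDemo {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    CountDownLatch interrupted = new CountDownLatch(1);

    pool.execute(
        () -> {
          try {
            new CountDownLatch(1).await(); // blocks forever unless interrupted
          } catch (InterruptedException e) {
            interrupted.countDown();
            Thread.currentThread().interrupt();
          }
        });

    pool.shutdownNow(); // interrupts the blocked worker
    System.out.println("worker interrupted: " + interrupted.await(3, TimeUnit.SECONDS));
  }
}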
protected void doStart() throws Exception {
  if (executor.isTerminated() || executor.isTerminating() || executor.isShutdown())
    throw new IllegalStateException("Cannot restart");
}
public boolean isStopped() {
  return executor.isTerminated();
}
public boolean isStarted() {
  return !executor.isTerminated() && !executor.isTerminating();
}
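// Sketch only: illustrates the executor states the isStarted()/isStopped() checks above rely
// on. After shutdown(), an executor that still has running tasks reports isShutdown()=true and
// isTerminating()=true, while isTerminated() only flips to true once every task has finished.
// The pool, sleep durations, and class name are illustrative assumptions.
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class LifecycleDemo {
  public static void main(String[] args) throws InterruptedException {
    ThreadPoolExecutor pool =
        new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());
    pool.execute(
        () -> {
          try {
            Thread.sleep(200);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        });

    pool.shutdown();
    System.out.println("shutdown:    " + pool.isShutdown());     // true
    System.out.println("terminating: " + pool.isTerminating());  // true while the task runs
    System.out.println("terminated:  " + pool.isTerminated());   // false until it finishes

    pool.awaitTermination(5, TimeUnit.SECONDS);
    System.out.println("terminated:  " + pool.isTerminated());   // true
  }
}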
/** @throws ExecutionException */
protected void monitor() throws ExecutionException {
  int displayMillis = DISPLAY_MILLIS;
  int futureMillis = FUTURE_MILLIS;
  int sleepMillis = SLEEP_MILLIS;
  Future<TimedEvent[]> future = null;
  /* Initialize lastFutureMillis so that we do not get
   * warnings on slow queue startup.
   */
  long currentMillis = System.currentTimeMillis();
  long lastDisplayMillis = 0;
  long lastFutureMillis = currentMillis;
  TimedEvent[] lastEvent = null;

  logger.finest(
      "looping every "
          + sleepMillis
          + ", core="
          + pool.getCorePoolSize()
          + ", active="
          + pool.getActiveCount()
          + ", tasks="
          + taskCount);

  timer = new Timer();

  // run until all futures have been checked
  do {
    // try to avoid thread starvation
    yield();

    // check completed tasks
    // sometimes this goes so fast that we never leave the loop,
    // so progress is never displayed... so limit the number of loops.
    do {
      try {
        future = completionService.poll(SLEEP_MILLIS, TimeUnit.MILLISECONDS);
        if (null != future) {
          // record result, or throw exception
          lastFutureMillis = System.currentTimeMillis();
          try {
            lastEvent = future.get();
            if (null == lastEvent) {
              throw new FatalException("unexpected null event");
            }
            for (int i = 0; i < lastEvent.length; i++) {
              // discard events to reduce memory utilization
              if (null != lastEvent[i]) {
                timer.add(lastEvent[i], false);
              }
            }
          } catch (ExecutionException e) {
            if (fatalErrors) {
              throw e;
            }
            Throwable cause = e.getCause();
            if (null != cause && cause instanceof FatalException) {
              throw (FatalException) cause;
            }
            logger.logException("non-fatal", e);
            timer.incrementEventCount(false);
          }
        }
      } catch (InterruptedException e) {
        // reset interrupt status and continue
        Thread.interrupted();
        logger.logException("interrupted in poll() or get()", e);
        continue;
      }

      currentMillis = System.currentTimeMillis();
      if (currentMillis - lastDisplayMillis > displayMillis) {
        lastDisplayMillis = currentMillis;
        logger.finer(
            "thread count: core="
                + pool.getCorePoolSize()
                + ", active="
                + pool.getActiveCount()
                + ", tasks="
                + taskCount);
        if (null != lastEvent) {
          logger.info(
              ""
                  + timer.getEventCount()
                  + "/"
                  + taskCount
                  + ", "
                  + timer.getProgressMessage(false)
                  + ", "
                  + lastEvent[0].getDescription());
          if (config.doPrintCurrRate()) {
            String currMsg = timer.getCurrProgressMessage();
            if (currMsg != null) logger.info(currMsg);
          }
        }
      }
    } while (null != future);

    logger.finer(
        "running = "
            + running
            + ", terminated = "
            + pool.isTerminated()
            + ", last future = "
            + lastFutureMillis);

    // currentMillis has already been set recently
    if (currentMillis - lastFutureMillis > futureMillis) {
      logger.warning("no futures received in over " + futureMillis + " ms");
      break;
    }
  } while (running && !pool.isTerminated());
  // NB - caller will set running to false to ensure exit
}
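// Sketch only: the core CompletionService polling pattern the monitor loop above is built
// around, stripped of its timing and logging bookkeeping. The task payload, counts, and class
// name are illustrative assumptions.
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class CompletionServiceDemo {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    CompletionService<Integer> completionService = new ExecutorCompletionService<>(pool);

    int taskCount = 5;
    for (int i = 0; i < taskCount; i++) {
      final int n = i;
      completionService.submit(() -> n * n);
    }

    int received = 0;
    while (received < taskCount && !pool.isTerminated()) {
      // poll() with a timeout returns null if nothing completed in time, which lets the
      // caller interleave progress reporting (as the monitor loop above does).
      Future<Integer> future = completionService.poll(500, TimeUnit.MILLISECONDS);
      if (future != null) {
        System.out.println("result: " + future.get());
        received++;
      }
    }
    pool.shutdown();
  }
}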
/**
 * Creates reference files for the top and bottom halves of the store files being split.
 *
 * @param hstoreFilesToSplit map of store files to create half file references for.
 * @return the number of reference files that were created.
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(
    final Map<byte[], List<StoreFile>> hstoreFilesToSplit) throws IOException {
  if (hstoreFilesToSplit == null) {
    // Could be null because close didn't succeed -- for now consider it fatal
    throw new IOException("Close returned empty list of StoreFiles");
  }

  // The following code sets up a thread pool executor with as many slots as there are files
  // to split. It then fires up everything, waits for completion and finally checks for any
  // exception.
  int nbFiles = 0;
  for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
    nbFiles += entry.getValue().size();
  }
  if (nbFiles == 0) {
    // no file needs to be split.
    return new Pair<Integer, Integer>(0, 0);
  }

  // Default max #threads to use is the smaller of table's configured number of blocking store
  // files or the available number of logical cores.
  int defMaxThreads =
      Math.min(
          parent.conf.getInt(
              HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
          Runtime.getRuntime().availableProcessors());
  // Max #threads is the smaller of the number of storefiles or the default max determined above.
  int maxThreads =
      Math.min(parent.conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
  LOG.info(
      "Preparing to split "
          + nbFiles
          + " storefiles for region "
          + this.parent
          + " using "
          + maxThreads
          + " threads");

  ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
  builder.setNameFormat("StoreFileSplitter-%1$d");
  ThreadFactory factory = builder.build();
  ThreadPoolExecutor threadPool =
      (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, factory);
  List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

  // Split each store file.
  for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
    for (StoreFile sf : entry.getValue()) {
      StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
      futures.add(threadPool.submit(sfs));
    }
  }
  // Shutdown the pool
  threadPool.shutdown();

  // Wait for all the tasks to finish
  try {
    boolean stillRunning =
        !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
    if (stillRunning) {
      threadPool.shutdownNow();
      // wait for the thread to shutdown completely.
      while (!threadPool.isTerminated()) {
        Thread.sleep(50);
      }
      throw new IOException(
          "Took too long to split the" + " files and create the references, aborting split");
    }
  } catch (InterruptedException e) {
    throw (InterruptedIOException) new InterruptedIOException().initCause(e);
  }

  int created_a = 0;
  int created_b = 0;
  // Look for any exception
  for (Future<Pair<Path, Path>> future : futures) {
    try {
      Pair<Path, Path> p = future.get();
      created_a += p.getFirst() != null ? 1 : 0;
      created_b += p.getSecond() != null ? 1 : 0;
    } catch (InterruptedException e) {
      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    } catch (ExecutionException e) {
      throw new IOException(e);
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "Split storefiles for region "
            + this.parent
            + " Daughter A: "
            + created_a
            + " storefiles, Daughter B: "
            + created_b
            + " storefiles.");
  }
  return new Pair<Integer, Integer>(created_a, created_b);
}