@Test
public void testInvokeAllTimeoutCancelled() throws Exception {
    ExecutorService executor = createSingleNodeExecutorService("testInvokeAll");
    assertFalse(executor.isShutdown());

    // Only one task
    ArrayList<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
    tasks.add(new CancellationAwareTask(0));
    List<Future<Boolean>> futures = executor.invokeAll(tasks, 5, TimeUnit.SECONDS);
    assertEquals(1, futures.size());
    assertEquals(Boolean.TRUE, futures.get(0).get());

    // More tasks: the first two finish immediately, the rest outlive the timeout
    tasks.clear();
    for (int i = 0; i < COUNT; i++) {
        tasks.add(new CancellationAwareTask(i < 2 ? 0 : 20000));
    }
    futures = executor.invokeAll(tasks, 5, TimeUnit.SECONDS);
    assertEquals(COUNT, futures.size());
    for (int i = 0; i < COUNT; i++) {
        if (i < 2) {
            assertEquals(Boolean.TRUE, futures.get(i).get());
        } else {
            // Tasks that exceeded the timeout must have been cancelled
            boolean expected = false;
            try {
                futures.get(i).get();
            } catch (CancellationException e) {
                expected = true;
            }
            assertTrue(expected);
        }
    }
}
@Override
public BxDocument segmentDocument(BxDocument document) throws AnalysisException {
    Map<BxPage, List<Component>> componentMap = new HashMap<BxPage, List<Component>>();

    // First pass: count connected components on every page in parallel
    ExecutorService exec = Executors.newFixedThreadPool(PdfNLMContentExtractor.THREADS_NUMBER);
    ArrayList<Callable<NumBxPage>> tasks = new ArrayList<Callable<NumBxPage>>();
    for (BxPage page : document.getPages()) {
        tasks.add(new ComponentCounter(page));
    }

    List<Future<NumBxPage>> results;
    try {
        results = exec.invokeAll(tasks);
        exec.shutdown();
        for (Future<NumBxPage> result : results) {
            NumBxPage p = result.get();
            componentMap.put(p.page, p.components);
        }
    } catch (ExecutionException ex) {
        throw new AnalysisException("Cannot segment pages!", ex);
    } catch (InterruptedException ex) {
        throw new AnalysisException("Cannot segment pages!", ex);
    }
    this.computeDocumentOrientation(componentMap);

    // Second pass: segment every page in parallel, preserving page order via the index
    BxDocument output = new BxDocument();
    BxPage[] pages = new BxPage[document.getPages().size()];
    exec = Executors.newFixedThreadPool(PdfNLMContentExtractor.THREADS_NUMBER);
    tasks = new ArrayList<Callable<NumBxPage>>();
    int i = 0;
    for (BxPage page : document.getPages()) {
        tasks.add(new SingleSegmenter(page, i++));
    }

    try {
        results = exec.invokeAll(tasks);
        exec.shutdown();
        for (Future<NumBxPage> result : results) {
            NumBxPage p = result.get();
            pages[p.index] = p.page;
        }
        for (BxPage p : pages) {
            if (p.getBounds() != null) {
                output.addPage(p);
            }
        }
        return output;
    } catch (ExecutionException ex) {
        throw new AnalysisException("Cannot segment pages!", ex);
    } catch (InterruptedException ex) {
        throw new AnalysisException("Cannot segment pages!", ex);
    }
}
public List<TravelQuote> getRankedTravelQuotes(
        TravelInfo travelInfo, Set<TravelCompany> companies,
        Comparator<TravelQuote> ranking, long time, TimeUnit unit)
        throws InterruptedException {
    List<QuoteTask> tasks = new ArrayList<QuoteTask>();
    for (TravelCompany company : companies) {
        tasks.add(new QuoteTask(company, travelInfo));
    }

    List<Future<TravelQuote>> futures = exec.invokeAll(tasks, time, unit);

    List<TravelQuote> quotes = new ArrayList<TravelQuote>(tasks.size());
    Iterator<QuoteTask> taskIter = tasks.iterator();
    for (Future<TravelQuote> f : futures) {
        QuoteTask task = taskIter.next();
        try {
            quotes.add(f.get());
        } catch (ExecutionException e) {
            quotes.add(task.getFailureQuote(e.getCause()));
        } catch (CancellationException e) {
            quotes.add(task.getTimeoutQuote(e));
        }
    }

    Collections.sort(quotes, ranking);
    return quotes;
}
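// For context, the QuoteTask consumed by getRankedTravelQuotes above is roughly the
// following shape. This is a hedged sketch: the field layout and call() body follow
// from how the task is constructed and used above, but the bodies of
// getFailureQuote/getTimeoutQuote (and TravelQuote.failure/timeout) are hypothetical.
private class QuoteTask implements Callable<TravelQuote> {
    private final TravelCompany company;
    private final TravelInfo travelInfo;

    public QuoteTask(TravelCompany company, TravelInfo travelInfo) {
        this.company = company;
        this.travelInfo = travelInfo;
    }

    public TravelQuote call() throws Exception {
        // Each solicitation is independent, which is what makes invokeAll applicable
        return company.solicitQuote(travelInfo);
    }

    TravelQuote getFailureQuote(Throwable t) {
        // Hypothetical placeholder for a solicitation that threw
        return TravelQuote.failure(company, t);
    }

    TravelQuote getTimeoutQuote(CancellationException e) {
        // Hypothetical placeholder for a solicitation cancelled at the timeout
        return TravelQuote.timeout(company);
    }
}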
@Override
public void executeTest() throws Exception {
    ODatabaseDocumentTx database =
        poolFactory.get(getDatabaseURL(serverInstance.get(0)), "admin", "admin").acquire();

    System.out.println("Creating Writers and Readers threads...");
    final ExecutorService writerExecutors = Executors.newCachedThreadPool();
    runningWriters = new CountDownLatch(serverInstance.size() * writerCount);

    // One writer task per server per writer slot
    int serverId = 0;
    int threadId = 0;
    List<Callable<Void>> writerWorkers = new ArrayList<Callable<Void>>();
    for (ServerRun server : serverInstance) {
        for (int j = 0; j < writerCount; j++) {
            Callable<Void> writer = createWriter(serverId, threadId++, getDatabaseURL(server));
            writerWorkers.add(writer);
        }
        serverId++;
    }

    List<Future<Void>> futures = writerExecutors.invokeAll(writerWorkers);
    System.out.println("Threads started, waiting for the end");
    for (Future<Void> future : futures) {
        future.get();
    }

    writerExecutors.shutdown();
    Assert.assertTrue(writerExecutors.awaitTermination(1, TimeUnit.MINUTES));
    System.out.println("All writer threads have finished, shutting down readers");
}
/**
 * Execute the {@link Callable} tasks in parallel (per the configured size of the
 * {@link WorkerPool}) and wait for them to complete.
 *
 * @param tasks a map of {@link Callable}s with keys by which you will be able to access
 *     each return value
 * @return the return value of each {@link Callable}, mapped by its input key
 */
public <K, V> Map<K, V> invokeAll(Map<K, Callable<V>> tasks) {
    String caller =
        LOGGER.isDebugEnabled() ? Thread.currentThread().getStackTrace()[2].toString() : "n/a";
    LOGGER.debug("[%s] is invoking %d mapped tasks", caller, tasks.size());

    // Preserve a stable ordering so results can be matched back to their keys
    List<K> orderedKeys = new ArrayList<K>(tasks.size());
    List<Callable<V>> orderedTasks = new ArrayList<Callable<V>>(tasks.size());
    for (Map.Entry<K, Callable<V>> entry : tasks.entrySet()) {
        orderedKeys.add(entry.getKey());
        orderedTasks.add(entry.getValue());
    }

    try {
        long start = System.currentTimeMillis();
        List<Future<V>> executorResults = executorService.invokeAll(orderedTasks);
        long finish = System.currentTimeMillis();
        LOGGER.debug("[%s] invoked %d mapped tasks in %d ms", caller, tasks.size(), finish - start);

        Map<K, V> mappedResults = new LinkedHashMap<K, V>(tasks.size());
        for (int i = 0; i < tasks.size(); i++) {
            K key = orderedKeys.get(i);
            V result = executorResults.get(i).get();
            mappedResults.put(key, result);
        }
        return mappedResults;
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
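// A minimal usage sketch for the map-based invokeAll above. The workerPool variable
// is assumed to be a configured WorkerPool instance; the keys and task bodies are
// made up for illustration.
Map<String, Callable<Integer>> tasks = new LinkedHashMap<String, Callable<Integer>>();
tasks.put("fast", new Callable<Integer>() {
    public Integer call() {
        return 1;
    }
});
tasks.put("slow", new Callable<Integer>() {
    public Integer call() throws Exception {
        Thread.sleep(100);
        return 2;
    }
});
// Blocks until both tasks finish; results keep their keys: {fast=1, slow=2}
Map<String, Integer> results = workerPool.invokeAll(tasks);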
/**
 * We test this by setting up two threads and asserting on the hash values the
 * instances generate for each thread.
 *
 * @throws InterruptedException
 * @throws ExecutionException
 */
@Test
public void test_001_concurrent_match() throws InterruptedException, ExecutionException {
    // Callable that reports the hash code of the Match instance seen by its thread
    int threadCount = 2;
    Callable<Integer> task = new Callable<Integer>() {
        @Override
        public Integer call() {
            return Match.getInstance().hashCode();
        }
    };

    // Create n copies of the task and run them on separate threads
    List<Callable<Integer>> tasks = Collections.nCopies(threadCount, task);
    ExecutorService es = Executors.newFixedThreadPool(threadCount);
    List<Future<Integer>> futures = es.invokeAll(tasks);
    es.shutdown();

    int hash1 = futures.get(0).get();
    int hash2 = futures.get(1).get();

    // The two hash codes must NOT be equal: each thread gets its own instance
    Assert.assertThat(hash1, not(equalTo(hash2)));
}
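// The test above only passes if Match.getInstance() hands out a distinct instance per
// thread. A minimal sketch of one way to get that behavior (an assumption for
// illustration; the real Match class may be implemented differently):
class Match {
    // One lazily created Match per thread
    private static final ThreadLocal<Match> INSTANCE = new ThreadLocal<Match>() {
        @Override
        protected Match initialValue() {
            return new Match();
        }
    };

    private Match() {
    }

    static Match getInstance() {
        return INSTANCE.get();
    }
}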
@Test
public void checkOnlyOneThreadExecutesPing() throws InterruptedException, ExecutionException {
    // Three identical tasks race to execute the ping
    Callable<PingResult> task = createTask();
    List<Callable<PingResult>> tasks = Collections.nCopies(3, task);

    ExecutorService executorService = Executors.newFixedThreadPool(3);
    List<Future<PingResult>> futures = executorService.invokeAll(tasks);
    executorService.shutdown();

    // Only one thread performs the ping; the others see only the initialization message
    assertEquals("We are fine.", futures.get(0).get().getMessage());
    assertEquals("only for initialization", futures.get(1).get().getMessage());
    assertEquals("only for initialization", futures.get(2).get().getMessage());
}
@Test
public void testInvokeAllTimeoutSuccess() throws Exception {
    ExecutorService executor = createSingleNodeExecutorService("testInvokeAll");
    assertFalse(executor.isShutdown());

    // Only one task
    ArrayList<Callable<String>> tasks = new ArrayList<Callable<String>>();
    tasks.add(new BasicTestTask());
    List<Future<String>> futures = executor.invokeAll(tasks, 5, TimeUnit.SECONDS);
    assertEquals(1, futures.size());
    assertEquals(BasicTestTask.RESULT, futures.get(0).get());

    // More tasks: all complete well within the timeout
    tasks.clear();
    for (int i = 0; i < COUNT; i++) {
        tasks.add(new BasicTestTask());
    }
    futures = executor.invokeAll(tasks, 5, TimeUnit.SECONDS);
    assertEquals(COUNT, futures.size());
    for (int i = 0; i < COUNT; i++) {
        assertEquals(BasicTestTask.RESULT, futures.get(i).get());
    }
}
/**
 * Execute the {@link Callable} tasks in parallel (per the configured size of the
 * {@link WorkerPool}) and wait for them to complete.
 *
 * @param tasks a list of {@link Callable}s
 * @return the ordered return values
 */
public <T> List<T> invokeAll(List<Callable<T>> tasks) {
    try {
        List<Future<T>> executorResults = executorService.invokeAll(tasks);
        List<T> results = new ArrayList<T>(tasks.size());
        for (Future<T> future : executorResults) {
            results.add(future.get());
        }
        return results;
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
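// A minimal usage sketch for the list-based invokeAll above. The workerPool variable
// is assumed to be a configured WorkerPool instance; the tasks are made up for
// illustration.
List<Callable<String>> tasks = new ArrayList<Callable<String>>();
for (final String name : new String[] {"a", "b", "c"}) {
    tasks.add(new Callable<String>() {
        public String call() {
            return name.toUpperCase();
        }
    });
}
// Blocks until all tasks finish; results arrive in task order: [A, B, C]
List<String> results = workerPool.invokeAll(tasks);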
public List<SimplifiedLog> consume(String topicName, int topicPartitions, int expectedMsg)
        throws InterruptedException {
    ConsumerConnector consumer =
        KafkaUtils.createConsumer(zkServer.getConnectString(), "test_group", "1");
    List<KafkaStream<String, SimplifiedLog>> streams =
        KafkaUtils.getConsumerStreams(consumer, topicName, topicPartitions);

    // One consumer task per stream, with a timeout proportional to the expected volume
    List<Callable<List<SimplifiedLog>>> tasks = new ArrayList<>();
    streams.forEach(stream -> tasks.add(createConsumerThread(stream.iterator(), expectedMsg)));

    ExecutorService executor = Executors.newFixedThreadPool(streams.size());
    List<Future<List<SimplifiedLog>>> futures =
        executor.invokeAll(tasks, 5 * expectedMsg, TimeUnit.SECONDS);
    executor.shutdown();

    List<SimplifiedLog> received = getResultsFromFutures(futures);
    consumer.shutdown();
    return received;
}
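// Plausible sketch of the getResultsFromFutures helper used above (an assumption;
// the real implementation may differ): flatten the per-stream results, skipping
// tasks that the timed invokeAll cancelled at the deadline.
private static List<SimplifiedLog> getResultsFromFutures(
        List<Future<List<SimplifiedLog>>> futures) {
    List<SimplifiedLog> received = new ArrayList<>();
    for (Future<List<SimplifiedLog>> future : futures) {
        try {
            received.addAll(future.get());
        } catch (CancellationException e) {
            // This stream did not deliver in time; keep whatever the others produced
        } catch (InterruptedException | ExecutionException e) {
            throw new IllegalStateException(e);
        }
    }
    return received;
}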
/**
 * Execute the {@link Callable} tasks in parallel (per the configured size of the
 * {@link WorkerPool}) and wait for them to complete.
 *
 * @param tasks a list of {@link Callable}s
 * @return the ordered return values
 */
public <T> List<T> invokeAll(List<Callable<T>> tasks) {
    String caller =
        LOGGER.isDebugEnabled() ? Thread.currentThread().getStackTrace()[2].toString() : "n/a";
    LOGGER.debug("[%s] is invoking %d listed tasks", caller, tasks.size());

    try {
        long start = System.currentTimeMillis();
        List<Future<T>> executorResults = executorService.invokeAll(tasks);
        long finish = System.currentTimeMillis();
        LOGGER.debug("[%s] invoked %d listed tasks in %d ms", caller, tasks.size(), finish - start);

        List<T> results = new ArrayList<T>(tasks.size());
        for (Future<T> future : executorResults) {
            results.add(future.get());
        }
        return results;
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
private <TRequest, TResponse> Response<TResponse> executeAllParallel(
        ResponseCommand<TRequest, TResponse> responseCommand, boolean includeLocal) {
    // Build one executor per cluster member, optionally including the local node
    Collection<Callable<Response<TResponse>>> commandExecutors = new ArrayList<>();
    if (includeLocal) {
        for (Membership.Member machine :
                membershipService.getMembershipList().getMemberList()) {
            commandExecutors.add(createResponseExecutor(machine, responseCommand));
        }
    } else {
        for (Membership.Member machine :
                membershipService.getMembershipListNoLocal().getMemberList()) {
            commandExecutors.add(createResponseExecutor(machine, responseCommand));
        }
    }

    List<Future<Response<TResponse>>> results;
    try {
        results = pool.invokeAll(commandExecutors);

        // Merge the individual member responses into a single aggregate response
        Response<TResponse> response = null;
        for (Future<Response<TResponse>> future : results) {
            try {
                if (response == null) {
                    response = future.get();
                } else {
                    Response<TResponse> tResponse = future.get();
                    if (tResponse != null) {
                        response.setResponseData(
                            response.getResponseData().add(tResponse.getResponseData()));
                        response.setResponse(
                            responseCommand.add(response.getResponse(), tResponse.getResponse()));
                    }
                }
            } catch (ExecutionException e) {
                logger.logLine(DefaultLogger.SEVERE, String.valueOf(e));
            }
        }
        return response;
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it
        Thread.currentThread().interrupt();
        logger.logLine(DefaultLogger.SEVERE, String.valueOf(e));
    }
    return null;
}
/**
 * Execute the {@link Callable} tasks in parallel (per the configured size of the
 * {@link WorkerPool}) and wait for them to complete.
 *
 * @param tasks a map of {@link Callable}s with keys by which you will be able to access
 *     each return value
 * @return the return value of each {@link Callable}, mapped by its input key
 */
public <K, V> Map<K, V> invokeAll(Map<K, Callable<V>> tasks) {
    // Preserve a stable ordering so results can be matched back to their keys
    List<K> orderedKeys = new ArrayList<K>(tasks.size());
    List<Callable<V>> orderedTasks = new ArrayList<Callable<V>>(tasks.size());
    for (Map.Entry<K, Callable<V>> entry : tasks.entrySet()) {
        orderedKeys.add(entry.getKey());
        orderedTasks.add(entry.getValue());
    }

    try {
        List<Future<V>> executorResults = executorService.invokeAll(orderedTasks);
        Map<K, V> mappedResults = new LinkedHashMap<K, V>(tasks.size());
        for (int i = 0; i < tasks.size(); i++) {
            K key = orderedKeys.get(i);
            V result = executorResults.get(i).get();
            mappedResults.put(key, result);
        }
        return mappedResults;
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
public <T> List<Future<T>> invokeAll(
        Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
        throws InterruptedException {
    return e.invokeAll(tasks, timeout, unit);
}
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
        throws InterruptedException {
    return e.invokeAll(tasks);
}
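// The two delegating overloads above inherit the standard ExecutorService contract:
// the untimed invokeAll waits for every task, while the timed variant cancels any
// task that has not completed when the deadline passes. A self-contained sketch
// (pool size and sleep durations are illustrative):
ExecutorService pool = Executors.newFixedThreadPool(2);
List<Callable<String>> work = Arrays.asList(
    (Callable<String>) () -> "quick",
    () -> {
        Thread.sleep(10000);
        return "slow";
    });
List<Future<String>> futures = pool.invokeAll(work, 1, TimeUnit.SECONDS);
System.out.println(futures.get(0).isCancelled()); // false: completed within the timeout
System.out.println(futures.get(1).isCancelled()); // true: cancelled at the deadline
pool.shutdown();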
@Override
public TTPSolution doSearch(TTPInstance instance) {
    if (searchStatistics != null) {
        searchStatistics.start();
    }

    ExecutorService executorService = Executors.newFixedThreadPool(noThreads);
    List<LocalSearchCallable> localSearches = new ArrayList<LocalSearchCallable>(noTries);
    constructionHeuristic.setProblemInstance(instance);

    // Each try gets its own clones of the construction heuristic and local search,
    // so the worker threads do not share mutable state
    try {
        for (int i = 0; i < noTries; i++) {
            IConstructionHeuristics<TTPInstance, TTPSolution> clonedConstructionHeuristic =
                constructionHeuristic.clone();
            ILocalSearch<TTPSolution> clonedLocalSearch = localSearch.clone();
            localSearches.add(new LocalSearchCallable(
                clonedConstructionHeuristic, clonedLocalSearch, i, searchStatistics != null));
        }
    } catch (CloneNotSupportedException e) {
        logger.error("Failed to create clone for multi-threaded local search", e);
    }

    try {
        List<Future<LocalSearchResult>> solutions = executorService.invokeAll(localSearches);
        executorService.shutdown();

        // Keep the cheapest solution found across all tries
        TTPSolution bestSolution = null;
        for (Future<LocalSearchResult> futureSolution : solutions) {
            try {
                LocalSearchResult result = futureSolution.get();
                TTPSolution solution = result.getSolution();
                if (solution == null) {
                    logger.warn("Solution was null");
                    continue;
                }
                if (searchStatistics != null) {
                    searchStatistics.addLocalSearchStatistic(
                        result.getIteration(), result.getStatistics());
                }
                if (bestSolution == null || solution.getCost() < bestSolution.getCost()) {
                    bestSolution = solution;
                }
            } catch (ExecutionException e) {
                logger.warn("One of the local search executors failed", e);
            }
        }

        if (searchStatistics != null) {
            searchStatistics.end();
            searchStatistics.setSolution(bestSolution);
        }
        return bestSolution;
    } catch (InterruptedException e) {
        logger.error("Failed to do multi-threaded local search", e);
        if (searchStatistics != null) {
            searchStatistics.setException(e);
            searchStatistics.end();
        }
        return null;
    }
}