/**
 * Main method of the example
 *
 * @param args
 */
public static void main(String[] args) {
    // Generate an array of 1000 integers
    ArrayGenerator generator = new ArrayGenerator();
    int[] array = generator.generateArray(1000);

    // Create a TaskManager object
    TaskManager manager = new TaskManager();

    // Create a ForkJoinPool with the default constructor
    ForkJoinPool pool = new ForkJoinPool();

    // Create a Task to process the array
    SearchNumberTask task = new SearchNumberTask(array, 0, 1000, 5, manager);

    // Execute the task
    pool.execute(task);

    // Shut down the pool
    pool.shutdown();

    // Wait for the finalization of the task
    try {
        pool.awaitTermination(1, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    // Write a message to indicate the end of the program
    System.out.printf("Main: The program has finished\n");
}
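The helper classes this example relies on are not shown above. The following is a minimal sketch, under the assumption that SearchNumberTask is a RecursiveTask<Integer> that searches the array for a value and uses the TaskManager to cancel the remaining tasks once the value is found; these definitions are illustrative assumptions, not the original implementation.

import java.util.Random;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ForkJoinTask;
import java.util.concurrent.RecursiveTask;

class ArrayGenerator {
    // Fills an array with random values between 0 and 9.
    int[] generateArray(int size) {
        Random random = new Random();
        int[] array = new int[size];
        for (int i = 0; i < size; i++) {
            array[i] = random.nextInt(10);
        }
        return array;
    }
}

class TaskManager {
    // Remembers every task so that all pending tasks can be cancelled once one
    // of them has found the number.
    private final ConcurrentLinkedQueue<ForkJoinTask<Integer>> tasks = new ConcurrentLinkedQueue<>();

    void addTask(ForkJoinTask<Integer> task) {
        tasks.add(task);
    }

    void cancelTasks(ForkJoinTask<Integer> except) {
        for (ForkJoinTask<Integer> task : tasks) {
            if (task != except) {
                task.cancel(true);
            }
        }
    }
}

class SearchNumberTask extends RecursiveTask<Integer> {
    private static final int NOT_FOUND = -1;
    private final int[] numbers;
    private final int start, end, number;
    private final TaskManager manager;

    SearchNumberTask(int[] numbers, int start, int end, int number, TaskManager manager) {
        this.numbers = numbers;
        this.start = start;
        this.end = end;
        this.number = number;
        this.manager = manager;
        manager.addTask(this);
    }

    @Override
    protected Integer compute() {
        if (end - start > 10) {
            // Split the range in two halves and search them as subtasks.
            int mid = (start + end) / 2;
            SearchNumberTask task1 = new SearchNumberTask(numbers, start, mid, number, manager);
            SearchNumberTask task2 = new SearchNumberTask(numbers, mid, end, number, manager);
            task1.fork();
            task2.fork();
            int result1 = safeJoin(task1);
            int result2 = safeJoin(task2);
            return result1 != NOT_FOUND ? result1 : result2;
        }
        // Sequential search; cancel the other pending tasks once the number is found.
        for (int i = start; i < end; i++) {
            if (numbers[i] == number) {
                manager.cancelTasks(this);
                return i;
            }
        }
        return NOT_FOUND;
    }

    // join() throws CancellationException for subtasks cancelled by the TaskManager,
    // so a cancelled subtask is simply treated as "not found".
    private static int safeJoin(SearchNumberTask task) {
        try {
            return task.join();
        } catch (CancellationException e) {
            return NOT_FOUND;
        }
    }
}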
public static void main(String[] args) throws Exception {
    // 8. Create the ForkJoinPool object
    ForkJoinPool pool = new ForkJoinPool();

    // 9. Create an int array with 10,000 elements
    int[] array = new int[10000];

    // 10. Create a new task to process the whole array
    Task task1 = new Task(array, 0, array.length);

    // 11. Send the task to the pool with the execute() method
    pool.execute(task1);

    // 12. While the task is not finished, call showLog() to print the status of the
    // ForkJoinPool and put the thread to sleep for one second
    while (!task1.isDone()) {
        showLog(pool);
        TimeUnit.SECONDS.sleep(1);
    }

    // 13. Shut down the pool with the shutdown() method
    pool.shutdown();

    // 14. Wait for the pool to finish with the awaitTermination() method
    pool.awaitTermination(1, TimeUnit.DAYS);

    // 15. Call showLog() to print the status of the ForkJoinPool and write a message
    // indicating the end of the program
    showLog(pool);
    System.out.printf("Main: End of the program.\n");
}
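Neither the Task class nor the showLog() helper is shown. The sketch below assumes Task is a RecursiveAction that splits the array into blocks and does a little work per block, and that showLog() prints the monitoring data exposed by ForkJoinPool; both are assumptions about code the snippet does not show.

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;
import java.util.concurrent.TimeUnit;

// Hypothetical owner class for the main() method shown above.
public class MonitoringExample {

    // Splits the array into blocks and sleeps briefly per block so the monitoring
    // loop in main() has some activity to report.
    static class Task extends RecursiveAction {
        private final int[] array;
        private final int start, end;

        Task(int[] array, int start, int end) {
            this.array = array;
            this.start = start;
            this.end = end;
        }

        @Override
        protected void compute() {
            if (end - start > 100) {
                int mid = (start + end) / 2;
                invokeAll(new Task(array, start, mid), new Task(array, mid, end));
            } else {
                for (int i = start; i < end; i++) {
                    array[i]++;
                }
                try {
                    TimeUnit.MILLISECONDS.sleep(5);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }
    }

    // Prints the monitoring information exposed by ForkJoinPool.
    static void showLog(ForkJoinPool pool) {
        System.out.printf("**********************%n");
        System.out.printf("Main: Fork/Join Pool log%n");
        System.out.printf("Main: Parallelism: %d%n", pool.getParallelism());
        System.out.printf("Main: Pool Size: %d%n", pool.getPoolSize());
        System.out.printf("Main: Active Thread Count: %d%n", pool.getActiveThreadCount());
        System.out.printf("Main: Running Thread Count: %d%n", pool.getRunningThreadCount());
        System.out.printf("Main: Queued Submissions: %d%n", pool.getQueuedSubmissionCount());
        System.out.printf("Main: Queued Tasks: %d%n", pool.getQueuedTaskCount());
        System.out.printf("Main: Steal Count: %d%n", pool.getStealCount());
        System.out.printf("**********************%n");
    }

    // The main(String[] args) method from the snippet above would live here as well.
}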
/** Pool maintains parallelism when using ManagedBlocker */
public void testBlockingForkJoinTask() throws Throwable {
    ForkJoinPool p = new ForkJoinPool(4);
    try {
        ReentrantLock lock = new ReentrantLock();
        ManagedLocker locker = new ManagedLocker(lock);
        ForkJoinTask<Integer> f = new LockingFibTask(20, locker, lock);
        p.execute(f);
        assertEquals(6765, (int) f.get());
    } finally {
        p.shutdownNow(); // don't wait out shutdown
    }
}
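The two helpers are not shown. Below is a plausible sketch of ManagedLocker, following the ManagedBlocker example given in the ForkJoinPool javadoc (the exact test helper may differ); LockingFibTask is assumed to be a RecursiveTask<Integer> that computes Fibonacci(20) = 6765 and acquires the shared lock through ForkJoinPool.managedBlock(locker).

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.locks.ReentrantLock;

// Wraps a ReentrantLock in a ManagedBlocker: when a task acquires the lock through
// ForkJoinPool.managedBlock(locker), the pool can activate a spare worker while this
// worker is blocked, preserving the pool's target parallelism.
class ManagedLocker implements ForkJoinPool.ManagedBlocker {
    final ReentrantLock lock;
    boolean hasLock = false;

    ManagedLocker(ReentrantLock lock) {
        this.lock = lock;
    }

    public boolean block() {
        // Called by the pool when blocking is really necessary.
        if (!hasLock) {
            lock.lock();
        }
        return true;
    }

    public boolean isReleasable() {
        // True if no blocking is needed: the lock is already held or can be
        // taken without waiting.
        return hasLock || (hasLock = lock.tryLock());
    }
}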
/**
 * Main method of the example
 *
 * @param args
 */
public static void main(String[] args) {
    // Create a list of products
    ProductListGenerator generator = new ProductListGenerator();
    List<Product> products = generator.generate(10000);

    // Create a task
    Task task = new Task(products, 0, products.size(), 0.20);

    // Create a ForkJoinPool
    ForkJoinPool pool = new ForkJoinPool();

    // Execute the Task
    pool.execute(task);

    // Write information about the pool
    do {
        System.out.printf("Main: Thread Count: %d\n", pool.getActiveThreadCount());
        System.out.printf("Main: Thread Steal: %d\n", pool.getStealCount());
        System.out.printf("Main: Parallelism: %d\n", pool.getParallelism());
        try {
            TimeUnit.MILLISECONDS.sleep(5);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    } while (!task.isDone());

    // Shut down the pool
    pool.shutdown();

    // Check if the task has completed normally
    if (task.isCompletedNormally()) {
        System.out.printf("Main: The process has completed normally.\n");
    }

    // Expected result: 12. Write the products whose price is not 12
    for (int i = 0; i < products.size(); i++) {
        Product product = products.get(i);
        if (product.getPrice() != 12) {
            System.out.printf("Product %s: %f\n", product.getName(), product.getPrice());
        }
    }

    // End of the program
    System.out.println("Main: End of the program.\n");
}
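Product, ProductListGenerator and Task are not shown. A minimal sketch, assuming every product starts at price 10 and the task applies the 20% increment (so the expected final price is 12); these definitions are assumptions for illustration, not the original classes.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.RecursiveAction;

class Product {
    private final String name;
    private double price;

    Product(String name, double price) {
        this.name = name;
        this.price = price;
    }

    String getName() { return name; }
    double getPrice() { return price; }
    void setPrice(double price) { this.price = price; }
}

class ProductListGenerator {
    // Generates a list of products, all with an initial price of 10.
    List<Product> generate(int size) {
        List<Product> products = new ArrayList<>();
        for (int i = 0; i < size; i++) {
            products.add(new Product("Product " + i, 10));
        }
        return products;
    }
}

class Task extends RecursiveAction {
    private final List<Product> products;
    private final int first, last;
    private final double increment;

    Task(List<Product> products, int first, int last, double increment) {
        this.products = products;
        this.first = first;
        this.last = last;
        this.increment = increment;
    }

    @Override
    protected void compute() {
        if (last - first < 10) {
            // Small enough block: apply the price increment directly (10 * 1.20 = 12).
            for (int i = first; i < last; i++) {
                Product product = products.get(i);
                product.setPrice(product.getPrice() * (1 + increment));
            }
        } else {
            // Split the block in two halves and process them in parallel.
            int middle = (first + last) / 2;
            invokeAll(
                new Task(products, first, middle, increment),
                new Task(products, middle, last, increment));
        }
    }
}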
/**
 * setUncaughtExceptionHandler changes handler for uncaught exceptions.
 *
 * <p>Additionally tests: Overriding ForkJoinWorkerThread.onStart performs its defined action
 */
public void testSetUncaughtExceptionHandler() throws InterruptedException {
    final CountDownLatch uehInvoked = new CountDownLatch(1);
    final Thread.UncaughtExceptionHandler ueh =
        new Thread.UncaughtExceptionHandler() {
            public void uncaughtException(Thread t, Throwable e) {
                threadAssertTrue(e instanceof MyError);
                threadAssertTrue(t instanceof FailingFJWSubclass);
                uehInvoked.countDown();
            }
        };
    ForkJoinPool p = new ForkJoinPool(1, new FailingThreadFactory(), ueh, false);
    try (PoolCleaner cleaner = cleaner(p)) {
        assertSame(ueh, p.getUncaughtExceptionHandler());
        try {
            p.execute(new FibTask(8));
            await(uehInvoked);
        } finally {
            p.shutdownNow(); // failure might have prevented processing task
        }
    }
}
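The test references several helpers (MyError, FailingFJWSubclass, FailingThreadFactory) that are not shown. A plausible sketch of their shape follows; the exact jsr166 tck definitions may differ. The key idea is a thread factory that produces worker threads whose onStart() throws, which is what triggers the uncaught exception handler installed on the pool.

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinWorkerThread;
import java.util.concurrent.atomic.AtomicInteger;

class MyError extends Error {}

// A ForkJoinWorkerThread subclass whose onStart() action is to throw MyError.
class FailingFJWSubclass extends ForkJoinWorkerThread {
    FailingFJWSubclass(ForkJoinPool p) {
        super(p);
    }

    @Override
    protected void onStart() {
        super.onStart();
        throw new MyError();
    }
}

// A thread factory that hands out the failing worker threads, giving up after a
// few attempts so the pool cannot keep retrying forever.
class FailingThreadFactory implements ForkJoinPool.ForkJoinWorkerThreadFactory {
    private final AtomicInteger calls = new AtomicInteger(0);

    public ForkJoinWorkerThread newThread(ForkJoinPool p) {
        if (calls.incrementAndGet() > 3) {
            return null;
        }
        return new FailingFJWSubclass(p);
    }
}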
public OWLOntology findLsignature(
        OWLOntology ontology, LogicFragment fragment, Statistics stats) {
    Timer t = new Timer();
    this.stats = stats;
    Logger_MORe.logInfo("extracting " + fragment.toString() + "-signature");
    OWLOntology ret = null;
    OWLOntologyManager manager = ontology.getOWLOntologyManager();
    try {
        ret = manager.createOntology();
        manager.addAxioms(ret, ontology.getAxioms());
    } catch (OWLOntologyCreationException e) {
        e.printStackTrace();
    }
    lSignatureClasses = new HashSet<OWLClass>();
    lSignatureOther = new HashSet<OWLEntity>();
    compSignatureClasses = new HashSet<OWLClass>();
    compSignatureOther = new HashSet<OWLEntity>();

    LsignatureExtractorLauncher elkSignatureExtractorLauncher = null;
    LsignatureExtractorLauncher elkSignatureExtractorIntegratingRangesLauncher = null;
    LsignatureExtractorViaInverseRewritingLauncher elkSignatureExtractorRewritingInversesLauncher = null;

    ForkJoinPool executor = new ForkJoinPool();
    elkSignatureExtractorLauncher =
        new LsignatureExtractorLauncher(ontology, LogicFragment.ELK, false);
    executor.execute(elkSignatureExtractorLauncher);

    if (ret != null) {
        // Otherwise we have nowhere to return the axioms in the normalised ontologies
        // necessary to really classify all the extra classes in the lSignature.
        if (rewriteInverses) {
            elkSignatureExtractorRewritingInversesLauncher =
                new LsignatureExtractorViaInverseRewritingLauncher(ontology, LogicFragment.ELK);
            executor.execute(elkSignatureExtractorRewritingInversesLauncher);
        }
        if (integrateRanges) {
            elkSignatureExtractorIntegratingRangesLauncher =
                new LsignatureExtractorLauncher(ontology, LogicFragment.ELK, true);
            executor.execute(elkSignatureExtractorIntegratingRangesLauncher);
        }

        // Check the output of the normal ELK signature and cancel the other threads if the
        // lSignature is the whole signature.
        initialiseLsignature((LsignatureExtractor) elkSignatureExtractorLauncher.join());
        if (compSignatureClasses.isEmpty()) {
            cancelTasks(
                elkSignatureExtractorIntegratingRangesLauncher,
                elkSignatureExtractorRewritingInversesLauncher);
        } else {
            if (elkSignatureExtractorRewritingInversesLauncher != null
                    && extendLsignature(
                            (LsignatureExtractor) elkSignatureExtractorRewritingInversesLauncher.join())
                        > 0) {
                manager.addAxioms(
                    ret,
                    ((LsignatureExtractorViaInverseRewritingLauncher)
                            elkSignatureExtractorRewritingInversesLauncher)
                        .getOntology()
                        .getAxioms());
            }
            if (compSignatureClasses.isEmpty()) {
                cancelTasks(elkSignatureExtractorRewritingInversesLauncher);
            } else if (elkSignatureExtractorIntegratingRangesLauncher != null
                    && extendLsignature(
                            (LsignatureExtractor) elkSignatureExtractorIntegratingRangesLauncher.join())
                        > 0) {
                manager.addAxioms(
                    ret,
                    ((LsignatureExtractorLauncher) elkSignatureExtractorIntegratingRangesLauncher)
                        .getOntology()
                        .getAxioms());
            }
        }
        stats.updateLsignatureSize(lSignatureClasses.size(), true);
    } else {
        ret = ontology;
        initialiseLsignature((LsignatureExtractor) elkSignatureExtractorLauncher.join());
    }

    Logger_MORe.logInfo(lSignatureClasses.size() + " classes in lSignature");
    Logger_MORe.logDebug(lSignatureClasses.toString());
    Logger_MORe.logInfo(compSignatureClasses.size() + " classes in compSignature");

    // It might be a good idea to try to isolate extra axioms in the normalisation/rewriting -
    // is this possible/worth the effort?
    // Check the order in which we try to extend the lSignature with each of the rewritten
    // ontologies and consider whether one may be better than the other.

    Logger_MORe.logDebug(t.duration() + "s to find Lsignature");
    return ret;
}
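The launcher objects above are submitted with execute() and their join() results are cast to LsignatureExtractor, so they are evidently ForkJoinTasks (most likely RecursiveTask<LsignatureExtractor> subclasses). Under that assumption, a minimal, hypothetical sketch of what the cancelTasks(...) helper could look like (not the actual MORe source):

import java.util.concurrent.ForkJoinTask;

class TaskCancellation {
    // Cancels whichever launcher tasks were actually submitted and are still pending;
    // mayInterruptIfRunning = true also requests interruption of a running computation.
    static void cancelTasks(ForkJoinTask<?>... tasks) {
        for (ForkJoinTask<?> task : tasks) {
            if (task != null && !task.isDone()) {
                task.cancel(true);
            }
        }
    }
}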
@Test
public void testAsyncRangeRequests() throws IOException, URISyntaxException, InterruptedException {
    final URL testResourceUrl = new URL(VAULT_BASE_URI.toURL(), "asyncRangeRequestTestFile.txt");
    final MultiThreadedHttpConnectionManager cm = new MultiThreadedHttpConnectionManager();
    cm.getParams().setDefaultMaxConnectionsPerHost(50);
    final HttpClient client = new HttpClient(cm);

    // prepare 8MiB test data:
    final byte[] plaintextData = new byte[2097152 * Integer.BYTES];
    final ByteBuffer bbIn = ByteBuffer.wrap(plaintextData);
    for (int i = 0; i < 2097152; i++) {
        bbIn.putInt(i);
    }

    // put request:
    final EntityEnclosingMethod putMethod = new PutMethod(testResourceUrl.toString());
    putMethod.setRequestEntity(new ByteArrayRequestEntity(plaintextData));
    final int putResponse = client.executeMethod(putMethod);
    putMethod.releaseConnection();
    Assert.assertEquals(201, putResponse);

    // multiple async range requests:
    final List<ForkJoinTask<?>> tasks = new ArrayList<>();
    final Random generator = new Random(System.currentTimeMillis());
    final AtomicBoolean success = new AtomicBoolean(true);

    // 10 full interrupted requests:
    for (int i = 0; i < 10; i++) {
        final ForkJoinTask<?> task = ForkJoinTask.adapt(() -> {
            try {
                final HttpMethod getMethod = new GetMethod(testResourceUrl.toString());
                final int statusCode = client.executeMethod(getMethod);
                if (statusCode != 200) {
                    LOG.error("Invalid status code for interrupted full request");
                    success.set(false);
                }
                getMethod.getResponseBodyAsStream().read();
                getMethod.getResponseBodyAsStream().close();
                getMethod.releaseConnection();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        tasks.add(task);
    }

    // 50 crappy interrupted range requests:
    for (int i = 0; i < 50; i++) {
        final int lower = generator.nextInt(plaintextData.length);
        final ForkJoinTask<?> task = ForkJoinTask.adapt(() -> {
            try {
                final HttpMethod getMethod = new GetMethod(testResourceUrl.toString());
                getMethod.addRequestHeader("Range", "bytes=" + lower + "-");
                final int statusCode = client.executeMethod(getMethod);
                if (statusCode != 206) {
                    LOG.error("Invalid status code for interrupted range request");
                    success.set(false);
                }
                getMethod.getResponseBodyAsStream().read();
                getMethod.getResponseBodyAsStream().close();
                getMethod.releaseConnection();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        tasks.add(task);
    }

    // 50 normal open range requests:
    for (int i = 0; i < 50; i++) {
        final int lower = generator.nextInt(plaintextData.length - 512);
        final int upper = plaintextData.length - 1;
        final ForkJoinTask<?> task = ForkJoinTask.adapt(() -> {
            try {
                final HttpMethod getMethod = new GetMethod(testResourceUrl.toString());
                getMethod.addRequestHeader("Range", "bytes=" + lower + "-");
                final byte[] expected = Arrays.copyOfRange(plaintextData, lower, upper + 1);
                final int statusCode = client.executeMethod(getMethod);
                final byte[] responseBody = new byte[upper - lower + 10];
                final int bytesRead = IOUtils.read(getMethod.getResponseBodyAsStream(), responseBody);
                getMethod.releaseConnection();
                if (statusCode != 206) {
                    LOG.error("Invalid status code for open range request");
                    success.set(false);
                } else if (upper - lower + 1 != bytesRead) {
                    LOG.error("Invalid response length for open range request");
                    success.set(false);
                } else if (!Arrays.equals(expected, Arrays.copyOfRange(responseBody, 0, bytesRead))) {
                    LOG.error("Invalid response body for open range request");
                    success.set(false);
                }
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        tasks.add(task);
    }

    // 200 normal closed range requests:
    for (int i = 0; i < 200; i++) {
        final int pos1 = generator.nextInt(plaintextData.length - 512);
        final int pos2 = pos1 + 512;
        final ForkJoinTask<?> task = ForkJoinTask.adapt(() -> {
            try {
                final int lower = Math.min(pos1, pos2);
                final int upper = Math.max(pos1, pos2);
                final HttpMethod getMethod = new GetMethod(testResourceUrl.toString());
                getMethod.addRequestHeader("Range", "bytes=" + lower + "-" + upper);
                final byte[] expected = Arrays.copyOfRange(plaintextData, lower, upper + 1);
                final int statusCode = client.executeMethod(getMethod);
                final byte[] responseBody = new byte[upper - lower + 1];
                final int bytesRead = IOUtils.read(getMethod.getResponseBodyAsStream(), responseBody);
                getMethod.releaseConnection();
                if (statusCode != 206) {
                    LOG.error("Invalid status code for closed range request");
                    success.set(false);
                } else if (upper - lower + 1 != bytesRead) {
                    LOG.error("Invalid response length for closed range request");
                    success.set(false);
                } else if (!Arrays.equals(expected, Arrays.copyOfRange(responseBody, 0, bytesRead))) {
                    LOG.error("Invalid response body for closed range request");
                    success.set(false);
                }
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        tasks.add(task);
    }

    Collections.shuffle(tasks, generator);
    final ForkJoinPool pool = new ForkJoinPool(4);
    for (ForkJoinTask<?> task : tasks) {
        pool.execute(task);
    }
    for (ForkJoinTask<?> task : tasks) {
        task.join();
    }
    pool.shutdown();
    cm.shutdown();

    Assert.assertTrue(success.get());
}
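This test schedules its HTTP requests through ForkJoinTask.adapt(Runnable) rather than through RecursiveTask/RecursiveAction subclasses. A minimal standalone illustration of that submit-and-join pattern (the class name and output are illustrative only):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;

public class AdaptExample {
    public static void main(String[] args) {
        ForkJoinPool pool = new ForkJoinPool(4);
        List<ForkJoinTask<?>> tasks = new ArrayList<>();
        for (int i = 0; i < 8; i++) {
            final int id = i;
            // adapt() wraps a plain Runnable so it can be scheduled like any other ForkJoinTask.
            ForkJoinTask<?> task = ForkJoinTask.adapt(
                () -> System.out.println("task " + id + " on " + Thread.currentThread().getName()));
            tasks.add(task);
            pool.execute(task); // asynchronous submission
        }
        for (ForkJoinTask<?> task : tasks) {
            task.join(); // wait for completion; an exception thrown by the body is rethrown here
        }
        pool.shutdown();
    }
}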