private void doWalk( Path path, FileStatusCallback callback, AtomicLong taskCount, SettableFuture<Void> future) { try (SetThreadName ignored = new SetThreadName("HiveHdfsWalker")) { RemoteIterator<LocatedFileStatus> iterator = getLocatedFileStatusRemoteIterator(path); while (iterator.hasNext()) { LocatedFileStatus status = getLocatedFileStatus(iterator); // ignore hidden files. Hive ignores files starting with _ and . as well. String fileName = status.getPath().getName(); if (fileName.startsWith("_") || fileName.startsWith(".")) { continue; } if (isDirectory(status)) { recursiveWalk(status.getPath(), callback, taskCount, future); } else { callback.process(status, status.getBlockLocations()); } if (future.isDone()) { return; } } } catch (FileNotFoundException e) { future.setException(new FileNotFoundException("Partition location does not exist: " + path)); } catch (Throwable t) { future.setException(t); } finally { if (taskCount.decrementAndGet() == 0) { future.set(null); } } }
/**
 * Verifies the producer monitor's view of a producer whose produced future fails:
 * the monitor is looked up for the producer's token, observes the method starting and
 * finishing once the dependency arrives, and is notified of the failure cause when the
 * inner future is completed exceptionally. The strict {@code InOrder} verification pins
 * the exact callback sequence.
 */
@Test
public void failureMonitor() throws Exception {
  ProducerToken token = ProducerToken.create(SimpleProducerModule_SettableFutureStrFactory.class);
  SettableFuture<String> strFuture = SettableFuture.create();
  SettableFuture<SettableFuture<String>> strFutureFuture = SettableFuture.create();
  Producer<SettableFuture<String>> strFutureProducer = producerOfFuture(strFutureFuture);
  Producer<String> producer =
      new SimpleProducerModule_SettableFutureStrFactory(
          executorProvider, componentMonitorProvider, strFutureProducer);

  // Nothing has been supplied yet, so the produced future must still be pending.
  assertThat(producer.get().isDone()).isFalse();

  InOrder order = inOrder(componentMonitor, monitor);
  order.verify(componentMonitor).producerMonitorFor(token);

  // Supplying the outer future lets the producer method run to completion, even though
  // the inner future it returns is still pending.
  strFutureFuture.set(strFuture);
  order.verify(monitor).methodStarting();
  order.verify(monitor).methodFinished();
  assertThat(producer.get().isDone()).isFalse();

  // Failing the inner future must surface as the cause of the produced future's
  // ExecutionException, and the monitor must be told about the same throwable.
  Throwable t = new RuntimeException("monkey");
  strFuture.setException(t);
  try {
    producer.get().get();
    fail();
  } catch (ExecutionException e) {
    assertThat(e.getCause()).isSameAs(t);
    order.verify(monitor).failed(t);
  }
  order.verifyNoMoreInteractions();
}
/**
 * Handles a failed attempt: if the deadline has not yet passed, schedules another retry
 * (after {@code delay} if one is configured, otherwise immediately); once the deadline
 * is reached, fails {@code future} with the last observed error {@code t}.
 */
private void handleFailure(
    final SettableFuture<Response> future,
    final Supplier<ListenableFuture<Response>> code,
    final long deadline,
    final long delay,
    final TimeUnit timeUnit,
    final Throwable t) {
  final boolean deadlinePassed = clock.now().getMillis() >= deadline;
  if (deadlinePassed) {
    future.setException(t);
    return;
  }
  if (delay <= 0) {
    // No backoff configured: retry right away on the current thread.
    // NOTE(review): the deadline is reduced by 1ms per attempt — presumably to
    // guarantee eventual termination; confirm against startRetry's contract.
    startRetry(future, code, deadline - 1, delay, timeUnit);
    return;
  }
  executorService.schedule(
      new Runnable() {
        @Override
        public void run() {
          startRetry(future, code, deadline - 1, delay, timeUnit);
        }
      },
      delay,
      timeUnit);
}
/**
 * With three responses (one immediate success, two pending), the majority collector must
 * stay open after a single failure and fire with {@code false} once a second failure
 * makes a majority of successes impossible.
 */
@Test
public void testFireOnceMajorityFailed1() {
  SettableFuture<Boolean> first = SettableFuture.create();
  SettableFuture<Boolean> second = SettableFuture.create();
  List<ListenableFuture<Boolean>> responses =
      Lists.newArrayList(first, second, Futures.<Boolean>immediateFuture(Boolean.TRUE));
  ListenableFuture<Boolean> collector = majorityResponse(responses, Identity);

  // One failure out of three leaves the outcome undecided.
  first.setException(new Exception());
  assertFalse(collector.isDone());

  // The second failure settles it: the collector fires, and the result is false.
  second.setException(new Exception());
  assertTrue(collector.isDone());
  assertFalse(Futures.getUnchecked(collector));
}
@Override public ListenableFuture<Boolean> consumeAsync() { // TODO make this actually asynch final SettableFuture<Boolean> result = SettableFuture.create(); try { consume(); result.set(true); } catch (final Exception e) { LOGGER.warn("Got exception consuming RDF stream", e); result.setException(e); result.set(false); } return result; }
/**
 * Submits an asynchronous walk of {@code path} to the executor. The shared task counter
 * is incremented before submission so completion accounting never races the submit; the
 * submitted task is responsible for decrementing it. If submission itself fails, the
 * overall walk is aborted by failing {@code future}.
 */
private void recursiveWalk(
    final Path path,
    final FileStatusCallback callback,
    final AtomicLong taskCount,
    final SettableFuture<Void> future) {
  taskCount.incrementAndGet();
  Runnable walkTask =
      new Runnable() {
        @Override
        public void run() {
          doWalk(path, callback, taskCount, future);
        }
      };
  try {
    executor.execute(walkTask);
  } catch (Throwable t) {
    future.setException(t);
  }
}
/**
 * Writes the given command on the channel before activation and wires its completion to
 * the initialization future: on success the future is set to {@code true} and
 * channelActive is propagated; on failure the throwable is forwarded to the future.
 */
static void pingBeforeActivate(
    final AsyncCommand<?, ?, ?> cmd,
    final SettableFuture<Boolean> initializedFuture,
    final ChannelHandlerContext ctx,
    final List<ChannelHandler> handlers)
    throws Exception {
  cmd.handle(
      (response, failure) -> {
        if (failure != null) {
          initializedFuture.setException(failure);
        } else {
          initializedFuture.set(true);
          ctx.fireChannelActive();
        }
        return null;
      });
  ctx.channel().writeAndFlush(cmd);
}
public ListenableFuture<?> process() throws Exception { try { long start = ticker.read(); ListenableFuture<?> blocked = split.processFor(SPLIT_RUN_QUANTA); long endTime = ticker.read(); // update priority level base on total thread usage of task long durationNanos = endTime - start; long threadUsageNanos = taskHandle.addThreadUsageNanos(durationNanos); this.threadUsageNanos.set(threadUsageNanos); priorityLevel.set(calculatePriorityLevel(threadUsageNanos)); // record last run for prioritization within a level lastRun.set(endTime); return blocked; } catch (Throwable e) { finishedFuture.setException(e); throw e; } }
/**
 * Exercises DirectDruidClient connection accounting across a successful query, a read
 * timeout, and still-pending queries, then checks that the connection-count server
 * selector picks the less-loaded server. The mocked HttpClient returns, in order:
 * a future we later complete with a real result, a future we later fail with a read
 * timeout, and pending futures for every subsequent call.
 */
@Test
public void testRun() throws Exception {
  HttpClient httpClient = EasyMock.createMock(HttpClient.class);
  final URL url = new URL("http://foo/druid/v2/");
  SettableFuture<InputStream> futureResult = SettableFuture.create();
  Capture<Request> capturedRequest = EasyMock.newCapture();
  EasyMock.expect(
          httpClient.go(
              EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
      .andReturn(futureResult)
      .times(1);
  SettableFuture futureException = SettableFuture.create();
  EasyMock.expect(
          httpClient.go(
              EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
      .andReturn(futureException)
      .times(1);
  EasyMock.expect(
          httpClient.go(
              EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
      .andReturn(SettableFuture.create())
      .atLeastOnce();
  EasyMock.replay(httpClient);

  final ServerSelector serverSelector =
      new ServerSelector(
          new DataSegment(
              "test",
              new Interval("2013-01-01/2013-01-02"),
              new DateTime("2013-01-01").toString(),
              Maps.<String, Object>newHashMap(),
              Lists.<String>newArrayList(),
              Lists.<String>newArrayList(),
              NoneShardSpec.instance(),
              0,
              0L),
          new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy()));

  DirectDruidClient client1 =
      new DirectDruidClient(
          new ReflectionQueryToolChestWarehouse(),
          QueryRunnerTestHelper.NOOP_QUERYWATCHER,
          new DefaultObjectMapper(),
          httpClient,
          "foo",
          new NoopServiceEmitter());
  DirectDruidClient client2 =
      new DirectDruidClient(
          new ReflectionQueryToolChestWarehouse(),
          QueryRunnerTestHelper.NOOP_QUERYWATCHER,
          new DefaultObjectMapper(),
          httpClient,
          "foo2",
          new NoopServiceEmitter());

  // Both servers carry the same segment; the selector will later choose between them.
  QueryableDruidServer queryableDruidServer1 =
      new QueryableDruidServer(
          new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
          client1);
  serverSelector.addServerAndUpdateSegment(queryableDruidServer1, serverSelector.getSegment());
  QueryableDruidServer queryableDruidServer2 =
      new QueryableDruidServer(
          new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
          client2);
  serverSelector.addServerAndUpdateSegment(queryableDruidServer2, serverSelector.getSegment());

  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
  HashMap<String, List> context = Maps.newHashMap();

  // First query: a POST to the expected URL, opening one connection.
  Sequence s1 = client1.run(query, context);
  Assert.assertTrue(capturedRequest.hasCaptured());
  Assert.assertEquals(url, capturedRequest.getValue().getUrl());
  Assert.assertEquals(HttpMethod.POST, capturedRequest.getValue().getMethod());
  Assert.assertEquals(1, client1.getNumOpenConnections());

  // simulate read timeout: the failed connection must be released.
  Sequence s2 = client1.run(query, context);
  Assert.assertEquals(2, client1.getNumOpenConnections());
  futureException.setException(new ReadTimeoutException());
  Assert.assertEquals(1, client1.getNumOpenConnections());

  // subsequent connections should work
  Sequence s3 = client1.run(query, context);
  Sequence s4 = client1.run(query, context);
  Sequence s5 = client1.run(query, context);
  Assert.assertTrue(client1.getNumOpenConnections() == 4);

  // produce result for first connection; consuming s1 closes that connection.
  futureResult.set(
      new ByteArrayInputStream(
          "[{\"timestamp\":\"2014-01-01T01:02:03Z\", \"result\": 42.0}]".getBytes()));
  List<Result> results = Sequences.toList(s1, Lists.<Result>newArrayList());
  Assert.assertEquals(1, results.size());
  Assert.assertEquals(new DateTime("2014-01-01T01:02:03Z"), results.get(0).getTimestamp());
  Assert.assertEquals(3, client1.getNumOpenConnections());

  // client2 ends up with fewer open connections than client1 (2 vs 3), so the
  // connection-count strategy must pick server 2.
  client2.run(query, context);
  client2.run(query, context);
  Assert.assertTrue(client2.getNumOpenConnections() == 2);
  Assert.assertTrue(serverSelector.pick() == queryableDruidServer2);

  EasyMock.verify(httpClient);
}
/**
 * Sets the result to an exception, failing the delegate future.
 *
 * @param throwable the failure cause
 * @return the delegate's result — for Guava's {@code SettableFuture.setException}, whether
 *     this call completed the future (false if it was already complete)
 */
public boolean setException(Throwable throwable) {
  return settableFuture.setException(throwable);
}
/** Propagates an asynchronous failure to the wrapped future. */
@Override
public void onFailure(Throwable e) {
  future.setException(e);
}