private S3Object getS3Object(final Path path, final long start) throws IOException {
  try {
    return retry()
        .maxAttempts(maxClientRetry)
        .exponentialBackoff(new Duration(1, TimeUnit.SECONDS), maxBackoffTime, maxRetryTime, 2.0)
        .stopOn(InterruptedException.class, UnrecoverableS3OperationException.class)
        .run(
            "getS3Object",
            () -> {
              try {
                return s3.getObject(
                    new GetObjectRequest(host, keyFromPath(path))
                        .withRange(start, Long.MAX_VALUE));
              } catch (AmazonServiceException e) {
                // A 403 will never succeed on retry, so stop immediately.
                if (e.getStatusCode() == SC_FORBIDDEN) {
                  throw new UnrecoverableS3OperationException(e);
                }
                throw Throwables.propagate(e);
              }
            });
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  } catch (Exception e) {
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }
}
/** @see #setTaskPreprocessorForTag(Object, TaskPreprocessor) */
@SuppressWarnings("deprecation")
public void setTaskPreprocessorForTag(Object tag, Class<? extends TaskPreprocessor> preprocessor) {
  synchronized (preprocessorByTag) {
    TaskPreprocessor old = getTaskPreprocessorForTag(tag);
    if (old != null) {
      if (preprocessor.isAssignableFrom(old.getClass())) {
        /* already have such an instance */
        return;
      }
      // might support multiple in future...
      throw new IllegalStateException(
          "Not allowed to set multiple TaskPreprocessors on ExecutionManager tag (tag "
              + tag
              + ", has "
              + old
              + ", setting new "
              + preprocessor
              + ")");
    }
    try {
      setTaskPreprocessorForTag(tag, preprocessor.newInstance());
    } catch (InstantiationException | IllegalAccessException e) {
      throw Throwables.propagate(e);
    }
  }
}
private ObjectMetadata getS3ObjectMetadata(final Path path) throws IOException {
  try {
    return retry()
        .maxAttempts(maxClientRetries)
        .exponentialBackoff(new Duration(1, TimeUnit.SECONDS), maxBackoffTime, maxRetryTime, 2.0)
        .stopOn(InterruptedException.class, UnrecoverableS3OperationException.class)
        .run(
            "getS3ObjectMetadata",
            () -> {
              try {
                return s3.getObjectMetadata(uri.getHost(), keyFromPath(path));
              } catch (AmazonS3Exception e) {
                if (e.getStatusCode() == SC_NOT_FOUND) {
                  return null;
                } else if (e.getStatusCode() == SC_FORBIDDEN) {
                  throw new UnrecoverableS3OperationException(e);
                }
                throw Throwables.propagate(e);
              }
            });
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  } catch (Exception e) {
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }
}
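// Sketch: Throwables.propagate and propagateIfInstanceOf, used in the snippets above and
// below, are deprecated as of Guava 20. A minimal hypothetical equivalent of the outer
// catch blocks using the non-deprecated throwIfInstanceOf/throwIfUnchecked replacements;
// the helper name and Callable parameter are illustrative, not part of the original code.
static <T> T runRethrowingIoException(Callable<T> call) throws IOException {
  try {
    return call.call();
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new RuntimeException(e); // propagate(e) also wrapped checked exceptions this way
  } catch (Exception e) {
    Throwables.throwIfInstanceOf(e, IOException.class); // rethrow IOException unchanged
    Throwables.throwIfUnchecked(e); // rethrow RuntimeException/Error unchanged
    throw new RuntimeException(e); // wrap any remaining checked exception
  }
}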
@Override
public int read(final byte[] buffer, final int offset, final int length) throws IOException {
  try {
    int bytesRead =
        retry()
            .maxAttempts(maxClientRetry)
            .exponentialBackoff(new Duration(1, TimeUnit.SECONDS), maxBackoffTime, maxRetryTime, 2.0)
            .stopOn(InterruptedException.class)
            .run(
                "readStream",
                () -> {
                  openStream();
                  try {
                    return in.read(buffer, offset, length);
                  } catch (Exception e) {
                    // Drop the broken stream so the next retry attempt reopens it.
                    closeStream();
                    throw e;
                  }
                });
    if (bytesRead != -1) {
      position += bytesRead;
    }
    return bytesRead;
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  } catch (Exception e) {
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }
}
@Override
public ListenableFuture<HttpResponse> submit(HttpCommand command) {
  try {
    // Retry loop: keep re-issuing the request until it succeeds or the retry handler gives up.
    for (;;) {
      Future<Response> responseF =
          client.executeRequest(convertToNingRequest.apply(command.getRequest()));
      final HttpResponse httpResponse = convertToJCloudsResponse.apply(responseF.get());
      int statusCode = httpResponse.getStatusCode();
      if (statusCode >= 300) {
        if (retryHandler.shouldRetryRequest(command, httpResponse)) {
          continue;
        }
        errorHandler.handleError(command, httpResponse);
        return wrapAsFuture(httpResponse);
      }
      return wrapAsFuture(httpResponse);
    }
  } catch (IOException | InterruptedException | ExecutionException e) {
    throw Throwables.propagate(e);
  }
}
private SecurityGroup getSecurityGroup(
    final String nodeId, final SecurityGroupExtension securityApi, final String locationId) {
  // Expect to have two security groups on the node: one shared between all nodes in the location,
  // that is cached in sharedGroupCache, and one created by Jclouds that is unique to the node.
  // Relies on customize having been called before. This should be safe because the arguments
  // needed to call this method are not available until post-instance creation.
  SecurityGroup machineUniqueSecurityGroup;
  Tasks.setBlockingDetails("Loading unique security group for node: " + nodeId);
  try {
    machineUniqueSecurityGroup =
        uniqueGroupCache.get(
            nodeId,
            new Callable<SecurityGroup>() {
              @Override
              public SecurityGroup call() throws Exception {
                SecurityGroup sg =
                    getUniqueSecurityGroupForNodeCachingSharedGroupIfPreviouslyUnknown(
                        nodeId, locationId, securityApi);
                if (sg == null) {
                  throw new IllegalStateException(
                      "Failed to find machine-unique group on node: " + nodeId);
                }
                return sg;
              }
            });
  } catch (UncheckedExecutionException | ExecutionException e) {
    throw Throwables.propagate(new Exception(e.getCause()));
  } finally {
    Tasks.resetBlockingDetails();
  }
  return machineUniqueSecurityGroup;
}
public void setTaskSchedulerForTag(Object tag, Class<? extends TaskScheduler> scheduler) {
  synchronized (schedulerByTag) {
    TaskScheduler old = getTaskSchedulerForTag(tag);
    if (old != null) {
      if (scheduler.isAssignableFrom(old.getClass())) {
        /* already have such an instance */
        return;
      }
      // might support multiple in future...
      throw new IllegalStateException(
          "Not allowed to set multiple TaskSchedulers on ExecutionManager tag (tag "
              + tag
              + ", has "
              + old
              + ", setting new "
              + scheduler
              + ")");
    }
    try {
      TaskScheduler schedulerI = scheduler.newInstance();
      // allow scheduler to have a nice name, for logging etc
      if (schedulerI instanceof CanSetName) {
        ((CanSetName) schedulerI).setName("" + tag);
      }
      setTaskSchedulerForTag(tag, schedulerI);
    } catch (InstantiationException | IllegalAccessException e) {
      throw Throwables.propagate(e);
    }
  }
}
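// Sketch: Class.newInstance(), used by the two setTask*ForTag methods above, has been
// deprecated since Java 9 because it rethrows checked constructor exceptions undeclared.
// A hypothetical replacement using the declared no-arg constructor; the helper name is
// illustrative, not part of the original code.
static <T> T instantiate(Class<T> type) {
  try {
    return type.getDeclaredConstructor().newInstance();
  } catch (InvocationTargetException e) {
    // The constructor itself threw: unwrap and surface the real cause.
    throw new RuntimeException(e.getCause());
  } catch (ReflectiveOperationException e) {
    throw new RuntimeException(e);
  }
}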
/**
 * Synchronously sends an opaque message to the RpcHandler on the server-side, waiting for up to a
 * specified timeout for a response.
 */
public ByteBuffer sendRpcSync(ByteBuffer message, long timeoutMs) {
  final SettableFuture<ByteBuffer> result = SettableFuture.create();
  sendRpc(
      message,
      new RpcResponseCallback() {
        @Override
        public void onSuccess(ByteBuffer response) {
          result.set(response);
        }

        @Override
        public void onFailure(Throwable e) {
          result.setException(e);
        }
      });
  try {
    return result.get(timeoutMs, TimeUnit.MILLISECONDS);
  } catch (ExecutionException e) {
    // Unwrap so callers see the server-side failure, not the future wrapper.
    throw Throwables.propagate(e.getCause());
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
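// Hypothetical call site for sendRpcSync above: "client" stands for whatever object
// exposes the method; the payload and 5-second timeout are illustrative.
ByteBuffer request = ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8));
ByteBuffer reply = client.sendRpcSync(request, 5000);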
void complete() {
  try {
    writeFile();
  } catch (FilerException ex) {
    throw Throwables.propagate(ex);
  } catch (IOException ex) {
    throw Throwables.propagate(ex);
  }
}
private void awaitDone() {
  try {
    done.await(10, TimeUnit.SECONDS);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  } catch (BrokenBarrierException | TimeoutException e) {
    throw Throwables.propagate(e);
  }
}
private ParquetRecordReader<FakeParquetRecord> createParquetRecordReader(
    Configuration configuration,
    Path path,
    long start,
    long length,
    List<HiveColumnHandle> columns,
    boolean useParquetColumnNames) {
  try {
    ParquetMetadata parquetMetadata = ParquetFileReader.readFooter(configuration, path);
    List<BlockMetaData> blocks = parquetMetadata.getBlocks();
    FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
    PrestoReadSupport readSupport =
        new PrestoReadSupport(useParquetColumnNames, columns, fileMetaData.getSchema());
    ReadContext readContext =
        readSupport.init(
            configuration, fileMetaData.getKeyValueMetaData(), fileMetaData.getSchema());

    // Select only the row groups whose first data page falls inside this split.
    List<BlockMetaData> splitGroup = new ArrayList<>();
    long splitStart = start;
    long splitLength = length;
    for (BlockMetaData block : blocks) {
      long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
      if (firstDataPage >= splitStart && firstDataPage < splitStart + splitLength) {
        splitGroup.add(block);
      }
    }

    ParquetInputSplit split =
        new ParquetInputSplit(
            path,
            splitStart,
            splitLength,
            null,
            splitGroup,
            readContext.getRequestedSchema().toString(),
            fileMetaData.getSchema().toString(),
            fileMetaData.getKeyValueMetaData(),
            readContext.getReadSupportMetadata());

    TaskAttemptContext taskContext =
        ContextUtil.newTaskAttemptContext(configuration, new TaskAttemptID());
    ParquetRecordReader<FakeParquetRecord> realReader = new PrestoParquetRecordReader(readSupport);
    realReader.initialize(split, taskContext);
    return realReader;
  } catch (IOException e) {
    throw Throwables.propagate(e);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  }
}
public void startZookeeper(int numServers) {
  try {
    this.zkCluster = new MiniZooKeeperCluster();
    // Use the port portion of the "host:port" ZK connection string.
    zkCluster.setDefaultClientPort(Integer.parseInt(this.zkUrl.split(":")[1]));
    zkCluster.startup(testDir, numServers);
  } catch (IOException | InterruptedException e) {
    // propagate() always throws; make the rethrow explicit.
    throw propagate(e);
  }
}
// TODO: move this to a utility package
private static <T, X extends Throwable> T getFutureResult(Future<T> future, Class<X> type)
    throws X {
  try {
    return future.get();
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  } catch (ExecutionException e) {
    // Rethrow the task's failure: as X if it matches, otherwise unchecked.
    Throwables.propagateIfPossible(e.getCause(), type);
    throw Throwables.propagate(e.getCause());
  }
}
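// Hypothetical call site for getFutureResult above: an IOException thrown inside the task
// is rethrown as-is because of the type token; the executor and stand-in task are illustrative.
static byte[] download(ExecutorService executor) throws IOException {
  Future<byte[]> future = executor.submit(() -> new byte[] {1, 2, 3}); // stand-in task
  return getFutureResult(future, IOException.class);
}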
@Override
public void startAndWait() {
  try {
    readInitialFiles();
  } catch (Throwable t) {
    throw Throwables.propagate(t);
  }

  future =
      this.scheduler.scheduleAtFixedRate(
          new Runnable() {
            @Override
            public void run() {
              final long start = System.currentTimeMillis();
              runLock.lock();
              if (isStopped()) {
                LOG.warn("Driver is stopped, not checking uploads");
                // Release the lock before bailing out; the finally block below
                // only covers the checkUploads() path.
                runLock.unlock();
                return;
              }
              int uploads = 0;
              final int uploaders = metadataToUploader.size();
              metrics.startUploads();
              try {
                uploads = checkUploads();
              } catch (Throwable t) {
                LOG.error("Uncaught exception while checking {} upload(s)", uploaders, t);
              } finally {
                runLock.unlock();
                metrics.finishUploads();
                LOG.info(
                    "Found {} items from {} uploader(s) in {}",
                    uploads,
                    uploaders,
                    JavaUtils.duration(start));
              }
            }
          },
          configuration.getCheckUploadsEverySeconds(),
          configuration.getCheckUploadsEverySeconds(),
          TimeUnit.SECONDS);

  try {
    super.watch();
  } catch (Throwable t) {
    throw Throwables.propagate(t);
  }
}
public T newEntity2(Map flags, Entity parent) {
  try {
    Constructor<T> constructor = clazz.getConstructor(Map.class, Entity.class);
    return constructor.newInstance(flags, parent);
  } catch (InstantiationException
      | IllegalAccessException
      | InvocationTargetException
      | NoSuchMethodException e) {
    throw Throwables.propagate(e);
  }
}
@SuppressWarnings("unchecked") @Override public void reconstruct(RebindContext rebindContext, EntityMemento memento) { if (LOG.isTraceEnabled()) LOG.trace("Reconstructing entity: {}", memento.toVerboseString()); // Note that the id should have been set in the constructor; it is immutable entity.setDisplayName(memento.getDisplayName()); for (Effector<?> eff : memento.getEffectors()) ((EntityInternal) entity).getMutableEntityType().addEffector(eff); for (Map.Entry<ConfigKey<?>, Object> entry : memento.getConfig().entrySet()) { try { ConfigKey<?> key = entry.getKey(); Object value = entry.getValue(); Class<?> type = (key.getType() != null) ? key.getType() : rebindContext.loadClass(key.getTypeName()); entity.setConfig((ConfigKey<Object>) key, value); } catch (ClassNotFoundException e) { throw Throwables.propagate(e); } } ((EntityInternal) entity).getConfigMap().addToLocalBag(memento.getConfigUnmatched()); ((EntityInternal) entity).refreshInheritedConfig(); for (Map.Entry<AttributeSensor<?>, Object> entry : memento.getAttributes().entrySet()) { try { AttributeSensor<?> key = entry.getKey(); Object value = entry.getValue(); Class<?> type = (key.getType() != null) ? key.getType() : rebindContext.loadClass(key.getTypeName()); ((EntityInternal) entity) .setAttributeWithoutPublishing((AttributeSensor<Object>) key, value); } catch (ClassNotFoundException e) { throw Throwables.propagate(e); } } setParent(rebindContext, memento); addChildren(rebindContext, memento); addPolicies(rebindContext, memento); addEnrichers(rebindContext, memento); addMembers(rebindContext, memento); addTags(rebindContext, memento); addLocations(rebindContext, memento); doReconstruct(rebindContext, memento); ((AbstractEntity) entity).rebind(); }
/*
 * Invoke the constructor for creating the subgraph container with the given
 * parameters.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
private static StorageWithDistributionStrategy invoke(
    String className, AbstractP2PNetwork r, IDistribution key, BindingsFactory bf) {
  try {
    /*
     * get the class, which has to be a subclass of SubgraphContainer
     */
    Class<?> c = Class.forName(className);
    if (!StorageWithDistributionStrategy.class.isAssignableFrom(c)) {
      throw new RuntimeException(
          String.format(
              "The type \"%s\" is not a class extended from %s",
              className, StorageWithDistributionStrategy.class));
    }
    Class<? extends StorageWithDistributionStrategy> c1 =
        (Class<? extends StorageWithDistributionStrategy>) c;
    try {
      Constructor<? extends StorageWithDistributionStrategy> construct =
          c1.getConstructor(AbstractP2PNetwork.class, IDistribution.class, BindingsFactory.class);
      return construct.newInstance(r, key, bf);
    } catch (NoSuchMethodException e) {
      throw new RuntimeException(
          String.format("The class \"%s\" has no valid constructor.", className));
    } catch (SecurityException
        | InstantiationException
        | IllegalAccessException
        | InvocationTargetException e) {
      // propagate() always throws, so the original fall-through "return null" was dead code.
      throw propagate(e);
    } catch (IllegalArgumentException e) {
      throw new RuntimeException(
          String.format(
              "The type \"%s\" of subgraph-container has no valid constructor.", className));
    }
  } catch (ClassNotFoundException e) {
    throw new RuntimeException(
        String.format(
            "The class \"%s\" of subgraph-container is not known in actual class path. Cannot deserialize this subgraph container.",
            className));
  }
}
@Override
public Operator createOperator(DriverContext driverContext) {
  checkState(!closed, "Factory is already closed");
  OperatorContext operatorContext =
      driverContext.addOperatorContext(operatorId, constructor.getDeclaringClass().getSimpleName());
  try {
    return constructor.newInstance(operatorContext, types);
  } catch (InvocationTargetException e) {
    // The operator constructor itself failed; surface the real cause.
    throw Throwables.propagate(e.getCause());
  } catch (ReflectiveOperationException e) {
    throw Throwables.propagate(e);
  }
}
/*
 * Invoke the constructor for creating the subgraph container with the given
 * parameters.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
private SubgraphContainer<?> invoke(
    final String className, final Root r, final Object key, final ISubgraphExecutor<?> executer) {
  try {
    /*
     * get the class, which has to be a subclass of SubgraphContainer
     */
    final Class<?> c = Class.forName(className);
    if (!SubgraphContainer.class.isAssignableFrom(c)) {
      throw new RuntimeException(
          String.format(
              "The type \"%s\" of subgraph-container is not a class extended from lupos.distributed.operator.SubgraphContainer",
              className));
    }
    final Class<? extends SubgraphContainer> c1 = (Class<? extends SubgraphContainer>) c;
    try {
      final Constructor<? extends SubgraphContainer> construct =
          c1.getConstructor(Root.class, Object.class, ISubgraphExecutor.class);
      return construct.newInstance(r, key, executer);
    } catch (final NoSuchMethodException e) {
      throw new RuntimeException(
          String.format(
              "The class \"%s\" of subgraph-container has no valid constructor.", className));
    } catch (final SecurityException
        | InstantiationException
        | IllegalAccessException
        | InvocationTargetException e) {
      // propagate() always throws, so the original fall-through "return null" was dead code.
      throw propagate(e);
    } catch (final IllegalArgumentException e) {
      throw new RuntimeException(
          String.format(
              "The type \"%s\" of subgraph-container has no valid constructor.", className));
    }
  } catch (final ClassNotFoundException e) {
    throw new RuntimeException(
        String.format(
            "The class \"%s\" of subgraph-container is not known in actual class path. Cannot deserialize this subgraph container.",
            className));
  }
}
/**
 * Creates a temp block meta only if allocator finds available space. This method will not trigger
 * any eviction.
 *
 * @param sessionId session Id
 * @param blockId block Id
 * @param location location to create the block
 * @param initialBlockSize initial block size in bytes
 * @param newBlock true if this temp block is created for a new block
 * @return a temp block created if successful, or null if allocation failed (instead of throwing
 *     {@link WorkerOutOfSpaceException} because allocation failure could be an expected case)
 * @throws BlockAlreadyExistsException if there is already a block with the same block id
 */
private TempBlockMeta createBlockMetaInternal(
    long sessionId,
    long blockId,
    BlockStoreLocation location,
    long initialBlockSize,
    boolean newBlock)
    throws BlockAlreadyExistsException {
  // NOTE: a temp block is supposed to be visible for its own writer, unnecessary to acquire
  // block lock here since no sharing
  mMetadataWriteLock.lock();
  try {
    if (newBlock) {
      checkTempBlockIdAvailable(blockId);
    }
    StorageDirView dirView =
        mAllocator.allocateBlockWithView(sessionId, initialBlockSize, location, getUpdatedView());
    if (dirView == null) {
      // Allocator fails to find a proper place for this new block.
      return null;
    }
    // TODO(carson): Add tempBlock to corresponding storageDir and remove the use of
    // StorageDirView.createTempBlockMeta.
    TempBlockMeta tempBlock = dirView.createTempBlockMeta(sessionId, blockId, initialBlockSize);
    try {
      // Add allocated temp block to metadata manager. This should never fail if allocator
      // correctly assigns a StorageDir.
      mMetaManager.addTempBlockMeta(tempBlock);
    } catch (WorkerOutOfSpaceException | BlockAlreadyExistsException e) {
      // If we reach here, allocator is not working properly
      LOG.error(
          "Unexpected failure: {} bytes allocated at {} by allocator, "
              + "but addTempBlockMeta failed",
          initialBlockSize,
          location);
      throw Throwables.propagate(e);
    }
    return tempBlock;
  } finally {
    mMetadataWriteLock.unlock();
  }
}
@Override
public void configure() {
  try {
    setName("AppWithStreamSizeSchedule");
    setDescription("Sample application");
    ObjectStores.createObjectStore(getConfigurer(), "input", String.class);
    ObjectStores.createObjectStore(getConfigurer(), "output", String.class);
    addWorkflow(new SampleWorkflow());
    addStream(new Stream("stream"));

    Map<String, String> scheduleProperties = Maps.newHashMap();
    scheduleProperties.put("oneKey", "oneValue");
    scheduleProperties.put("anotherKey", "anotherValue");
    scheduleProperties.put("someKey", "someValue");

    scheduleWorkflow(
        Schedules.createDataSchedule("SampleSchedule1", "", Schedules.Source.STREAM, "stream", 1),
        "SampleWorkflow",
        scheduleProperties);
    scheduleWorkflow(
        Schedules.createDataSchedule("SampleSchedule2", "", Schedules.Source.STREAM, "stream", 2),
        "SampleWorkflow",
        scheduleProperties);
  } catch (UnsupportedTypeException e) {
    throw Throwables.propagate(e);
  }
}
@Override
protected void shutDown() throws Exception {
  LOG.info("Stopping Metrics Processor ...");
  Throwable throwable = null;
  try {
    Services.chainStop(election, resourceClient).get();
  } catch (Throwable th) {
    throwable = th;
    LOG.error("Exception while shutting down.", th);
  }
  try {
    cancelResourceHandler.cancel();
  } catch (Throwable th) {
    throwable = th;
    LOG.error("Exception while shutting down.", th);
  }
  try {
    cancelDiscoverable.cancel();
  } catch (Throwable th) {
    throwable = th;
    LOG.error("Exception while shutting down.", th);
  }
  // All three stop steps are attempted; only the last failure is rethrown.
  if (throwable != null) {
    throw Throwables.propagate(throwable);
  }
}
@Override
public void dropPartition(String databaseName, String tableName, List<String> parts) {
  try {
    retry()
        .stopOn(NoSuchObjectException.class, MetaException.class)
        .stopOnIllegalExceptions()
        .run(
            "dropPartition",
            stats
                .getDropPartition()
                .wrap(
                    () -> {
                      try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) {
                        client.dropPartition(databaseName, tableName, parts, true);
                      }
                      return null;
                    }));
  } catch (NoSuchObjectException e) {
    throw new TableNotFoundException(new SchemaTableName(databaseName, tableName));
  } catch (TException e) {
    throw new PrestoException(HIVE_METASTORE_ERROR, e);
  } catch (Exception e) {
    if (e instanceof InterruptedException) {
      Thread.currentThread().interrupt();
    }
    throw Throwables.propagate(e);
  } finally {
    invalidatePartitionCache(databaseName, tableName);
  }
}
private static <K, V> Map<K, V> getAll(LoadingCache<K, V> cache, Iterable<K> keys) {
  try {
    return cache.getAll(keys);
  } catch (ExecutionException | UncheckedExecutionException | ExecutionError e) {
    throw Throwables.propagate(e.getCause());
  }
}
private static <K, V> V get(LoadingCache<K, V> cache, K key) {
  try {
    return cache.get(key);
  } catch (ExecutionException | UncheckedExecutionException | ExecutionError e) {
    // Surface the loader's original failure rather than the cache wrapper.
    throw Throwables.propagate(e.getCause());
  }
}
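// Sketch of the two cache helpers above in use, assuming a Guava LoadingCache; the
// loader below is illustrative and simply derives the value from the key.
static void cacheHelperExample() {
  LoadingCache<String, Integer> lengths =
      CacheBuilder.newBuilder()
          .maximumSize(1000)
          .build(CacheLoader.from(key -> key.length()));
  int n = get(lengths, "presto"); // loader failures are unwrapped to their cause
}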
@Override @SuppressWarnings({"unchecked", "rawtypes"}) public void render() { response.setContentType(CONTENT_TYPE); Enumeration<String> attrs = request.getAttributeNames(); Map root = new HashMap(); while (attrs.hasMoreElements()) { String attrName = attrs.nextElement(); root.put(attrName, request.getAttribute(attrName)); } Writer writer = null; try { writer = response.getWriter(); Template template = getConfiguration().getTemplate(view); template.process(root, writer); // Merge the data-model and the template } catch (Exception e) { throw new RenderException(e); } finally { try { if (writer != null) { writer.close(); } } catch (IOException e) { Throwables.propagate(e); } } }
protected <T> List<T> getAsyncChildren(final String parent, final Transcoder<T> transcoder) {
  try {
    return getAsyncChildrenThrows(parent, transcoder);
  } catch (Throwable t) {
    throw Throwables.propagate(t);
  }
}
public void close() {
  final Throwable throwable = throwableHolder.get();
  if (throwable != null) {
    throwableHolder.set(null);
    throw Throwables.propagate(throwable);
  }
}
private static String fixture(String name) {
  try {
    return Resources.toString(Resources.getResource(name), StandardCharsets.UTF_8);
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }
}
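// Hypothetical use of fixture above in a test; the resource path and assertion are illustrative.
String expected = fixture("fixtures/expected-response.json");
assertEquals(expected, actualResponseBody);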
/**
 * This method checks if the consumer thread has finished, and re-throws any exceptions thrown by
 * the consumer thread.
 *
 * @throws SqoopException if the consumer thread threw it.
 * @throws RuntimeException if some other exception was thrown.
 */
private void waitForConsumer() {
  try {
    consumerFuture.get();
  } catch (ExecutionException ex) {
    // In almost all cases, the exception will be SqoopException,
    // because all exceptions are caught and propagated as
    // SqoopExceptions.

    // There are race conditions with exceptions where the free semaphore is
    // not released. Since we are in single-threaded mode at this point,
    // we can check availablePermits and release if needed.
    if (free.availablePermits() == 0) {
      free.release();
    }
    Throwable t = ex.getCause();
    if (t instanceof SqoopException) {
      throw (SqoopException) t;
    }
    // In the rare case, it was not a SqoopException
    throw Throwables.propagate(t);
  } catch (Exception ex) {
    // Same semaphore race as above: release if the permit was not returned.
    if (free.availablePermits() == 0) {
      free.release();
    }
    throw new SqoopException(SparkExecutionError.SPARK_EXEC_0019, ex);
  }
}