@TypeParameter("E") @SqlType(StandardTypes.BOOLEAN) public static boolean equals( @OperatorDependency( operator = EQUAL, returnType = StandardTypes.BOOLEAN, argumentTypes = {"E", "E"}) MethodHandle equalsFunction, @TypeParameter("E") Type type, @SqlType("array(E)") Block leftArray, @SqlType("array(E)") Block rightArray) { if (leftArray.getPositionCount() != rightArray.getPositionCount()) { return false; } for (int i = 0; i < leftArray.getPositionCount(); i++) { checkElementNotNull(leftArray.isNull(i), ARRAY_NULL_ELEMENT_MSG); checkElementNotNull(rightArray.isNull(i), ARRAY_NULL_ELEMENT_MSG); Object leftElement = readNativeValue(type, leftArray, i); Object rightElement = readNativeValue(type, rightArray, i); try { if (!(boolean) equalsFunction.invoke(leftElement, rightElement)) { return false; } } catch (Throwable t) { Throwables.propagateIfInstanceOf(t, Error.class); Throwables.propagateIfInstanceOf(t, PrestoException.class); throw new PrestoException(GENERIC_INTERNAL_ERROR, t); } } return true; }
/**
 * Synchronously sends an opaque message to the RpcHandler on the server side, waiting up to
 * the specified timeout for a response.
 */
public ByteBuffer sendRpcSync(ByteBuffer message, long timeoutMs) {
  final SettableFuture<ByteBuffer> result = SettableFuture.create();

  sendRpc(
      message,
      new RpcResponseCallback() {
        @Override
        public void onSuccess(ByteBuffer response) {
          result.set(response);
        }

        @Override
        public void onFailure(Throwable e) {
          result.setException(e);
        }
      });

  try {
    return result.get(timeoutMs, TimeUnit.MILLISECONDS);
  } catch (ExecutionException e) {
    throw Throwables.propagate(e.getCause());
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
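// Hypothetical usage sketch, not part of the original source: issuing a blocking RPC with
// a five-second budget. TransportClient is assumed to be the class exposing
// sendRpcSync(ByteBuffer, long); any transport with that signature fits.
static ByteBuffer ping(TransportClient client) {
  ByteBuffer request =
      ByteBuffer.wrap("ping".getBytes(java.nio.charset.StandardCharsets.UTF_8));
  // Blocks for up to 5000 ms; a timeout or RPC failure surfaces as an unchecked
  // exception via the Throwables.propagate calls above.
  return client.sendRpcSync(request, 5000L);
}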
@TypeParameter("T") @SqlType(StandardTypes.BOOLEAN) @Nullable public static Boolean contains( @TypeParameter("T") Type elementType, @OperatorDependency( operator = EQUAL, returnType = StandardTypes.BOOLEAN, argumentTypes = {"T", "T"}) MethodHandle equals, @SqlType("array(T)") Block arrayBlock, @SqlType("T") Slice value) { boolean foundNull = false; for (int i = 0; i < arrayBlock.getPositionCount(); i++) { if (arrayBlock.isNull(i)) { foundNull = true; continue; } try { if ((boolean) equals.invokeExact(elementType.getSlice(arrayBlock, i), value)) { return true; } } catch (Throwable t) { Throwables.propagateIfInstanceOf(t, Error.class); Throwables.propagateIfInstanceOf(t, PrestoException.class); throw new PrestoException(INTERNAL_ERROR, t); } } if (foundNull) { return null; } return false; }
private S3Object getS3Object(final Path path, final long start) throws IOException {
  try {
    return retry()
        .maxAttempts(maxClientRetry)
        .exponentialBackoff(new Duration(1, TimeUnit.SECONDS), maxBackoffTime, maxRetryTime, 2.0)
        .stopOn(InterruptedException.class, UnrecoverableS3OperationException.class)
        .run(
            "getS3Object",
            () -> {
              try {
                return s3.getObject(
                    new GetObjectRequest(host, keyFromPath(path))
                        .withRange(start, Long.MAX_VALUE));
              } catch (AmazonServiceException e) {
                if (e.getStatusCode() == SC_FORBIDDEN) {
                  throw new UnrecoverableS3OperationException(e);
                }
                throw Throwables.propagate(e);
              }
            });
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  } catch (Exception e) {
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }
}
/** @see #setTaskPreprocessorForTag(Object, TaskPreprocessor) */
@SuppressWarnings("deprecation")
public void setTaskPreprocessorForTag(Object tag, Class<? extends TaskPreprocessor> preprocessor) {
  synchronized (preprocessorByTag) {
    TaskPreprocessor old = getTaskPreprocessorForTag(tag);
    if (old != null) {
      if (preprocessor.isAssignableFrom(old.getClass())) {
        /* already have such an instance */
        return;
      }
      // might support multiple in future...
      throw new IllegalStateException(
          "Not allowed to set multiple TaskPreprocessors on ExecutionManager tag (tag "
              + tag + ", has " + old + ", setting new " + preprocessor + ")");
    }
    try {
      setTaskPreprocessorForTag(tag, preprocessor.newInstance());
    } catch (InstantiationException e) {
      throw Throwables.propagate(e);
    } catch (IllegalAccessException e) {
      throw Throwables.propagate(e);
    }
  }
}
private ObjectMetadata getS3ObjectMetadata(final Path path) throws IOException {
  try {
    return retry()
        .maxAttempts(maxClientRetries)
        .exponentialBackoff(new Duration(1, TimeUnit.SECONDS), maxBackoffTime, maxRetryTime, 2.0)
        .stopOn(InterruptedException.class, UnrecoverableS3OperationException.class)
        .run(
            "getS3ObjectMetadata",
            () -> {
              try {
                return s3.getObjectMetadata(uri.getHost(), keyFromPath(path));
              } catch (AmazonS3Exception e) {
                if (e.getStatusCode() == SC_NOT_FOUND) {
                  return null;
                }
                if (e.getStatusCode() == SC_FORBIDDEN) {
                  throw new UnrecoverableS3OperationException(e);
                }
                throw Throwables.propagate(e);
              }
            });
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  } catch (Exception e) {
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }
}
@Override
public int read(final byte[] buffer, final int offset, final int length) throws IOException {
  try {
    int bytesRead =
        retry()
            .maxAttempts(maxClientRetry)
            .exponentialBackoff(
                new Duration(1, TimeUnit.SECONDS), maxBackoffTime, maxRetryTime, 2.0)
            .stopOn(InterruptedException.class)
            .run(
                "readStream",
                () -> {
                  openStream();
                  try {
                    return in.read(buffer, offset, length);
                  } catch (Exception e) {
                    closeStream();
                    throw e;
                  }
                });
    if (bytesRead != -1) {
      position += bytesRead;
    }
    return bytesRead;
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  } catch (Exception e) {
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }
}
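// Simplified sketch (an assumption, not the project's actual retry() builder): the
// retry()/exponentialBackoff()/stopOn() chain used in the three S3 snippets above boils
// down to a loop of roughly this shape, with a doubling delay and non-retryable
// exceptions rethrown immediately.
static <T> T retryWithBackoff(java.util.concurrent.Callable<T> action, int maxAttempts)
    throws Exception {
  long delayMs = 1_000; // initial backoff of one second, as in the snippets above
  for (int attempt = 1; ; attempt++) {
    try {
      return action.call();
    } catch (InterruptedException e) {
      throw e; // "stopOn" exceptions abort the retry loop immediately
    } catch (Exception e) {
      if (attempt >= maxAttempts) {
        throw e;
      }
      Thread.sleep(delayMs);
      delayMs *= 2; // scale factor 2.0 (a real implementation would also cap the delay)
    }
  }
}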
@TypeParameter("T") @SqlType(StandardTypes.BIGINT) public static long arrayPosition( @TypeParameter("T") Type type, @OperatorDependency( operator = EQUAL, returnType = StandardTypes.BOOLEAN, argumentTypes = {"T", "T"}) MethodHandle equalMethodHandle, @SqlType("array(T)") Block array, @SqlType("T") double element) { int size = array.getPositionCount(); for (int i = 0; i < size; i++) { if (!array.isNull(i)) { double arrayValue = type.getDouble(array, i); try { if ((boolean) equalMethodHandle.invokeExact(arrayValue, element)) { return i + 1; // result is 1-based (instead of 0) } } catch (Throwable t) { Throwables.propagateIfInstanceOf(t, Error.class); Throwables.propagateIfInstanceOf(t, PrestoException.class); throw new PrestoException(INTERNAL_ERROR, t); } } } return 0; }
@Override
public ListenableFuture<HttpResponse> submit(HttpCommand command) {
  try {
    for (;;) {
      Future<Response> responseF =
          client.executeRequest(convertToNingRequest.apply(command.getRequest()));
      final HttpResponse httpResponse = convertToJCloudsResponse.apply(responseF.get());
      int statusCode = httpResponse.getStatusCode();
      if (statusCode >= 300) {
        if (retryHandler.shouldRetryRequest(command, httpResponse)) {
          continue;
        }
        errorHandler.handleError(command, httpResponse);
        return wrapAsFuture(httpResponse);
      }
      return wrapAsFuture(httpResponse);
    }
  } catch (IOException e) {
    throw Throwables.propagate(e);
  } catch (InterruptedException e) {
    throw Throwables.propagate(e);
  } catch (ExecutionException e) {
    throw Throwables.propagate(e);
  }
}
private SecurityGroup getSecurityGroup(
    final String nodeId, final SecurityGroupExtension securityApi, final String locationId) {
  // Expect to have two security groups on the node: one shared between all nodes in the
  // location, that is cached in sharedGroupCache, and one created by jclouds that is unique
  // to the node. Relies on customize having been called before. This should be safe because
  // the arguments needed to call this method are not available until post-instance creation.
  SecurityGroup machineUniqueSecurityGroup;
  Tasks.setBlockingDetails("Loading unique security group for node: " + nodeId);
  try {
    machineUniqueSecurityGroup =
        uniqueGroupCache.get(
            nodeId,
            new Callable<SecurityGroup>() {
              @Override
              public SecurityGroup call() throws Exception {
                SecurityGroup sg =
                    getUniqueSecurityGroupForNodeCachingSharedGroupIfPreviouslyUnknown(
                        nodeId, locationId, securityApi);
                if (sg == null) {
                  throw new IllegalStateException(
                      "Failed to find machine-unique group on node: " + nodeId);
                }
                return sg;
              }
            });
  } catch (UncheckedExecutionException e) {
    throw Throwables.propagate(new Exception(e.getCause()));
  } catch (ExecutionException e) {
    throw Throwables.propagate(new Exception(e.getCause()));
  } finally {
    Tasks.resetBlockingDetails();
  }
  return machineUniqueSecurityGroup;
}
public static boolean greaterThanOrEqual(
    MethodHandle greaterThanFunction, Type type, Block leftArray, Block rightArray) {
  int len = Math.min(leftArray.getPositionCount(), rightArray.getPositionCount());
  int index = 0;
  while (index < len) {
    checkElementNotNull(leftArray.isNull(index), ARRAY_NULL_ELEMENT_MSG);
    checkElementNotNull(rightArray.isNull(index), ARRAY_NULL_ELEMENT_MSG);
    Object leftElement = readNativeValue(type, leftArray, index);
    Object rightElement = readNativeValue(type, rightArray, index);
    try {
      if ((boolean) greaterThanFunction.invoke(leftElement, rightElement)) {
        return true;
      }
      if ((boolean) greaterThanFunction.invoke(rightElement, leftElement)) {
        return false;
      }
    } catch (Throwable t) {
      Throwables.propagateIfInstanceOf(t, Error.class);
      Throwables.propagateIfInstanceOf(t, PrestoException.class);
      throw new PrestoException(INTERNAL_ERROR, t);
    }
    index++;
  }
  return leftArray.getPositionCount() >= rightArray.getPositionCount();
}
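// Illustrative sketch (an assumption, substituting plain lists for Presto Blocks): the
// loop above is a lexicographic comparison. The first differing element decides; if one
// array is a prefix of the other, the longer one is greater, so [1, 2, 3] >= [1, 2].
static boolean greaterThanOrEqual(java.util.List<Long> left, java.util.List<Long> right) {
  int len = Math.min(left.size(), right.size());
  for (int i = 0; i < len; i++) {
    int cmp = Long.compare(left.get(i), right.get(i));
    if (cmp != 0) {
      return cmp > 0; // first differing element decides
    }
  }
  return left.size() >= right.size(); // equal prefix: the longer list is greater
}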
public void setTaskSchedulerForTag(Object tag, Class<? extends TaskScheduler> scheduler) {
  synchronized (schedulerByTag) {
    TaskScheduler old = getTaskSchedulerForTag(tag);
    if (old != null) {
      if (scheduler.isAssignableFrom(old.getClass())) {
        /* already have such an instance */
        return;
      }
      // might support multiple in future...
      throw new IllegalStateException(
          "Not allowed to set multiple TaskSchedulers on ExecutionManager tag (tag "
              + tag + ", has " + old + ", setting new " + scheduler + ")");
    }
    try {
      TaskScheduler schedulerI = scheduler.newInstance();
      // allow the scheduler to have a nice name, for logging etc.
      if (schedulerI instanceof CanSetName) {
        ((CanSetName) schedulerI).setName("" + tag);
      }
      setTaskSchedulerForTag(tag, schedulerI);
    } catch (InstantiationException e) {
      throw Throwables.propagate(e);
    } catch (IllegalAccessException e) {
      throw Throwables.propagate(e);
    }
  }
}
/**
 * Deep-clones an object. The object to be cloned does not need to implement Cloneable or be
 * serializable for this method to work.
 *
 * @param <T> the type of the object to be cloned
 * @param o the object to be cloned
 * @return the cloned object
 */
@SuppressWarnings("FinalStaticMethod")
public static <T> T deepClone(final T o) {
  try {
    return (new Cloner()).deepClone(o);
  } catch (Exception e) {
    log.error("Unable to clone object. {}", Throwables.getStackTraceAsString(e));
    throw new BroadwickException("Unable to clone object. " + Throwables.getStackTraceAsString(e));
  }
}
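// Hypothetical usage sketch, not from the original source: deep-cloning a mutable list
// and verifying the copy is independent. This works even though ArrayList carries no
// custom deep-copy support, which is the point of the Cloner-based helper above.
static void deepCloneExample() {
  java.util.List<String> original =
      new java.util.ArrayList<>(java.util.Arrays.asList("a", "b"));
  java.util.List<String> copy = deepClone(original);
  original.add("c");
  assert copy.size() == 2; // mutating the original does not affect the clone
}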
void complete() {
  try {
    writeFile();
  } catch (FilerException ex) {
    throw Throwables.propagate(ex);
  } catch (IOException ex) {
    throw Throwables.propagate(ex);
  }
}
private void awaitDone() {
  try {
    done.await(10, TimeUnit.SECONDS);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  } catch (BrokenBarrierException | TimeoutException e) {
    throw Throwables.propagate(e);
  }
}
/** Test of the chain method, of class ExceptionChain. */
@Test
public void testChain() {
  System.out.println("chain");
  Throwable t = new RuntimeException("", new SocketTimeoutException("Boo timeout"));
  Throwable newRootCause = new TimeoutException("Booo");
  Throwable result = ExceptionChain.chain(t, newRootCause);
  result.printStackTrace();
  Assert.assertEquals(newRootCause, Throwables.getRootCause(result));
  Assert.assertEquals(3, Throwables.getCausalChain(result).size());
}
private ParquetRecordReader<FakeParquetRecord> createParquetRecordReader(
    Configuration configuration,
    Path path,
    long start,
    long length,
    List<HiveColumnHandle> columns,
    boolean useParquetColumnNames) {
  try {
    ParquetMetadata parquetMetadata = ParquetFileReader.readFooter(configuration, path);
    List<BlockMetaData> blocks = parquetMetadata.getBlocks();
    FileMetaData fileMetaData = parquetMetadata.getFileMetaData();

    PrestoReadSupport readSupport =
        new PrestoReadSupport(useParquetColumnNames, columns, fileMetaData.getSchema());
    ReadContext readContext =
        readSupport.init(
            configuration, fileMetaData.getKeyValueMetaData(), fileMetaData.getSchema());

    // Keep only the row groups whose first data page falls inside this split.
    List<BlockMetaData> splitGroup = new ArrayList<>();
    long splitStart = start;
    long splitLength = length;
    for (BlockMetaData block : blocks) {
      long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
      if (firstDataPage >= splitStart && firstDataPage < splitStart + splitLength) {
        splitGroup.add(block);
      }
    }

    ParquetInputSplit split =
        new ParquetInputSplit(
            path,
            splitStart,
            splitLength,
            null,
            splitGroup,
            readContext.getRequestedSchema().toString(),
            fileMetaData.getSchema().toString(),
            fileMetaData.getKeyValueMetaData(),
            readContext.getReadSupportMetadata());

    TaskAttemptContext taskContext =
        ContextUtil.newTaskAttemptContext(configuration, new TaskAttemptID());
    ParquetRecordReader<FakeParquetRecord> realReader = new PrestoParquetRecordReader(readSupport);
    realReader.initialize(split, taskContext);
    return realReader;
  } catch (IOException e) {
    throw Throwables.propagate(e);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  }
}
// TODO: move this to a utility package
private static <T, X extends Throwable> T getFutureResult(Future<T> future, Class<X> type)
    throws X {
  try {
    return future.get();
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw Throwables.propagate(e);
  } catch (ExecutionException e) {
    Throwables.propagateIfPossible(e.getCause(), type);
    throw Throwables.propagate(e.getCause());
  }
}
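// Hypothetical usage sketch, not from the original source: a caller that wants a checked
// IOException thrown inside the task to surface as-is (via propagateIfPossible), while
// any other cause is rethrown unchecked.
static byte[] fetch(java.util.concurrent.Future<byte[]> pending) throws java.io.IOException {
  return getFutureResult(pending, java.io.IOException.class);
}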
@Override
public void startAndWait() {
  try {
    readInitialFiles();
  } catch (Throwable t) {
    throw Throwables.propagate(t);
  }

  future =
      this.scheduler.scheduleAtFixedRate(
          new Runnable() {
            @Override
            public void run() {
              final long start = System.currentTimeMillis();
              runLock.lock();
              if (isStopped()) {
                LOG.warn("Driver is stopped, not checking uploads");
                runLock.unlock(); // release the lock before the early return
                return;
              }
              int uploads = 0;
              final int uploaders = metadataToUploader.size();
              metrics.startUploads();
              try {
                uploads = checkUploads();
              } catch (Throwable t) {
                LOG.error("Uncaught exception while checking {} upload(s)", uploaders, t);
              } finally {
                runLock.unlock();
                metrics.finishUploads();
                LOG.info(
                    "Found {} items from {} uploader(s) in {}",
                    uploads,
                    uploaders,
                    JavaUtils.duration(start));
              }
            }
          },
          configuration.getCheckUploadsEverySeconds(),
          configuration.getCheckUploadsEverySeconds(),
          TimeUnit.SECONDS);

  try {
    super.watch();
  } catch (Throwable t) {
    throw Throwables.propagate(t);
  }
}
public T newEntity2(Map flags, Entity parent) {
  try {
    Constructor<T> constructor = clazz.getConstructor(Map.class, Entity.class);
    return constructor.newInstance(flags, parent);
  } catch (InstantiationException e) {
    throw Throwables.propagate(e);
  } catch (IllegalAccessException e) {
    throw Throwables.propagate(e);
  } catch (InvocationTargetException e) {
    throw Throwables.propagate(e);
  } catch (NoSuchMethodException e) {
    throw Throwables.propagate(e);
  }
}
@SuppressWarnings("unchecked") @Override public void reconstruct(RebindContext rebindContext, EntityMemento memento) { if (LOG.isTraceEnabled()) LOG.trace("Reconstructing entity: {}", memento.toVerboseString()); // Note that the id should have been set in the constructor; it is immutable entity.setDisplayName(memento.getDisplayName()); for (Effector<?> eff : memento.getEffectors()) ((EntityInternal) entity).getMutableEntityType().addEffector(eff); for (Map.Entry<ConfigKey<?>, Object> entry : memento.getConfig().entrySet()) { try { ConfigKey<?> key = entry.getKey(); Object value = entry.getValue(); Class<?> type = (key.getType() != null) ? key.getType() : rebindContext.loadClass(key.getTypeName()); entity.setConfig((ConfigKey<Object>) key, value); } catch (ClassNotFoundException e) { throw Throwables.propagate(e); } } ((EntityInternal) entity).getConfigMap().addToLocalBag(memento.getConfigUnmatched()); ((EntityInternal) entity).refreshInheritedConfig(); for (Map.Entry<AttributeSensor<?>, Object> entry : memento.getAttributes().entrySet()) { try { AttributeSensor<?> key = entry.getKey(); Object value = entry.getValue(); Class<?> type = (key.getType() != null) ? key.getType() : rebindContext.loadClass(key.getTypeName()); ((EntityInternal) entity) .setAttributeWithoutPublishing((AttributeSensor<Object>) key, value); } catch (ClassNotFoundException e) { throw Throwables.propagate(e); } } setParent(rebindContext, memento); addChildren(rebindContext, memento); addPolicies(rebindContext, memento); addEnrichers(rebindContext, memento); addMembers(rebindContext, memento); addTags(rebindContext, memento); addLocations(rebindContext, memento); doReconstruct(rebindContext, memento); ((AbstractEntity) entity).rebind(); }
@Override
public Operator createOperator(DriverContext driverContext) {
  checkState(!closed, "Factory is already closed");
  OperatorContext operatorContext =
      driverContext.addOperatorContext(
          operatorId, constructor.getDeclaringClass().getSimpleName());
  try {
    return constructor.newInstance(operatorContext, types);
  } catch (InvocationTargetException e) {
    throw Throwables.propagate(e.getCause());
  } catch (ReflectiveOperationException e) {
    throw Throwables.propagate(e);
  }
}
/**
 * Creates a temp block meta only if the allocator finds available space. This method will not
 * trigger any eviction.
 *
 * @param sessionId session id
 * @param blockId block id
 * @param location location to create the block
 * @param initialBlockSize initial block size in bytes
 * @param newBlock true if this temp block is created for a new block
 * @return a temp block created if successful, or null if allocation failed (instead of throwing
 *     {@link WorkerOutOfSpaceException}, because allocation failure could be an expected case)
 * @throws BlockAlreadyExistsException if there is already a block with the same block id
 */
private TempBlockMeta createBlockMetaInternal(
    long sessionId,
    long blockId,
    BlockStoreLocation location,
    long initialBlockSize,
    boolean newBlock)
    throws BlockAlreadyExistsException {
  // NOTE: a temp block is only visible to its own writer, so it is unnecessary to acquire a
  // block lock here since there is no sharing.
  mMetadataWriteLock.lock();
  try {
    if (newBlock) {
      checkTempBlockIdAvailable(blockId);
    }
    StorageDirView dirView =
        mAllocator.allocateBlockWithView(sessionId, initialBlockSize, location, getUpdatedView());
    if (dirView == null) {
      // Allocator fails to find a proper place for this new block.
      return null;
    }
    // TODO(carson): Add tempBlock to corresponding storageDir and remove the use of
    // StorageDirView.createTempBlockMeta.
    TempBlockMeta tempBlock = dirView.createTempBlockMeta(sessionId, blockId, initialBlockSize);
    try {
      // Add the allocated temp block to the metadata manager. This should never fail if the
      // allocator correctly assigns a StorageDir.
      mMetaManager.addTempBlockMeta(tempBlock);
    } catch (WorkerOutOfSpaceException | BlockAlreadyExistsException e) {
      // If we reach here, the allocator is not working properly.
      LOG.error(
          "Unexpected failure: {} bytes allocated at {} by allocator, but addTempBlockMeta failed",
          initialBlockSize,
          location);
      throw Throwables.propagate(e);
    }
    return tempBlock;
  } finally {
    mMetadataWriteLock.unlock();
  }
}
@Override
public void configure() {
  try {
    setName("AppWithStreamSizeSchedule");
    setDescription("Sample application");
    ObjectStores.createObjectStore(getConfigurer(), "input", String.class);
    ObjectStores.createObjectStore(getConfigurer(), "output", String.class);
    addWorkflow(new SampleWorkflow());
    addStream(new Stream("stream"));

    Map<String, String> scheduleProperties = Maps.newHashMap();
    scheduleProperties.put("oneKey", "oneValue");
    scheduleProperties.put("anotherKey", "anotherValue");
    scheduleProperties.put("someKey", "someValue");

    scheduleWorkflow(
        Schedules.createDataSchedule("SampleSchedule1", "", Schedules.Source.STREAM, "stream", 1),
        "SampleWorkflow",
        scheduleProperties);
    scheduleWorkflow(
        Schedules.createDataSchedule("SampleSchedule2", "", Schedules.Source.STREAM, "stream", 2),
        "SampleWorkflow",
        scheduleProperties);
  } catch (UnsupportedTypeException e) {
    throw Throwables.propagate(e);
  }
}
@Override
protected void shutDown() throws Exception {
  LOG.info("Stopping Metrics Processor ...");
  Throwable throwable = null;
  try {
    Services.chainStop(election, resourceClient).get();
  } catch (Throwable th) {
    throwable = th;
    LOG.error("Exception while shutting down.", th);
  }
  try {
    cancelResourceHandler.cancel();
  } catch (Throwable th) {
    throwable = th;
    LOG.error("Exception while shutting down.", th);
  }
  try {
    cancelDiscoverable.cancel();
  } catch (Throwable th) {
    throwable = th;
    LOG.error("Exception while shutting down.", th);
  }
  if (throwable != null) {
    throw Throwables.propagate(throwable);
  }
}
@Override
public void dropPartition(String databaseName, String tableName, List<String> parts) {
  try {
    retry()
        .stopOn(NoSuchObjectException.class, MetaException.class)
        .stopOnIllegalExceptions()
        .run(
            "dropPartition",
            stats
                .getDropPartition()
                .wrap(
                    () -> {
                      try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) {
                        client.dropPartition(databaseName, tableName, parts, true);
                      }
                      return null;
                    }));
  } catch (NoSuchObjectException e) {
    throw new TableNotFoundException(new SchemaTableName(databaseName, tableName));
  } catch (TException e) {
    throw new PrestoException(HIVE_METASTORE_ERROR, e);
  } catch (Exception e) {
    if (e instanceof InterruptedException) {
      Thread.currentThread().interrupt();
    }
    throw Throwables.propagate(e);
  } finally {
    invalidatePartitionCache(databaseName, tableName);
  }
}
private static <K, V> Map<K, V> getAll(LoadingCache<K, V> cache, Iterable<K> keys) {
  try {
    return cache.getAll(keys);
  } catch (ExecutionException | UncheckedExecutionException | ExecutionError e) {
    throw Throwables.propagate(e.getCause());
  }
}
private static <K, V> V get(LoadingCache<K, V> cache, K key) {
  try {
    return cache.get(key);
  } catch (ExecutionException | UncheckedExecutionException | ExecutionError e) {
    throw Throwables.propagate(e.getCause());
  }
}
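// Hypothetical usage sketch, not from the original source: a Guava LoadingCache backed by
// a simple loader. The get() helper above rethrows the loader's original cause rather than
// the ExecutionException/UncheckedExecutionException wrapper Guava would otherwise raise.
static void cacheExample() {
  LoadingCache<String, Integer> lengths =
      CacheBuilder.newBuilder()
          .maximumSize(1_000)
          .build(
              new CacheLoader<String, Integer>() {
                @Override
                public Integer load(String key) {
                  return key.length();
                }
              });
  int n = get(lengths, "hello"); // n == 5; a failing loader would propagate its cause
}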
protected <T> List<T> getAsyncChildren(final String parent, final Transcoder<T> transcoder) {
  try {
    return getAsyncChildrenThrows(parent, transcoder);
  } catch (Throwable t) {
    throw Throwables.propagate(t);
  }
}
@Override @SuppressWarnings({"unchecked", "rawtypes"}) public void render() { response.setContentType(CONTENT_TYPE); Enumeration<String> attrs = request.getAttributeNames(); Map root = new HashMap(); while (attrs.hasMoreElements()) { String attrName = attrs.nextElement(); root.put(attrName, request.getAttribute(attrName)); } Writer writer = null; try { writer = response.getWriter(); Template template = getConfiguration().getTemplate(view); template.process(root, writer); // Merge the data-model and the template } catch (Exception e) { throw new RenderException(e); } finally { try { if (writer != null) { writer.close(); } } catch (IOException e) { Throwables.propagate(e); } } }