/**
 * Validates a specified template.
 *
 * @param validateTemplateRequest The input for <a>ValidateTemplate</a> action.
 * @return Result of the ValidateTemplate operation returned by the service.
 * @sample AmazonCloudFormation.ValidateTemplate
 */
@Override
public ValidateTemplateResult validateTemplate(ValidateTemplateRequest validateTemplateRequest) {
    ExecutionContext executionContext = createExecutionContext(validateTemplateRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ValidateTemplateRequest> request = null;
    Response<ValidateTemplateResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new ValidateTemplateRequestMarshaller()
                .marshall(super.beforeMarshalling(validateTemplateRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        StaxResponseHandler<ValidateTemplateResult> responseHandler =
            new StaxResponseHandler<ValidateTemplateResult>(new ValidateTemplateResultStaxUnmarshaller());
        response = invoke(request, responseHandler, executionContext);

        return response.getAwsResponse();
    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
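// A hedged usage sketch (not from the source): how a caller might exercise the
// validateTemplate(...) operation above through the AWS SDK for Java v1 CloudFormation
// client. The client construction and template body are illustrative assumptions.
import com.amazonaws.services.cloudformation.AmazonCloudFormationClient;
import com.amazonaws.services.cloudformation.model.TemplateParameter;
import com.amazonaws.services.cloudformation.model.ValidateTemplateRequest;
import com.amazonaws.services.cloudformation.model.ValidateTemplateResult;

public class ValidateTemplateExample {
    public static void main(String[] args) {
        AmazonCloudFormationClient cloudFormation = new AmazonCloudFormationClient();

        // Minimal template used purely for illustration.
        String templateBody =
            "{\"Resources\": {\"MyBucket\": {\"Type\": \"AWS::S3::Bucket\"}}}";

        ValidateTemplateResult result = cloudFormation.validateTemplate(
            new ValidateTemplateRequest().withTemplateBody(templateBody));

        // Print the template description and any declared parameters.
        System.out.println("Description: " + result.getDescription());
        for (TemplateParameter parameter : result.getParameters()) {
            System.out.println("Parameter: " + parameter.getParameterKey());
        }
    }
}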
private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(
        Request<Y> request,
        HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
        ExecutionContext executionContext) {
    request.setEndpoint(endpoint);
    request.setTimeOffset(timeOffset);

    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    AWSCredentials credentials;
    awsRequestMetrics.startEvent(Field.CredentialsRequestTime);
    try {
        credentials = awsCredentialsProvider.getCredentials();
    } finally {
        awsRequestMetrics.endEvent(Field.CredentialsRequestTime);
    }

    AmazonWebServiceRequest originalRequest = request.getOriginalRequest();
    if (originalRequest != null && originalRequest.getRequestCredentials() != null) {
        credentials = originalRequest.getRequestCredentials();
    }

    executionContext.setCredentials(credentials);

    DefaultErrorResponseHandler errorResponseHandler =
        new DefaultErrorResponseHandler(exceptionUnmarshallers);
    return client.execute(request, responseHandler, errorResponseHandler, executionContext);
}
/**
 * Sends a signal to the specified resource with a success or failure status. You can use the
 * SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation
 * doesn't proceed with a stack creation or update until resources receive the required number of
 * signals or the timeout period is exceeded. The SignalResource API is useful in cases where you
 * want to send signals from anywhere other than an Amazon EC2 instance.
 *
 * @param signalResourceRequest The input for the <a>SignalResource</a> action.
 * @sample AmazonCloudFormation.SignalResource
 */
@Override
public void signalResource(SignalResourceRequest signalResourceRequest) {
    ExecutionContext executionContext = createExecutionContext(signalResourceRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<SignalResourceRequest> request = null;
    Response<Void> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new SignalResourceRequestMarshaller()
                .marshall(super.beforeMarshalling(signalResourceRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null);
        invoke(request, responseHandler, executionContext);
    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
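// A hedged usage sketch (not from the source): sending a success signal to a resource that
// carries a creation policy, via the signalResource(...) operation above. The stack name,
// logical resource id, and unique id are illustrative assumptions.
import com.amazonaws.services.cloudformation.AmazonCloudFormationClient;
import com.amazonaws.services.cloudformation.model.ResourceSignalStatus;
import com.amazonaws.services.cloudformation.model.SignalResourceRequest;

public class SignalResourceExample {
    public static void main(String[] args) {
        AmazonCloudFormationClient cloudFormation = new AmazonCloudFormationClient();

        // Report one SUCCESS signal against the resource's creation policy.
        cloudFormation.signalResource(new SignalResourceRequest()
            .withStackName("example-stack")            // hypothetical stack name
            .withLogicalResourceId("WebServerGroup")   // hypothetical logical id
            .withUniqueId("instance-signal-1")         // any id that is unique per signal
            .withStatus(ResourceSignalStatus.SUCCESS));
    }
}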
/**
 * You use this operation to change the parameters specified in the original manifest file by
 * supplying a new manifest file. The manifest file attached to this request replaces the original
 * manifest file. You can only use the operation after a CreateJob request but before the data
 * transfer starts, and you can only use it on jobs you own.
 *
 * @param updateJobRequest Container for the necessary parameters to execute the UpdateJob service
 *     method on AmazonImportExport.
 * @return The response from the UpdateJob service method, as returned by AmazonImportExport.
 * @throws MalformedManifestException
 * @throws BucketPermissionException
 * @throws InvalidAddressException
 * @throws InvalidParameterException
 * @throws UnableToUpdateJobIdException
 * @throws MultipleRegionsException
 * @throws InvalidVersionException
 * @throws MissingParameterException
 * @throws InvalidFileSystemException
 * @throws CanceledJobIdException
 * @throws MissingCustomsException
 * @throws NoSuchBucketException
 * @throws ExpiredJobIdException
 * @throws InvalidAccessKeyIdException
 * @throws InvalidCustomsException
 * @throws InvalidManifestFieldException
 * @throws MissingManifestFieldException
 * @throws InvalidJobIdException
 * @throws AmazonClientException If any internal errors are encountered inside the client while
 *     attempting to make the request or handle the response. For example if a network connection
 *     is not available.
 * @throws AmazonServiceException If an error response is returned by AmazonImportExport
 *     indicating either a problem with the data in the request, or a server side issue.
 */
public UpdateJobResult updateJob(UpdateJobRequest updateJobRequest) {
    ExecutionContext executionContext = createExecutionContext(updateJobRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<UpdateJobRequest> request = null;
    Response<UpdateJobResult> response = null;

    try {
        awsRequestMetrics.startEvent(Field.RequestMarshallTime);
        try {
            request = new UpdateJobRequestMarshaller().marshall(super.beforeMarshalling(updateJobRequest));
            // Binds the request metrics to the current request.
            request.setAWSRequestMetrics(awsRequestMetrics);
        } finally {
            awsRequestMetrics.endEvent(Field.RequestMarshallTime);
        }

        response = invoke(request, new UpdateJobResultStaxUnmarshaller(), executionContext);
        return response.getAwsResponse();
    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
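// A hedged usage sketch (not from the source): replacing a job's manifest through the
// updateJob(...) operation above. The job id and manifest content are illustrative
// assumptions; real manifests carry bucket, device, and return-address details.
import com.amazonaws.services.importexport.AmazonImportExportClient;
import com.amazonaws.services.importexport.model.JobType;
import com.amazonaws.services.importexport.model.UpdateJobRequest;
import com.amazonaws.services.importexport.model.UpdateJobResult;

public class UpdateJobExample {
    public static void main(String[] args) {
        AmazonImportExportClient importExport = new AmazonImportExportClient();

        // Minimal manifest used purely for illustration.
        String manifest = "manifestVersion: 2.0\nbucket: example-bucket\ndeviceId: ABCDE";

        UpdateJobResult result = importExport.updateJob(new UpdateJobRequest()
            .withJobId("EX4MP")           // hypothetical job id from a prior CreateJob call
            .withJobType(JobType.Import)
            .withManifest(manifest)
            .withValidateOnly(false));    // set true to validate the manifest without applying it

        System.out.println("UpdateJob result: " + result);
    }
}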
/**
 * Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted
 * stacks do not show up in the DescribeStacks API if the deletion has been completed
 * successfully.
 *
 * @param deleteStackRequest Container for the necessary parameters to execute the DeleteStack
 *     service method on AmazonCloudFormation.
 * @throws AmazonClientException If any internal errors are encountered inside the client while
 *     attempting to make the request or handle the response. For example if a network connection
 *     is not available.
 * @throws AmazonServiceException If an error response is returned by AmazonCloudFormation
 *     indicating either a problem with the data in the request, or a server side issue.
 */
public void deleteStack(DeleteStackRequest deleteStackRequest) {
    ExecutionContext executionContext = createExecutionContext(deleteStackRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    Request<DeleteStackRequest> request = null;
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    try {
        request = new DeleteStackRequestMarshaller().marshall(deleteStackRequest);
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
        invoke(request, null, executionContext);
    } finally {
        endClientExecution(awsRequestMetrics, request, null);
    }
}
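// A hedged usage sketch (not from the source): requesting stack deletion via the
// deleteStack(...) operation above. Deletion is asynchronous; the call returns once the
// request is accepted, and the stack stops appearing in DescribeStacks only after deletion
// completes. The stack name is an illustrative assumption.
import com.amazonaws.services.cloudformation.AmazonCloudFormationClient;
import com.amazonaws.services.cloudformation.model.DeleteStackRequest;

public class DeleteStackExample {
    public static void main(String[] args) {
        AmazonCloudFormationClient cloudFormation = new AmazonCloudFormationClient();

        // Kick off deletion; progress can be checked later with DescribeStacks.
        cloudFormation.deleteStack(new DeleteStackRequest().withStackName("example-stack"));
    }
}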
/**
 * Creates a stack as specified in the template. After the call completes successfully, the stack
 * creation starts. You can check the status of the stack via the DescribeStacks API.
 *
 * @param createStackRequest Container for the necessary parameters to execute the CreateStack
 *     service method on AmazonCloudFormation.
 * @return The response from the CreateStack service method, as returned by AmazonCloudFormation.
 * @throws AlreadyExistsException
 * @throws LimitExceededException
 * @throws InsufficientCapabilitiesException
 * @throws AmazonClientException If any internal errors are encountered inside the client while
 *     attempting to make the request or handle the response. For example if a network connection
 *     is not available.
 * @throws AmazonServiceException If an error response is returned by AmazonCloudFormation
 *     indicating either a problem with the data in the request, or a server side issue.
 */
public CreateStackResult createStack(CreateStackRequest createStackRequest) {
    ExecutionContext executionContext = createExecutionContext(createStackRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    Request<CreateStackRequest> request = null;
    Response<CreateStackResult> response = null;
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    try {
        request = new CreateStackRequestMarshaller().marshall(createStackRequest);
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
        response = invoke(request, new CreateStackResultStaxUnmarshaller(), executionContext);
        return response.getAwsResponse();
    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
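// A hedged usage sketch (not from the source): creating a stack with the createStack(...)
// operation above and then checking its status through DescribeStacks, as the Javadoc
// suggests. The stack name, template, and parameter values are illustrative assumptions.
import com.amazonaws.services.cloudformation.AmazonCloudFormationClient;
import com.amazonaws.services.cloudformation.model.CreateStackRequest;
import com.amazonaws.services.cloudformation.model.CreateStackResult;
import com.amazonaws.services.cloudformation.model.DescribeStacksRequest;
import com.amazonaws.services.cloudformation.model.Parameter;
import com.amazonaws.services.cloudformation.model.Stack;

public class CreateStackExample {
    public static void main(String[] args) {
        AmazonCloudFormationClient cloudFormation = new AmazonCloudFormationClient();

        // Tiny template with one parameter, used purely for illustration.
        String templateBody =
            "{\"Parameters\": {\"BucketName\": {\"Type\": \"String\"}},"
                + " \"Resources\": {\"MyBucket\": {\"Type\": \"AWS::S3::Bucket\","
                + " \"Properties\": {\"BucketName\": {\"Ref\": \"BucketName\"}}}}}";

        CreateStackResult created = cloudFormation.createStack(new CreateStackRequest()
            .withStackName("example-stack")
            .withTemplateBody(templateBody)
            .withParameters(new Parameter()
                .withParameterKey("BucketName")
                .withParameterValue("example-bucket-1234")));

        System.out.println("Stack id: " + created.getStackId());

        // Creation continues asynchronously; check progress via DescribeStacks.
        for (Stack stack : cloudFormation
                .describeStacks(new DescribeStacksRequest().withStackName("example-stack"))
                .getStacks()) {
            System.out.println(stack.getStackName() + " -> " + stack.getStackStatus());
        }
    }
}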
public Object evaluate(ExecutionContext context) throws RegionNotFoundException {
    Region rgn;
    Cache cache = context.getCache();
    // do PR bucketRegion substitution here for expressions that evaluate to a Region.
    PartitionedRegion pr = context.getPartitionedRegion();

    if (pr != null && pr.getFullPath().equals(this.regionPath)) {
        rgn = context.getBucketRegion();
    } else if (pr != null) {
        // Asif: This is a tricky solution to allow equijoin queries on a PartitionedRegion to run
        // locally. We may have an equijoin situation, possibly across PRs, so use the context's
        // bucket region to get the bucket ID and then retrieve this region's bucket region.
        BucketRegion br = context.getBucketRegion();
        int bucketID = br.getId();
        // Is the current region a partitioned region?
        rgn = cache.getRegion(this.regionPath);
        if (rgn.getAttributes().getDataPolicy().withPartitioning()) {
            // convert it into a bucket region.
            PartitionedRegion prLocal = (PartitionedRegion) rgn;
            rgn = prLocal.getDataStore().getLocalBucketById(bucketID);
        }
    } else {
        rgn = cache.getRegion(this.regionPath);
    }

    if (rgn == null) {
        // if we couldn't find the region because the cache is closed, throw
        // a CacheClosedException
        if (cache.isClosed()) {
            throw new CacheClosedException();
        }
        throw new RegionNotFoundException(
            LocalizedStrings.CompiledRegion_REGION_NOT_FOUND_0.toLocalizedString(this.regionPath));
    }

    if (context.isCqQueryContext()) {
        return new QRegion(rgn, true, context);
    } else {
        return new QRegion(rgn, false, context);
    }
}
/**
 * Returns a set of temporary security credentials for users who have been authenticated in a
 * mobile or web application with a web identity provider, such as Amazon Cognito, Login with
 * Amazon, Facebook, Google, or any OpenID Connect-compatible identity provider.
 *
 * <p><b>NOTE:</b> For mobile applications, we recommend that you use Amazon Cognito. You can use
 * Amazon Cognito with the AWS SDK for iOS and the AWS SDK for Android to uniquely identify a user
 * and supply the user with a consistent identity throughout the lifetime of an application. To
 * learn more about Amazon Cognito, see Amazon Cognito Overview in the AWS SDK for Android
 * Developer Guide and Amazon Cognito Overview in the AWS SDK for iOS Developer Guide.
 *
 * <p>Calling <code>AssumeRoleWithWebIdentity</code> does not require the use of AWS security
 * credentials. Therefore, you can distribute an application (for example, on mobile devices) that
 * requests temporary security credentials without including long-term AWS credentials in the
 * application, and without deploying server-based proxy services that use long-term AWS
 * credentials. Instead, the identity of the caller is validated by using a token from the web
 * identity provider.
 *
 * <p>The temporary security credentials returned by this API consist of an access key ID, a
 * secret access key, and a security token. Applications can use these temporary security
 * credentials to sign calls to AWS service APIs. The credentials are valid for the duration that
 * you specified when calling <code>AssumeRoleWithWebIdentity</code>, which can be from 900
 * seconds (15 minutes) to 3600 seconds (1 hour). By default, the temporary security credentials
 * are valid for 1 hour.
 *
 * <p>Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a
 * policy, the temporary security credentials that are returned by the operation have the
 * permissions that are defined in the access policy of the role that is being assumed. If you
 * pass a policy to this operation, the temporary security credentials that are returned by the
 * operation have the permissions that are allowed by both the access policy of the role that is
 * being assumed, <i>and</i> the policy that you pass. This gives you a way to further restrict
 * the permissions for the resulting temporary security credentials. You cannot use the passed
 * policy to grant permissions that are in excess of those allowed by the access policy of the
 * role that is being assumed. For more information, see <a
 * href="http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html">
 * Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity</a> in the
 * <i>Using IAM</i> guide.
 *
 * <p>Before your application can call <code>AssumeRoleWithWebIdentity</code>, you must have an
 * identity token from a supported identity provider and create a role that the application can
 * assume. The role that your application assumes must trust the identity provider that is
 * associated with the identity token. In other words, the identity provider must be specified in
 * the role's trust policy.
 *
 * <p>For more information about how to use web identity federation and the
 * <code>AssumeRoleWithWebIdentity</code> API, see the following resources:
 *
 * <ul>
 *   <li><a href="http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual">
 *       Using Web Identity Federation APIs for Mobile Apps</a> and <a
 *       href="http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity">
 *       Federation Through a Web-based Identity Provider</a>.
 *   <li><a href="https://web-identity-federation-playground.s3.amazonaws.com/index.html">Web
 *       Identity Federation Playground</a>. This interactive website lets you walk through the
 *       process of authenticating via Login with Amazon, Facebook, or Google, getting temporary
 *       security credentials, and then using those credentials to make a request to AWS.
 *   <li><a href="http://aws.amazon.com/sdkforios/">AWS SDK for iOS</a> and <a
 *       href="http://aws.amazon.com/sdkforandroid/">AWS SDK for Android</a>. These toolkits
 *       contain sample apps that show how to invoke the identity providers, and then how to use
 *       the information from these providers to get and use temporary security credentials.
 *   <li><a href="http://aws.amazon.com/articles/4617974389850313">Web Identity Federation with
 *       Mobile Applications</a>. This article discusses web identity federation and shows an
 *       example of how to use web identity federation to get access to content in Amazon S3.
 * </ul>
 *
 * @param assumeRoleWithWebIdentityRequest Container for the necessary parameters to execute the
 *     AssumeRoleWithWebIdentity service method on AWSSecurityTokenService.
 * @return The response from the AssumeRoleWithWebIdentity service method, as returned by
 *     AWSSecurityTokenService.
 * @throws PackedPolicyTooLargeException
 * @throws IDPRejectedClaimException
 * @throws MalformedPolicyDocumentException
 * @throws InvalidIdentityTokenException
 * @throws ExpiredTokenException
 * @throws IDPCommunicationErrorException
 * @throws AmazonClientException If any internal errors are encountered inside the client while
 *     attempting to make the request or handle the response. For example if a network connection
 *     is not available.
 * @throws AmazonServiceException If an error response is returned by AWSSecurityTokenService
 *     indicating either a problem with the data in the request, or a server side issue.
 */
public AssumeRoleWithWebIdentityResult assumeRoleWithWebIdentity(
        AssumeRoleWithWebIdentityRequest assumeRoleWithWebIdentityRequest) {
    ExecutionContext executionContext = createExecutionContext(assumeRoleWithWebIdentityRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    Request<AssumeRoleWithWebIdentityRequest> request = null;
    Response<AssumeRoleWithWebIdentityResult> response = null;
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    try {
        request = new AssumeRoleWithWebIdentityRequestMarshaller()
            .marshall(assumeRoleWithWebIdentityRequest);
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
        response = invoke(request, new AssumeRoleWithWebIdentityResultStaxUnmarshaller(), executionContext);
        return response.getAwsResponse();
    } finally {
        endClientExecution(awsRequestMetrics, request, response);
    }
}
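// A hedged usage sketch (not from the source): exchanging a web identity token for temporary
// credentials through the assumeRoleWithWebIdentity(...) operation above. The role ARN,
// session name, and token are illustrative assumptions; as the Javadoc notes, the call itself
// needs no long-term AWS credentials.
import com.amazonaws.auth.AnonymousAWSCredentials;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
import com.amazonaws.services.securitytoken.model.AssumeRoleWithWebIdentityRequest;
import com.amazonaws.services.securitytoken.model.AssumeRoleWithWebIdentityResult;
import com.amazonaws.services.securitytoken.model.Credentials;

public class AssumeRoleWithWebIdentityExample {
    public static void main(String[] args) {
        // Anonymous credentials are sufficient because the identity token authenticates the call.
        AWSSecurityTokenServiceClient sts =
            new AWSSecurityTokenServiceClient(new AnonymousAWSCredentials());

        AssumeRoleWithWebIdentityResult result = sts.assumeRoleWithWebIdentity(
            new AssumeRoleWithWebIdentityRequest()
                .withRoleArn("arn:aws:iam::123456789012:role/WebIdentityRole") // hypothetical role
                .withRoleSessionName("app-session-1")
                .withWebIdentityToken("eyJraWQiOi...")  // token issued by the identity provider
                .withDurationSeconds(900));             // minimum allowed duration (15 minutes)

        Credentials credentials = result.getCredentials();
        System.out.println("AccessKeyId: " + credentials.getAccessKeyId()
            + ", expires: " + credentials.getExpiration());
    }
}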
private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(
        Request<Y> request,
        Unmarshaller<X, StaxUnmarshallerContext> unmarshaller,
        ExecutionContext executionContext) {
    request.setEndpoint(endpoint);
    request.setTimeOffset(timeOffset);

    AmazonWebServiceRequest originalRequest = request.getOriginalRequest();

    AWSCredentials credentials = awsCredentialsProvider.getCredentials();
    if (originalRequest.getRequestCredentials() != null) {
        credentials = originalRequest.getRequestCredentials();
    }

    executionContext.setCredentials(credentials);

    StaxResponseHandler<X> responseHandler = new StaxResponseHandler<X>(unmarshaller);
    DefaultErrorResponseHandler errorResponseHandler =
        new DefaultErrorResponseHandler(exceptionUnmarshallers);
    return client.execute(request, responseHandler, errorResponseHandler, executionContext);
}
public static Object element(Object arg, ExecutionContext context)
        throws FunctionDomainException, TypeMismatchException {
    if (arg == null || arg == QueryService.UNDEFINED) return QueryService.UNDEFINED;

    if (arg instanceof Collection) {
        Collection c = (Collection) arg;
        // for remote distinct queries, the result of sub query could contain a
        // mix of String and PdxString which could be duplicates, so convert all
        // PdxStrings to String
        if (context.isDistinct() && ((DefaultQuery) context.getQuery()).isRemoteQuery()) {
            Set tempResults = new HashSet();
            for (Object o : c) {
                if (o instanceof PdxString) {
                    o = ((PdxString) o).toString();
                }
                tempResults.add(o);
            }
            c.clear();
            c.addAll(tempResults);
            tempResults = null;
        }
        checkSingleton(c.size());
        return c.iterator().next();
    }

    // not a Collection, must be an array
    Class clazz = arg.getClass();
    if (!clazz.isArray())
        throw new TypeMismatchException(
            LocalizedStrings.Functions_THE_ELEMENT_FUNCTION_CANNOT_BE_APPLIED_TO_AN_OBJECT_OF_TYPE_0
                .toLocalizedString(clazz.getName()));

    // handle arrays
    if (arg instanceof Object[]) {
        Object[] a = (Object[]) arg;
        if (((DefaultQuery) context.getQuery()).isRemoteQuery() && context.isDistinct()) {
            for (int i = 0; i < a.length; i++) {
                if (a[i] instanceof PdxString) {
                    a[i] = ((PdxString) a[i]).toString();
                }
            }
        }
        checkSingleton(a.length);
        return a[0];
    }
    if (arg instanceof int[]) {
        int[] a = (int[]) arg;
        checkSingleton(a.length);
        return Integer.valueOf(a[0]);
    }
    if (arg instanceof long[]) {
        long[] a = (long[]) arg;
        checkSingleton(a.length);
        return Long.valueOf(a[0]);
    }
    if (arg instanceof boolean[]) {
        boolean[] a = (boolean[]) arg;
        checkSingleton(a.length);
        return Boolean.valueOf(a[0]);
    }
    if (arg instanceof byte[]) {
        byte[] a = (byte[]) arg;
        checkSingleton(a.length);
        return Byte.valueOf(a[0]);
    }
    if (arg instanceof char[]) {
        char[] a = (char[]) arg;
        checkSingleton(a.length);
        return new Character(a[0]);
    }
    if (arg instanceof double[]) {
        double[] a = (double[]) arg;
        checkSingleton(a.length);
        return Double.valueOf(a[0]);
    }
    if (arg instanceof float[]) {
        float[] a = (float[]) arg;
        checkSingleton(a.length);
        return new Float(a[0]);
    }
    if (arg instanceof short[]) {
        short[] a = (short[]) arg;
        checkSingleton(a.length);
        return new Short(a[0]);
    }

    // did I miss something?
    throw new TypeMismatchException(
        LocalizedStrings.Functions_THE_ELEMENT_FUNCTION_CANNOT_BE_APPLIED_TO_AN_OBJECT_OF_TYPE_0
            .toLocalizedString(clazz.getName()));
}
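// A hedged usage sketch (not from the source): the element(...) helper above backs OQL's
// ELEMENT() function, which unwraps a single-row result and fails (via checkSingleton) when
// the result is not a singleton. The package names assume Apache Geode (org.apache.geode);
// the region name, data, and query are illustrative.
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryService;

public class ElementFunctionExample {
    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().create();
        Region<Integer, String> portfolios =
            cache.<Integer, String>createRegionFactory(RegionShortcut.REPLICATE)
                .create("portfolios");
        portfolios.put(1, "active");

        QueryService queryService = cache.getQueryService();

        // ELEMENT() collapses a one-element result set to a scalar; if the inner query
        // returned more than one row, element(...) would throw FunctionDomainException.
        Query query = queryService.newQuery(
            "ELEMENT(SELECT DISTINCT p FROM /portfolios p WHERE p = 'active')");
        Object single = query.execute();
        System.out.println("Single element: " + single);

        cache.close();
    }
}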
@Override
public ExecutionResult execute(ExecutionContext executionContext)
        throws ProcessExecutionException, InterruptedException {

    try {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();

        log.info("Starting Kmer Counting on all Reads");

        // Create shortcut to args for convenience
        Args args = this.getArgs();

        // Force run parallel to false if not using a scheduler
        if (!executionContext.usingScheduler() && args.isRunParallel()) {
            log.warn("Forcing linear execution due to lack of job scheduler");
            args.setRunParallel(false);
        }

        // Create the output directory
        args.getOutputDir().mkdirs();

        JobOutputMap jfCountOutputs = new JobOutputMap();
        List<ExecutionResult> jobResults = new ArrayList<>();
        List<ExecutionResult> allJobResults = new ArrayList<>();

        // Create the output directory for the RAW datasets
        File rawOutputDir = new File(args.getOutputDir(), "raw");
        if (!rawOutputDir.exists()) {
            rawOutputDir.mkdirs();
        }

        // Start jellyfish on all RAW datasets
        for (Library lib : args.getAllLibraries()) {
            // Execute jellyfish and add id to list of job ids
            JobOutput jfOut = this.executeJellyfishCount(args, "raw", args.getOutputDir(), lib);
            jobResults.add(jfOut.getResult());
            allJobResults.add(jfOut.getResult());
            jfCountOutputs.updateTracker("raw", jfOut.getOutputFile());
        }

        // Also start jellyfish on all the pre-processed libraries from MECQ
        if (args.getAllMecqs() != null) {
            for (Mecq.EcqArgs ecqArgs : args.getAllMecqs()) {
                // Create the output directory for this ECQ group
                File ecqOutputDir = new File(args.getOutputDir(), ecqArgs.getName());
                if (!ecqOutputDir.exists()) {
                    ecqOutputDir.mkdirs();
                }

                for (Library lib : ecqArgs.getOutputLibraries()) {
                    // Add jellyfish id to list of job ids
                    JobOutput jfOut =
                        this.executeJellyfishCount(args, ecqArgs.getName(), args.getOutputDir(), lib);
                    jobResults.add(jfOut.getResult());
                    allJobResults.add(jfOut.getResult());
                    jfCountOutputs.updateTracker(ecqArgs.getName(), jfOut.getOutputFile());
                }
            }
        }

        // If we're using a scheduler and we have been asked to run each job in parallel,
        // then we should wait for all those to complete before continuing.
        if (executionContext.usingScheduler() && args.isRunParallel()) {
            log.info("Kmer counting all ECQ groups in parallel, waiting for completion");
            this.conanExecutorService.executeScheduledWait(
                jobResults,
                args.getJobPrefix() + "-count-*",
                ExitStatus.Type.COMPLETED_ANY,
                args.getJobPrefix() + "-kmer-count-wait",
                args.getOutputDir());
        }

        // Waiting point... clear job ids.
        jobResults.clear();

        JobOutputMap mergedOutputs = new JobOutputMap();

        // Now execute merge jobs if required
        for (Map.Entry<String, Set<File>> entry : jfCountOutputs.entrySet()) {
            String ecqName = entry.getKey();
            Set<File> fileSet = entry.getValue();

            // Only merge if there's more than one library
            if (fileSet.size() > 1) {
                JobOutput jfOut = this.executeJellyfishMerger(
                    args, ecqName, fileSet, new File(args.getOutputDir(), ecqName));
                jobResults.add(jfOut.getResult());
                allJobResults.add(jfOut.getResult());
                mergedOutputs.updateTracker(ecqName, jfOut.getOutputFile());
            }
        }

        // If we're using a scheduler and we have been asked to run each job in parallel,
        // then we should wait for all those to complete before continuing.
        if (executionContext.usingScheduler() && args.isRunParallel()) {
            log.info("Creating merged kmer counts for all ECQ groups in parallel, waiting for completion");
            this.conanExecutorService.executeScheduledWait(
                jobResults,
                args.getJobPrefix() + "-merge-*",
                ExitStatus.Type.COMPLETED_ANY,
                args.getJobPrefix() + "-kmer-merge-wait",
                args.getOutputDir());
        }

        // Waiting point... clear job ids.
        jobResults.clear();

        // Combine all jellyfish out maps
        jfCountOutputs.combine(mergedOutputs);

        String katGcpJobPrefix = args.getJobPrefix() + "-kat-gcp";

        // Run KAT GCP on everything
        List<ExecutionResult> katGcpResults = this.executeKatGcp(
            jfCountOutputs,
            katGcpJobPrefix,
            args.getThreadsPerProcess(),
            args.getMemoryPerProcess(),
            args.isRunParallel());

        for (ExecutionResult result : katGcpResults) {
            result.setName(result.getName().substring(args.getJobPrefix().length() + 1));
            jobResults.add(result);
            allJobResults.add(result);
        }

        // If we're using a scheduler and we have been asked to run each job in parallel,
        // then we should wait for all those to complete before continuing.
        if (executionContext.usingScheduler() && args.isRunParallel()) {
            log.info("Running \"kat gcp\" for all ECQ groups in parallel, waiting for completion");
            this.conanExecutorService.executeScheduledWait(
                jobResults,
                katGcpJobPrefix + "*",
                ExitStatus.Type.COMPLETED_ANY,
                args.getJobPrefix() + "-kat-gcp-wait",
                args.getOutputDir());
        }

        // Waiting point... clear job ids.
        jobResults.clear();

        log.info("Kmer counting of all reads finished.");

        stopWatch.stop();

        TaskResult taskResult = new DefaultTaskResult(
            "rampart-read_analysis-kmer", true, allJobResults, stopWatch.getTime() / 1000L);

        // Output the resource usage to file
        FileUtils.writeLines(
            new File(args.getOutputDir(), args.getJobPrefix() + ".summary"),
            taskResult.getOutput());

        return new DefaultExecutionResult(
            taskResult.getTaskName(),
            0,
            new String[] {},
            null,
            -1,
            new ResourceUsage(
                taskResult.getMaxMemUsage(),
                taskResult.getActualTotalRuntime(),
                taskResult.getTotalExternalCputime()));

    } catch (ConanParameterException | IOException e) {
        throw new ProcessExecutionException(-1, e);
    }
}
public DispatcherResult dispatch(
        final ExecutionContext context, final ExecutionItem item, final Dispatchable toDispatch)
        throws DispatcherException {

    final NodesSelector nodesSelector = context.getNodeSelector();
    INodeSet nodes = null;
    try {
        nodes = framework.filterAuthorizedNodes(
            context.getFrameworkProject(),
            new HashSet<String>(Arrays.asList("read", "run")),
            framework.filterNodeSet(
                nodesSelector, context.getFrameworkProject(), context.getNodesFile()));
    } catch (NodeFileParserException e) {
        throw new DispatcherException(e);
    }
    if (nodes.getNodes().size() < 1) {
        throw new DispatcherException("No nodes matched");
    }
    boolean keepgoing = context.isKeepgoing();

    context
        .getExecutionListener()
        .log(4, "preparing for sequential execution on " + nodes.getNodes().size() + " nodes");
    final HashSet<String> nodeNames = new HashSet<String>(nodes.getNodeNames());
    final HashMap<String, Object> failures = new HashMap<String, Object>();

    FailedNodesListener failedListener = context.getExecutionListener().getFailedNodesListener();
    if (null != failedListener) {
        failedListener.matchedNodes(nodeNames);
    }
    boolean interrupted = false;
    final Thread thread = Thread.currentThread();
    boolean success = true;
    final HashMap<String, StatusResult> resultMap = new HashMap<String, StatusResult>();
    final Collection<INodeEntry> nodes1 = nodes.getNodes();

    // reorder based on configured rank property and order
    final String rankProperty =
        null != context.getNodeRankAttribute() ? context.getNodeRankAttribute() : "nodename";
    final boolean rankAscending = context.isNodeRankOrderAscending();
    final INodeEntryComparator comparator = new INodeEntryComparator(rankProperty);
    final TreeSet<INodeEntry> orderedNodes =
        new TreeSet<INodeEntry>(rankAscending ? comparator : Collections.reverseOrder(comparator));
    orderedNodes.addAll(nodes1);

    for (final Object node1 : orderedNodes) {
        if (thread.isInterrupted()
            || thread instanceof ExecutionServiceThread
                && ((ExecutionServiceThread) thread).isAborted()) {
            interrupted = true;
            break;
        }
        final INodeEntry node = (INodeEntry) node1;
        context
            .getExecutionListener()
            .log(
                Constants.DEBUG_LEVEL,
                "Executing command on node: " + node.getNodename() + ", " + node.toString());
        try {
            if (thread.isInterrupted()
                || thread instanceof ExecutionServiceThread
                    && ((ExecutionServiceThread) thread).isAborted()) {
                interrupted = true;
                break;
            }
            final StatusResult result;
            final ExecutionContext interimcontext =
                new ExecutionContextImpl.Builder(context)
                    .nodeSelector(SelectorUtils.singleNode(node.getNodename()))
                    .build();

            if (null != item) {
                result = framework.getExecutionService().interpretCommand(interimcontext, item, node);
            } else {
                result = toDispatch.dispatch(interimcontext, node);
            }

            if (null != result) {
                resultMap.put(node.getNodename(), result);
            }
            if (null == result || !result.isSuccess()) {
                success = false;
                // context.getExecutionListener().log(Constants.ERR_LEVEL,
                //     "Failed execution for node " + node.getNodename() + ": " + result);
                if (null != result) {
                    failures.put(node.getNodename(), result);
                } else {
                    failures.put(node.getNodename(), "Failed execution, result was null");
                }
                if (!keepgoing) {
                    break;
                }
            } else {
                nodeNames.remove(node.getNodename());
            }
        } catch (Throwable e) {
            success = false;
            failures.put(
                node.getNodename(), "Error dispatching command to the node: " + e.getMessage());
            context
                .getExecutionListener()
                .log(
                    Constants.ERR_LEVEL,
                    "Failed dispatching to node " + node.getNodename() + ": " + e.getMessage());

            final StringWriter stringWriter = new StringWriter();
            e.printStackTrace(new PrintWriter(stringWriter));
            context
                .getExecutionListener()
                .log(
                    Constants.DEBUG_LEVEL,
                    "Failed dispatching to node " + node.getNodename() + ": " + stringWriter.toString());

            if (!keepgoing) {
                if (failures.size() > 0 && null != failedListener) {
                    // tell listener of failed node list
                    failedListener.nodesFailed(failures);
                }
                throw new DispatcherException(
                    "Failed dispatching to node " + node.getNodename() + ": " + e.getMessage(), e, node);
            }
        }
    }

    if (keepgoing && nodeNames.size() > 0) {
        if (null != failedListener) {
            // tell listener of failed node list
            failedListener.nodesFailed(failures);
        }
        // now fail
        // XXX: needs to change from exception
        throw new NodesetFailureException(failures);
    } else if (null != failedListener && failures.isEmpty() && !interrupted) {
        failedListener.nodesSucceeded();
    }
    if (interrupted) {
        throw new DispatcherException("Node dispatch interrupted");
    }

    final boolean status = success;
    return new DispatcherResult() {
        public Map<String, ? extends StatusResult> getResults() {
            return resultMap;
        }

        public boolean isSuccess() {
            return status;
        }

        @Override
        public String toString() {
            return "DispatcherResult{"
                + "status=" + isSuccess() + ", "
                + "results=" + getResults() + "}";
        }
    };
}