/**
 * Client for accessing AWS CloudFormation. All service calls made using this client are blocking,
 * and will not return until the service call completes.
 *
 * <p><fullname>AWS CloudFormation</fullname>
 *
 * <p>AWS CloudFormation enables you to create and manage AWS infrastructure deployments predictably
 * and repeatedly. AWS CloudFormation helps you leverage AWS products such as Amazon EC2, EBS,
 * Amazon SNS, ELB, and Auto Scaling to build highly reliable, highly scalable, cost-effective
 * applications without worrying about creating and configuring the underlying AWS infrastructure.
 *
 * <p>With AWS CloudFormation, you declare all of your resources and dependencies in a template
 * file. The template defines a collection of resources as a single unit called a stack. AWS
 * CloudFormation creates and deletes all member resources of the stack together and manages all
 * dependencies between the resources for you.
 *
 * <p>For more information about this product, go to the <a
 * href="http://aws.amazon.com/cloudformation/">CloudFormation Product Page</a>.
 *
 * <p>AWS CloudFormation makes use of other AWS products. If you need additional technical
 * information about a specific AWS product, you can find the product's technical documentation at
 * <a href="http://aws.amazon.com/documentation/">http://aws.amazon.com/documentation/</a>.
 */
public class AmazonCloudFormationClient extends AmazonWebServiceClient
    implements AmazonCloudFormation {
  /** Provider for AWS credentials used to sign every outgoing request. */
  private AWSCredentialsProvider awsCredentialsProvider;

  /** Logger keyed to the service interface, so all client implementations share one log category. */
  private static final Log log = LogFactory.getLog(AmazonCloudFormation.class);

  /** Default signing name for the service. */
  private static final String DEFAULT_SIGNING_NAME = "cloudformation";

  /** The region metadata service name for computing region endpoints. */
  private static final String DEFAULT_ENDPOINT_PREFIX = "cloudformation";

  /** List of exception unmarshallers for all AWS CloudFormation exceptions (populated in init()). */
  protected final List<Unmarshaller<AmazonServiceException, Node>> exceptionUnmarshallers =
      new ArrayList<Unmarshaller<AmazonServiceException, Node>>();

  /**
   * Creates a new AWS CloudFormation client whose credentials are resolved through the default
   * provider chain, which searches in this order:
   *
   * <ul>
   *   <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
   *   <li>Java System Properties - aws.accessKeyId and aws.secretKey
   *   <li>Instance profile credentials delivered through the Amazon EC2 metadata service
   * </ul>
   *
   * <p>Every call made through this client blocks until the service call completes.
   *
   * @see DefaultAWSCredentialsProviderChain
   */
  public AmazonCloudFormationClient() {
    // Delegate to the (provider, configuration) constructor with SDK-wide defaults.
    this(new DefaultAWSCredentialsProviderChain(),
        com.amazonaws.PredefinedClientConfigurations.defaultConfig());
  }

  /**
   * Creates a new AWS CloudFormation client with caller-supplied configuration and credentials
   * resolved through the default provider chain, which searches in this order:
   *
   * <ul>
   *   <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
   *   <li>Java System Properties - aws.accessKeyId and aws.secretKey
   *   <li>Instance profile credentials delivered through the Amazon EC2 metadata service
   * </ul>
   *
   * <p>Every call made through this client blocks until the service call completes.
   *
   * @param clientConfiguration Connection options for talking to AWS CloudFormation (proxy
   *     settings, retry counts, etc.).
   * @see DefaultAWSCredentialsProviderChain
   */
  public AmazonCloudFormationClient(ClientConfiguration clientConfiguration) {
    // Only the credentials default here; the configuration is the caller's.
    this(new DefaultAWSCredentialsProviderChain(), clientConfiguration);
  }

  /**
   * Creates a new AWS CloudFormation client that authenticates with the given fixed AWS account
   * credentials and uses the SDK's default client configuration.
   *
   * <p>Every call made through this client blocks until the service call completes.
   *
   * @param awsCredentials The AWS credentials (access key ID and secret key) used to authenticate
   *     requests.
   */
  public AmazonCloudFormationClient(AWSCredentials awsCredentials) {
    this(awsCredentials, com.amazonaws.PredefinedClientConfigurations.defaultConfig());
  }

  /**
   * Creates a new AWS CloudFormation client that authenticates with the given fixed AWS account
   * credentials and uses the supplied client configuration.
   *
   * <p>Every call made through this client blocks until the service call completes.
   *
   * @param awsCredentials The AWS credentials (access key ID and secret key) used to authenticate
   *     requests.
   * @param clientConfiguration Connection options for talking to AWS CloudFormation (proxy
   *     settings, retry counts, etc.).
   */
  public AmazonCloudFormationClient(
      AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
    super(clientConfiguration);
    // Wrap the fixed credentials in a provider so the rest of the client sees one interface.
    this.awsCredentialsProvider = new StaticCredentialsProvider(awsCredentials);
    init();
  }

  /**
   * Creates a new AWS CloudFormation client that obtains credentials from the given provider and
   * uses the SDK's default client configuration.
   *
   * <p>Every call made through this client blocks until the service call completes.
   *
   * @param awsCredentialsProvider Supplies the credentials used to authenticate requests.
   */
  public AmazonCloudFormationClient(AWSCredentialsProvider awsCredentialsProvider) {
    this(awsCredentialsProvider, com.amazonaws.PredefinedClientConfigurations.defaultConfig());
  }

  /**
   * Creates a new AWS CloudFormation client that obtains credentials from the given provider and
   * uses the supplied client configuration.
   *
   * <p>Every call made through this client blocks until the service call completes.
   *
   * @param awsCredentialsProvider Supplies the credentials used to authenticate requests.
   * @param clientConfiguration Connection options for talking to AWS CloudFormation (proxy
   *     settings, retry counts, etc.).
   */
  public AmazonCloudFormationClient(
      AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) {
    // No metric collector requested; the three-argument constructor treats null as "none".
    this(awsCredentialsProvider, clientConfiguration, null);
  }

  /**
   * Creates a new AWS CloudFormation client that obtains credentials from the given provider, uses
   * the supplied client configuration, and reports request metrics to the given collector.
   *
   * <p>Every call made through this client blocks until the service call completes.
   *
   * @param awsCredentialsProvider Supplies the credentials used to authenticate requests.
   * @param clientConfiguration Connection options for talking to AWS CloudFormation (proxy
   *     settings, retry counts, etc.).
   * @param requestMetricCollector optional request metric collector; may be null
   */
  public AmazonCloudFormationClient(
      AWSCredentialsProvider awsCredentialsProvider,
      ClientConfiguration clientConfiguration,
      RequestMetricCollector requestMetricCollector) {
    super(clientConfiguration, requestMetricCollector);
    this.awsCredentialsProvider = awsCredentialsProvider;
    init();
  }

  /** One-time client setup: error unmarshallers, signing/endpoint names, and request handlers. */
  private void init() {
    // Service-specific error unmarshallers are consulted first; StandardErrorUnmarshaller is the
    // catch-all and must therefore stay last.
    exceptionUnmarshallers.add(new LimitExceededExceptionUnmarshaller());
    exceptionUnmarshallers.add(new AlreadyExistsExceptionUnmarshaller());
    exceptionUnmarshallers.add(new InsufficientCapabilitiesExceptionUnmarshaller());
    exceptionUnmarshallers.add(new StandardErrorUnmarshaller());

    setServiceNameIntern(DEFAULT_SIGNING_NAME);
    setEndpointPrefix(DEFAULT_ENDPOINT_PREFIX);
    // setEndpoint(...) also configures the signer to match the endpoint's region.
    this.setEndpoint("https://cloudformation.us-east-1.amazonaws.com");
    HandlerChainFactory factory = new HandlerChainFactory();
    requestHandler2s.addAll(
        factory.newRequestHandlerChain("/com/amazonaws/services/cloudformation/request.handlers"));
    requestHandler2s.addAll(
        factory.newRequestHandler2Chain(
            "/com/amazonaws/services/cloudformation/request.handler2s"));
  }

  /**
   * Cancels an in-flight update on the specified stack. On success the stack rolls the update back
   * and reverts to its previous configuration. <note>Only stacks in the UPDATE_IN_PROGRESS state
   * can be cancelled.</note>
   *
   * @param cancelUpdateStackRequest The input for the <a>CancelUpdateStack</a> action.
   * @sample AmazonCloudFormation.CancelUpdateStack
   */
  @Override
  public void cancelUpdateStack(CancelUpdateStackRequest cancelUpdateStackRequest) {
    ExecutionContext context = createExecutionContext(cancelUpdateStackRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<CancelUpdateStackRequest> req = null;
    Response<Void> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new CancelUpdateStackRequestMarshaller()
                .marshall(super.beforeMarshalling(cancelUpdateStackRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      // This operation returns no payload, hence the null unmarshaller.
      invoke(req, new StaxResponseHandler<Void>(null), context);
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /**
   * Starts creation of a stack from the given template. Creation proceeds asynchronously after a
   * successful return; poll progress via the <a>DescribeStacks</a> API.
   *
   * @param createStackRequest The input for <a>CreateStack</a> action.
   * @return Result of the CreateStack operation returned by the service.
   * @throws LimitExceededException Quota for the resource has already been reached.
   * @throws AlreadyExistsException Resource with the name requested already exists.
   * @throws InsufficientCapabilitiesException The template contains resources with capabilities
   *     that were not specified in the Capabilities parameter.
   * @sample AmazonCloudFormation.CreateStack
   */
  @Override
  public CreateStackResult createStack(CreateStackRequest createStackRequest) {
    ExecutionContext context = createExecutionContext(createStackRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<CreateStackRequest> req = null;
    Response<CreateStackResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new CreateStackRequestMarshaller().marshall(super.beforeMarshalling(createStackRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<CreateStackResult>(new CreateStackResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /**
   * Starts deletion of the specified stack. Deletion proceeds asynchronously after a successful
   * return; once complete, the stack no longer appears in <a>DescribeStacks</a> results.
   *
   * @param deleteStackRequest The input for <a>DeleteStack</a> action.
   * @sample AmazonCloudFormation.DeleteStack
   */
  @Override
  public void deleteStack(DeleteStackRequest deleteStackRequest) {
    ExecutionContext context = createExecutionContext(deleteStackRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<DeleteStackRequest> req = null;
    Response<Void> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new DeleteStackRequestMarshaller().marshall(super.beforeMarshalling(deleteStackRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      // This operation returns no payload, hence the null unmarshaller.
      invoke(req, new StaxResponseHandler<Void>(null), context);
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /**
   * Retrieves the account's AWS CloudFormation limits, such as the maximum number of stacks the
   * account may create.
   *
   * @param describeAccountLimitsRequest The input for the <a>DescribeAccountLimits</a> action.
   * @return Result of the DescribeAccountLimits operation returned by the service.
   * @sample AmazonCloudFormation.DescribeAccountLimits
   */
  @Override
  public DescribeAccountLimitsResult describeAccountLimits(
      DescribeAccountLimitsRequest describeAccountLimitsRequest) {
    ExecutionContext context = createExecutionContext(describeAccountLimitsRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeAccountLimitsRequest> req = null;
    Response<DescribeAccountLimitsResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new DescribeAccountLimitsRequestMarshaller()
                .marshall(super.beforeMarshalling(describeAccountLimitsRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<DescribeAccountLimitsResult>(
                  new DescribeAccountLimitsResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /**
   * Returns all stack-related events for the specified stack. See <a href=
   * "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/concept-stack.html" >Stacks</a>
   * in the AWS CloudFormation User Guide for details on event history. <note>Events for failed or
   * deleted stacks can be listed by supplying the unique stack identifier (stack ID).</note>
   *
   * @param describeStackEventsRequest The input for <a>DescribeStackEvents</a> action.
   * @return Result of the DescribeStackEvents operation returned by the service.
   * @sample AmazonCloudFormation.DescribeStackEvents
   */
  @Override
  public DescribeStackEventsResult describeStackEvents(
      DescribeStackEventsRequest describeStackEventsRequest) {
    ExecutionContext context = createExecutionContext(describeStackEventsRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeStackEventsRequest> req = null;
    Response<DescribeStackEventsResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new DescribeStackEventsRequestMarshaller()
                .marshall(super.beforeMarshalling(describeStackEventsRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<DescribeStackEventsResult>(
                  new DescribeStackEventsResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /**
   * Returns a description of a single resource belonging to the specified stack.
   *
   * <p>For deleted stacks, DescribeStackResource keeps returning resource information for up to 90
   * days after the stack has been deleted.
   *
   * @param describeStackResourceRequest The input for <a>DescribeStackResource</a> action.
   * @return Result of the DescribeStackResource operation returned by the service.
   * @sample AmazonCloudFormation.DescribeStackResource
   */
  @Override
  public DescribeStackResourceResult describeStackResource(
      DescribeStackResourceRequest describeStackResourceRequest) {
    ExecutionContext context = createExecutionContext(describeStackResourceRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeStackResourceRequest> req = null;
    Response<DescribeStackResourceResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new DescribeStackResourceRequestMarshaller()
                .marshall(super.beforeMarshalling(describeStackResourceRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<DescribeStackResourceResult>(
                  new DescribeStackResourceResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /**
   * Returns AWS resource descriptions for running and deleted stacks. Supplying <code>StackName
   * </code> returns all resources of that stack; supplying <code>PhysicalResourceId</code> returns
   * the resources of the stack that resource belongs to. <note>At most the first 100 resources are
   * returned; for larger stacks use <code>ListStackResources</code> instead.</note>
   *
   * <p>For deleted stacks, <code>DescribeStackResources</code> keeps returning resource
   * information for up to 90 days after the stack has been deleted.
   *
   * <p>Specify either <code>StackName</code> or <code>PhysicalResourceId</code> — never both; a
   * <code>ValidationError</code> is returned if both appear in the same request. You may also pass
   * <code>LogicalResourceId</code> to filter the result. For more information about resources, the
   * <code>LogicalResourceId</code> and <code>PhysicalResourceId</code>, go to the <a
   * href="http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide">AWS CloudFormation User
   * Guide</a>.
   *
   * @param describeStackResourcesRequest The input for <a>DescribeStackResources</a> action.
   * @return Result of the DescribeStackResources operation returned by the service.
   * @sample AmazonCloudFormation.DescribeStackResources
   */
  @Override
  public DescribeStackResourcesResult describeStackResources(
      DescribeStackResourcesRequest describeStackResourcesRequest) {
    ExecutionContext context = createExecutionContext(describeStackResourcesRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeStackResourcesRequest> req = null;
    Response<DescribeStackResourcesResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new DescribeStackResourcesRequestMarshaller()
                .marshall(super.beforeMarshalling(describeStackResourcesRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<DescribeStackResourcesResult>(
                  new DescribeStackResourcesResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /**
   * Returns the description of the specified stack, or the descriptions of all created stacks when
   * no stack name is given.
   *
   * @param describeStacksRequest The input for <a>DescribeStacks</a> action.
   * @return Result of the DescribeStacks operation returned by the service.
   * @sample AmazonCloudFormation.DescribeStacks
   */
  @Override
  public DescribeStacksResult describeStacks(DescribeStacksRequest describeStacksRequest) {
    ExecutionContext context = createExecutionContext(describeStacksRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<DescribeStacksRequest> req = null;
    Response<DescribeStacksResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new DescribeStacksRequestMarshaller()
                .marshall(super.beforeMarshalling(describeStacksRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<DescribeStacksResult>(
                  new DescribeStacksResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /** Convenience overload equivalent to {@link #describeStacks(DescribeStacksRequest)} with an empty request. */
  @Override
  public DescribeStacksResult describeStacks() {
    return describeStacks(new DescribeStacksRequest());
  }

  /**
   * Estimates the monthly cost of running a template. The result is an AWS Simple Monthly
   * Calculator URL whose query string describes the resources the template requires.
   *
   * @param estimateTemplateCostRequest
   * @return Result of the EstimateTemplateCost operation returned by the service.
   * @sample AmazonCloudFormation.EstimateTemplateCost
   */
  @Override
  public EstimateTemplateCostResult estimateTemplateCost(
      EstimateTemplateCostRequest estimateTemplateCostRequest) {
    ExecutionContext context = createExecutionContext(estimateTemplateCostRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<EstimateTemplateCostRequest> req = null;
    Response<EstimateTemplateCostResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new EstimateTemplateCostRequestMarshaller()
                .marshall(super.beforeMarshalling(estimateTemplateCostRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<EstimateTemplateCostResult>(
                  new EstimateTemplateCostResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /** Convenience overload equivalent to {@link #estimateTemplateCost(EstimateTemplateCostRequest)} with an empty request. */
  @Override
  public EstimateTemplateCostResult estimateTemplateCost() {
    return estimateTemplateCost(new EstimateTemplateCostRequest());
  }

  /**
   * Returns the stack policy for the specified stack, or a null value when the stack has no
   * policy.
   *
   * @param getStackPolicyRequest The input for the <a>GetStackPolicy</a> action.
   * @return Result of the GetStackPolicy operation returned by the service.
   * @sample AmazonCloudFormation.GetStackPolicy
   */
  @Override
  public GetStackPolicyResult getStackPolicy(GetStackPolicyRequest getStackPolicyRequest) {
    ExecutionContext context = createExecutionContext(getStackPolicyRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<GetStackPolicyRequest> req = null;
    Response<GetStackPolicyResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new GetStackPolicyRequestMarshaller()
                .marshall(super.beforeMarshalling(getStackPolicyRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<GetStackPolicyResult>(
                  new GetStackPolicyResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /**
   * Returns the template body for the specified stack, whether running or deleted.
   *
   * <p>For deleted stacks, GetTemplate keeps returning the template for up to 90 days after the
   * stack has been deleted. <note>If the template does not exist, a <code>ValidationError</code>
   * is returned.</note>
   *
   * @param getTemplateRequest The input for a <a>GetTemplate</a> action.
   * @return Result of the GetTemplate operation returned by the service.
   * @sample AmazonCloudFormation.GetTemplate
   */
  @Override
  public GetTemplateResult getTemplate(GetTemplateRequest getTemplateRequest) {
    ExecutionContext context = createExecutionContext(getTemplateRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<GetTemplateRequest> req = null;
    Response<GetTemplateResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new GetTemplateRequestMarshaller().marshall(super.beforeMarshalling(getTemplateRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<GetTemplateResult>(new GetTemplateResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /**
   * Returns information about a new or existing template. <code>GetTemplateSummary</code> is
   * useful for inspecting parameter information — default values, parameter types — before
   * creating or updating a stack.
   *
   * <p>The action works on a submitted template as well as on a running or deleted stack. For
   * deleted stacks the template information remains available for up to 90 days after deletion;
   * if the template does not exist, a <code>ValidationError</code> is returned.
   *
   * @param getTemplateSummaryRequest The input for the <a>GetTemplateSummary</a> action.
   * @return Result of the GetTemplateSummary operation returned by the service.
   * @sample AmazonCloudFormation.GetTemplateSummary
   */
  @Override
  public GetTemplateSummaryResult getTemplateSummary(
      GetTemplateSummaryRequest getTemplateSummaryRequest) {
    ExecutionContext context = createExecutionContext(getTemplateSummaryRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<GetTemplateSummaryRequest> req = null;
    Response<GetTemplateSummaryResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new GetTemplateSummaryRequestMarshaller()
                .marshall(super.beforeMarshalling(getTemplateSummaryRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<GetTemplateSummaryResult>(
                  new GetTemplateSummaryResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /** Convenience overload equivalent to {@link #getTemplateSummary(GetTemplateSummaryRequest)} with an empty request. */
  @Override
  public GetTemplateSummaryResult getTemplateSummary() {
    return getTemplateSummary(new GetTemplateSummaryRequest());
  }

  /**
   * Returns descriptions of all resources belonging to the specified stack.
   *
   * <p>For deleted stacks, ListStackResources keeps returning resource information for up to 90
   * days after the stack has been deleted.
   *
   * @param listStackResourcesRequest The input for the <a>ListStackResource</a> action.
   * @return Result of the ListStackResources operation returned by the service.
   * @sample AmazonCloudFormation.ListStackResources
   */
  @Override
  public ListStackResourcesResult listStackResources(
      ListStackResourcesRequest listStackResourcesRequest) {
    ExecutionContext context = createExecutionContext(listStackResourcesRequest);
    AWSRequestMetrics metrics = context.getAwsRequestMetrics();
    metrics.startEvent(Field.ClientExecuteTime);
    Request<ListStackResourcesRequest> req = null;
    Response<ListStackResourcesResult> resp = null;

    try {
      metrics.startEvent(Field.RequestMarshallTime);
      try {
        req =
            new ListStackResourcesRequestMarshaller()
                .marshall(super.beforeMarshalling(listStackResourcesRequest));
        // Attach the metrics object so downstream timings land on this request.
        req.setAWSRequestMetrics(metrics);
      } finally {
        metrics.endEvent(Field.RequestMarshallTime);
      }

      resp =
          invoke(
              req,
              new StaxResponseHandler<ListStackResourcesResult>(
                  new ListStackResourcesResultStaxUnmarshaller()),
              context);
      return resp.getAwsResponse();
    } finally {
      // Close the ClientExecuteTime event and publish metrics regardless of outcome.
      endClientExecution(metrics, req, resp);
    }
  }

  /**
   * Returns the summary information for stacks whose status matches the specified
   * StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days
   * after the stack is deleted. If no StackStatusFilter is specified, summary information for all
   * stacks is returned (including existing stacks and stacks that have been deleted).
   *
   * @param listStacksRequest The input for <a>ListStacks</a> action.
   * @return Result of the ListStacks operation returned by the service.
   * @sample AmazonCloudFormation.ListStacks
   */
  @Override
  public ListStacksResult listStacks(ListStacksRequest listStacksRequest) {
    ExecutionContext executionContext = createExecutionContext(listStacksRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ListStacksRequest> request = null;
    Response<ListStacksResult> response = null;

    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new ListStacksRequestMarshaller().marshall(super.beforeMarshalling(listStacksRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }

      StaxResponseHandler<ListStacksResult> responseHandler =
          new StaxResponseHandler<ListStacksResult>(new ListStacksResultStaxUnmarshaller());
      response = invoke(request, responseHandler, executionContext);

      return response.getAwsResponse();

    } finally {

      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  @Override
  public ListStacksResult listStacks() {
    return listStacks(new ListStacksRequest());
  }

  /**
   * Sets a stack policy for a specified stack.
   *
   * @param setStackPolicyRequest The input for the <a>SetStackPolicy</a> action.
   * @sample AmazonCloudFormation.SetStackPolicy
   */
  @Override
  public void setStackPolicy(SetStackPolicyRequest setStackPolicyRequest) {
    ExecutionContext executionContext = createExecutionContext(setStackPolicyRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<SetStackPolicyRequest> request = null;
    Response<Void> response = null;

    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new SetStackPolicyRequestMarshaller()
                .marshall(super.beforeMarshalling(setStackPolicyRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }

      StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null);
      invoke(request, responseHandler, executionContext);

    } finally {

      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Sends a signal to the specified resource with a success or failure status. You can use the
   * SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation
   * doesn't proceed with a stack creation or update until resources receive the required number of
   * signals or the timeout period is exceeded. The SignalResource API is useful in cases where you
   * want to send signals from anywhere other than an Amazon EC2 instance.
   *
   * @param signalResourceRequest The input for the <a>SignalResource</a> action.
   * @sample AmazonCloudFormation.SignalResource
   */
  @Override
  public void signalResource(SignalResourceRequest signalResourceRequest) {
    ExecutionContext executionContext = createExecutionContext(signalResourceRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<SignalResourceRequest> request = null;
    Response<Void> response = null;

    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new SignalResourceRequestMarshaller()
                .marshall(super.beforeMarshalling(signalResourceRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }

      StaxResponseHandler<Void> responseHandler = new StaxResponseHandler<Void>(null);
      invoke(request, responseHandler, executionContext);

    } finally {

      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Updates a stack as specified in the template. After the call completes successfully, the stack
   * update starts. You can check the status of the stack via the <a>DescribeStacks</a> action.
   *
   * <p>To get a copy of the template for an existing stack, you can use the <a>GetTemplate</a>
   * action.
   *
   * <p>Tags that were associated with this stack during creation time will still be associated with
   * the stack after an <code>UpdateStack</code> operation.
   *
   * <p>For more information about creating an update template, updating a stack, and monitoring the
   * progress of the update, see <a href=
   * "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html"
   * >Updating a Stack</a>.
   *
   * @param updateStackRequest The input for <a>UpdateStack</a> action.
   * @return Result of the UpdateStack operation returned by the service.
   * @throws InsufficientCapabilitiesException The template contains resources with capabilities
   *     that were not specified in the Capabilities parameter.
   * @sample AmazonCloudFormation.UpdateStack
   */
  @Override
  public UpdateStackResult updateStack(UpdateStackRequest updateStackRequest) {
    ExecutionContext executionContext = createExecutionContext(updateStackRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<UpdateStackRequest> request = null;
    Response<UpdateStackResult> response = null;

    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new UpdateStackRequestMarshaller()
                .marshall(super.beforeMarshalling(updateStackRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }

      StaxResponseHandler<UpdateStackResult> responseHandler =
          new StaxResponseHandler<UpdateStackResult>(new UpdateStackResultStaxUnmarshaller());
      response = invoke(request, responseHandler, executionContext);

      return response.getAwsResponse();

    } finally {

      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Validates a specified template.
   *
   * @param validateTemplateRequest The input for <a>ValidateTemplate</a> action.
   * @return Result of the ValidateTemplate operation returned by the service.
   * @sample AmazonCloudFormation.ValidateTemplate
   */
  @Override
  public ValidateTemplateResult validateTemplate(ValidateTemplateRequest validateTemplateRequest) {
    ExecutionContext executionContext = createExecutionContext(validateTemplateRequest);
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    awsRequestMetrics.startEvent(Field.ClientExecuteTime);
    Request<ValidateTemplateRequest> request = null;
    Response<ValidateTemplateResult> response = null;

    try {
      awsRequestMetrics.startEvent(Field.RequestMarshallTime);
      try {
        request =
            new ValidateTemplateRequestMarshaller()
                .marshall(super.beforeMarshalling(validateTemplateRequest));
        // Binds the request metrics to the current request.
        request.setAWSRequestMetrics(awsRequestMetrics);
      } finally {
        awsRequestMetrics.endEvent(Field.RequestMarshallTime);
      }

      StaxResponseHandler<ValidateTemplateResult> responseHandler =
          new StaxResponseHandler<ValidateTemplateResult>(
              new ValidateTemplateResultStaxUnmarshaller());
      response = invoke(request, responseHandler, executionContext);

      return response.getAwsResponse();

    } finally {

      endClientExecution(awsRequestMetrics, request, response);
    }
  }

  /**
   * Returns additional metadata for a previously executed successful, request, typically used for
   * debugging issues where a service isn't acting as expected. This data isn't considered part of
   * the result data returned by an operation, so it's available through this separate, diagnostic
   * interface.
   *
   * <p>Response metadata is only cached for a limited period of time, so if you need to access this
   * extra diagnostic information for an executed request, you should use this method to retrieve it
   * as soon as possible after executing the request.
   *
   * @param request The originally executed request
   * @return The response metadata for the specified request, or null if none is available.
   */
  public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
    return client.getResponseMetadataForRequest(request);
  }

  private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(
      Request<Y> request,
      HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
      ExecutionContext executionContext) {
    request.setEndpoint(endpoint);
    request.setTimeOffset(timeOffset);

    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    AWSCredentials credentials;
    awsRequestMetrics.startEvent(Field.CredentialsRequestTime);
    try {
      credentials = awsCredentialsProvider.getCredentials();
    } finally {
      awsRequestMetrics.endEvent(Field.CredentialsRequestTime);
    }

    AmazonWebServiceRequest originalRequest = request.getOriginalRequest();
    if (originalRequest != null && originalRequest.getRequestCredentials() != null) {
      credentials = originalRequest.getRequestCredentials();
    }

    executionContext.setCredentials(credentials);

    DefaultErrorResponseHandler errorResponseHandler =
        new DefaultErrorResponseHandler(exceptionUnmarshallers);

    return client.execute(request, responseHandler, errorResponseHandler, executionContext);
  }
}
예제 #2
0
/**
 * MDC server
 *
 * @author yjiang
 */
public abstract class MDCServer extends IoHandlerAdapter {

  static final Log log = LogFactory.getLog(MDCServer.class);

  static final AtomicInteger counter = new AtomicInteger(0);

  /** the the max size of a packet, 32KB */
  static int MAX_SIZE = 10240 * 1024; // test

  protected InetSocketAddress address;
  protected Selector selector;
  protected ServerSocketChannel server;
  protected final int PROCESS_NUMBER = 4;
  protected static Configuration _conf;

  protected IoAcceptor acceptor;
  protected boolean isRunning = false;

  protected boolean testKey() {
    String data = UID.random(24);
    byte[] bb = RSA.encode(data.getBytes(), TConn.pub_key);
    if (bb != null) {
      bb = RSA.decode(bb, TConn.pri_key);
      if (bb != null && data.equals(new String(bb))) {
        return true;
      }
    }

    return false;
  }

  /** Close. */
  public void close() {
    if (selector != null) {
      selector.wakeup();
      try {
        selector.close();
      } catch (IOException e1) {
        log.warn("close selector fails", e1);
      } finally {
        selector = null;
      }
    }

    if (server != null) {
      try {
        server.socket().close();
        server.close();
      } catch (IOException e) {
        log.warn("close socket server fails", e);
      } finally {
        server = null;
      }
    }
  }

  /**
   * Instantiates a new MDC server.
   *
   * @param host the host
   * @param port the port
   */
  protected MDCServer(String host, int port) {
    _conf = Config.getConfig();

    address = (host == null) ? new InetSocketAddress(port) : new InetSocketAddress(host, port);

    /** initialize app command */
    Command.init();

    /** initialize the connection center */
    TConnCenter.init(_conf, port);

    synchronized (_conf) {
      /** load public key from database */
      TConn.pub_key = SystemConfig.s("pub_key", null);
      TConn.pri_key = SystemConfig.s("pri_key", null);

      /** initialize the RSA key, hardcode 2048 bits */
      if (TConn.pub_key == null
          || TConn.pri_key == null
          || "".equals(TConn.pub_key)
          || "".equals(TConn.pri_key)) {
        /** print out the old state */
        log.warn(
            "the pub_key or pri_key missed, the old state are pub_key:["
                + TConn.pub_key
                + "], pri_key:["
                + TConn.pri_key
                + "]");

        Key k = RSA.generate(2048);
        TConn.pri_key = k.pri_key;
        TConn.pub_key = k.pub_key;

        /** print out the new public key */
        log.warn("create new RSA key pair, pub_key:[" + TConn.pub_key + ']');

        /** set back in database */
        SystemConfig.setConfig("pri_key", TConn.pri_key);
        SystemConfig.setConfig("pub_key", TConn.pub_key);
      }

      MAX_SIZE = SystemConfig.i("mdc.max_size", MAX_SIZE);
    }
  }

  /**
   * Start.
   *
   * @return the MDC server
   */
  public abstract MDCServer start();

  /** Stop. */
  public void stop() {
    acceptor.unbind();
  }

  /**
   * Service.
   *
   * @param o the o
   * @param session the session
   */
  void service(IoBuffer o, IoSession session) {
    try {
      // System.out.println(o.remaining() + "/" + o.capacity());

      session.setAttribute("last", System.currentTimeMillis());

      SimpleIoBuffer in = (SimpleIoBuffer) session.getAttribute("buf");
      if (in == null) {
        in = SimpleIoBuffer.create(4096);
        session.setAttribute("buf", in);
      }
      byte[] data = new byte[o.remaining()];
      o.get(data);
      in.append(data);

      // log.debug("recv: " + data.length + ", " +
      // session.getRemoteAddress());

      while (in.length() > 5) {
        in.mark();
        /**
         * Byte 1: head of the package<br>
         * bit 7-6: "01", indicator of MDC<br>
         * bit 5: encrypt indicator, "0": no; "1": encrypted<br>
         * bit 4: zip indicator, "0": no, "1": ziped<br>
         * bit 0-3: reserved<br>
         * Byte 2-5: length of data<br>
         * Byte[…]: data array<br>
         */
        byte head = in.read();
        /** test the head indicator, if not correct close it */
        if ((head & 0xC0) != 0x40) {
          log.info("flag is not correct! flag:" + head + ",from: " + session.getRemoteAddress());

          session.write("error.head");
          session.close(true);
          return;
        }

        int len = in.getInt();

        if (len <= 0 || len > MAX_SIZE) {
          log.error(
              "mdcserver.Wrong lendth: "
                  + len
                  + "/"
                  + MAX_SIZE
                  + " - "
                  + session.getRemoteAddress());
          session.write("error.packet.size");
          session.close(true);
          break;
        }

        // log.info("packet.len:" + len + ", len in buffer:" +
        // in.length());
        if (in.length() < len) {
          in.reset();
          break;
        } else {
          // do it

          byte[] b = new byte[len];
          in.read(b);

          // log.info("stub.package.size: " + len + ", head:" + head +
          // ", cmd:" + Bean.toString(b));
          // log.info("stub.package.size: " + len + ", head:" + head);

          /** test the zip flag */
          if ((head & 0x10) != 0) {
            b = Zip.unzip(b);
          }

          final TConn d = (TConn) session.getAttribute("conn");
          if (d != null) {
            /** test the encrypted flag */
            if ((head & 0x20) != 0) {
              b = DES.decode(b, d.deskey);
            }

            final byte[] bb = b;

            /** test if the packet is for mdc or app */
            new WorkerTask() {

              @Override
              public void onExecute() {
                d.process(bb);
              }
            }.schedule(0);

            session.setAttribute("last", System.currentTimeMillis());
          } else {
            session.write("error.getconnection");

            log.error("error to get connection: " + session.getRemoteAddress());
            session.close(true);
          }
        }
      }
    } catch (Throwable e) {
      log.error("closing stub: " + session.getRemoteAddress(), e);
      session.write("exception." + e.getMessage());
      session.close(true);
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#sessionCreated(org.apache
   * .mina.core.session.IoSession)
   */
  public void sessionCreated(IoSession session) throws Exception {
    log.info("stub created:" + session.getRemoteAddress());

    Counter.add("mdc", "connection", 1);

    TConn d = new TConn(session);
    d.set("x-forwarded-for", session.getRemoteAddress().toString());

    session.setAttribute("conn", d);
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#sessionClosed(org.apache
   * .mina.core.session.IoSession)
   */
  public void sessionClosed(IoSession session) throws Exception {
    log.info("closed stub: " + session.getRemoteAddress());
    TConn d = (TConn) session.getAttribute("conn");
    if (d != null) {
      d.close();

      session.removeAttribute("conn");
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#sessionIdle(org.apache.
   * mina.core.session.IoSession, org.apache.mina.core.session.IdleStatus)
   */
  public void sessionIdle(IoSession session, IdleStatus status) throws Exception {
    if (IdleStatus.BOTH_IDLE.equals(status)) {
      Long l = (Long) session.getAttribute("last");
      if (l != null && System.currentTimeMillis() - l > 60 * 1000) {
        session.close(true);
      }
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#messageReceived(org.apache
   * .mina.core.session.IoSession, java.lang.Object)
   */
  public void messageReceived(IoSession session, Object message) throws Exception {
    // System.out.println(message);
    if (message instanceof IoBuffer) {
      service((IoBuffer) message, session);
    }
  }

  /**
   * Creates the tcp server.
   *
   * @param host the host
   * @param port the port
   * @return the MDC server
   */
  public static synchronized MDCServer createTcpServer(String host, int port) {
    return new TDCServer(host, port);
  }

  /**
   * Creates the udp server.
   *
   * @param host the host
   * @param port the port
   * @return the MDC server
   */
  public static synchronized MDCServer createUdpServer(String host, int port) {
    return new UDCServer(host, port);
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.mina.core.service.IoHandlerAdapter#exceptionCaught(org.apache
   * .mina.core.session.IoSession, java.lang.Throwable)
   */
  @Override
  public void exceptionCaught(IoSession session, Throwable cause) throws Exception {
    TConn d = (TConn) session.getAttribute("conn");
    if (d != null && d.valid()) {
      App.bye(d);
    }
  }
}
/**
 * The <code>DefaultTcpTransportMapping</code> implements a TCP transport mapping with the Java 1.4
 * new IO API.
 *
 * <p>It uses a single thread for processing incoming and outgoing messages. The thread is started
 * when the <code>listen</code> method is called, or when an outgoing request is sent using the
 * <code>sendMessage</code> method.
 *
 * @author Frank Fock
 * @version 1.7.4a
 */
public class DefaultTcpTransportMapping extends TcpTransportMapping {

  private static final LogAdapter logger = LogFactory.getLogger(DefaultTcpTransportMapping.class);

  private Hashtable sockets = new Hashtable();
  private ServerThread server;

  private Timer socketCleaner;
  // 1 minute default timeout
  private long connectionTimeout = 60000;
  private boolean serverEnabled = false;

  private static final int MIN_SNMP_HEADER_LENGTH = 6;
  private MessageLengthDecoder messageLengthDecoder = new SnmpMesssageLengthDecoder();

  /**
   * Creates a default TCP transport mapping with the server for incoming messages disabled.
   *
   * @throws UnknownHostException
   * @throws IOException on failure of binding a local port.
   */
  public DefaultTcpTransportMapping() throws UnknownHostException, IOException {
    super(new TcpAddress(InetAddress.getLocalHost(), 0));
  }

  /**
   * Creates a default TCP transport mapping that binds to the given address (interface) on the
   * local host.
   *
   * @param serverAddress the TcpAddress instance that describes the server address to listen on
   *     incoming connection requests.
   * @throws UnknownHostException if the specified interface does not exist.
   * @throws IOException if the given address cannot be bound.
   */
  public DefaultTcpTransportMapping(TcpAddress serverAddress)
      throws UnknownHostException, IOException {
    super(serverAddress);
    this.serverEnabled = true;
  }

  /**
   * Listen for incoming and outgoing requests. If the <code>serverEnabled</code> member is <code>
   * false</code> the server for incoming requests is not started. This starts the internal server
   * thread that processes messages.
   *
   * @throws SocketException when the transport is already listening for incoming/outgoing messages.
   * @throws IOException
   */
  public synchronized void listen() throws java.io.IOException {
    if (server != null) {
      throw new SocketException("Port already listening");
    }
    server = new ServerThread();
    if (connectionTimeout > 0) {
      socketCleaner = new Timer(true); // run as daemon
    }
    server.setDaemon(true);
    server.start();
  }

  /**
   * Changes the priority of the server thread for this TCP transport mapping. This method has no
   * effect, if called before {@link #listen()} has been called for this transport mapping.
   *
   * @param newPriority the new priority.
   * @see Thread#setPriority
   * @since 1.2.2
   */
  public void setPriority(int newPriority) {
    ServerThread st = server;
    if (st != null) {
      st.setPriority(newPriority);
    }
  }

  /**
   * Returns the priority of the internal listen thread.
   *
   * @return a value between {@link Thread#MIN_PRIORITY} and {@link Thread#MAX_PRIORITY}.
   * @since 1.2.2
   */
  public int getPriority() {
    ServerThread st = server;
    if (st != null) {
      return st.getPriority();
    } else {
      return Thread.NORM_PRIORITY;
    }
  }

  /**
   * Sets the name of the listen thread for this UDP transport mapping. This method has no effect,
   * if called before {@link #listen()} has been called for this transport mapping.
   *
   * @param name the new thread name.
   * @since 1.6
   */
  public void setThreadName(String name) {
    ServerThread st = server;
    if (st != null) {
      st.setName(name);
    }
  }

  /**
   * Returns the name of the listen thread.
   *
   * @return the thread name if in listening mode, otherwise <code>null</code>.
   * @since 1.6
   */
  public String getThreadName() {
    ServerThread st = server;
    if (st != null) {
      return st.getName();
    } else {
      return null;
    }
  }

  /** Closes all open sockets and stops the internal server thread that processes messages. */
  public void close() {
    ServerThread st = server;
    if (st != null) {
      st.close();
      try {
        st.join();
      } catch (InterruptedException ex) {
        logger.warn(ex);
      }
      server = null;
      for (Iterator it = sockets.values().iterator(); it.hasNext(); ) {
        SocketEntry entry = (SocketEntry) it.next();
        try {
          synchronized (entry) {
            entry.getSocket().close();
          }
          logger.debug("Socket to " + entry.getPeerAddress() + " closed");
        } catch (IOException iox) {
          // ingore
          logger.debug(iox);
        }
      }
      if (socketCleaner != null) {
        socketCleaner.cancel();
      }
      socketCleaner = null;
    }
  }

  /**
   * Closes a connection to the supplied remote address, if it is open. This method is particularly
   * useful when not using a timeout for remote connections.
   *
   * @param remoteAddress the address of the peer socket.
   * @return <code>true</code> if the connection has been closed and <code>false</code> if there was
   *     nothing to close.
   * @since 1.7.1
   */
  public synchronized boolean close(Address remoteAddress) throws IOException {
    if (logger.isDebugEnabled()) {
      logger.debug("Closing socket for peer address " + remoteAddress);
    }
    SocketEntry entry = (SocketEntry) sockets.remove(remoteAddress);
    if (entry != null) {
      synchronized (entry) {
        entry.getSocket().close();
      }
      logger.info("Socket to " + entry.getPeerAddress() + " closed");
      return true;
    }
    return false;
  }

  /**
   * Sends a SNMP message to the supplied address.
   *
   * @param address an <code>TcpAddress</code>. A <code>ClassCastException</code> is thrown if
   *     <code>address</code> is not a <code>TcpAddress</code> instance.
   * @param message byte[] the message to sent.
   * @throws IOException
   */
  public void sendMessage(Address address, byte[] message) throws java.io.IOException {
    if (server == null) {
      listen();
    }
    server.sendMessage(address, message);
  }

  /**
   * Gets the connection timeout. This timeout specifies the time a connection may be idle before it
   * is closed.
   *
   * @return long the idle timeout in milliseconds.
   */
  public long getConnectionTimeout() {
    return connectionTimeout;
  }

  /**
   * Sets the connection timeout. This timeout specifies the time a connection may be idle before it
   * is closed.
   *
   * @param connectionTimeout the idle timeout in milliseconds. A zero or negative value will
   *     disable any timeout and connections opened by this transport mapping will stay opened until
   *     they are explicitly closed.
   */
  public void setConnectionTimeout(long connectionTimeout) {
    this.connectionTimeout = connectionTimeout;
  }

  /**
   * Checks whether a server for incoming requests is enabled.
   *
   * @return boolean
   */
  public boolean isServerEnabled() {
    return serverEnabled;
  }

  public MessageLengthDecoder getMessageLengthDecoder() {
    return messageLengthDecoder;
  }

  /**
   * Sets whether a server for incoming requests should be created when the transport is set into
   * listen state. Setting this value has no effect until the {@link #listen()} method is called (if
   * the transport is already listening, {@link #close()} has to be called before).
   *
   * @param serverEnabled if <code>true</code> if the transport will listens for incoming requests
   *     after {@link #listen()} has been called.
   */
  public void setServerEnabled(boolean serverEnabled) {
    this.serverEnabled = serverEnabled;
  }

  /**
   * Sets the message length decoder. Default message length decoder is the {@link
   * SnmpMesssageLengthDecoder}. The message length decoder must be able to decode the total length
   * of a message for this transport mapping protocol(s).
   *
   * @param messageLengthDecoder a <code>MessageLengthDecoder</code> instance.
   */
  public void setMessageLengthDecoder(MessageLengthDecoder messageLengthDecoder) {
    if (messageLengthDecoder == null) {
      throw new NullPointerException();
    }
    this.messageLengthDecoder = messageLengthDecoder;
  }

  /**
   * Gets the inbound buffer size for incoming requests. When SNMP packets are received that are
   * longer than this maximum size, the messages will be silently dropped and the connection will be
   * closed.
   *
   * @return the maximum inbound buffer size in bytes.
   */
  public int getMaxInboundMessageSize() {
    return super.getMaxInboundMessageSize();
  }

  /**
   * Sets the maximum buffer size for incoming requests. When SNMP packets are received that are
   * longer than this maximum size, the messages will be silently dropped and the connection will be
   * closed.
   *
   * @param maxInboundMessageSize the length of the inbound buffer in bytes.
   */
  public void setMaxInboundMessageSize(int maxInboundMessageSize) {
    this.maxInboundMessageSize = maxInboundMessageSize;
  }

  private synchronized void timeoutSocket(SocketEntry entry) {
    if (connectionTimeout > 0) {
      socketCleaner.schedule(new SocketTimeout(entry), connectionTimeout);
    }
  }

  public boolean isListening() {
    return (server != null);
  }

  class SocketEntry {
    private Socket socket;
    private TcpAddress peerAddress;
    private long lastUse;
    private LinkedList message = new LinkedList();
    private ByteBuffer readBuffer = null;

    public SocketEntry(TcpAddress address, Socket socket) {
      this.peerAddress = address;
      this.socket = socket;
      this.lastUse = System.currentTimeMillis();
    }

    public long getLastUse() {
      return lastUse;
    }

    public void used() {
      lastUse = System.currentTimeMillis();
    }

    public Socket getSocket() {
      return socket;
    }

    public TcpAddress getPeerAddress() {
      return peerAddress;
    }

    public synchronized void addMessage(byte[] message) {
      this.message.add(message);
    }

    public byte[] nextMessage() {
      if (this.message.size() > 0) {
        return (byte[]) this.message.removeFirst();
      }
      return null;
    }

    public void setReadBuffer(ByteBuffer byteBuffer) {
      this.readBuffer = byteBuffer;
    }

    public ByteBuffer getReadBuffer() {
      return readBuffer;
    }

    public String toString() {
      return "SocketEntry[peerAddress="
          + peerAddress
          + ",socket="
          + socket
          + ",lastUse="
          + new Date(lastUse)
          + "]";
    }
  }

  /**
   * Decodes the total length of an SNMP message from the BER header at the
   * start of the supplied buffer. (The class name's spelling is kept as-is
   * for API compatibility.)
   */
  public static class SnmpMesssageLengthDecoder implements MessageLengthDecoder {
    /** Returns the minimum number of header bytes needed to decode the length. */
    public int getMinHeaderLength() {
      return MIN_SNMP_HEADER_LENGTH;
    }

    /**
     * Decodes the BER header and returns the header length together with the
     * encoded payload length.
     *
     * @param buf a buffer positioned at the start of an SNMP message.
     * @return the decoded header/payload lengths.
     * @throws IOException if BER decoding fails.
     */
    public MessageLength getMessageLength(ByteBuffer buf) throws IOException {
      MutableByte berType = new MutableByte();
      BERInputStream headerStream = new BERInputStream(buf);
      int payloadLength = BER.decodeHeader(headerStream, berType);
      int headerLength = (int) headerStream.getPosition();
      return new MessageLength(headerLength, payloadLength);
    }
  }

  /**
   * Timer task that closes a connection once it has been idle for at least
   * {@code connectionTimeout} milliseconds; if the connection was used since
   * the task was scheduled, it re-schedules itself for the remaining time.
   */
  class SocketTimeout extends TimerTask {
    private SocketEntry entry;

    public SocketTimeout(SocketEntry entry) {
      this.entry = entry;
    }

    /** Closes the socket if idle for too long, otherwise re-arms the timeout. */
    public void run() {
      long now = System.currentTimeMillis();
      if ((socketCleaner == null) || (now - entry.getLastUse() >= connectionTimeout)) {
        if (logger.isDebugEnabled()) {
          // Timestamps come from System.currentTimeMillis(), so the idle
          // duration is in milliseconds; the original message incorrectly
          // said "micro seconds".
          logger.debug(
              "Socket has not been used for "
                  + (now - entry.getLastUse())
                  + " milliseconds, closing it");
        }
        sockets.remove(entry.getPeerAddress());
        try {
          synchronized (entry) {
            entry.getSocket().close();
          }
          logger.info("Socket to " + entry.getPeerAddress() + " closed due to timeout");
        } catch (IOException ex) {
          logger.error(ex);
        }
      } else {
        // Connection was used since scheduling: re-schedule for the time
        // remaining until it would actually time out.
        if (logger.isDebugEnabled()) {
          logger.debug("Scheduling " + ((entry.getLastUse() + connectionTimeout) - now));
        }
        socketCleaner.schedule(
            new SocketTimeout(entry), (entry.getLastUse() + connectionTimeout) - now);
      }
    }
  }

  /**
   * Worker thread that multiplexes all TCP I/O of this transport mapping over
   * a single NIO {@link Selector}: it accepts inbound connections (when
   * {@code serverEnabled} is set), completes outbound connects, and reads and
   * writes SNMP messages on the registered channels.
   */
  class ServerThread extends Thread {
    // Shared receive buffer, sized to getMaxInboundMessageSize().
    private byte[] buf;
    private volatile boolean stop = false;
    private Throwable lastError = null;
    private ServerSocketChannel ssc;
    private Selector selector;

    // Socket entries whose channels still need to be registered with the
    // selector; produced by sendMessage(), drained by processPending() on
    // this (the selector) thread. Guarded by its own monitor.
    private LinkedList pending = new LinkedList();

    /**
     * Opens the selector and, in server mode, binds a non-blocking server
     * socket to the configured TCP address and registers it for OP_ACCEPT.
     *
     * @throws IOException if opening the selector or binding the socket fails.
     */
    public ServerThread() throws IOException {
      setName("DefaultTCPTransportMapping_" + getAddress());
      buf = new byte[getMaxInboundMessageSize()];
      // Selector for incoming requests
      selector = Selector.open();

      if (serverEnabled) {
        // Create a new server socket and set to non blocking mode
        ssc = ServerSocketChannel.open();
        ssc.configureBlocking(false);

        // Bind the server socket
        InetSocketAddress isa =
            new InetSocketAddress(tcpAddress.getInetAddress(), tcpAddress.getPort());
        ssc.socket().bind(isa);
        // Register accepts on the server socket with the selector. This
        // step tells the selector that the socket wants to be put on the
        // ready list when accept operations occur, so allowing multiplexed
        // non-blocking I/O to take place.
        ssc.register(selector, SelectionKey.OP_ACCEPT);
      }
    }

    /**
     * Registers all pending socket entries with the selector: channels whose
     * socket is already connected are registered for OP_WRITE (a message is
     * queued), not-yet-connected ones for OP_CONNECT. On registration failure
     * the channel is closed, a STATE_CLOSED event is fired and the error is
     * recorded in {@code lastError}.
     */
    private void processPending() {
      synchronized (pending) {
        while (pending.size() > 0) {
          SocketEntry entry = (SocketEntry) pending.removeFirst();
          try {
            // Register the channel with the selector, indicating
            // interest in connection completion and attaching the
            // target object so that we can get the target back
            // after the key is added to the selector's
            // selected-key set
            if (entry.getSocket().isConnected()) {
              entry.getSocket().getChannel().register(selector, SelectionKey.OP_WRITE, entry);
            } else {
              entry.getSocket().getChannel().register(selector, SelectionKey.OP_CONNECT, entry);
            }

          } catch (IOException iox) {
            logger.error(iox);
            // Something went wrong, so close the channel and
            // record the failure
            try {
              entry.getSocket().getChannel().close();
              TransportStateEvent e =
                  new TransportStateEvent(
                      DefaultTcpTransportMapping.this,
                      entry.getPeerAddress(),
                      TransportStateEvent.STATE_CLOSED,
                      iox);
              fireConnectionStateChanged(e);
            } catch (IOException ex) {
              logger.error(ex);
            }
            lastError = iox;
          }
        }
      }
    }

    /** Returns the last I/O error recorded by this thread, or {@code null}. */
    public Throwable getLastError() {
      return lastError;
    }

    /**
     * Queues a message for sending to the given address, opening a new
     * non-blocking connection if no usable socket exists for it. The actual
     * write happens on the selector thread once the channel is writable.
     *
     * @param address the target TCP address.
     * @param message the message bytes to send.
     * @throws IOException if a new connection cannot be initiated.
     */
    public void sendMessage(Address address, byte[] message) throws java.io.IOException {
      Socket s = null;
      SocketEntry entry = (SocketEntry) sockets.get(address);
      if (logger.isDebugEnabled()) {
        logger.debug("Looking up connection for destination '" + address + "' returned: " + entry);
        logger.debug(sockets.toString());
      }
      if (entry != null) {
        s = entry.getSocket();
      }
      if ((s == null) || (s.isClosed())) {
        if (logger.isDebugEnabled()) {
          logger.debug("Socket for address '" + address + "' is closed, opening it...");
        }
        SocketChannel sc = null;
        try {
          // Open the channel, set it to non-blocking, initiate connect
          sc = SocketChannel.open();
          sc.configureBlocking(false);
          sc.connect(
              new InetSocketAddress(
                  ((TcpAddress) address).getInetAddress(), ((TcpAddress) address).getPort()));
          s = sc.socket();
          entry = new SocketEntry((TcpAddress) address, s);
          entry.addMessage(message);
          sockets.put(address, entry);

          synchronized (pending) {
            pending.add(entry);
          }

          // Wake the selector so processPending() registers the new channel.
          selector.wakeup();
          logger.debug("Trying to connect to " + address);
        } catch (IOException iox) {
          logger.error(iox);
          throw iox;
        }
      } else {
        // Reuse the existing connection: queue the message and wake the
        // selector so the entry is (re-)registered for OP_WRITE.
        entry.addMessage(message);
        synchronized (pending) {
          pending.add(entry);
        }
        selector.wakeup();
      }
    }

    /**
     * Main selector loop: waits for ready keys and dispatches accept,
     * connect, read and write events until {@code stop} is set, then closes
     * the server socket (if any).
     */
    public void run() {
      // Here's where everything happens. The select method will
      // return when any operations registered above have occurred, the
      // thread has been interrupted, etc.
      try {
        while (!stop) {
          try {
            if (selector.select() > 0) {
              if (stop) {
                break;
              }
              // Someone is ready for I/O, get the ready keys
              Set readyKeys = selector.selectedKeys();
              Iterator it = readyKeys.iterator();

              // Walk through the ready-key collection and process each event.
              while (it.hasNext()) {
                SelectionKey sk = (SelectionKey) it.next();
                it.remove();
                SocketChannel readChannel = null;
                TcpAddress incomingAddress = null;
                if (sk.isAcceptable()) {
                  // The key indexes into the selector so you
                  // can retrieve the socket that's ready for I/O
                  ServerSocketChannel nextReady = (ServerSocketChannel) sk.channel();
                  // Accept the inbound connection and register it for reading.
                  // NOTE(review): accept() can return null for a non-blocking
                  // server channel when no connection is actually pending --
                  // that would NPE here; confirm whether this can occur.
                  Socket s = nextReady.accept().socket();
                  readChannel = s.getChannel();
                  readChannel.configureBlocking(false);
                  readChannel.register(selector, SelectionKey.OP_READ);

                  incomingAddress = new TcpAddress(s.getInetAddress(), s.getPort());
                  SocketEntry entry = new SocketEntry(incomingAddress, s);
                  sockets.put(incomingAddress, entry);
                  timeoutSocket(entry);
                  TransportStateEvent e =
                      new TransportStateEvent(
                          DefaultTcpTransportMapping.this,
                          incomingAddress,
                          TransportStateEvent.STATE_CONNECTED,
                          null);
                  fireConnectionStateChanged(e);
                } else if (sk.isReadable()) {
                  readChannel = (SocketChannel) sk.channel();
                  incomingAddress =
                      new TcpAddress(
                          readChannel.socket().getInetAddress(), readChannel.socket().getPort());
                } else if (sk.isWritable()) {
                  try {
                    SocketEntry entry = (SocketEntry) sk.attachment();
                    SocketChannel sc = (SocketChannel) sk.channel();
                    if (entry != null) {
                      writeMessage(entry, sc);
                    }
                  } catch (IOException iox) {
                    if (logger.isDebugEnabled()) {
                      iox.printStackTrace();
                    }
                    logger.warn(iox);
                    // NOTE(review): incomingAddress is null on this code path
                    // (it is only set for accept/read events), so the event
                    // below carries a null address; confirm this is intended.
                    TransportStateEvent e =
                        new TransportStateEvent(
                            DefaultTcpTransportMapping.this,
                            incomingAddress,
                            TransportStateEvent.STATE_DISCONNECTED_REMOTELY,
                            iox);
                    fireConnectionStateChanged(e);
                    sk.cancel();
                  }
                } else if (sk.isConnectable()) {
                  try {
                    SocketEntry entry = (SocketEntry) sk.attachment();
                    SocketChannel sc = (SocketChannel) sk.channel();
                    if ((!sc.isConnected()) && (sc.finishConnect())) {
                      sc.configureBlocking(false);
                      logger.debug("Connected to " + entry.getPeerAddress());
                      // make sure connection is closed if not used for
                      // connectionTimeout milliseconds
                      timeoutSocket(entry);
                      sc.register(selector, SelectionKey.OP_WRITE, entry);
                    }
                    TransportStateEvent e =
                        new TransportStateEvent(
                            DefaultTcpTransportMapping.this,
                            incomingAddress,
                            TransportStateEvent.STATE_CONNECTED,
                            null);
                    fireConnectionStateChanged(e);
                  } catch (IOException iox) {
                    if (logger.isDebugEnabled()) {
                      iox.printStackTrace();
                    }
                    logger.warn(iox);
                    sk.cancel();
                  }
                }

                // Accept and read events set readChannel; try to read a
                // message from it now.
                if (readChannel != null) {
                  try {
                    readMessage(sk, readChannel, incomingAddress);
                  } catch (IOException iox) {
                    // IO exception -> channel closed remotely
                    if (logger.isDebugEnabled()) {
                      iox.printStackTrace();
                    }
                    logger.warn(iox);
                    sk.cancel();
                    readChannel.close();
                    TransportStateEvent e =
                        new TransportStateEvent(
                            DefaultTcpTransportMapping.this,
                            incomingAddress,
                            TransportStateEvent.STATE_DISCONNECTED_REMOTELY,
                            iox);
                    fireConnectionStateChanged(e);
                  }
                }
              }
            }
          } catch (NullPointerException npex) {
            // There seems to happen a NullPointerException within the select()
            npex.printStackTrace();
            logger.warn("NullPointerException within select()?");
          }
          // Register any channels queued by sendMessage() since last pass.
          processPending();
        }
        if (ssc != null) {
          ssc.close();
        }
      } catch (IOException iox) {
        logger.error(iox);
        lastError = iox;
      }
      // Loop exited due to an error rather than an explicit stop: mark the
      // thread stopped and clear the outer class's server reference.
      if (!stop) {
        stop = true;
        synchronized (DefaultTcpTransportMapping.this) {
          server = null;
        }
      }
    }

    /**
     * Reads an SNMP message (possibly in several fragments) from the channel.
     * First resumes a pending partial read if one is buffered in the socket
     * entry; otherwise reads the BER header, derives the total message
     * length, and either dispatches the complete message or buffers the
     * partial content for the next read event. A negative read count means
     * the peer closed the connection.
     *
     * @param sk the selection key for the channel.
     * @param readChannel the channel to read from.
     * @param incomingAddress the peer address the message originates from.
     * @throws IOException if reading from the channel fails.
     */
    private void readMessage(SelectionKey sk, SocketChannel readChannel, TcpAddress incomingAddress)
        throws IOException {
      // note that socket has been used
      SocketEntry entry = (SocketEntry) sockets.get(incomingAddress);
      if (entry != null) {
        entry.used();
        ByteBuffer readBuffer = entry.getReadBuffer();
        if (readBuffer != null) {
          // Resume a previously started partial read.
          readChannel.read(readBuffer);
          if (readBuffer.hasRemaining()) {
            readChannel.register(selector, SelectionKey.OP_READ, entry);
          } else {
            dispatchMessage(incomingAddress, readBuffer, readBuffer.capacity());
          }
          return;
        }
      }
      ByteBuffer byteBuffer = ByteBuffer.wrap(buf);
      // Read just enough bytes to decode the message length from the header.
      byteBuffer.limit(messageLengthDecoder.getMinHeaderLength());
      long bytesRead = readChannel.read(byteBuffer);
      if (logger.isDebugEnabled()) {
        logger.debug("Reading header " + bytesRead + " bytes from " + incomingAddress);
      }
      MessageLength messageLength = new MessageLength(0, Integer.MIN_VALUE);
      if (bytesRead == messageLengthDecoder.getMinHeaderLength()) {
        messageLength = messageLengthDecoder.getMessageLength(ByteBuffer.wrap(buf));
        if (logger.isDebugEnabled()) {
          logger.debug("Message length is " + messageLength);
        }
        if ((messageLength.getMessageLength() > getMaxInboundMessageSize())
            || (messageLength.getMessageLength() <= 0)) {
          logger.error(
              "Received message length "
                  + messageLength
                  + " is greater than inboundBufferSize "
                  + getMaxInboundMessageSize());
          // NOTE(review): entry may be null here (no lookup hit above), which
          // would NPE on the synchronized block; confirm entries always exist
          // for channels that reach this point.
          synchronized (entry) {
            entry.getSocket().close();
            logger.info("Socket to " + entry.getPeerAddress() + " closed due to an error");
          }
        } else {
          // Read the remainder of the message (header already in buf).
          byteBuffer.limit(messageLength.getMessageLength());
          bytesRead += readChannel.read(byteBuffer);
          if (bytesRead == messageLength.getMessageLength()) {
            dispatchMessage(incomingAddress, byteBuffer, bytesRead);
          } else {
            // Partial message: stash what we have and wait for more data.
            byte[] message = new byte[byteBuffer.limit()];
            byteBuffer.flip();
            byteBuffer.get(message, 0, byteBuffer.limit() - byteBuffer.remaining());
            entry.setReadBuffer(ByteBuffer.wrap(message));
          }
          readChannel.register(selector, SelectionKey.OP_READ, entry);
        }
      } else if (bytesRead < 0) {
        // End-of-stream: the peer closed the connection.
        logger.debug("Socket closed remotely");
        sk.cancel();
        readChannel.close();
        TransportStateEvent e =
            new TransportStateEvent(
                DefaultTcpTransportMapping.this,
                incomingAddress,
                TransportStateEvent.STATE_DISCONNECTED_REMOTELY,
                null);
        fireConnectionStateChanged(e);
      }
    }

    /**
     * Hands a completely received message to the message dispatcher. When
     * asynchronous processing is enabled the bytes are copied first, because
     * the shared receive buffer will be reused for the next read.
     *
     * @param incomingAddress the peer the message came from.
     * @param byteBuffer buffer holding the message (flipped here).
     * @param bytesRead the number of valid bytes in the buffer.
     */
    private void dispatchMessage(
        TcpAddress incomingAddress, ByteBuffer byteBuffer, long bytesRead) {
      byteBuffer.flip();
      if (logger.isDebugEnabled()) {
        logger.debug(
            "Received message from "
                + incomingAddress
                + " with length "
                + bytesRead
                + ": "
                + new OctetString(byteBuffer.array(), 0, (int) bytesRead).toHexString());
      }
      ByteBuffer bis;
      if (isAsyncMsgProcessingSupported()) {
        byte[] bytes = new byte[(int) bytesRead];
        System.arraycopy(byteBuffer.array(), 0, bytes, 0, (int) bytesRead);
        bis = ByteBuffer.wrap(bytes);
      } else {
        bis = ByteBuffer.wrap(byteBuffer.array(), 0, (int) bytesRead);
      }
      fireProcessMessage(incomingAddress, bis);
    }

    /**
     * Writes the next queued message (if any) of the entry to the channel and
     * switches the channel's interest back to OP_READ.
     *
     * @param entry the socket entry whose queue is drained by one message.
     * @param sc the channel to write to.
     * @throws IOException if the write or registration fails.
     */
    private void writeMessage(SocketEntry entry, SocketChannel sc) throws IOException {
      byte[] message = entry.nextMessage();
      if (message != null) {
        ByteBuffer buffer = ByteBuffer.wrap(message);
        sc.write(buffer);
        if (logger.isDebugEnabled()) {
          logger.debug(
              "Send message with length "
                  + message.length
                  + " to "
                  + entry.getPeerAddress()
                  + ": "
                  + new OctetString(message).toHexString());
        }
        // NOTE(review): this registration drops the entry attachment and does
        // not check for a partial write (buffer may have remaining bytes);
        // confirm messages always fit in one write on this platform.
        sc.register(selector, SelectionKey.OP_READ);
      }
    }

    /** Requests the selector loop to stop and interrupts the server thread. */
    public void close() {
      stop = true;
      ServerThread st = server;
      if (st != null) {
        st.interrupt();
      }
    }
  }
}
// ===== Example #4 ("예제 #4" — sample separator with stray score "0"; extraction artifact, not code) =====
/**
 * The <code>DefaultUdpTransportMapping</code> implements a UDP transport mapping based on Java
 * standard IO and using an internal thread for listening on the inbound socket.
 *
 * @author Frank Fock
 * @version 1.9
 */
public class DefaultUdpTransportMapping extends UdpTransportMapping {

  private static final LogAdapter logger = LogFactory.getLogger(DefaultUdpTransportMapping.class);

  // Datagram socket used for both sending and receiving; (re-)created lazily
  // by ensureSocket() and cleared by close().
  protected DatagramSocket socket = null;
  // Worker task wrapping the listen thread; non-null while listening.
  protected WorkerTask listener;
  protected ListenThread listenerThread;
  // SO_TIMEOUT for receive() in milliseconds; 0 means wait forever.
  private int socketTimeout = 0;

  private int receiveBufferSize = 0; // not set by default

  /**
   * Creates a UDP transport with an arbitrary local port on all local interfaces.
   *
   * @throws IOException if socket binding fails.
   */
  public DefaultUdpTransportMapping() throws IOException {
    super(new UdpAddress(InetAddress.getLocalHost(), 0));
    socket = new DatagramSocket(udpAddress.getPort());
  }

  /**
   * Creates a UDP transport with optional reusing the address if is currently in timeout state
   * (TIME_WAIT) after the connection is closed.
   *
   * @param udpAddress the local address for sending and receiving of UDP messages.
   * @param reuseAddress if <code>true</code> addresses are reused which provides faster socket
   *     binding if an application is restarted for instance.
   * @throws IOException if socket binding fails.
   * @since 1.7.3
   */
  public DefaultUdpTransportMapping(UdpAddress udpAddress, boolean reuseAddress)
      throws IOException {
    super(udpAddress);
    // Create unbound first so SO_REUSEADDR can be set before binding.
    socket = new DatagramSocket(null);
    socket.setReuseAddress(reuseAddress);
    final SocketAddress addr =
        new InetSocketAddress(udpAddress.getInetAddress(), udpAddress.getPort());
    socket.bind(addr);
  }

  /**
   * Creates a UDP transport on the specified address. The address will not be reused if it is
   * currently in timeout state (TIME_WAIT).
   *
   * @param udpAddress the local address for sending and receiving of UDP messages.
   * @throws IOException if socket binding fails.
   */
  public DefaultUdpTransportMapping(UdpAddress udpAddress) throws IOException {
    super(udpAddress);
    socket = new DatagramSocket(udpAddress.getPort(), udpAddress.getInetAddress());
  }

  /**
   * Sends a message as a single datagram packet to the supplied target.
   *
   * @param targetAddress the UDP address to send to.
   * @param message the message bytes.
   * @param tmStateReference the transport state reference (not used here).
   * @throws IOException if the socket cannot be created or sending fails.
   */
  public void sendMessage(
      UdpAddress targetAddress, byte[] message, TransportStateReference tmStateReference)
      throws java.io.IOException {
    InetSocketAddress targetSocketAddress =
        new InetSocketAddress(targetAddress.getInetAddress(), targetAddress.getPort());
    if (logger.isDebugEnabled()) {
      logger.debug(
          "Sending message to "
              + targetAddress
              + " with length "
              + message.length
              + ": "
              + new OctetString(message).toHexString());
    }
    DatagramSocket s = ensureSocket();
    s.send(new DatagramPacket(message, message.length, targetSocketAddress));
  }

  /**
   * Closes the socket and stops the listener thread.
   *
   * @throws IOException if closing fails (not thrown by this implementation).
   */
  public void close() throws IOException {
    boolean interrupted = false;
    WorkerTask l = listener;
    if (l != null) {
      l.terminate();
      l.interrupt();
      // Only wait for the worker when a finite socket timeout guarantees the
      // blocking receive() will return.
      if (socketTimeout > 0) {
        try {
          l.join();
        } catch (InterruptedException ex) {
          interrupted = true;
          logger.warn(ex);
        }
      }
      listener = null;
    }
    DatagramSocket closingSocket = socket;
    if ((closingSocket != null) && (!closingSocket.isClosed())) {
      closingSocket.close();
    }
    socket = null;
    // Restore the interrupt status swallowed while joining.
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
  }

  /**
   * Starts the listener thread that accepts incoming messages. The thread is started in daemon mode
   * and thus it will not prevent application termination. Nevertheless, the {@link #close()} method
   * should be called to stop the listen thread gracefully and free associated resources.
   *
   * @throws IOException if the socket cannot be created, or a SocketException if already listening.
   */
  public synchronized void listen() throws IOException {
    if (listener != null) {
      throw new SocketException("Port already listening");
    }
    ensureSocket();
    listenerThread = new ListenThread();
    listener =
        SNMP4JSettings.getThreadFactory()
            .createWorkerThread("DefaultUDPTransportMapping_" + getAddress(), listenerThread, true);
    // WorkerTask.run() starts execution of the wrapped listen thread.
    listener.run();
  }

  /**
   * Returns the current datagram socket, creating and configuring a new one
   * bound to the configured port if none exists (e.g., after {@link #close()}).
   *
   * @return a non-null datagram socket.
   * @throws SocketException if socket creation or configuration fails.
   */
  private synchronized DatagramSocket ensureSocket() throws SocketException {
    DatagramSocket s = socket;
    if (s == null) {
      s = new DatagramSocket(udpAddress.getPort());
      s.setSoTimeout(socketTimeout);
      this.socket = s;
    }
    return s;
  }

  /**
   * Changes the priority of the listen thread for this UDP transport mapping. This method has no
   * effect, if called before {@link #listen()} has been called for this transport mapping.
   *
   * @param newPriority the new priority.
   * @see Thread#setPriority(int)
   * @since 1.2.2
   */
  public void setPriority(int newPriority) {
    WorkerTask lt = listener;
    if (lt instanceof Thread) {
      ((Thread) lt).setPriority(newPriority);
    }
  }

  /**
   * Returns the priority of the internal listen thread.
   *
   * @return a value between {@link Thread#MIN_PRIORITY} and {@link Thread#MAX_PRIORITY}.
   * @since 1.2.2
   */
  public int getPriority() {
    WorkerTask lt = listener;
    if (lt instanceof Thread) {
      return ((Thread) lt).getPriority();
    } else {
      return Thread.NORM_PRIORITY;
    }
  }

  /**
   * Sets the name of the listen thread for this UDP transport mapping. This method has no effect,
   * if called before {@link #listen()} has been called for this transport mapping.
   *
   * @param name the new thread name.
   * @since 1.6
   */
  public void setThreadName(String name) {
    WorkerTask lt = listener;
    if (lt instanceof Thread) {
      ((Thread) lt).setName(name);
    }
  }

  /**
   * Returns the name of the listen thread.
   *
   * @return the thread name if in listening mode, otherwise <code>null</code>.
   * @since 1.6
   */
  public String getThreadName() {
    WorkerTask lt = listener;
    if (lt instanceof Thread) {
      return ((Thread) lt).getName();
    } else {
      return null;
    }
  }

  /**
   * Sets the maximum inbound message size in bytes. Effective for listen
   * threads started after the call (the receive buffer is sized from it).
   *
   * @param maxInboundMessageSize the new maximum inbound message size.
   */
  public void setMaxInboundMessageSize(int maxInboundMessageSize) {
    this.maxInboundMessageSize = maxInboundMessageSize;
  }

  /**
   * Returns the socket timeout. 0 returns implies that the option is disabled (i.e., timeout of
   * infinity).
   *
   * @return the socket timeout setting.
   */
  public int getSocketTimeout() {
    return socketTimeout;
  }

  /**
   * Gets the requested receive buffer size for the underlying UDP socket. This size might not
   * reflect the actual size of the receive buffer, which is implementation specific.
   *
   * @return <=0 if the default buffer size of the OS is used, or a value >0 if the user specified a
   *     buffer size.
   */
  public int getReceiveBufferSize() {
    return receiveBufferSize;
  }

  /**
   * Sets the receive buffer size, which should be > the maximum inbound message size. This method
   * has to be called before {@link #listen()} to be effective.
   *
   * @param receiveBufferSize an integer value >0 and > {@link #getMaxInboundMessageSize()}.
   */
  public void setReceiveBufferSize(int receiveBufferSize) {
    if (receiveBufferSize <= 0) {
      throw new IllegalArgumentException("Receive buffer size must be > 0");
    }
    this.receiveBufferSize = receiveBufferSize;
  }

  /**
   * Sets the socket timeout in milliseconds.
   *
   * @param socketTimeout the socket timeout for incoming messages in milliseconds. A timeout of
   *     zero is interpreted as an infinite timeout.
   */
  public void setSocketTimeout(int socketTimeout) {
    this.socketTimeout = socketTimeout;
    if (socket != null) {
      try {
        socket.setSoTimeout(socketTimeout);
      } catch (SocketException ex) {
        throw new RuntimeException(ex);
      }
    }
  }

  /**
   * Indicates whether a listener worker is currently active.
   *
   * @return <code>true</code> if listening, <code>false</code> otherwise.
   */
  public boolean isListening() {
    return (listener != null);
  }

  /**
   * Worker task that blocks on the datagram socket, receives packets, and
   * hands each received message to the message dispatcher until terminated
   * or the socket becomes unusable.
   */
  class ListenThread implements WorkerTask {

    // Receive buffer, sized to the maximum inbound message size.
    private byte[] buf;
    private volatile boolean stop = false;

    public ListenThread() throws SocketException {
      buf = new byte[getMaxInboundMessageSize()];
    }

    /** Receive loop; runs until stop is requested or the socket goes away. */
    public void run() {
      DatagramSocket socketCopy = socket;
      if (socketCopy != null) {
        try {
          socketCopy.setSoTimeout(getSocketTimeout());
          if (receiveBufferSize > 0) {
            socketCopy.setReceiveBufferSize(Math.max(receiveBufferSize, maxInboundMessageSize));
          }
          if (logger.isDebugEnabled()) {
            logger.debug(
                "UDP receive buffer size for socket "
                    + getAddress()
                    + " is set to: "
                    + socketCopy.getReceiveBufferSize());
          }
        } catch (SocketException ex) {
          logger.error(ex);
          setSocketTimeout(0);
        }
      }
      while (!stop) {
        DatagramPacket packet =
            new DatagramPacket(buf, buf.length, udpAddress.getInetAddress(), udpAddress.getPort());
        try {
          // Re-read the socket each iteration: close() may null it out.
          socketCopy = socket;
          try {
            if (socketCopy == null) {
              stop = true;
              continue;
            }
            socketCopy.receive(packet);
          } catch (InterruptedIOException iiox) {
            // Timed out without data: just poll the stop flag again.
            if (iiox.bytesTransferred <= 0) {
              continue;
            }
          }
          if (logger.isDebugEnabled()) {
            logger.debug(
                "Received message from "
                    + packet.getAddress()
                    + "/"
                    + packet.getPort()
                    + " with length "
                    + packet.getLength()
                    + ": "
                    + new OctetString(packet.getData(), 0, packet.getLength()).toHexString());
          }
          ByteBuffer bis;
          // If messages are processed asynchronously (i.e. multi-threaded)
          // then we have to copy the buffer's content here!
          if (isAsyncMsgProcessingSupported()) {
            byte[] bytes = new byte[packet.getLength()];
            System.arraycopy(packet.getData(), 0, bytes, 0, bytes.length);
            bis = ByteBuffer.wrap(bytes);
          } else {
            bis = ByteBuffer.wrap(packet.getData());
          }
          TransportStateReference stateReference =
              new TransportStateReference(
                  DefaultUdpTransportMapping.this,
                  udpAddress,
                  null,
                  SecurityLevel.undefined,
                  SecurityLevel.undefined,
                  false,
                  socketCopy);
          fireProcessMessage(
              new UdpAddress(packet.getAddress(), packet.getPort()), bis, stateReference);
        } catch (SocketTimeoutException stex) {
          // ignore
        } catch (PortUnreachableException purex) {
          synchronized (DefaultUdpTransportMapping.this) {
            listener = null;
          }
          logger.error(purex);
          if (logger.isDebugEnabled()) {
            purex.printStackTrace();
          }
          if (SNMP4JSettings.isFowardRuntimeExceptions()) {
            throw new RuntimeException(purex);
          }
          break;
        } catch (SocketException soex) {
          if (!stop) {
            logger.error(
                "Socket for transport mapping " + toString() + " error: " + soex.getMessage(),
                soex);
          }
          if (SNMP4JSettings.isFowardRuntimeExceptions()) {
            stop = true;
            throw new RuntimeException(soex);
          }
        } catch (IOException iox) {
          logger.warn(iox);
          if (logger.isDebugEnabled()) {
            iox.printStackTrace();
          }
          if (SNMP4JSettings.isFowardRuntimeExceptions()) {
            throw new RuntimeException(iox);
          }
        }
      }
      // Loop finished: detach the listener and close the socket if still open.
      synchronized (DefaultUdpTransportMapping.this) {
        listener = null;
        stop = true;
        DatagramSocket closingSocket = socket;
        if ((closingSocket != null) && (!closingSocket.isClosed())) {
          closingSocket.close();
        }
      }
      if (logger.isDebugEnabled()) {
        logger.debug("Worker task stopped:" + getClass().getName());
      }
    }

    /** Requests the receive loop to stop after the current iteration. */
    public void close() {
      stop = true;
    }

    /** Stops the loop and logs the termination request. */
    public void terminate() {
      close();
      if (logger.isDebugEnabled()) {
        logger.debug("Terminated worker task: " + getClass().getName());
      }
    }

    /**
     * NOTE(review): this join() only logs and returns immediately; it does
     * not wait for the receive loop to finish. Callers relying on join
     * semantics should verify this is intentional.
     */
    public void join() throws InterruptedException {
      if (logger.isDebugEnabled()) {
        logger.debug("Joining worker task: " + getClass().getName());
      }
    }

    /** Logs the interrupt request and stops the loop via {@link #close()}. */
    public void interrupt() {
      if (logger.isDebugEnabled()) {
        logger.debug("Interrupting worker task: " + getClass().getName());
      }
      close();
    }
  }
}
// ===== Example #5 ("예제 #5" — sample separator with stray score "0"; extraction artifact, not code) =====
/**
 * ******************************************************** The Secondary NameNode is a helper to
 * the primary NameNode. The Secondary is responsible for supporting periodic checkpoints of the
 * HDFS metadata. The current design allows only one Secondary NameNode per HDFS cluster.
 *
 * <p>The Secondary NameNode is a daemon that periodically wakes up (determined by the schedule
 * specified in the configuration), triggers a periodic checkpoint and then goes back to sleep. The
 * Secondary NameNode uses the ClientProtocol to talk to the primary NameNode.
 *
 * <p>********************************************************
 */
public class SecondaryNameNode implements Runnable {

  public static final Log LOG = LogFactory.getLog(SecondaryNameNode.class.getName());

  private String fsName;
  private CheckpointStorage checkpointImage;
  private FSNamesystem namesystem;

  private NamenodeProtocol namenode;
  private Configuration conf;
  private InetSocketAddress nameNodeAddr;
  private volatile boolean shouldRun;
  private HttpServer infoServer;
  private int infoPort;
  private String infoBindAddress;

  private Collection<File> checkpointDirs;
  private Collection<File> checkpointEditsDirs;
  private long checkpointPeriod; // in seconds
  private long checkpointSize; // size (in MB) of current Edit Log

  /** Utility class to facilitate junit test error simulation. */
  static class ErrorSimulator {
    private static boolean[] simulation = null; // error simulation events

    static void initializeErrorSimulationEvent(int numberOfEvents) {
      simulation = new boolean[numberOfEvents];
      for (int i = 0; i < numberOfEvents; i++) {
        simulation[i] = false;
      }
    }

    static boolean getErrorSimulation(int index) {
      if (simulation == null) return false;
      assert (index < simulation.length);
      return simulation[index];
    }

    static void setErrorSimulation(int index) {
      assert (index < simulation.length);
      simulation[index] = true;
    }

    static void clearErrorSimulation(int index) {
      assert (index < simulation.length);
      simulation[index] = false;
    }
  }

  FSImage getFSImage() {
    return checkpointImage;
  }

  /** Create a connection to the primary namenode. */
  public SecondaryNameNode(Configuration conf) throws IOException {
    try {
      initialize(conf);
    } catch (IOException e) {
      shutdown();
      throw e;
    }
  }

  /** Initialize SecondaryNameNode. */
  private void initialize(Configuration conf) throws IOException {
    // initiate Java VM metrics
    JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));

    // Create connection to the namenode.
    shouldRun = true;
    nameNodeAddr = NameNode.getAddress(conf);

    this.conf = conf;
    this.namenode =
        (NamenodeProtocol)
            RPC.waitForProxy(
                NamenodeProtocol.class, NamenodeProtocol.versionID, nameNodeAddr, conf);

    // initialize checkpoint directories
    fsName = getInfoServer();
    checkpointDirs = FSImage.getCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointImage = new CheckpointStorage(conf);
    checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

    // Initialize other scheduling parameters from the configuration
    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

    // initialize the webserver for uploading files.
    String infoAddr =
        NetUtils.getServerAddress(
            conf,
            "dfs.secondary.info.bindAddress",
            "dfs.secondary.info.port",
            "dfs.secondary.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("name.system.image", checkpointImage);
    this.infoServer.setAttribute("name.conf", conf);
    infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
    infoServer.start();

    // The web-server port can be ephemeral... ensure we have the correct info
    infoPort = infoServer.getPort();
    conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
    LOG.warn(
        "Checkpoint Period   :"
            + checkpointPeriod
            + " secs "
            + "("
            + checkpointPeriod / 60
            + " min)");
    LOG.warn(
        "Log Size Trigger    :"
            + checkpointSize
            + " bytes "
            + "("
            + checkpointSize / 1024
            + " KB)");
  }

  /** Shut down this instance of the datanode. Returns only after shutdown is complete. */
  public void shutdown() {
    shouldRun = false;
    try {
      if (infoServer != null) infoServer.stop();
    } catch (Exception e) {
      LOG.warn("Exception shutting down SecondaryNameNode", e);
    }
    try {
      if (checkpointImage != null) checkpointImage.close();
    } catch (IOException e) {
      LOG.warn(StringUtils.stringifyException(e));
    }
  }

  //
  // The main work loop
  //
  public void run() {

    //
    // Poll the Namenode (once every 5 minutes) to find the size of the
    // pending edit log.
    //
    long period = 5 * 60; // 5 minutes
    long lastCheckpointTime = 0;
    if (checkpointPeriod < period) {
      period = checkpointPeriod;
    }

    while (shouldRun) {
      try {
        Thread.sleep(1000 * period);
      } catch (InterruptedException ie) {
        // do nothing
      }
      if (!shouldRun) {
        break;
      }
      try {
        long now = System.currentTimeMillis();

        long size = namenode.getEditLogSize();
        if (size >= checkpointSize || now >= lastCheckpointTime + 1000 * checkpointPeriod) {
          doCheckpoint();
          lastCheckpointTime = now;
        }
      } catch (IOException e) {
        LOG.error("Exception in doCheckpoint: ");
        LOG.error(StringUtils.stringifyException(e));
        e.printStackTrace();
        checkpointImage.imageDigest = null;
      } catch (Throwable e) {
        LOG.error("Throwable Exception in doCheckpoint: ");
        LOG.error(StringUtils.stringifyException(e));
        e.printStackTrace();
        Runtime.getRuntime().exit(-1);
      }
    }
  }

  /**
   * Download <code>fsimage</code> and <code>edits</code> files from the name-node.
   *
   * @return true if a new image has been downloaded and needs to be loaded
   * @throws IOException
   */
  private boolean downloadCheckpointFiles(CheckpointSignature sig) throws IOException {

    checkpointImage.cTime = sig.cTime;
    checkpointImage.checkpointTime = sig.checkpointTime;

    boolean downloadImage = true;
    String fileid;
    File[] srcNames;
    if (sig.imageDigest.equals(checkpointImage.imageDigest)) {
      downloadImage = false;
      LOG.info("Image has not changed. Will not download image.");
    } else {
      // get fsimage
      srcNames = checkpointImage.getImageFiles();
      assert srcNames.length > 0 : "No checkpoint targets.";
      fileid = "getimage=1";
      TransferFsImage.getFileClient(fsName, fileid, srcNames, false);
      checkpointImage.imageDigest = sig.imageDigest;
      LOG.info(
          "Downloaded file " + srcNames[0].getName() + " size " + srcNames[0].length() + " bytes.");
    }
    // get edits file
    fileid = "getedit=1";
    srcNames = checkpointImage.getEditsFiles();
    assert srcNames.length > 0 : "No checkpoint targets.";
    TransferFsImage.getFileClient(fsName, fileid, srcNames, false);
    LOG.info(
        "Downloaded file " + srcNames[0].getName() + " size " + srcNames[0].length() + " bytes.");

    checkpointImage.checkpointUploadDone(null);

    return downloadImage;
  }

  /** Copy the new fsimage into the NameNode */
  private void putFSImage(CheckpointSignature sig) throws IOException {
    String fileid =
        "putimage=1&port="
            + infoPort
            + "&machine="
            + InetAddress.getLocalHost().getHostAddress()
            + "&token="
            + sig.toString();
    LOG.info("Posted URL " + fsName + fileid);
    TransferFsImage.getFileClient(fsName, fileid, (File[]) null, false);
  }

  /** Returns the Jetty server that the Namenode is listening on. */
  private String getInfoServer() throws IOException {
    URI fsName = FileSystem.getDefaultUri(conf);
    if (!"hdfs".equals(fsName.getScheme())) {
      throw new IOException("This is not a DFS");
    }
    return NetUtils.getServerAddress(
        conf, "dfs.info.bindAddress", "dfs.info.port", "dfs.http.address");
  }

  /** Create a new checkpoint */
  void doCheckpoint() throws IOException {

    LOG.info("Checkpoint starting");

    // Do the required initialization of the merge work area.
    startCheckpoint();

    // Tell the namenode to start logging transactions in a new edit file
    // Returns a token that would be used to upload the merged image.
    CheckpointSignature sig = (CheckpointSignature) namenode.rollEditLog();

    // error simulation code for junit test
    if (ErrorSimulator.getErrorSimulation(0)) {
      throw new IOException("Simulating error0 " + "after creating edits.new");
    }

    boolean loadImage = downloadCheckpointFiles(sig); // Fetch fsimage and edits
    doMerge(sig, loadImage); // Do the merge

    //
    // Upload the new image into the NameNode. Then tell the Namenode
    // to make this new uploaded image as the most current image.
    //
    putFSImage(sig);

    // error simulation code for junit test
    if (ErrorSimulator.getErrorSimulation(1)) {
      throw new IOException("Simulating error1 " + "after uploading new image to NameNode");
    }

    namenode.rollFsImage(new CheckpointSignature(checkpointImage));
    checkpointImage.endCheckpoint();

    LOG.info("Checkpoint done. New Image Size: " + checkpointImage.getFsImageName().length());
  }

  private void startCheckpoint() throws IOException {
    checkpointImage.unlockAll();
    checkpointImage.getEditLog().close();
    checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
    checkpointImage.startCheckpoint();
  }

  /** Merge downloaded image and edits and write the new image into current storage directory. */
  private void doMerge(CheckpointSignature sig, boolean loadImage) throws IOException {
    if (loadImage) { // create an empty namespace if new image
      namesystem = new FSNamesystem(checkpointImage, conf);
    }
    assert namesystem.dir.fsImage == checkpointImage;
    checkpointImage.doMerge(sig, loadImage);
  }

  /**
   * @param argv The parameters passed to this program.
   * @exception Exception if the filesystem does not exist.
   * @return 0 on success, non zero on error.
   */
  private int processArgs(String[] argv) throws Exception {

    if (argv.length < 1) {
      printUsage("");
      return -1;
    }

    int exitCode = -1;
    int i = 0;
    String cmd = argv[i++];

    //
    // verify that we have enough command line parameters
    //
    if ("-geteditsize".equals(cmd)) {
      if (argv.length != 1) {
        printUsage(cmd);
        return exitCode;
      }
    } else if ("-checkpoint".equals(cmd)) {
      if (argv.length != 1 && argv.length != 2) {
        printUsage(cmd);
        return exitCode;
      }
      if (argv.length == 2 && !"force".equals(argv[i])) {
        printUsage(cmd);
        return exitCode;
      }
    }

    exitCode = 0;
    try {
      if ("-checkpoint".equals(cmd)) {
        long size = namenode.getEditLogSize();
        if (size >= checkpointSize || argv.length == 2 && "force".equals(argv[i])) {
          doCheckpoint();
        } else {
          System.err.println(
              "EditLog size "
                  + size
                  + " bytes is "
                  + "smaller than configured checkpoint "
                  + "size "
                  + checkpointSize
                  + " bytes.");
          System.err.println("Skipping checkpoint.");
        }
      } else if ("-geteditsize".equals(cmd)) {
        long size = namenode.getEditLogSize();
        System.out.println("EditLog size is " + size + " bytes");
      } else {
        exitCode = -1;
        LOG.error(cmd.substring(1) + ": Unknown command");
        printUsage("");
      }
    } catch (RemoteException e) {
      //
      // This is a error returned by hadoop server. Print
      // out the first line of the error mesage, ignore the stack trace.
      exitCode = -1;
      try {
        String[] content;
        content = e.getLocalizedMessage().split("\n");
        LOG.error(cmd.substring(1) + ": " + content[0]);
      } catch (Exception ex) {
        LOG.error(cmd.substring(1) + ": " + ex.getLocalizedMessage());
      }
    } catch (IOException e) {
      //
      // IO exception encountered locally.
      //
      exitCode = -1;
      LOG.error(cmd.substring(1) + ": " + e.getLocalizedMessage());
    } finally {
      // Does the RPC connection need to be closed?
    }
    return exitCode;
  }

  /**
   * Displays format of commands.
   *
   * @param cmd The command that is being executed.
   */
  private void printUsage(String cmd) {
    if ("-geteditsize".equals(cmd)) {
      System.err.println("Usage: java SecondaryNameNode" + " [-geteditsize]");
    } else if ("-checkpoint".equals(cmd)) {
      System.err.println("Usage: java SecondaryNameNode" + " [-checkpoint [force]]");
    } else {
      System.err.println(
          "Usage: java SecondaryNameNode " + "[-checkpoint [force]] " + "[-geteditsize] ");
    }
  }

  /**
   * main() has some simple utility methods.
   *
   * @param argv Command line parameters.
   * @exception Exception if the filesystem does not exist.
   */
  public static void main(String[] argv) throws Exception {
    StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
    Configuration tconf = new Configuration();
    if (argv.length >= 1) {
      SecondaryNameNode secondary = new SecondaryNameNode(tconf);
      int ret = secondary.processArgs(argv);
      System.exit(ret);
    }

    // Create a never ending deamon
    Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf));
    checkpointThread.start();
  }

  static class CheckpointStorage extends FSImage {
    /** */
    CheckpointStorage(Configuration conf) throws IOException {
      super(conf);
    }

    @Override
    public boolean isConversionNeeded(StorageDirectory sd) {
      return false;
    }

    /**
     * Analyze checkpoint directories. Create directories if they do not exist. Recover from an
     * unsuccessful checkpoint is necessary.
     *
     * @param dataDirs
     * @param editsDirs
     * @throws IOException
     */
    void recoverCreate(Collection<File> dataDirs, Collection<File> editsDirs) throws IOException {
      Collection<File> tempDataDirs = new ArrayList<File>(dataDirs);
      Collection<File> tempEditsDirs = new ArrayList<File>(editsDirs);
      this.storageDirs = new ArrayList<StorageDirectory>();
      setStorageDirectories(tempDataDirs, tempEditsDirs);
      for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext(); ) {
        StorageDirectory sd = it.next();
        boolean isAccessible = true;
        try { // create directories if don't exist yet
          if (!sd.getRoot().mkdirs()) {
            // do nothing, directory is already created
          }
        } catch (SecurityException se) {
          isAccessible = false;
        }
        if (!isAccessible)
          throw new InconsistentFSStateException(
              sd.getRoot(), "cannot access checkpoint directory.");
        StorageState curState;
        try {
          curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
          // sd is locked but not opened
          switch (curState) {
            case NON_EXISTENT:
              // fail if any of the configured checkpoint dirs are inaccessible
              throw new InconsistentFSStateException(
                  sd.getRoot(), "checkpoint directory does not exist or is not accessible.");
            case NOT_FORMATTED:
              break; // it's ok since initially there is no current and VERSION
            case NORMAL:
              break;
            default: // recovery is possible
              sd.doRecover(curState);
          }
        } catch (IOException ioe) {
          sd.unlock();
          throw ioe;
        }
      }
    }

    /**
     * Prepare directories for a new checkpoint.
     *
     * <p>Rename <code>current</code> to <code>lastcheckpoint.tmp</code> and recreate <code>current
     * </code>.
     *
     * @throws IOException
     */
    void startCheckpoint() throws IOException {
      for (StorageDirectory sd : storageDirs) {
        moveCurrent(sd);
      }
    }

    void endCheckpoint() throws IOException {
      for (StorageDirectory sd : storageDirs) {
        moveLastCheckpoint(sd);
      }
    }

    /** Merge image and edits, and verify consistency with the signature. */
    private void doMerge(CheckpointSignature sig, boolean loadImage) throws IOException {
      getEditLog().open();
      StorageDirectory sdName = null;
      StorageDirectory sdEdits = null;
      Iterator<StorageDirectory> it = null;
      if (loadImage) {
        it = dirIterator(NameNodeDirType.IMAGE);
        if (it.hasNext()) sdName = it.next();
        if (sdName == null) throw new IOException("Could not locate checkpoint fsimage");
      }
      it = dirIterator(NameNodeDirType.EDITS);
      if (it.hasNext()) sdEdits = it.next();
      if (sdEdits == null) throw new IOException("Could not locate checkpoint edits");
      if (loadImage) {
        loadFSImage(FSImage.getImageFile(sdName, NameNodeFile.IMAGE));
      }
      loadFSEdits(sdEdits);
      sig.validateStorageInfo(this);
      saveNamespace(false);
    }
  }
}