public String getStatus(String clusterId) {
    // Maps the raw EMR cluster state for {@code clusterId} onto the simplified
    // lifecycle labels this application uses: "running", "starting", "deleting".
    String state = null;

    List<ClusterSummary> emrClusters = emr.listClusters().getClusters();
    for (ClusterSummary clusterSummary : emrClusters) {
      if (clusterSummary.getId().equals(clusterId)) {
        state = clusterSummary.getStatus().getState();
        break; // cluster ids are unique; no need to scan the rest
      }
    }

    // Guard: switching on a null String throws NPE. An unknown/unlisted
    // cluster falls into the same bucket as the switch's default branch.
    if (state == null) {
      return "deleting";
    }

    switch (state) {
      case "WAITING":
        return "running";
      case "RUNNING":
      case "STARTING":
      case "BOOTSTRAPPING": // fixed typo: was "BOOTSTRAPING", which never matched
        return "starting";
      case "TERMINATED":
      default:
        return "deleting";
    }
  }
// ---- Example #2 ----
  public CreateEMRCluster() {
    // Loads the temporary AWS key pair from the property file and builds an
    // EMR client bound to the us-west-2 regional endpoint.
    TEMP_ACCESS_KEY_ID = PropEditor.getProp(PublicStr.TEMP_ACCESS_KEY_ID);
    TEMP_SECRET_ACCESS_KEY = PropEditor.getProp(PublicStr.TEMP_SECRET_ACCESS_KEY);

    // SECURITY: removed System.out.println of the key pair — it wrote the
    // secret access key to stdout/logs in clear text.
    AWSCredentials credentials =
        new BasicAWSCredentials(TEMP_ACCESS_KEY_ID, TEMP_SECRET_ACCESS_KEY);

    emr = new AmazonElasticMapReduceClient(credentials);
    emr.setEndpoint("elasticmapreduce.us-west-2.amazonaws.com");

    this.credentials = credentials;
  }
  public String createClusterHadoop(
      String name, int nodes, String instance, Map<String, Boolean> apps) {
    // Launches an EMR 4.1.0 cluster with a debugging step and the applications
    // enabled in {@code apps} ("hive", "hue", "spark"), and returns the new
    // job flow id.
    RunJobFlowResult result;
    StepFactory stepFactory = new StepFactory();
    StepConfig enabledebugging =
        new StepConfig()
            .withName("Enable debugging")
            .withActionOnFailure("TERMINATE_JOB_FLOW")
            .withHadoopJarStep(stepFactory.newEnableDebuggingStep());

    List<Application> applications = new ArrayList<Application>();

    // Boolean.TRUE.equals(...) avoids the auto-unboxing NPE that apps.get(...)
    // throws when a key is absent (or explicitly mapped to null).
    if (Boolean.TRUE.equals(apps.get("hive"))) {
      applications.add(new Application().withName("Hive"));
    }
    if (Boolean.TRUE.equals(apps.get("hue"))) {
      applications.add(new Application().withName("Hue"));
    }
    if (Boolean.TRUE.equals(apps.get("spark"))) {
      applications.add(new Application().withName("Spark"));
    }

    RunJobFlowRequest request =
        new RunJobFlowRequest()
            .withName(name)
            .withReleaseLabel("emr-4.1.0")
            .withSteps(enabledebugging)
            .withApplications(applications)
            .withServiceRole("EMR_DefaultRole")
            .withJobFlowRole("EMR_EC2_DefaultRole")
            .withInstances(
                new JobFlowInstancesConfig()
                    .withInstanceCount(nodes)
                    .withKeepJobFlowAliveWhenNoSteps(true)
                    .withMasterInstanceType(instance)
                    .withSlaveInstanceType(instance));

    result = emr.runJobFlow(request);
    return result.getJobFlowId();
  }
// ---- Example #4 ----
  public void createClusterWithStep(String inputPath, String outputPath, String logBucket) {
    // Creates a 3-node EMR 4.2.0 cluster, submits a wordcount step to it, and
    // polls the step every 5s until it leaves the PENDING/RUNNING states.

    String logUri = "s3://" + logBucket + "/";

    RunJobFlowRequest request =
        new RunJobFlowRequest()
            .withName("Create cluster with ReleaseLabel")
            .withReleaseLabel("emr-4.2.0")
            .withServiceRole("EMR_DefaultRole")
            .withJobFlowRole("EMR_EC2_DefaultRole")
            .withInstances(
                new JobFlowInstancesConfig()
                    .withInstanceCount(3)
                    .withKeepJobFlowAliveWhenNoSteps(true)
                    .withMasterInstanceType("m3.xlarge")
                    .withSlaveInstanceType("m3.xlarge"))
            .withVisibleToAllUsers(true)
            .withLogUri(logUri); // was set twice; once is enough

    RunJobFlowResult result = emr.runJobFlow(request);

    // Use the id of the cluster we just created. The previous
    // listClusters().get(0) could pick an unrelated cluster (ordering is not
    // guaranteed to put the newest first) and threw if the list was empty.
    String clusterId = result.getJobFlowId();

    HadoopJarStepConfig hadoopConfigAdd =
        new HadoopJarStepConfig()
            .withJar("s3://hadoop-lyy/code/hadoop-mapreduce-examples-2.6.0.jar")
            .withMainClass("wordcount")
            .withArgs("-Dfs.s3.canned.acl=BucketOwnerFullControl", inputPath, outputPath);

    StepConfig customStepAdd =
        new StepConfig("Step2", hadoopConfigAdd).withActionOnFailure(ActionOnFailure.CONTINUE);
    AddJobFlowStepsResult resultAdd =
        emr.addJobFlowSteps(
            new AddJobFlowStepsRequest().withJobFlowId(clusterId).withSteps(customStepAdd));

    System.out.println(resultAdd.getStepIds());

    DescribeStepRequest describe =
        new DescribeStepRequest().withStepId(resultAdd.getStepIds().get(0));

    describe.setClusterId(clusterId);
    describe.setRequestCredentials(credentials);

    DescribeStepResult res = emr.describeStep(describe);
    String stas = res.getStep().getStatus().getState();

    while (stas.equals(StepExecutionState.PENDING.name())
        || stas.equals(StepExecutionState.RUNNING.name())) {
      try {
        Thread.sleep(5000);
      } catch (InterruptedException e) {
        // Restore the interrupt flag and stop polling instead of looping on.
        Thread.currentThread().interrupt();
        break;
      }
      res = emr.describeStep(describe);
      stas = res.getStep().getStatus().getState();
      System.out.print(".");
    }

    if (stas.equals(StepExecutionState.COMPLETED.name())) {
      System.out.println("\n step complete");
    } else if (stas.equals(StepExecutionState.FAILED.name())
        || stas.equals(StepExecutionState.CANCELLED.name())) {
      System.out.println("\n step failed");
    }
  }
// ---- Example #5 ----
  public CreateEMRCluster(AWSCredentials credentials) {
    // Keep the caller-supplied credentials for later per-request use.
    this.credentials = credentials;

    // Build an EMR client bound to the us-west-2 regional endpoint.
    emr = new AmazonElasticMapReduceClient(credentials);
    emr.setEndpoint("elasticmapreduce.us-west-2.amazonaws.com");
  }
 public void removeEmrCluster(String clusterId) {
   // Terminate the job flow (cluster) identified by clusterId.
   TerminateJobFlowsRequest terminate = new TerminateJobFlowsRequest(Arrays.asList(clusterId));
   emr.terminateJobFlows(terminate);
 }