@Test
 public void testDataFromFileWithUTF8() throws Exception {
   final String specFilePath = "src/test/resources/hadoop_cluster_cn.json";
   // Let any parse/IO failure propagate and fail the test, instead of being
   // swallowed by a catch block that always asserts true.
   ClusterCreate clusterSpec =
       CommandsUtils.getObjectByJsonString(
           ClusterCreate.class, CommandsUtils.dataFromFile(specFilePath));
   NodeGroupCreate[] nodeGroups = clusterSpec.getNodeGroups();
   assertEquals(nodeGroups.length, 3);
   assertEquals(nodeGroups[0].getName(), "主节点");
   assertEquals(nodeGroups[1].getName(), "协作节点");
   assertEquals(nodeGroups[2].getName(), "客户端");
 }
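
This test passes only if CommandsUtils.dataFromFile decodes the spec file as UTF-8 rather than the platform default charset; otherwise the Chinese node group names above would be mangled. A minimal sketch of such a reader, assuming NIO file APIs (the shipped implementation may differ):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public final class DataFromFileSketch {
  // Read the entire file and decode it explicitly as UTF-8, so the result
  // is independent of the JVM's default charset.
  public static String dataFromFile(String path) throws IOException {
    byte[] bytes = Files.readAllBytes(Paths.get(path));
    return new String(bytes, StandardCharsets.UTF_8);
  }
}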
Example #2
  @Override
  public RepeatStatus executeStep(
      ChunkContext chunkContext, JobExecutionStatusHolder jobExecutionStatusHolder)
      throws Exception {

    // This step applies only to app managers such as ClouderaMgr and Ambari.
    String clusterName =
        getJobParameters(chunkContext).getString(JobConstants.CLUSTER_NAME_JOB_PARAM);

    SoftwareManager softwareMgr = softwareMgrs.getSoftwareManagerByClusterName(clusterName);

    String appMgrName = softwareMgr.getName();
    if (Constants.IRONFAN.equals(appMgrName)) {
      // We do not configure any local repo for Ironfan.
      return RepeatStatus.FINISHED;
    }

    ClusterCreate clusterConfig =
        clusterManager.getClusterConfigMgr().getClusterConfig(clusterName);
    String localRepoURL = clusterConfig.getLocalRepoURL();
    logger.info("Use the following URL as the local yum server:" + localRepoURL);

    if (!CommonUtil.isBlank(localRepoURL)) {
      // Setup local repo file on each node for ClouderaMgr/Ambari.
      logger.info(
          "ConfigLocalRepoStep: start to setup local repo on each node for ClouderaMgr/Ambari.");

      List<NodeEntity> nodes = getNodesToBeSetLocalRepo(chunkContext, clusterName);
      String appMgrRepoID =
          Configuration.getString(
              Constants.SERENGETI_NODE_YUM_CLOUDERA_MANAGER_REPO_ID,
              Constants.NODE_APPMANAGER_YUM_CLOUDERA_MANAGER_REPO_ID);
      if (Constants.AMBARI_PLUGIN_TYPE.equals(appMgrName)) {
        appMgrRepoID =
            Configuration.getString(
                Constants.SERENGETI_NODE_YUM_AMBARI_REPO_ID,
                Constants.NODE_APPMANAGER_YUM_AMBARI_REPO_ID);
      }

      setLocalRepoService.setLocalRepoForNodes(clusterName, nodes, appMgrRepoID, localRepoURL);
    }

    return RepeatStatus.FINISHED;
  }
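
The step above delegates the node-side work to setLocalRepoService. A hedged sketch of the kind of yum repo definition such a service would render for each node; the class, method name, and file layout here are assumptions for illustration, not the Serengeti API:

public final class LocalRepoFileSketch {
  // Render the contents of a yum .repo file (e.g. /etc/yum.repos.d/<repoId>.repo)
  // that points the node's package manager at the local repo URL.
  public static String renderRepoFile(String repoId, String localRepoURL) {
    return "[" + repoId + "]\n"
        + "name=Local repository " + repoId + "\n"
        + "baseurl=" + localRepoURL + "\n"
        + "enabled=1\n"
        + "gpgcheck=0\n";
  }
}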
 @Test
 public void testPrettyJsonOutputWithUTF8() throws Exception {
   final String specFilePath = "src/test/resources/hadoop_cluster_cn.json";
   final String exportFilePath = "src/test/resources/hadoop_cluster_cn_export.json";
   ClusterCreate clusterSpec =
       CommandsUtils.getObjectByJsonString(
           ClusterCreate.class, CommandsUtils.dataFromFile(specFilePath));
   CommandsUtils.prettyJsonOutput(clusterSpec, exportFilePath);
   File exportFile = new File(exportFilePath);
   assertTrue(exportFile.exists());
   ClusterCreate exportClusterSpec =
       CommandsUtils.getObjectByJsonString(
           ClusterCreate.class, CommandsUtils.dataFromFile(exportFilePath));
   NodeGroupCreate[] nodeGroups = exportClusterSpec.getNodeGroups();
   assertEquals(nodeGroups.length, 3);
   assertEquals(nodeGroups[0].getName(), "主节点");
   assertEquals(nodeGroups[1].getName(), "协作节点");
   assertEquals(nodeGroups[2].getName(), "客户端");
   exportFile.delete();
 }
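
This test round-trips the spec: the exported file must itself be parseable and must preserve the UTF-8 content. If CommandsUtils is a thin Jackson wrapper (an assumption; the project may use a different JSON library), the export could look like this sketch:

import java.io.File;
import java.io.IOException;
import com.fasterxml.jackson.databind.ObjectMapper;

public final class PrettyJsonOutputSketch {
  // Serialize with an indenting pretty printer; Jackson writes files as
  // UTF-8 by default, which keeps the round-trip lossless.
  public static void prettyJsonOutput(Object object, String exportFilePath) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    mapper.writerWithDefaultPrettyPrinter().writeValue(new File(exportFilePath), object);
  }
}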
 @Test
 @SuppressWarnings("unchecked")
 public void testGetObjectByJsonString()
     throws JsonParseException, JsonMappingException, IOException {
   StringBuilder jsonBuff = new StringBuilder();
   jsonBuff
       .append("{  ")
       .append(" \"nodeGroups\": [ ")
       .append("      {            ")
       .append("        \"name\": \"master\"  ,  ")
       .append("        \"roles\": [             ")
       .append("        \"hadoop_namenode\"   ,  ")
       .append("        \"hadoop_jobtracker\"   ")
       .append("         ],                      ")
       .append("        \"instanceNum\": 1,             ")
       .append("        \"cpuNum\": 2,                  ")
       .append("        \"memCapacityMB\":2048,         ")
       .append("        \"storage\": {                  ")
       .append("        \"type\": \"SHARED\",           ")
       .append("        \"sizeGB\": 10                  ")
       .append("         },                               ")
       .append("    \"configuration\": {            ")
       .append("       \"hadoop\": {                ")
       .append("           \"core-site.xml\" : {           ")
       .append("           \"fs.default.name\": \"hdfs://localhost:8020\" ")
       .append("        },                            ")
       .append("       \"hdfs-site.xml\" : {           ")
       .append("          \"dfs.replication\": 4          ")
       .append("       },                               ")
       .append("       \"mapred-site.xml\" : {         ")
       .append("          \"mapred.map.tasks\": 5          ")
       .append("      },                             ")
       .append("      \"hadoop-env.sh\" : {           ")
       .append("         \"JAVA_HOME\": \"/path/to/javahome\"              ")
       .append("      },                              ")
       .append("     \"log4j.properties\" : {        ")
       .append("       \"hadoop.root.logger\": \"DEBUG,console\" ")
       .append("      }                                          ")
       .append("    }                                          ")
       .append("  }                                          ")
       .append("}, ")
       .append("{")
       .append("      \"name\": \"worker\",  ")
       .append("      \"roles\": [           ")
       .append("          \"hadoop_datanode\",   ")
       .append("          \"hadoop_tasktracker\" ")
       .append("       ], ")
       .append("      \"instanceNum\": 3, ")
       .append("      \"cpuNum\": 2, ")
       .append("      \"memCapacityMB\":2048, ")
       .append("      \"storage\": {          ")
       .append("      \"type\": \"SHARED\",   ")
       .append("      \"sizeGB\": 10          ")
       .append("     }                        ")
       .append("   }                          ")
       .append("], ")
       .append(" \"configuration\": {   ")
       .append(" \"hadoop\": {          ")
       .append(" \"core-site.xml\" : {  ")
       .append(" \"fs.default.name\": \"hdfs://fqdn_or_ip:8020\",")
       .append(" \"dfs.data.dir\":\"/data/\", ")
       .append(" \"dfs.http.address\":\"localhost\" ")
       .append("}, ")
       .append(" \"hdfs-site.xml\" : {  ")
       .append(" \"dfs.repliation\": 2   ")
       .append("}, ")
       .append(" \"mapred-site.xml\" : { ")
       .append(" \"mapred.map.tasks\": 3 ")
       .append(" }, ")
       .append(" \"hadoop-env.sh\" : {   ")
       .append(" \"JAVA_HOME\": \"/path/to/javahome\" ")
       .append(" }, ")
       .append("\"log4j.properties\" : {              ")
       .append("\"hadoop.root.logger\": \"DEBUG,console\" ")
       .append("  } ")
       .append("}  ")
       .append("} ")
       .append("}");
   ClusterCreate clusterCreate =
       CommandsUtils.getObjectByJsonString(ClusterCreate.class, jsonBuff.toString());
   assertNotNull(clusterCreate);
   Map<String, Object> hadoopConfig =
       (Map<String, Object>) clusterCreate.getConfiguration().get("hadoop");
   Map<String, Object> coreSiteConfig = (Map<String, Object>) hadoopConfig.get("core-site.xml");
   assertEquals(coreSiteConfig.get("fs.default.name"), "hdfs://fqdn_or_ip:8020");
 }
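
For completeness, a sketch of getObjectByJsonString under the same Jackson assumption; disabling FAIL_ON_UNKNOWN_PROPERTIES is a guess at why free-form spec files like the one built above bind cleanly to ClusterCreate:

import java.io.IOException;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;

public final class GetObjectByJsonStringSketch {
  // Bind a JSON string to the given class, ignoring any fields the class
  // does not declare so older or newer spec files still parse.
  public static <T> T getObjectByJsonString(Class<T> clazz, String json) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    return mapper.readValue(json, clazz);
  }
}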