public List<NodeReport> getClusterNodes() throws YarnRemoteException {
    if (clientResourceManager == null) {
      throw new IllegalStateException("Can't get report without connecting first!");
    }

    GetClusterNodesRequest req = Records.newRecord(GetClusterNodesRequest.class);
    GetClusterNodesResponse res = clientResourceManager.getClusterNodes(req);

    return res.getNodeReports();
}
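For comparison, a minimal sketch of the same query through the higher-level YarnClient API rather than the raw protocol handle; the class name ListClusterNodes is illustrative, and it assumes a yarn-site.xml on the classpath pointing at a running ResourceManager:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;

public class ListClusterNodes {
  public static void main(String[] args) throws Exception {
    // Assumption: yarn-site.xml on the classpath points at a running ResourceManager.
    Configuration conf = new Configuration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    try {
      // Issues the same GetClusterNodesRequest RPC as the snippet above.
      List<NodeReport> reports = yarnClient.getNodeReports(NodeState.RUNNING);
      for (NodeReport report : reports) {
        System.out.println(report.getNodeId() + " capability=" + report.getCapability());
      }
    } finally {
      yarnClient.stop();
    }
  }
}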
Example #2
 @Override
 public List<NodeReport> getNodeReports(NodeState... states) throws YarnException, IOException {
   EnumSet<NodeState> statesSet =
       (states.length == 0) ? EnumSet.allOf(NodeState.class) : EnumSet.noneOf(NodeState.class);
   for (NodeState state : states) {
     statesSet.add(state);
   }
   GetClusterNodesRequest request = GetClusterNodesRequest.newInstance(statesSet);
   GetClusterNodesResponse response = rmClient.getClusterNodes(request);
   return response.getNodeReports();
 }
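Passing no states expands to EnumSet.allOf(NodeState.class), so a bare getNodeReports() call returns every node. The noneOf-then-add loop is used rather than EnumSet.copyOf because copyOf throws IllegalArgumentException when given an empty collection that is not itself an EnumSet.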
Example #3
 /**
  * Get all the nodes in the cluster. Note that this method generates an RPC call.
  *
  * @return host names of the cluster nodes
  * @throws YarnException if the cluster nodes cannot be retrieved
  */
 private List<String> getClusterNodes() throws YarnException {
   List<String> result = new ArrayList<String>();
   GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class);
   try {
     GetClusterNodesResponse clusterNodesResp =
         applicationsManager.getClusterNodes(clusterNodesReq);
     List<NodeReport> nodeReports = clusterNodesResp.getNodeReports();
     for (NodeReport nodeReport : nodeReports) {
       result.add(nodeReport.getNodeId().getHost());
     }
   } catch (IOException e) {
     LOG.error("error getting cluster nodes from AM");
     throw new YarnException(e);
   }
   return result;
 }
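The same host-name projection can also be written against YarnClient; a sketch, with the hypothetical helper name clusterHosts (not part of the original code):

import java.util.List;
import java.util.stream.Collectors;

import org.apache.hadoop.yarn.client.api.YarnClient;

class NodeHosts {
  // Hypothetical helper: the host-name projection from getClusterNodes() above,
  // built on a started YarnClient instead of the raw protocol handle.
  static List<String> clusterHosts(YarnClient yarnClient) throws Exception {
    return yarnClient.getNodeReports().stream() // all NodeStates when no filter is given
        .map(report -> report.getNodeId().getHost())
        .collect(Collectors.toList());
  }
}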
Example #4
 public TaskTrackerInfo[] getActiveTrackers() throws IOException, InterruptedException {
   GetClusterNodesRequest request = recordFactory.newRecordInstance(GetClusterNodesRequest.class);
   GetClusterNodesResponse response = applicationsManager.getClusterNodes(request);
   return TypeConverter.fromYarnNodes(response.getNodeReports());
 }
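TypeConverter.fromYarnNodes is the compatibility shim for the MRv1 client API: it maps each NodeReport onto a TaskTrackerInfo, so legacy JobClient callers see NodeManagers reported as task trackers.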
Example #5
  @Test
  public void testSetupShutdown() throws Exception {
    GetClusterNodesRequest request = Records.newRecord(GetClusterNodesRequest.class);
    ClientRMService clientRMService = yarnCluster.getResourceManager().getClientRMService();
    GetClusterNodesResponse response = clientRMService.getClusterNodes(request);
    List<NodeReport> nodeReports = response.getNodeReports();
    LOG.info("{}", nodeReports);

    for (NodeReport nr : nodeReports) {
      LOG.info("Node: {}", nr.getNodeId());
      LOG.info("Total memory: {}", nr.getCapability());
      LOG.info("Used memory: {}", nr.getUsed());
      LOG.info("Number containers: {}", nr.getNumContainers());
    }

    String appMasterJar = JarFinder.getJar(StreamingAppMaster.class);
    LOG.info("appmaster jar: " + appMasterJar);
    String testJar = JarFinder.getJar(StramMiniClusterTest.class);
    LOG.info("testJar: " + testJar);

    // create test application
    Properties dagProps = new Properties();

    // input module (ensure shutdown works while windows are generated)
    dagProps.put(
        StreamingApplication.DT_PREFIX + "operator.numGen.classname",
        TestGeneratorInputOperator.class.getName());
    dagProps.put(StreamingApplication.DT_PREFIX + "operator.numGen.maxTuples", "1");

    // fake output adapter - to be ignored when determining shutdown
    // props.put(DAGContext.DT_PREFIX + "stream.output.classname",
    // HDFSOutputStream.class.getName());
    // props.put(DAGContext.DT_PREFIX + "stream.output.inputNode", "module2");
    // props.put(DAGContext.DT_PREFIX + "stream.output.filepath",
    // "miniclustertest-testSetupShutdown.out");

    dagProps.put(
        StreamingApplication.DT_PREFIX + "operator.module1.classname",
        GenericTestOperator.class.getName());

    dagProps.put(
        StreamingApplication.DT_PREFIX + "operator.module2.classname",
        GenericTestOperator.class.getName());

    dagProps.put(StreamingApplication.DT_PREFIX + "stream.fromNumGen.source", "numGen.outport");
    dagProps.put(StreamingApplication.DT_PREFIX + "stream.fromNumGen.sinks", "module1.inport1");

    dagProps.put(StreamingApplication.DT_PREFIX + "stream.n1n2.source", "module1.outport1");
    dagProps.put(StreamingApplication.DT_PREFIX + "stream.n1n2.sinks", "module2.inport1");

    dagProps.setProperty(
        StreamingApplication.DT_PREFIX + LogicalPlan.MASTER_MEMORY_MB.getName(), "128");
    dagProps.setProperty(
        StreamingApplication.DT_PREFIX + LogicalPlan.CONTAINER_JVM_OPTIONS.getName(),
        "-Dlog4j.properties=custom_log4j.properties");
    dagProps.setProperty(
        StreamingApplication.DT_PREFIX + "operator.*." + OperatorContext.MEMORY_MB.getName(), "64");
    dagProps.setProperty(
        StreamingApplication.DT_PREFIX + "operator.*." + OperatorContext.VCORES.getName(), "1");
    dagProps.setProperty(
        StreamingApplication.DT_PREFIX
            + "operator.*.port.*."
            + Context.PortContext.BUFFER_MEMORY_MB.getName(),
        "32");
    dagProps.setProperty(StreamingApplication.DT_PREFIX + LogicalPlan.DEBUG.getName(), "true");
    // dagProps.setProperty(StreamingApplication.DT_PREFIX +
    // LogicalPlan.CONTAINERS_MAX_COUNT.getName(), "2");
    LOG.info("dag properties: {}", dagProps);

    LOG.info("Initializing Client");
    LogicalPlanConfiguration tb = new LogicalPlanConfiguration(conf);
    tb.addFromProperties(dagProps, null);
    LogicalPlan dag = createDAG(tb);
    Configuration yarnConf = new Configuration(yarnCluster.getConfig());
    StramClient client = new StramClient(yarnConf, dag);
    try {
      client.start();
      if (StringUtils.isBlank(System.getenv("JAVA_HOME"))) {
        client.javaCmd = "java"; // JAVA_HOME not set in the yarn mini cluster
      }
      LOG.info("Running client");
      client.startApplication();
      boolean result = client.monitorApplication();

      LOG.info("Client run completed. Result=" + result);
      Assert.assertTrue(result);
    } finally {
      client.stop();
    }
  }
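Note the client lifecycle this test enforces: start() before startApplication(), then monitorApplication() to block until the DAG shuts itself down (numGen stops after maxTuples=1), and stop() in a finally block so resources are released even if the assertion fails.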