Example 1: populating the static rack mapping from a LoggedNetworkTopology
 /** Populate the static node-to-rack mapping from a logged network topology. */
 static void setStaticMapping(LoggedNetworkTopology topology) {
   for (LoggedNetworkTopology rack : topology.getChildren()) {
     for (LoggedNetworkTopology node : rack.getChildren()) {
       // Register each leaf node under its rack's normalized name.
       StaticMapping.addNodeToRack(
           node.getName(), new RackNode(rack.getName(), 1).getName());
     }
   }
 }
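The mappings registered above are consumed through Hadoop's DNSToSwitchMapping interface: StaticMapping keeps a process-wide table, and resolve() returns the rack registered for each queried name (falling back to the default rack for unknown names). A minimal round-trip sketch, assuming org.apache.hadoop.net.StaticMapping is on the classpath; the node and rack names are made up for illustration:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.net.StaticMapping;

public class StaticMappingDemo {
  public static void main(String[] args) {
    // Hypothetical node and rack names, for illustration only.
    StaticMapping.addNodeToRack("node37.example.com", "/rack1");
    StaticMapping.addNodeToRack("node42.example.com", "/rack2");

    // resolve() looks each name up in the shared static table.
    StaticMapping mapping = new StaticMapping();
    List<String> racks =
        mapping.resolve(Arrays.asList("node37.example.com", "node42.example.com"));
    System.out.println(racks); // expected: [/rack1, /rack2]
  }
}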
Example 2: starting a task tracker with its rack mapping and static host resolution
  /** Start a task tracker, registering its rack mapping and host resolution first. */
  public void startTaskTracker(String host, String rack, int idx, int numDir) throws IOException {
    if (rack != null) {
      // Map the simulated host onto its rack for topology-aware scheduling.
      StaticMapping.addNodeToRack(host, rack);
    }
    if (host != null) {
      // Resolve the simulated hostname to the local machine.
      NetUtils.addStaticResolution(host, "localhost");
    }
    TaskTrackerRunner taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
    addTaskTracker(taskTracker);
  }
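NetUtils.addStaticResolution() seeds a process-wide host-resolution table that NetUtils consults before falling back to DNS, which is how a simulated tracker hostname ends up pointing at the local machine. A small sketch under that assumption; the hostnames here are hypothetical:

import org.apache.hadoop.net.NetUtils;

public class StaticResolutionDemo {
  public static void main(String[] args) {
    // Hypothetical simulated tracker hostname.
    NetUtils.addStaticResolution("tracker1.example.com", "localhost");

    // The static table answers before any DNS lookup is attempted.
    System.out.println(NetUtils.getStaticResolution("tracker1.example.com")); // localhost
    System.out.println(NetUtils.getStaticResolution("unknown.example.com"));  // null
  }
}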
Example 3: starting datanodes with per-node rack mappings and host resolutions
  /** Start the datanodes, registering rack mappings and static host resolutions. */
  private void startDataNodes() throws IOException {
    if (racks != null && numDataNodes > racks.length) {
      throw new IllegalArgumentException(
          "The length of racks ["
              + racks.length
              + "] is less than the number "
              + "of datanodes ["
              + numDataNodes
              + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
      throw new IllegalArgumentException(
          "The length of hosts ["
              + hosts.length
              + "] is less than the number "
              + "of datanodes ["
              + numDataNodes
              + "].");
    }

    // Generate some hostnames if required
    if (racks != null && hosts == null) {
      LOG.info("Generating host names for datanodes");
      hosts = new String[numDataNodes];
      for (int i = 0; i < numDataNodes; i++) {
        hosts[i] = "host" + i + ".foo.com";
      }
    }

    String[] dnArgs = {HdfsConstants.StartupOption.REGULAR.getName()};

    for (int i = 0; i < numDataNodes; i++) {
      Configuration dnConf = new Configuration(conf);

      // Give each datanode two storage directories.
      File dir1 = new File(dataDir, "data" + (2 * i + 1));
      File dir2 = new File(dataDir, "data" + (2 * i + 2));
      dir1.mkdirs();
      dir2.mkdirs();
      if (!dir1.isDirectory() || !dir2.isDirectory()) {
        throw new IOException(
            "Mkdirs failed to create directory for DataNode " + i + ": " + dir1 + " or " + dir2);
      }
      dnConf.set("dfs.data.dir", dir1.getPath() + "," + dir2.getPath());

      LOG.info("Starting DataNode " + i + " with dfs.data.dir: " + dnConf.get("dfs.data.dir"));

      if (hosts != null) {
        dnConf.set("slave.host.name", hosts[i]);
        LOG.info(
            "Starting DataNode " + i + " with hostname set to: " + dnConf.get("slave.host.name"));
      }

      if (racks != null) {
        String name = hosts[i];
        LOG.info("Adding node with hostname : " + name + " to rack " + racks[i]);
        StaticMapping.addNodeToRack(name, racks[i]);
      }
      Configuration newconf = new Configuration(dnConf); // save a copy for restarts
      if (hosts != null) {
        NetUtils.addStaticResolution(hosts[i], "localhost");
      }
      AvatarDataNode dn = AvatarDataNode.instantiateDataNode(dnArgs, dnConf);
      // HDFS identifies datanodes by IP:port, so the rack mapping must also
      // be registered under that key.
      String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
      if (racks != null) {
        int port = dn.getSelfAddr().getPort();
        LOG.info("Adding node with IP:port : " + ipAddr + ":" + port + " to rack " + racks[i]);
        StaticMapping.addNodeToRack(ipAddr + ":" + port, racks[i]);
      }
      dn.runDatanodeDaemon();
      dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs));
    }
  }
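Note that startDataNodes() registers each datanode under two keys: the generated hostname and the IP:port string that HDFS itself uses to identify nodes. A minimal check that both forms resolve to the same rack; the host, address, and rack values here are stand-ins for hosts[i], the datanode's self-reported address, and racks[i]:

import java.util.Arrays;

import org.apache.hadoop.net.StaticMapping;

public class DualKeyRackMappingDemo {
  public static void main(String[] args) {
    // Stand-in values, for illustration only.
    String host = "host0.foo.com";
    String ipPort = "127.0.0.1:50010";
    String rack = "/rack0";

    // Register both keys against the same rack, mirroring startDataNodes().
    StaticMapping.addNodeToRack(host, rack);
    StaticMapping.addNodeToRack(ipPort, rack);

    // Either form of the node's identity now resolves to the same rack.
    System.out.println(new StaticMapping().resolve(Arrays.asList(host, ipPort)));
    // expected: [/rack0, /rack0]
  }
}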