Code example #1
  /** Test {@link DFSUtil#getNameServiceIds(Configuration)} */
  @Test
  public void testGetNameServiceIds() {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFS_NAMESERVICES, "nn1,nn2");
    Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
    Iterator<String> it = nameserviceIds.iterator();
    assertEquals(2, nameserviceIds.size());
    assertEquals("nn1", it.next().toString());
    assertEquals("nn2", it.next().toString());
  }
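
For context, getNameServiceIds simply enumerates the comma-separated nameservice IDs configured under the DFS_NAMESERVICES key. A minimal standalone sketch of the same lookup is below; the class name is made up for illustration, and the static import assumes the constant is defined in DFSConfigKeys as in the Apache Hadoop line (its owning class may differ in the hadoop-20 branch these examples come from).

import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Assumption: DFS_NAMESERVICES is the same constant the test imports statically;
// in Apache Hadoop it is defined in DFSConfigKeys.
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;

public class PrintNameServiceIds {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Comma-separated list of federated nameservice IDs, as in the test above.
    conf.set(DFS_NAMESERVICES, "ns1,ns2");
    Collection<String> ids = DFSUtil.getNameServiceIds(conf);
    for (String id : ids) {
      System.out.println(id); // prints "ns1" then "ns2"
    }
  }
}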
Code example #2
File: MiniAvatarCluster.java  Project: ZJB/hadoop-20
  /**
   * Modify the config and start up the servers. The rpc and info ports for servers are guaranteed
   * to use free ports.
   *
   * <p>NameNode and DataNode directory creation and configuration will be managed by this class.
   *
   * @param conf the base configuration to use in starting the servers. This will be modified as
   *     necessary.
   * @param numDataNodes Number of DataNodes to start; may be zero
   * @param format if true, format the NameNode and DataNodes before starting up
   * @param racks array of strings indicating the rack that each DataNode is on
   * @param hosts array of strings indicating the hostname of each DataNode
   * @param numNameNodes number of NameNodes to start
   * @param federation if true, start the cluster with a federation configuration
   */
  public MiniAvatarCluster(
      Configuration conf,
      int numDataNodes,
      boolean format,
      String[] racks,
      String[] hosts,
      int numNameNodes,
      boolean federation)
      throws IOException, ConfigException, InterruptedException {

    final String testDir = TEST_DIR + "/" + System.currentTimeMillis();
    baseAvatarDir = testDir + "/avatar";
    dataDir = testDir + "/data";
    this.conf = conf;
    this.numDataNodes = numDataNodes;
    this.format = format;
    this.racks = racks;
    this.hosts = hosts;

    conf.setInt("dfs.secondary.info.port", 0);
    conf.set("fs.ha.zookeeper.prefix", "/hdfs");
    conf.set("fs.ha.zookeeper.quorum", "localhost:" + zkClientPort);

    // datanodes
    conf.set("dfs.datanode.address", "localhost:0");
    conf.set("dfs.datanode.http.address", "localhost:0");
    conf.set("dfs.datanode.ipc.address", "localhost:0");
    conf.set("dfs.datanode.dns.interface", "lo");
    conf.set("dfs.namenode.dns.interface", "lo");

    // other settings
    conf.setBoolean("dfs.permissions", false);
    conf.setBoolean("dfs.persist.blocks", true);
    conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedAvatarFileSystem");
    conf.setLong("dfs.blockreport.initialDelay", 0);
    conf.setClass(
        "topology.node.switch.mapping.impl", StaticMapping.class, DNSToSwitchMapping.class);

    // never automatically exit from safe mode
    conf.setFloat("dfs.safemode.threshold.pct", 1.5f);
    this.federation = federation;
    Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
    if (nameserviceIds.size() > 1) this.federation = true;
    if (!federation && numNameNodes != 1) {
      throw new IOException("Only 1 namenode is allowed in non-federation cluster.");
    }
    nameNodes = new NameNodeInfo[numNameNodes];
    for (int nnIndex = 0; nnIndex < numNameNodes; nnIndex++) {
      nameNodes[nnIndex] = new NameNodeInfo(nnIndex);
      nameNodes[nnIndex].createAvatarDirs();
    }
    if (!federation) {
      nameNodes[0].initGeneralConf(conf, null);
    } else {
      if (nameserviceIds.isEmpty()) {
        for (int i = 0; i < nameNodes.length; i++) {
          nameserviceIds.add(NAMESERVICE_ID_PREFIX + i);
        }
      }
      initFederationConf(conf, nameserviceIds);
    }

    startAvatarNodes();
    waitAvatarNodesActive();

    if (this.format) {
      File data_dir = new File(dataDir);
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Cannot remove data directory: " + data_dir);
      }
    }
    startDataNodes();
    waitDataNodesActive();

    waitExitSafeMode();
  }
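
Tying the two examples together, the sketch below shows how a test might call this constructor for a non-federated cluster. Only the constructor signature comes from the code above; everything else is an assumption for illustration: the sketch is assumed to sit in the same package as MiniAvatarCluster, null racks/hosts are assumed to mean default placement (as in MiniDFSCluster), a ZooKeeper instance is assumed to be reachable on the port the constructor wires into fs.ha.zookeeper.quorum, and the shutDown() teardown call is hypothetical.

import org.apache.hadoop.conf.Configuration;

public class MiniAvatarClusterUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Non-federated cluster: the constructor above rejects numNameNodes != 1
    // unless federation is enabled.
    MiniAvatarCluster cluster =
        new MiniAvatarCluster(
            conf,
            3,      // numDataNodes
            true,   // format the NameNode and DataNodes before starting
            null,   // racks: default placement (assumed)
            null,   // hosts: default hostnames (assumed)
            1,      // numNameNodes
            false); // federation disabled
    try {
      // ... exercise the cluster here ...
    } finally {
      cluster.shutDown(); // hypothetical teardown call; the real method name may differ
    }
  }
}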