@CliCommand(
      value = {PREFIX + "info"},
      help = "Returns basic info about the Hadoop configuration")
  public String info() {
    StringBuilder sb = new StringBuilder();
    sb.append("Hadoop [");
    sb.append(VersionInfo.getVersion());
    sb.append(" rev.");
    sb.append(VersionInfo.getRevision());
    sb.append("]");

    sb.append("[fs=");
    String fs = hadoopConfiguration.get("fs.default.name");
    if (fs != null && fs.length() > 0) {
      sb.append(fs);
    }
    sb.append("]");

    sb.append("[jt=");
    String jt = hadoopConfiguration.get("mapred.job.tracker");
    if (jt != null && jt.length() > 0) {
      sb.append(jt);
    }
    sb.append("]");

    // TODO: potentially add a check to see whether HDFS is running
    return sb.toString();
  }
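The two configuration keys read above, fs.default.name and mapred.job.tracker, are the pre-Hadoop-2 names. Below is a minimal standalone sketch of the same summary, assuming only a default org.apache.hadoop.conf.Configuration on the classpath and reading the newer key names (fs.defaultFS, mapreduce.jobtracker.address); the class name HadoopInfo is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.VersionInfo;

public class HadoopInfo {
  public static void main(String[] args) {
    // Loads core-default.xml / core-site.xml from the classpath.
    Configuration conf = new Configuration();
    // "fs.defaultFS" supersedes the deprecated "fs.default.name";
    // "mapreduce.jobtracker.address" supersedes "mapred.job.tracker".
    String fs = conf.get("fs.defaultFS", "");
    String jt = conf.get("mapreduce.jobtracker.address", "");
    System.out.println("Hadoop [" + VersionInfo.getVersion()
        + " rev." + VersionInfo.getRevision() + "][fs=" + fs + "][jt=" + jt + "]");
  }
}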
Example #2
  public void verifyClusterGeneric(
      long clusterid,
      long startedon,
      String state,
      String haState,
      String hadoopVersionBuiltOn,
      String hadoopBuildVersion,
      String hadoopVersion,
      String resourceManagerVersionBuiltOn,
      String resourceManagerBuildVersion,
      String resourceManagerVersion) {

    assertEquals("clusterId doesn't match: ", ResourceManager.getClusterTimeStamp(), clusterid);
    assertEquals("startedOn doesn't match: ", ResourceManager.getClusterTimeStamp(), startedon);
    assertTrue("stated doesn't match: " + state, state.matches(STATE.INITED.toString()));
    assertTrue("HA state doesn't match: " + haState, haState.matches("INITIALIZING"));

    WebServicesTestUtils.checkStringMatch(
        "hadoopVersionBuiltOn", VersionInfo.getDate(), hadoopVersionBuiltOn);
    WebServicesTestUtils.checkStringEqual(
        "hadoopBuildVersion", VersionInfo.getBuildVersion(), hadoopBuildVersion);
    WebServicesTestUtils.checkStringMatch("hadoopVersion", VersionInfo.getVersion(), hadoopVersion);

    WebServicesTestUtils.checkStringMatch(
        "resourceManagerVersionBuiltOn", YarnVersionInfo.getDate(), resourceManagerVersionBuiltOn);
    WebServicesTestUtils.checkStringEqual(
        "resourceManagerBuildVersion",
        YarnVersionInfo.getBuildVersion(),
        resourceManagerBuildVersion);
    WebServicesTestUtils.checkStringMatch(
        "resourceManagerVersion", YarnVersionInfo.getVersion(), resourceManagerVersion);
  }
Example #3
 /** Return a table containing version information. */
 public static String getVersionTable() {
   return "<div id='dfstable'><table>"
       + "\n  <tr><td id='col1'>Version:</td><td>"
       + VersionInfo.getVersion()
       + ", "
       + VersionInfo.getRevision()
       + "\n  <tr><td id='col1'>Compiled:</td><td>"
       + VersionInfo.getDate()
       + " by "
       + VersionInfo.getUser()
       + " from "
       + VersionInfo.getBranch()
       + "\n</table></div>";
 }
Example #4
  @Test
  public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0");
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();

      long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();

      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn("fake-storage-id").when(mockDnReg).getStorageID();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();

      // Should succeed when software versions are the same and CTimes are the
      // same.
      doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
      rpcServer.registerDatanode(mockDnReg);

      // Should succeed when software versions are the same and CTimes are
      // different.
      doReturn(nnCTime + 1).when(mockStorageInfo).getCTime();
      rpcServer.registerDatanode(mockDnReg);

      // Should fail when software version of DN is different from NN and CTimes
      // are different.
      doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion();
      try {
        rpcServer.registerDatanode(mockDnReg);
        fail(
            "Should not have been able to register DN with different software"
                + " versions and CTimes");
      } catch (IncorrectVersionException ive) {
        GenericTestUtils.assertExceptionContains("does not match CTime of NN", ive);
        LOG.info("Got expected exception", ive);
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Example #5
  /** Build a map from the query string, setting values and defaults. */
  protected Map<String, String> buildRoot(HttpServletRequest request, XMLOutputter doc) {
    final String path = ServletUtil.getDecodedPath(request, "/listPaths");
    final String exclude =
        request.getParameter("exclude") != null ? request.getParameter("exclude") : "\\..*\\.crc";
    final String filter =
        request.getParameter("filter") != null ? request.getParameter("filter") : ".*";
    final boolean recur =
        request.getParameter("recursive") != null
            && "yes".equals(request.getParameter("recursive"));

    Map<String, String> root = new HashMap<String, String>();
    root.put("path", path);
    root.put("recursive", recur ? "yes" : "no");
    root.put("filter", filter);
    root.put("exclude", exclude);
    root.put("time", df.get().format(new Date()));
    root.put("version", VersionInfo.getVersion());
    return root;
  }
Example #6
  @BeforeClass
  public static void setupCluster() throws IOException {
    Configuration conf = new HdfsConfiguration();
    final String[] racks = {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2", "/rack2"};
    storages = DFSTestUtil.createDatanodeStorageInfos(racks);
    dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
    DFSTestUtil.formatNameNode(conf);
    namenode = new NameNode(conf);
    int blockSize = 1024;

    dnrList = new ArrayList<DatanodeRegistration>();
    dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();

    // Register DNs
    for (int i = 0; i < 6; i++) {
      DatanodeRegistration dnr =
          new DatanodeRegistration(
              dataNodes[i],
              new StorageInfo(NodeType.DATA_NODE),
              new ExportedBlockKeys(),
              VersionInfo.getVersion());
      dnrList.add(dnr);
      dnManager.registerDatanode(dnr);
      dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
          2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize, 0L,
          2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize, 0L);
      dataNodes[i].updateHeartbeat(
          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]), 0L, 0L, 0, 0, null);
    }
  }
Example #7
 private void verifySoftwareVersion(DatanodeRegistration dnReg) throws IncorrectVersionException {
   String dnVersion = dnReg.getSoftwareVersion();
   if (VersionUtil.compareVersions(dnVersion, minimumDataNodeVersion) < 0) {
     IncorrectVersionException ive =
         new IncorrectVersionException(minimumDataNodeVersion, dnVersion, "DataNode", "NameNode");
     LOG.warn(ive.getMessage() + " DN: " + dnReg);
     throw ive;
   }
   String nnVersion = VersionInfo.getVersion();
   if (!dnVersion.equals(nnVersion)) {
     String messagePrefix =
         "Reported DataNode version '"
             + dnVersion
             + "' of DN "
             + dnReg
             + " does not match NameNode version '"
             + nnVersion
             + "'";
     long nnCTime = nn.getFSImage().getStorage().getCTime();
     long dnCTime = dnReg.getStorageInfo().getCTime();
     if (nnCTime != dnCTime) {
       IncorrectVersionException ive =
           new IncorrectVersionException(
               messagePrefix
                   + " and CTime of DN ('"
                   + dnCTime
                   + "') does not match CTime of NN ('"
                   + nnCTime
                   + "')");
       LOG.warn(ive);
       throw ive;
     } else {
       LOG.info(messagePrefix + ". Note: This is normal during a rolling upgrade.");
     }
   }
 }
Example #8
 public AppConfig() {
   init();
   LOG.info("Using Hadoop version " + VersionInfo.getVersion());
 }
Example #9
 /** Returns the protoc version used for the build. */
 public static String getProtocVersion() {
   return COMMON_VERSION_INFO._getProtocVersion();
 }
Example #10
 /** Returns the buildVersion which includes version, revision, user and date. */
 public static String getBuildVersion() {
   return COMMON_VERSION_INFO._getBuildVersion();
 }
Example #11
 /** Get the checksum of the source files from which Hadoop was built. */
 public static String getSrcChecksum() {
   return COMMON_VERSION_INFO._getSrcChecksum();
 }
Example #12
 /** Get the subversion URL for the root Hadoop directory. */
 public static String getUrl() {
   return COMMON_VERSION_INFO._getUrl();
 }
Example #13
 /**
  * Get the branch on which this originated.
  *
  * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
  */
 public static String getBranch() {
   return COMMON_VERSION_INFO._getBranch();
 }
Example #14
 /**
  * Get the subversion revision number for the root directory
  *
  * @return the revision number, eg. "451451"
  */
 public static String getRevision() {
   return COMMON_VERSION_INFO._getRevision();
 }
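Taken together, these static accessors cover the whole build record. The short sketch below (not part of any example above; the class name PrintBuildInfo is illustrative) prints a summary similar to what the hadoop version command reports.

import org.apache.hadoop.util.VersionInfo;

public class PrintBuildInfo {
  public static void main(String[] args) {
    // Each accessor delegates to the version info generated at build time.
    System.out.println("Hadoop " + VersionInfo.getVersion());
    System.out.println("Subversion " + VersionInfo.getUrl() + " -r " + VersionInfo.getRevision());
    System.out.println("Compiled by " + VersionInfo.getUser() + " on " + VersionInfo.getDate());
    System.out.println("From source with checksum " + VersionInfo.getSrcChecksum());
  }
}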
Example #15
  /**
   * Attempts a Kerberos login via a keytab if security is enabled in Hadoop.
   *
   * <p>This should be able to support multiple Hadoop clusters as long as the particular principal
   * is allowed on multiple clusters.
   *
   * <p>To preserve compatibility with non-security-enhanced HDFS, we use reflection for various
   * UserGroupInformation and SecurityUtil related method calls.
   */
  @SuppressWarnings("unchecked")
  static void tryKerberosLogin() throws IOException {

    /*
     * UserGroupInformation exists since Hadoop 0.18, but
     * UserGroupInformation.isSecurityEnabled() is not in the pre-security API:
     *
     * boolean useSec = UserGroupInformation.isSecurityEnabled();
     */
    boolean useSec = false;

    try {
      Class<UserGroupInformation> c = UserGroupInformation.class;
      // static call, null this obj
      useSec = (Boolean) c.getMethod("isSecurityEnabled").invoke(null);
    } catch (Exception e) {
      LOG.warn(
          "Flume is using Hadoop core "
              + org.apache.hadoop.util.VersionInfo.getVersion()
              + " which does not support Security / Authentication: "
              + e.getMessage());
      return;
    }

    LOG.info("Hadoop Security enabled: " + useSec);
    if (!useSec) {
      return;
    }

    // At this point we know we are using a hadoop library that is kerberos
    // enabled.

    // attempt to load kerberos information for authenticated hdfs comms.
    String principal = FlumeConfiguration.get().getKerberosPrincipal();
    String keytab = FlumeConfiguration.get().getKerberosKeytab();
    LOG.info("Kerberos login as " + principal + " from " + keytab);

    try {
      /*
       * SecurityUtil not present pre hadoop 20.2
       *
       * SecurityUtil.login not in pre-security Hadoop API
       *
       * // Keytab login does not need to auto refresh
       *
       * SecurityUtil.login(FlumeConfiguration.get(),
       * FlumeConfiguration.SECURITY_KERBEROS_KEYTAB,
       * FlumeConfiguration.SECURITY_KERBEROS_PRINCIPAL);
       */
      Class c = Class.forName("org.apache.hadoop.security.SecurityUtil");
      // get method login(Configuration, String, String);
      Method m = c.getMethod("login", Configuration.class, String.class, String.class);
      m.invoke(
          null,
          FlumeConfiguration.get(),
          FlumeConfiguration.SECURITY_KERBEROS_KEYTAB,
          FlumeConfiguration.SECURITY_KERBEROS_PRINCIPAL);
    } catch (Exception e) {
      LOG.error(
          "Flume failed when attempting to authenticate with keytab "
              + FlumeConfiguration.get().getKerberosKeytab()
              + " and principal '"
              + FlumeConfiguration.get().getKerberosPrincipal()
              + "'",
          e);

      // e.getMessage() from Hadoop is not informative here
      return;
    }

    try {
      /*
       * getLoginUser, getAuthenticationMethod, and isLoginKeytabBased are not
       * in Hadoop 20.2, only in the Kerberos-enhanced versions.
       *
       * getUserName is in all versions from 0.18.3 on:
       *
       * UserGroupInformation ugi = UserGroupInformation.getLoginUser();
       * LOG.info("Auth method: " + ugi.getAuthenticationMethod());
       * LOG.info(" User name: " + ugi.getUserName());
       * LOG.info(" Using keytab: " +
       * UserGroupInformation.isLoginKeytabBased());
       */

      Class<UserGroupInformation> c2 = UserGroupInformation.class;
      // static call, null this obj
      UserGroupInformation ugi = (UserGroupInformation) c2.getMethod("getLoginUser").invoke(null);
      String authMethod = c2.getMethod("getAuthenticationMethod").invoke(ugi).toString();
      boolean keytabBased = (Boolean) c2.getMethod("isLoginKeytabBased").invoke(ugi);

      LOG.info("Auth method: " + authMethod);
      LOG.info(" User name: " + ugi.getUserName());
      LOG.info(" Using keytab: " + keytabBased);
    } catch (Exception e) {
      LOG.error("Flume was unable to dump kerberos login user" + " and authentication method", e);
      return;
    }
  }
Example #16
 public static String getBuildVersion() {
   return VersionInfo.getRevision();
 }
Example #17
  public void _jspService(HttpServletRequest request, HttpServletResponse response)
      throws java.io.IOException, ServletException {

    PageContext pageContext = null;
    HttpSession session = null;
    ServletContext application = null;
    ServletConfig config = null;
    JspWriter out = null;
    Object page = this;
    JspWriter _jspx_out = null;
    PageContext _jspx_page_context = null;

    try {
      response.setContentType("text/html; charset=UTF-8");
      pageContext = _jspxFactory.getPageContext(this, request, response, null, true, 8192, true);
      _jspx_page_context = pageContext;
      application = pageContext.getServletContext();
      config = pageContext.getServletConfig();
      session = pageContext.getSession();
      out = pageContext.getOut();
      _jspx_out = out;
      _jspx_resourceInjector =
          (org.apache.jasper.runtime.ResourceInjector)
              application.getAttribute("com.sun.appserv.jsp.resource.injector");

      out.write('\n');

      JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
      ClusterStatus status = tracker.getClusterStatus();
      String trackerName = StringUtils.simpleHostname(tracker.getJobTrackerMachine());

      out.write("\n<html>\n<head>\n<title>");
      out.print(trackerName);
      out.write(
          " Hadoop Locality Statistics</title>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/hadoop.css\">\n</head>\n<body>\n<h1>");
      out.print(trackerName);
      out.write(" Hadoop Locality Statistics</h1>\n\n<b>State:</b> ");
      out.print(status.getJobTrackerState());
      out.write("<br>\n<b>Started:</b> ");
      out.print(new Date(tracker.getStartTime()));
      out.write("<br>\n<b>Version:</b> ");
      out.print(VersionInfo.getVersion());
      out.write(",\n                r");
      out.print(VersionInfo.getRevision());
      out.write("<br>\n<b>Compiled:</b> ");
      out.print(VersionInfo.getDate());
      out.write(" by\n                 ");
      out.print(VersionInfo.getUser());
      out.write("<br>\n<b>Identifier:</b> ");
      out.print(tracker.getTrackerIdentifier());
      out.write("<br>\n\n<hr>\n\n");

      Collection<JobInProgress> jobs = new ArrayList<JobInProgress>();
      jobs.addAll(tracker.completedJobs());
      jobs.addAll(tracker.runningJobs());
      jobs.addAll(tracker.failedJobs());
      int dataLocalMaps = 0;
      int rackLocalMaps = 0;
      int totalMaps = 0;
      int totalReduces = 0;
      for (JobInProgress job : jobs) {
        Counters counters = job.getCounters();
        dataLocalMaps += counters.getCounter(JobInProgress.Counter.DATA_LOCAL_MAPS);
        rackLocalMaps += counters.getCounter(JobInProgress.Counter.RACK_LOCAL_MAPS);
        totalMaps += counters.getCounter(JobInProgress.Counter.TOTAL_LAUNCHED_MAPS);
        totalReduces += counters.getCounter(JobInProgress.Counter.TOTAL_LAUNCHED_REDUCES);
      }
      int dataLocalMapPct = totalMaps == 0 ? 0 : (100 * dataLocalMaps) / totalMaps;
      int rackLocalMapPct = totalMaps == 0 ? 0 : (100 * rackLocalMaps) / totalMaps;
      int dataRackLocalMapPct =
          totalMaps == 0 ? 0 : (100 * (dataLocalMaps + rackLocalMaps)) / totalMaps;

      out.write("\n<p>\n<b>Data Local Maps:</b> ");
      out.print(dataLocalMaps);
      out.write(' ');
      out.write('(');
      out.print(dataLocalMapPct);
      out.write("%) <br>\n<b>Rack Local Maps:</b> ");
      out.print(rackLocalMaps);
      out.write(' ');
      out.write('(');
      out.print(rackLocalMapPct);
      out.write("%) <br>\n<b>Data or Rack Local:</b> ");
      out.print(dataLocalMaps + rackLocalMaps);
      out.write(' ');
      out.write('(');
      out.print(dataRackLocalMapPct);
      out.write("%) <br>\n<b>Total Maps:</b> ");
      out.print(totalMaps);
      out.write(" <br>\n<b>Total Reduces:</b> ");
      out.print(totalReduces);
      out.write(" <br>\n</p>\n\n");

      out.println(ServletUtil.htmlFooter());

      out.write('\n');
    } catch (Throwable t) {
      if (!(t instanceof SkipPageException)) {
        out = _jspx_out;
        if (out != null && out.getBufferSize() != 0) out.clearBuffer();
        if (_jspx_page_context != null) _jspx_page_context.handlePageException(t);
      }
    } finally {
      _jspxFactory.releasePageContext(_jspx_page_context);
    }
  }
Example #18
 public static boolean isHadoop23() {
   String version = org.apache.hadoop.util.VersionInfo.getVersion();
   if (version.matches("\\b0\\.23\\..+\\b") || version.matches("\\b2\\..*")) return true;
   return false;
 }
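The regex check above only recognizes 0.23.x and 2.x version strings. A hedged alternative sketch that compares numerically via org.apache.hadoop.util.VersionUtil (the same helper used by the verifySoftwareVersion example earlier); the class and method names are illustrative.

import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;

public final class HadoopVersionCheck {
  private HadoopVersionCheck() {}

  /** True when the running Hadoop build is 0.23.0 or newer. */
  public static boolean isAtLeast023() {
    return VersionUtil.compareVersions(VersionInfo.getVersion(), "0.23.0") >= 0;
  }
}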
Example #19
  @SuppressWarnings({"unchecked", "deprecation"})
  @Test
  public void testNameNodeMXBeanInfo() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;

    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();

      FSNamesystem fsn = cluster.getNameNode().namesystem;

      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      // get attribute "ClusterId"
      String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
      assertEquals(fsn.getClusterId(), clusterId);
      // get attribute "BlockPoolId"
      String blockpoolId = (String) mbs.getAttribute(mxbeanName, "BlockPoolId");
      assertEquals(fsn.getBlockPoolId(), blockpoolId);
      // get attribute "Version"
      String version = (String) mbs.getAttribute(mxbeanName, "Version");
      assertEquals(fsn.getVersion(), version);
      assertTrue(version.equals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision()));
      // get attribute "Used"
      Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
      assertEquals(fsn.getUsed(), used.longValue());
      // get attribute "Total"
      Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
      assertEquals(fsn.getTotal(), total.longValue());
      // get attribute "safemode"
      String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
      assertEquals(fsn.getSafemode(), safemode);
      // get attribute nondfs
      Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
      assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
      // get attribute percentremaining
      Float percentremaining = (Float) (mbs.getAttribute(mxbeanName, "PercentRemaining"));
      assertEquals(fsn.getPercentRemaining(), percentremaining.floatValue(), DELTA);
      // get attribute Totalblocks
      Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
      assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
      // get attribute alivenodeinfo
      String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
      Map<String, Map<String, Object>> liveNodes =
          (Map<String, Map<String, Object>>) JSON.parse(alivenodeinfo);
      assertTrue(liveNodes.size() > 0);
      for (Map<String, Object> liveNode : liveNodes.values()) {
        assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
        assertTrue(((Long) liveNode.get("nonDfsUsedSpace")) > 0);
        assertTrue(liveNode.containsKey("capacity"));
        assertTrue(((Long) liveNode.get("capacity")) > 0);
        assertTrue(liveNode.containsKey("numBlocks"));
        assertTrue(((Long) liveNode.get("numBlocks")) == 0);
      }
      assertEquals(fsn.getLiveNodes(), alivenodeinfo);
      // get attribute deadnodeinfo
      String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName, "DeadNodes"));
      assertEquals(fsn.getDeadNodes(), deadnodeinfo);
      // get attribute NameDirStatuses
      String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, "NameDirStatuses"));
      assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
      Map<String, Map<String, String>> statusMap =
          (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
      Collection<URI> nameDirUris = cluster.getNameDirs(0);
      for (URI nameDirUri : nameDirUris) {
        File nameDir = new File(nameDirUri);
        System.out.println("Checking for the presence of " + nameDir + " in active name dirs.");
        assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
      }
      assertEquals(2, statusMap.get("active").size());
      assertEquals(0, statusMap.get("failed").size());

      // This will cause the first dir to fail.
      File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]);
      assertEquals(0, FileUtil.chmod(new File(failedNameDir, "current").getAbsolutePath(), "000"));
      cluster.getNameNodeRpc().rollEditLog();

      nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, "NameDirStatuses"));
      statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
      for (URI nameDirUri : nameDirUris) {
        File nameDir = new File(nameDirUri);
        String expectedStatus = nameDir.equals(failedNameDir) ? "failed" : "active";
        System.out.println(
            "Checking for the presence of " + nameDir + " in " + expectedStatus + " name dirs.");
        assertTrue(statusMap.get(expectedStatus).containsKey(nameDir.getAbsolutePath()));
      }
      assertEquals(1, statusMap.get("active").size());
      assertEquals(1, statusMap.get("failed").size());
    } finally {
      if (cluster != null) {
        for (URI dir : cluster.getNameDirs(0)) {
          FileUtil.chmod(new File(new File(dir), "current").getAbsolutePath(), "755");
        }
        cluster.shutdown();
      }
    }
  }