private void lostFoundInit(DFSClient dfs) {
    lfInited = true;
    try {
      String lfName = "/lost+found";

      final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
      if (lfStatus == null) { // does not exist
        lfInitedOk = dfs.mkdirs(lfName, null, true);
        lostFound = lfName;
      } else if (!lfStatus.isDir()) { // exists but not a directory
        LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
        lfInitedOk = false;
      } else { // exists and is a directory
        lostFound = lfName;
        lfInitedOk = true;
      }
    } catch (Exception e) {
      LOG.warn("Cannot initialize /lost+found", e);
      lfInitedOk = false;
    }
    if (lostFound == null) {
      LOG.warn("Cannot initialize /lost+found .");
      lfInitedOk = false;
      internalError = true;
    }
  }
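This initializer is lazy by design: nothing touches /lost+found until fsck actually has to move a corrupted file. The calling pattern, taken verbatim from copyBlocksToLostFound further down this page, is:

      if (!lfInited) {
        lostFoundInit(dfs);
      }
      if (!lfInitedOk) {
        throw new IOException("failed to initialize lost+found");
      }
      String target = lostFound + fullName; // destination under /lost+found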
Example #2
  /**
   * Before Hadoop 2.8.0 the FileSystem interface had no getStoragePolicy method, so this helper
   * keeps us compatible with older versions. See HADOOP-12161 for more details.
   *
   * @param path the path whose storage policy is being queried
   * @return the storage policy name
   */
  private String getStoragePolicyForOldHDFSVersion(Path path) {
    try {
      if (this.fs instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) this.fs;
        HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath());
        if (null != status) {
          if (unspecifiedStoragePolicyId < 0) {
            // Get the unspecified id field through reflection to avoid a compilation error.
            // In later versions, BlockStoragePolicySuite#ID_UNSPECIFIED was moved to
            // HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED.
            Field idUnspecified = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
            unspecifiedStoragePolicyId = idUnspecified.getByte(BlockStoragePolicySuite.class);
          }
          byte storagePolicyId = status.getStoragePolicy();
          if (storagePolicyId != unspecifiedStoragePolicyId) {
            BlockStoragePolicy[] policies = dfs.getStoragePolicies();
            for (BlockStoragePolicy policy : policies) {
              if (policy.getId() == storagePolicyId) {
                return policy.getName();
              }
            }
          }
        }
      }
    } catch (Throwable e) {
      LOG.warn("failed to get block storage policy of [" + path + "]", e);
    }

    return null;
  }
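The reflection trick above deserves a closer look. ID_UNSPECIFIED is a static field, so the instance argument handed to getByte is ignored; passing null works just as well. A minimal sketch (the method name is illustrative):

  private static byte lookupUnspecifiedPolicyId() throws ReflectiveOperationException {
    // Static field: the instance argument to getByte is ignored, so null is fine.
    Field idUnspecified = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
    return idUnspecified.getByte(null);
  }

On Hadoop 2.8.0 and later the constant can be read directly as HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, with no reflection at all.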
  private static DatanodeInfo chooseDatanode(
      final NameNode namenode, final String path, final HttpOpParam.Op op, final long openOffset)
      throws IOException {
    if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      final HdfsFileStatus status = namenode.getFileInfo(path);
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN && (openOffset < 0L || openOffset >= len)) {
        throw new IOException(
            "Offset="
                + openOffset
                + " out of the range [0, "
                + len
                + "); "
                + op
                + ", path="
                + path);
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
        final LocatedBlocks locations = namenode.getBlockLocations(path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return JspHelper.bestNode(locations.get(0));
        }
      }
    }

    return namenode.getNamesystem().getRandomDatanode();
  }
  static DatanodeInfo chooseDatanode(
      final NameNode namenode,
      final String path,
      final HttpOpParam.Op op,
      final long openOffset,
      final long blocksize,
      Configuration conf)
      throws IOException {
    final BlockManager bm = namenode.getNamesystem().getBlockManager();

    if (op == PutOpParam.Op.CREATE) {
      // choose a datanode near the client
      final DatanodeDescriptor clientNode =
          bm.getDatanodeManager().getDatanodeByHost(getRemoteAddress());
      if (clientNode != null) {
        final DatanodeDescriptor[] datanodes =
            bm.getBlockPlacementPolicy().chooseTarget(path, 1, clientNode, null, blocksize);
        if (datanodes.length > 0) {
          return datanodes[0];
        }
      }
    } else if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      // choose a datanode containing a replica
      final NamenodeProtocols np = namenode.getRpcServer();
      final HdfsFileStatus status = np.getFileInfo(path);
      if (status == null) {
        throw new FileNotFoundException("File " + path + " not found.");
      }
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN) {
        if (openOffset < 0L || (openOffset >= len && len > 0)) {
          throw new IOException(
              "Offset="
                  + openOffset
                  + " out of the range [0, "
                  + len
                  + "); "
                  + op
                  + ", path="
                  + path);
        }
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
        final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return JspHelper.bestNode(locations.get(0).getLocations(), false, conf);
        }
      }
    }

    return (DatanodeDescriptor)
        bm.getDatanodeManager().getNetworkTopology().chooseRandom(NodeBase.ROOT);
  }
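Both overloads exist to back HTTP redirects: the namenode picks a datanode and points the client at it. A hedged sketch of the surrounding redirect step, assuming a WebHDFS-style URL (the layout here is illustrative, not the exact servlet code):

    DatanodeInfo dn = chooseDatanode(namenode, path, GetOpParam.Op.OPEN,
        openOffset, blocksize, conf);
    // Send the HTTP client to the chosen datanode, which streams the data itself.
    URI redirect = new URI("http", null, dn.getHostName(), dn.getInfoPort(),
        "/webhdfs/v1" + path, "op=OPEN&offset=" + openOffset, null);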
  @Test
  public void testPolicyPropagation() throws IOException {
    startUpCluster(false, -1);
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path path = new Path("/" + METHOD_NAME + ".dat");

    makeTestFile(path, 0, true);
    // Stat the file and check that the LAZY_PERSIST policy is returned.
    HdfsFileStatus status = client.getFileInfo(path.toString());
    assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
  }
  @Test
  public void testPolicyNotSetByDefault() throws IOException {
    startUpCluster(false, -1);
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path path = new Path("/" + METHOD_NAME + ".dat");

    makeTestFile(path, 0, false);
    // Stat the file and check that the LAZY_PERSIST policy is not returned.
    HdfsFileStatus status = client.getFileInfo(path.toString());
    assertThat(status.getStoragePolicy(), not(LAZY_PERSIST_POLICY_ID));
  }
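Both tests depend on a makeTestFile helper that is not shown here. A plausible sketch, assuming it maps the boolean onto CreateFlag.LAZY_PERSIST at create time (buffer size, replication and block size below are illustrative):

  private void makeTestFile(Path path, long length, boolean isLazyPersist)
      throws IOException {
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE);
    if (isLazyPersist) {
      flags.add(CreateFlag.LAZY_PERSIST); // request memory-backed storage
    }
    try (FSDataOutputStream fos = fs.create(path, FsPermission.getFileDefault(),
        flags, 4096, (short) 1, 4096, null)) {
      fos.write(new byte[(int) length]);
    }
  }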
Example #7
 /** Convert an HdfsFileStatus object to a JSON string. */
 public static String toJsonString(final HdfsFileStatus status, boolean includeType) {
   if (status == null) {
     return null;
   }
   final Map<String, Object> m = new TreeMap<String, Object>();
   m.put("localName", status.getLocalName());
   m.put("isDir", status.isDir());
   m.put("len", status.getLen());
   m.put("owner", status.getOwner());
   m.put("group", status.getGroup());
   m.put("permission", toString(status.getPermission()));
   m.put("accessTime", status.getAccessTime());
   m.put("modificationTime", status.getModificationTime());
   m.put("blockSize", status.getBlockSize());
   m.put("replication", status.getReplication());
   return includeType ? toJsonString(HdfsFileStatus.class, m) : JSON.toString(m);
 }
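For reference, the map serializes to a flat JSON object; the keys are exactly the puts above (the TreeMap keeps them sorted), while the values below are made up:

    // toJsonString(status, false) ->
    // {"accessTime":0,"blockSize":134217728,"group":"supergroup","isDir":false,
    //  "len":42,"localName":"f.txt","modificationTime":1350000000000,
    //  "owner":"hdfs","permission":"644","replication":3}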
 @Override // ClientProtocol
 public String getLinkTarget(String path) throws IOException {
   metrics.incrGetLinkTargetOps();
   try {
     HdfsFileStatus stat = namesystem.getFileInfo(path, false);
     if (stat != null) {
       // NB: getSymlink throws IOException if !stat.isSymlink()
       return stat.getSymlink();
     }
   } catch (UnresolvedPathException e) {
     return e.getResolvedPath().toString();
   } catch (UnresolvedLinkException e) {
     // The NameNode should only throw an UnresolvedPathException
     throw new AssertionError("UnresolvedLinkException thrown");
   }
   return null;
 }
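Seen from a client, this RPC is what a symlink lookup ends up calling. A minimal sketch of the round trip (paths are illustrative, and symlink support must be enabled):

    FileContext fc = FileContext.getFileContext(conf);
    fc.createSymlink(new Path("/data/real"), new Path("/data/link"), false);
    Path target = fc.getLinkTarget(new Path("/data/link")); // -> /data/real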
  @Test
  public void testPolicyPersistenceInFsImage() throws IOException {
    startUpCluster(false, -1);
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path path = new Path("/" + METHOD_NAME + ".dat");

    makeTestFile(path, 0, true);
    // Checkpoint: saveNamespace requires the NameNode to be in safe mode.
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    cluster.restartNameNode(true);

    // Stat the file and check that the LAZY_PERSIST policy survived the restart.
    HdfsFileStatus status = client.getFileInfo(path.toString());
    assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
  }
 /**
  * Select a datanode to service this request. Currently, this looks at no more than the first five
  * blocks of a file, selecting a datanode randomly from the most represented.
  *
  * @param conf the configuration used when choosing the best datanode
  */
 private DatanodeID pickSrcDatanode(LocatedBlocks blks, HdfsFileStatus i, Configuration conf)
     throws IOException {
   if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
     // pick a random datanode
     NameNode nn = NameNodeHttpServer.getNameNodeFromContext(getServletContext());
     return NamenodeJspHelper.getRandomDatanode(nn);
   }
   return JspHelper.bestNode(blks, conf);
 }
  /** Create a redirection URL */
  private URL createRedirectURL(
      String path,
      String encodedPath,
      HdfsFileStatus status,
      UserGroupInformation ugi,
      ClientProtocol nnproxy,
      HttpServletRequest request,
      String dt)
      throws IOException {
    String scheme = request.getScheme();
    final LocatedBlocks blks =
        nnproxy.getBlockLocations(status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
    final Configuration conf = NameNodeHttpServer.getConfFromContext(getServletContext());
    final DatanodeID host = pickSrcDatanode(blks, status, conf);
    final String hostname;
    if (host instanceof DatanodeInfo) {
      hostname = host.getHostName();
    } else {
      hostname = host.getIpAddr();
    }

    int port = "https".equals(scheme) ? host.getInfoSecurePort() : host.getInfoPort();

    String dtParam = "";
    if (dt != null) {
      dtParam = JspHelper.getDelegationTokenUrlParam(dt);
    }

    // Add namenode address to the url params
    NameNode nn = NameNodeHttpServer.getNameNodeFromContext(getServletContext());
    String addr = nn.getNameNodeAddressHostPortString();
    String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);

    return new URL(
        scheme,
        hostname,
        port,
        "/streamFile"
            + encodedPath
            + '?'
            + "ugi="
            + ServletUtil.encodeQueryValue(ugi.getShortUserName())
            + dtParam
            + addrParam);
  }
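The URL this produces has roughly the following shape; host, port and token are invented, while the parameter names come from the code above and JspHelper's URL-param helpers:

    // http://dn1.example.com:50075/streamFile/user/alice/f.txt
    //     ?ugi=alice&delegation=<token>&nnaddr=nn.example.com:8020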
  private void copyBlocksToLostFound(String parent, HdfsFileStatus file, LocatedBlocks blocks)
      throws IOException {
    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
    final String fullName = file.getFullName(parent);
    OutputStream fos = null;
    try {
      if (!lfInited) {
        lostFoundInit(dfs);
      }
      if (!lfInitedOk) {
        throw new IOException("failed to initialize lost+found");
      }
      String target = lostFound + fullName;
      if (hdfsPathExists(target)) {
        LOG.warn(
            "Fsck: can't copy the remains of "
                + fullName
                + " to "
                + "lost+found, because "
                + target
                + " already exists.");
        return;
      }
      if (!namenode.getRpcServer().mkdirs(target, file.getPermission(), true)) {
        throw new IOException("failed to create directory " + target);
      }
      // create chains
      int chain = 0;
      boolean copyError = false;
      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        LocatedBlock lblock = lBlk;
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(target + "/" + chain, true);
          if (fos == null) {
            throw new IOException(
                "Failed to copy " + fullName + " to /lost+found: could not store chain " + chain);
          }
          chain++;
        }

        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          LOG.error("Fsck: could not copy block " + lblock.getBlock() + " to " + target, e);
          fos.flush();
          fos.close();
          fos = null;
          internalError = true;
          copyError = true;
        }
      }
      if (copyError) {
        LOG.warn(
            "Fsck: there were errors copying the remains of the "
                + "corrupted file "
                + fullName
                + " to /lost+found");
      } else {
        LOG.info("Fsck: copied the remains of the corrupted file " + fullName + " to /lost+found");
      }
    } catch (Exception e) {
      LOG.error("copyBlocksToLostFound: error processing " + fullName, e);
      internalError = true;
    } finally {
      if (fos != null) fos.close();
      dfs.close();
    }
  }
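The chain counter explains the resulting layout: each contiguous run of blocks with live replicas becomes its own file under the target directory, and every gap of unrecoverable blocks starts a new chain. Illustratively (names made up):

    // /lost+found/user/alice/data.dat/0  <- first run of recoverable blocks
    // /lost+found/user/alice/data.dat/1  <- next run, after a gap of missing blocks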
  @VisibleForTesting
  void check(String parent, HdfsFileStatus file, Result res) throws IOException {
    String path = file.getFullName(parent);
    boolean isOpen = false;

    if (file.isDir()) {
      if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
        String snapshotPath =
            (path.endsWith(Path.SEPARATOR) ? path : path + Path.SEPARATOR)
                + HdfsConstants.DOT_SNAPSHOT_DIR;
        HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo(snapshotPath);
        check(snapshotPath, snapshotFileInfo, res);
      }
      byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
      DirectoryListing thisListing;
      if (showFiles) {
        out.println(path + " <dir>");
      }
      res.totalDirs++;
      do {
        assert lastReturnedName != null;
        thisListing = namenode.getRpcServer().getListing(path, lastReturnedName, false);
        if (thisListing == null) {
          return;
        }
        HdfsFileStatus[] files = thisListing.getPartialListing();
        for (int i = 0; i < files.length; i++) {
          check(path, files[i], res);
        }
        lastReturnedName = thisListing.getLastName();
      } while (thisListing.hasMore());
      return;
    }
    if (file.isSymlink()) {
      if (showFiles) {
        out.println(path + " <symlink>");
      }
      res.totalSymlinks++;
      return;
    }
    long fileLen = file.getLen();
    // Get block locations without updating the file access time
    // and without block access tokens
    LocatedBlocks blocks;
    try {
      blocks = namenode.getNamesystem().getBlockLocations(path, 0, fileLen, false, false, false);
    } catch (FileNotFoundException fnfe) {
      blocks = null;
    }
    if (blocks == null) { // the file is deleted
      return;
    }
    isOpen = blocks.isUnderConstruction();
    if (isOpen && !showOpenFiles) {
      // We collect these stats about open files to report with default options
      res.totalOpenFilesSize += fileLen;
      res.totalOpenFilesBlocks += blocks.locatedBlockCount();
      res.totalOpenFiles++;
      return;
    }
    res.totalFiles++;
    res.totalSize += fileLen;
    res.totalBlocks += blocks.locatedBlockCount();
    if (showOpenFiles && isOpen) {
      out.print(
          path
              + " "
              + fileLen
              + " bytes, "
              + blocks.locatedBlockCount()
              + " block(s), OPENFORWRITE: ");
    } else if (showFiles) {
      out.print(path + " " + fileLen + " bytes, " + blocks.locatedBlockCount() + " block(s): ");
    } else {
      out.print('.');
    }
    if (res.totalFiles % 100 == 0) {
      out.println();
      out.flush();
    }
    int missing = 0;
    int corrupt = 0;
    long missize = 0;
    int underReplicatedPerFile = 0;
    int misReplicatedPerFile = 0;
    StringBuilder report = new StringBuilder();
    int i = 0;
    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
      ExtendedBlock block = lBlk.getBlock();
      boolean isCorrupt = lBlk.isCorrupt();
      String blkName = block.toString();
      DatanodeInfo[] locs = lBlk.getLocations();
      NumberReplicas numberReplicas =
          namenode.getNamesystem().getBlockManager().countNodes(block.getLocalBlock());
      int liveReplicas = numberReplicas.liveReplicas();
      res.totalReplicas += liveReplicas;
      short targetFileReplication = file.getReplication();
      res.numExpectedReplicas += targetFileReplication;
      if (liveReplicas > targetFileReplication) {
        res.excessiveReplicas += (liveReplicas - targetFileReplication);
        res.numOverReplicatedBlocks += 1;
      }
      // Check if the block is corrupt
      if (isCorrupt) {
        corrupt++;
        res.corruptBlocks++;
        out.print(
            "\n"
                + path
                + ": CORRUPT blockpool "
                + block.getBlockPoolId()
                + " block "
                + block.getBlockName()
                + "\n");
      }
      if (liveReplicas >= minReplication) res.numMinReplicatedBlocks++;
      if (liveReplicas < targetFileReplication && liveReplicas > 0) {
        res.missingReplicas += (targetFileReplication - liveReplicas);
        res.numUnderReplicatedBlocks += 1;
        underReplicatedPerFile++;
        if (!showFiles) {
          out.print("\n" + path + ": ");
        }
        out.println(
            " Under replicated "
                + block
                + ". Target Replicas is "
                + targetFileReplication
                + " but found "
                + liveReplicas
                + " replica(s).");
      }
      // verify block placement policy
      BlockPlacementStatus blockPlacementStatus =
          bpPolicy.verifyBlockPlacement(path, lBlk, targetFileReplication);
      if (!blockPlacementStatus.isPlacementPolicySatisfied()) {
        res.numMisReplicatedBlocks++;
        misReplicatedPerFile++;
        if (!showFiles) {
          if (underReplicatedPerFile == 0) out.println();
          out.print(path + ": ");
        }
        out.println(
            " Replica placement policy is violated for "
                + block
                + ". "
                + blockPlacementStatus.getErrorDescription());
      }
      report.append(i + ". " + blkName + " len=" + block.getNumBytes());
      if (liveReplicas == 0) {
        report.append(" MISSING!");
        res.addMissing(block.toString(), block.getNumBytes());
        missing++;
        missize += block.getNumBytes();
      } else {
        report.append(" repl=" + liveReplicas);
        if (showLocations || showRacks) {
          StringBuilder sb = new StringBuilder("[");
          for (int j = 0; j < locs.length; j++) {
            if (j > 0) {
              sb.append(", ");
            }
            if (showRacks) sb.append(NodeBase.getPath(locs[j]));
            else sb.append(locs[j]);
          }
          sb.append(']');
          report.append(" " + sb.toString());
        }
      }
      report.append('\n');
      i++;
    }
    if ((missing > 0) || (corrupt > 0)) {
      if (!showFiles && (missing > 0)) {
        out.print(
            "\n" + path + ": MISSING " + missing + " blocks of total size " + missize + " B.");
      }
      res.corruptFiles++;
      if (isOpen) {
        LOG.info("Fsck: ignoring open file " + path);
      } else {
        if (doMove) copyBlocksToLostFound(parent, file, blocks);
        if (doDelete) deleteCorruptedFile(path);
      }
    }
    if (showFiles) {
      if (missing > 0) {
        out.print(" MISSING " + missing + " blocks of total size " + missize + " B\n");
      } else if (underReplicatedPerFile == 0 && misReplicatedPerFile == 0) {
        out.print(" OK\n");
      }
      if (showBlocks) {
        out.print(report.toString() + "\n");
      }
    }
  }
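check() is the recursive core of fsck. A sketch of exercising it end to end through the stock shell tool, using standard fsck options:

    // Roughly what `hdfs fsck /user/alice -files -blocks -locations` runs:
    int rc = ToolRunner.run(new DFSck(conf),
        new String[] {"/user/alice", "-files", "-blocks", "-locations"});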
Example #14
  /** Convert an HdfsFileStatus object to a JSON string. */
  public static String toJsonString(final HdfsFileStatus status, boolean includeType) {
    if (status == null) {
      return null;
    }
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("pathSuffix", status.getLocalName());
    m.put("type", PathType.valueOf(status));
    if (status.isSymlink()) {
      m.put("symlink", status.getSymlink());
    }

    m.put("length", status.getLen());
    m.put("owner", status.getOwner());
    m.put("group", status.getGroup());
    FsPermission perm = status.getPermission();
    m.put("permission", toString(perm));
    if (perm.getAclBit()) {
      m.put("aclBit", true);
    }
    if (perm.getEncryptedBit()) {
      m.put("encBit", true);
    }
    m.put("accessTime", status.getAccessTime());
    m.put("modificationTime", status.getModificationTime());
    m.put("blockSize", status.getBlockSize());
    m.put("replication", status.getReplication());
    m.put("fileId", status.getFileId());
    m.put("childrenNum", status.getChildrenNum());
    m.put("storagePolicy", status.getStoragePolicy());
    return includeType ? toJsonString(FileStatus.class, m) : JSON.toString(m);
  }
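Side by side, the two serializers on this page disagree on key names, which marks the pre- and post-WebHDFS formats (values made up):

    // legacy:  {"isDir":false,"len":42,"localName":"f.txt", ...}
    // WebHDFS: {"pathSuffix":"f.txt","type":"FILE","length":42,"fileId":16389, ...}

With includeType set, the WebHDFS variant additionally wraps the map under a "FileStatus" key.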
 /**
  * Write a node to output. Node information includes path, modification time, permission, owner,
  * and group. For files, it also includes size, replication, and block size.
  */
 static void writeInfo(String parent, HdfsFileStatus i, XMLOutputter doc) throws IOException {
   final SimpleDateFormat ldf = df.get();
   doc.startTag(i.isDir() ? "directory" : "file");
   doc.attribute("path", i.getFullPath(new Path(parent)).toUri().getPath());
   doc.attribute("modified", ldf.format(new Date(i.getModificationTime())));
   doc.attribute("accesstime", ldf.format(new Date(i.getAccessTime())));
   if (!i.isDir()) {
     doc.attribute("size", String.valueOf(i.getLen()));
     doc.attribute("replication", String.valueOf(i.getReplication()));
     doc.attribute("blocksize", String.valueOf(i.getBlockSize()));
   }
   doc.attribute("permission", (i.isDir() ? "d" : "-") + i.getPermission());
   doc.attribute("owner", i.getOwner());
   doc.attribute("group", i.getGroup());
   doc.endTag();
 }
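A sketch of driving writeInfo over a whole listing; XMLOutputter is org.znerd.xmlenc.XMLOutputter as used by these servlets, and the surrounding names (response, listing, dir) are illustrative:

   XMLOutputter doc = new XMLOutputter(response.getWriter(), "UTF-8");
   doc.declaration();
   doc.startTag("listing");
   for (HdfsFileStatus s : listing.getPartialListing()) {
     writeInfo(dir, s, doc);
   }
   doc.endTag(); // listing
   doc.endDocument();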
  static void generateDirectoryStructure(
      JspWriter out, HttpServletRequest req, HttpServletResponse resp, Configuration conf)
      throws IOException, InterruptedException {
    final String dir =
        JspHelper.validatePath(StringEscapeUtils.unescapeHtml(req.getParameter("dir")));
    if (dir == null) {
      out.print("Invalid input");
      return;
    }
    String tokenString = req.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
    UserGroupInformation ugi = JspHelper.getUGI(req, conf);
    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
    int namenodeInfoPort = -1;
    if (namenodeInfoPortStr != null) namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
    final String nnAddr = req.getParameter(JspHelper.NAMENODE_ADDRESS);
    if (nnAddr == null) {
      out.print(JspHelper.NAMENODE_ADDRESS + " url param is null");
      return;
    }

    DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
    String target = dir;
    final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
    if (targetStatus == null) { // does not exist
      out.print("<h3>File or directory : " + target + " does not exist</h3>");
      JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, target, nnAddr);
    } else {
      if (!targetStatus.isDir()) { // a file
        List<LocatedBlock> blocks =
            dfs.getNamenode().getBlockLocations(dir, 0, 1).getLocatedBlocks();

        LocatedBlock firstBlock = null;
        DatanodeInfo[] locations = null;
        if (blocks.size() > 0) {
          firstBlock = blocks.get(0);
          locations = firstBlock.getLocations();
        }
        if (locations == null || locations.length == 0) {
          out.print("Empty file");
        } else {
          DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
          String fqdn = canonicalize(chosenNode.getIpAddr());
          int datanodePort = chosenNode.getXferPort();
          String redirectLocation =
              "http://"
                  + fqdn
                  + ":"
                  + chosenNode.getInfoPort()
                  + "/browseBlock.jsp?blockId="
                  + firstBlock.getBlock().getBlockId()
                  + "&blockSize="
                  + firstBlock.getBlock().getNumBytes()
                  + "&genstamp="
                  + firstBlock.getBlock().getGenerationStamp()
                  + "&filename="
                  + URLEncoder.encode(dir, "UTF-8")
                  + "&datanodePort="
                  + datanodePort
                  + "&namenodeInfoPort="
                  + namenodeInfoPort
                  + JspHelper.getDelegationTokenUrlParam(tokenString)
                  + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
          resp.sendRedirect(redirectLocation);
        }
        return;
      }
      // directory
      // generate a table and dump the info
      String[] headings = {
        "Name",
        "Type",
        "Size",
        "Replication",
        "Block Size",
        "Modification Time",
        "Permission",
        "Owner",
        "Group"
      };
      out.print("<h3>Contents of directory ");
      JspHelper.printPathWithLinks(dir, out, namenodeInfoPort, tokenString, nnAddr);
      out.print("</h3><hr>");
      JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, dir, nnAddr);
      out.print("<hr>");

      File f = new File(dir);
      String parent;
      if ((parent = f.getParent()) != null)
        out.print(
            "<a href=\""
                + req.getRequestURL()
                + "?dir="
                + parent
                + "&namenodeInfoPort="
                + namenodeInfoPort
                + JspHelper.getDelegationTokenUrlParam(tokenString)
                + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr)
                + "\">Go to parent directory</a><br>");

      DirectoryListing thisListing = dfs.listPaths(target, HdfsFileStatus.EMPTY_NAME);
      if (thisListing == null || thisListing.getPartialListing().length == 0) {
        out.print("Empty directory");
      } else {
        JspHelper.addTableHeader(out);
        int row = 0;
        JspHelper.addTableRow(out, headings, row++);
        String[] cols = new String[headings.length];
        do {
          HdfsFileStatus[] files = thisListing.getPartialListing();
          for (int i = 0; i < files.length; i++) {
            String localFileName = files[i].getLocalName();
            // Get the location of the first block of the file
            if (!files[i].isDir()) {
              cols[1] = "file";
              cols[2] = StringUtils.byteDesc(files[i].getLen());
              cols[3] = Short.toString(files[i].getReplication());
              cols[4] = StringUtils.byteDesc(files[i].getBlockSize());
            } else {
              cols[1] = "dir";
              cols[2] = "";
              cols[3] = "";
              cols[4] = "";
            }
            String datanodeUrl =
                req.getRequestURL()
                    + "?dir="
                    + URLEncoder.encode(files[i].getFullName(target), "UTF-8")
                    + "&namenodeInfoPort="
                    + namenodeInfoPort
                    + JspHelper.getDelegationTokenUrlParam(tokenString)
                    + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
            cols[0] = "<a href=\"" + datanodeUrl + "\">" + localFileName + "</a>";
            cols[5] = lsDateFormat.format(new Date((files[i].getModificationTime())));
            cols[6] = files[i].getPermission().toString();
            cols[7] = files[i].getOwner();
            cols[8] = files[i].getGroup();
            JspHelper.addTableRow(out, cols, row++);
          }
          if (!thisListing.hasMore()) {
            break;
          }
          thisListing = dfs.listPaths(target, thisListing.getLastName());
        } while (thisListing != null);
        JspHelper.addTableFooter(out);
      }
    }
    out.print(
        "<br><a href=\"http://"
            + canonicalize(nnAddr)
            + ":"
            + namenodeInfoPort
            + "/dfshealth.jsp\">Go back to DFS home</a>");
    dfs.close();
  }
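This handler backs the old per-datanode directory browser. A request it serves looks roughly like this (host and ports invented):

    // http://dn1.example.com:50075/browseDirectory.jsp?dir=/user/alice
    //     &namenodeInfoPort=50070&nnaddr=nn.example.com:8020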
Example #17
 static PathType valueOf(HdfsFileStatus status) {
   return status.isDir() ? DIRECTORY : status.isSymlink() ? SYMLINK : FILE;
 }
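Trivial, but it closes the loop with the WebHDFS serializer above, which stores this value under the "type" key:

   PathType t = PathType.valueOf(status); // FILE, DIRECTORY or SYMLINK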