@Override
public List<HdfsFileStatusWithId> listLocatedHdfsStatus(FileSystem fs, Path p,
    PathFilter filter) throws IOException {
  DistributedFileSystem dfs = ensureDfs(fs);
  DFSClient dfsc = dfs.getClient();
  final String src = p.toUri().getPath();
  // Fetch the first page of the listing, including block locations.
  DirectoryListing current = dfsc.listPaths(src,
      org.apache.hadoop.hdfs.protocol.HdfsFileStatus.EMPTY_NAME, true);
  if (current == null) { // the directory does not exist
    throw new FileNotFoundException("File " + p + " does not exist.");
  }
  final URI fsUri = fs.getUri();
  List<HdfsFileStatusWithId> result =
      new ArrayList<HdfsFileStatusWithId>(current.getPartialListing().length);
  while (current != null) {
    org.apache.hadoop.hdfs.protocol.HdfsFileStatus[] hfss = current.getPartialListing();
    for (int i = 0; i < hfss.length; ++i) {
      HdfsLocatedFileStatus next = (HdfsLocatedFileStatus) (hfss[i]);
      if (filter != null) {
        Path filterPath = next.getFullPath(p).makeQualified(fsUri, null);
        if (!filter.accept(filterPath)) continue;
      }
      LocatedFileStatus lfs = next.makeQualifiedLocated(fsUri, p);
      result.add(new HdfsFileStatusWithIdImpl(lfs, next.getFileId()));
    }
    // Page through the listing until the namenode reports no more entries.
    current = current.hasMore()
        ? dfsc.listPaths(src, current.getLastName(), true)
        : null;
  }
  return result;
}
@Override // ClientProtocol
public DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) throws IOException {
  DirectoryListing files = namesystem.getListing(src, startAfter, needLocation);
  if (files != null) {
    metrics.incrGetListingOps();
    metrics.incrFilesInGetListingOps(files.getPartialListing().length);
  }
  return files;
}
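The handler above returns one partial page of the listing per call; the caller is responsible for looping until hasMore() reports no further entries, as the other methods in this section do. A minimal, hypothetical client-side sketch of that pagination loop (not from the source; it assumes an already-constructed DFSClient named dfsc and a directory path string src) might look like:

// Hypothetical sketch of driving the paginated listing from the client side;
// `dfsc` (a DFSClient) and `src` (the directory path) are assumed to exist.
DirectoryListing listing = dfsc.listPaths(src, HdfsFileStatus.EMPTY_NAME, false);
while (listing != null) {
  for (HdfsFileStatus status : listing.getPartialListing()) {
    System.out.println(status.getLocalName()); // process each entry in this page
  }
  // Request the next page, resuming after the last name returned so far.
  listing = listing.hasMore()
      ? dfsc.listPaths(src, listing.getLastName(), false)
      : null;
}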
@VisibleForTesting
void check(String parent, HdfsFileStatus file, Result res) throws IOException {
  String path = file.getFullName(parent);
  boolean isOpen = false;

  if (file.isDir()) {
    if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
      String snapshotPath = (path.endsWith(Path.SEPARATOR) ? path :
          path + Path.SEPARATOR) + HdfsConstants.DOT_SNAPSHOT_DIR;
      HdfsFileStatus snapshotFileInfo =
          namenode.getRpcServer().getFileInfo(snapshotPath);
      check(snapshotPath, snapshotFileInfo, res);
    }
    byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
    DirectoryListing thisListing;
    if (showFiles) {
      out.println(path + " <dir>");
    }
    res.totalDirs++;
    do {
      assert lastReturnedName != null;
      thisListing = namenode.getRpcServer().getListing(path, lastReturnedName, false);
      if (thisListing == null) {
        return;
      }
      HdfsFileStatus[] files = thisListing.getPartialListing();
      for (int i = 0; i < files.length; i++) {
        check(path, files[i], res);
      }
      lastReturnedName = thisListing.getLastName();
    } while (thisListing.hasMore());
    return;
  }

  if (file.isSymlink()) {
    if (showFiles) {
      out.println(path + " <symlink>");
    }
    res.totalSymlinks++;
    return;
  }

  long fileLen = file.getLen();
  // Get block locations without updating the file access time
  // and without block access tokens
  LocatedBlocks blocks;
  try {
    blocks = namenode.getNamesystem().getBlockLocations(path, 0, fileLen,
        false, false, false);
  } catch (FileNotFoundException fnfe) {
    blocks = null;
  }
  if (blocks == null) { // the file is deleted
    return;
  }
  isOpen = blocks.isUnderConstruction();
  if (isOpen && !showOpenFiles) {
    // We collect these stats about open files to report with default options
    res.totalOpenFilesSize += fileLen;
    res.totalOpenFilesBlocks += blocks.locatedBlockCount();
    res.totalOpenFiles++;
    return;
  }
  res.totalFiles++;
  res.totalSize += fileLen;
  res.totalBlocks += blocks.locatedBlockCount();
  if (showOpenFiles && isOpen) {
    out.print(path + " " + fileLen + " bytes, " + blocks.locatedBlockCount()
        + " block(s), OPENFORWRITE: ");
  } else if (showFiles) {
    out.print(path + " " + fileLen + " bytes, " + blocks.locatedBlockCount()
        + " block(s): ");
  } else {
    out.print('.');
  }
  if (res.totalFiles % 100 == 0) {
    out.println();
    out.flush();
  }

  int missing = 0;
  int corrupt = 0;
  long missize = 0;
  int underReplicatedPerFile = 0;
  int misReplicatedPerFile = 0;
  StringBuilder report = new StringBuilder();
  int i = 0;
  for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
    ExtendedBlock block = lBlk.getBlock();
    boolean isCorrupt = lBlk.isCorrupt();
    String blkName = block.toString();
    DatanodeInfo[] locs = lBlk.getLocations();
    NumberReplicas numberReplicas =
        namenode.getNamesystem().getBlockManager().countNodes(block.getLocalBlock());
    int liveReplicas = numberReplicas.liveReplicas();
    res.totalReplicas += liveReplicas;
    short targetFileReplication = file.getReplication();
    res.numExpectedReplicas += targetFileReplication;
    if (liveReplicas > targetFileReplication) {
      res.excessiveReplicas += (liveReplicas - targetFileReplication);
      res.numOverReplicatedBlocks += 1;
    }
    // Check if block is Corrupt
    if (isCorrupt) {
      corrupt++;
      res.corruptBlocks++;
      out.print("\n" + path + ": CORRUPT blockpool " + block.getBlockPoolId()
          + " block " + block.getBlockName() + "\n");
    }
    if (liveReplicas >= minReplication)
      res.numMinReplicatedBlocks++;
    if (liveReplicas < targetFileReplication && liveReplicas > 0) {
      res.missingReplicas += (targetFileReplication - liveReplicas);
      res.numUnderReplicatedBlocks += 1;
      underReplicatedPerFile++;
      if (!showFiles) {
        out.print("\n" + path + ": ");
      }
      out.println(" Under replicated " + block + ". Target Replicas is "
          + targetFileReplication + " but found " + liveReplicas + " replica(s).");
    }
    // verify block placement policy
    BlockPlacementStatus blockPlacementStatus =
        bpPolicy.verifyBlockPlacement(path, lBlk, targetFileReplication);
    if (!blockPlacementStatus.isPlacementPolicySatisfied()) {
      res.numMisReplicatedBlocks++;
      misReplicatedPerFile++;
      if (!showFiles) {
        if (underReplicatedPerFile == 0)
          out.println();
        out.print(path + ": ");
      }
      out.println(" Replica placement policy is violated for " + block + ". "
          + blockPlacementStatus.getErrorDescription());
    }
    report.append(i + ". " + blkName + " len=" + block.getNumBytes());
    if (liveReplicas == 0) {
      report.append(" MISSING!");
      res.addMissing(block.toString(), block.getNumBytes());
      missing++;
      missize += block.getNumBytes();
    } else {
      report.append(" repl=" + liveReplicas);
      if (showLocations || showRacks) {
        StringBuilder sb = new StringBuilder("[");
        for (int j = 0; j < locs.length; j++) {
          if (j > 0) {
            sb.append(", ");
          }
          if (showRacks)
            sb.append(NodeBase.getPath(locs[j]));
          else
            sb.append(locs[j]);
        }
        sb.append(']');
        report.append(" " + sb.toString());
      }
    }
    report.append('\n');
    i++;
  }

  if ((missing > 0) || (corrupt > 0)) {
    if (!showFiles && (missing > 0)) {
      out.print("\n" + path + ": MISSING " + missing + " blocks of total size "
          + missize + " B.");
    }
    res.corruptFiles++;
    if (isOpen) {
      LOG.info("Fsck: ignoring open file " + path);
    } else {
      if (doMove) copyBlocksToLostFound(parent, file, blocks);
      if (doDelete) deleteCorruptedFile(path);
    }
  }

  if (showFiles) {
    if (missing > 0) {
      out.print(" MISSING " + missing + " blocks of total size " + missize + " B\n");
    } else if (underReplicatedPerFile == 0 && misReplicatedPerFile == 0) {
      out.print(" OK\n");
    }
    if (showBlocks) {
      out.print(report.toString() + "\n");
    }
  }
}
static void generateDirectoryStructure(JspWriter out, HttpServletRequest req,
    HttpServletResponse resp, Configuration conf)
    throws IOException, InterruptedException {
  final String dir = JspHelper.validatePath(
      StringEscapeUtils.unescapeHtml(req.getParameter("dir")));
  if (dir == null) {
    out.print("Invalid input");
    return;
  }
  String tokenString = req.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
  UserGroupInformation ugi = JspHelper.getUGI(req, conf);
  String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
  int namenodeInfoPort = -1;
  if (namenodeInfoPortStr != null)
    namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
  final String nnAddr = req.getParameter(JspHelper.NAMENODE_ADDRESS);
  if (nnAddr == null) {
    out.print(JspHelper.NAMENODE_ADDRESS + " url param is null");
    return;
  }

  DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
  String target = dir;
  final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
  if (targetStatus == null) { // not exists
    out.print("<h3>File or directory : " + target + " does not exist</h3>");
    JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, target, nnAddr);
  } else {
    if (!targetStatus.isDir()) { // a file
      List<LocatedBlock> blocks =
          dfs.getNamenode().getBlockLocations(dir, 0, 1).getLocatedBlocks();
      LocatedBlock firstBlock = null;
      DatanodeInfo[] locations = null;
      if (blocks.size() > 0) {
        firstBlock = blocks.get(0);
        locations = firstBlock.getLocations();
      }
      if (locations == null || locations.length == 0) {
        out.print("Empty file");
      } else {
        DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
        String fqdn = canonicalize(chosenNode.getIpAddr());
        int datanodePort = chosenNode.getXferPort();
        String redirectLocation = "http://" + fqdn + ":" + chosenNode.getInfoPort()
            + "/browseBlock.jsp?blockId=" + firstBlock.getBlock().getBlockId()
            + "&blockSize=" + firstBlock.getBlock().getNumBytes()
            + "&genstamp=" + firstBlock.getBlock().getGenerationStamp()
            + "&filename=" + URLEncoder.encode(dir, "UTF-8")
            + "&datanodePort=" + datanodePort
            + "&namenodeInfoPort=" + namenodeInfoPort
            + JspHelper.getDelegationTokenUrlParam(tokenString)
            + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
        resp.sendRedirect(redirectLocation);
      }
      return;
    }

    // directory
    // generate a table and dump the info
    String[] headings = { "Name", "Type", "Size", "Replication", "Block Size",
        "Modification Time", "Permission", "Owner", "Group" };
    out.print("<h3>Contents of directory ");
    JspHelper.printPathWithLinks(dir, out, namenodeInfoPort, tokenString, nnAddr);
    out.print("</h3><hr>");
    JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, dir, nnAddr);
    out.print("<hr>");

    File f = new File(dir);
    String parent;
    if ((parent = f.getParent()) != null)
      out.print("<a href=\"" + req.getRequestURL() + "?dir=" + parent
          + "&namenodeInfoPort=" + namenodeInfoPort
          + JspHelper.getDelegationTokenUrlParam(tokenString)
          + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr)
          + "\">Go to parent directory</a><br>");

    DirectoryListing thisListing = dfs.listPaths(target, HdfsFileStatus.EMPTY_NAME);
    if (thisListing == null || thisListing.getPartialListing().length == 0) {
      out.print("Empty directory");
    } else {
      JspHelper.addTableHeader(out);
      int row = 0;
      JspHelper.addTableRow(out, headings, row++);
      String cols[] = new String[headings.length];
      do {
        HdfsFileStatus[] files = thisListing.getPartialListing();
        for (int i = 0; i < files.length; i++) {
          String localFileName = files[i].getLocalName();
          // Get the location of the first block of the file
          if (!files[i].isDir()) {
            cols[1] = "file";
            cols[2] = StringUtils.byteDesc(files[i].getLen());
            cols[3] = Short.toString(files[i].getReplication());
            cols[4] = StringUtils.byteDesc(files[i].getBlockSize());
          } else {
            cols[1] = "dir";
            cols[2] = "";
            cols[3] = "";
            cols[4] = "";
          }
          String datanodeUrl = req.getRequestURL() + "?dir="
              + URLEncoder.encode(files[i].getFullName(target), "UTF-8")
              + "&namenodeInfoPort=" + namenodeInfoPort
              + JspHelper.getDelegationTokenUrlParam(tokenString)
              + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
          cols[0] = "<a href=\"" + datanodeUrl + "\">" + localFileName + "</a>";
          cols[5] = lsDateFormat.format(new Date((files[i].getModificationTime())));
          cols[6] = files[i].getPermission().toString();
          cols[7] = files[i].getOwner();
          cols[8] = files[i].getGroup();
          JspHelper.addTableRow(out, cols, row++);
        }
        if (!thisListing.hasMore()) {
          break;
        }
        thisListing = dfs.listPaths(target, thisListing.getLastName());
      } while (thisListing != null);
      JspHelper.addTableFooter(out);
    }
  }
  out.print("<br><a href=\"http://" + canonicalize(nnAddr) + ":" + namenodeInfoPort
      + "/dfshealth.jsp\">Go back to DFS home</a>");
  dfs.close();
}