/** Convert an HdfsFileStatus object to a Json string. */
public static String toJsonString(final HdfsFileStatus status,
    boolean includeType) {
  if (status == null) {
    return null;
  }
  final Map<String, Object> m = new TreeMap<String, Object>();
  m.put("pathSuffix", status.getLocalName());
  m.put("type", PathType.valueOf(status));
  if (status.isSymlink()) {
    m.put("symlink", status.getSymlink());
  }
  m.put("length", status.getLen());
  m.put("owner", status.getOwner());
  m.put("group", status.getGroup());
  FsPermission perm = status.getPermission();
  m.put("permission", toString(perm));
  if (perm.getAclBit()) {
    m.put("aclBit", true);
  }
  if (perm.getEncryptedBit()) {
    m.put("encBit", true);
  }
  m.put("accessTime", status.getAccessTime());
  m.put("modificationTime", status.getModificationTime());
  m.put("blockSize", status.getBlockSize());
  m.put("replication", status.getReplication());
  m.put("fileId", status.getFileId());
  m.put("childrenNum", status.getChildrenNum());
  m.put("storagePolicy", status.getStoragePolicy());
  return includeType ? toJsonString(FileStatus.class, m) : JSON.toString(m);
}
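/*
 * Usage sketch (not part of the original source). It assumes a DFSClient
 * obtained by the caller; DFSClient#getFileInfo returns null for a
 * nonexistent path, which toJsonString above maps to a null string.
 */
public static void printStatusJson(DFSClient dfs, String path)
    throws IOException {
  final HdfsFileStatus status = dfs.getFileInfo(path);
  // includeType=true wraps the attribute map under the class name, e.g.
  // {"FileStatus":{"accessTime":...,"pathSuffix":"...","type":"FILE",...}}
  System.out.println(toJsonString(status, true));
  // includeType=false emits just the bare attribute map.
  System.out.println(toJsonString(status, false));
}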
/**
 * Write a node to output. Node information includes path, modification
 * time, access time, permission, owner and group. For files, it also
 * includes size, replication and block-size.
 */
static void writeInfo(String parent, HdfsFileStatus i, XMLOutputter doc)
    throws IOException {
  final SimpleDateFormat ldf = df.get();
  doc.startTag(i.isDir() ? "directory" : "file");
  doc.attribute("path", i.getFullPath(new Path(parent)).toUri().getPath());
  doc.attribute("modified", ldf.format(new Date(i.getModificationTime())));
  doc.attribute("accesstime", ldf.format(new Date(i.getAccessTime())));
  if (!i.isDir()) {
    doc.attribute("size", String.valueOf(i.getLen()));
    doc.attribute("replication", String.valueOf(i.getReplication()));
    doc.attribute("blocksize", String.valueOf(i.getBlockSize()));
  }
  doc.attribute("permission", (i.isDir() ? "d" : "-") + i.getPermission());
  doc.attribute("owner", i.getOwner());
  doc.attribute("group", i.getGroup());
  doc.endTag();
}
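/*
 * Usage sketch (not part of the original source): drives writeInfo over a
 * directory listing. XMLOutputter is the znerd xmlenc class the servlet
 * code uses; the "listing" root tag and the PrintWriter are assumptions
 * about the caller. Each entry comes out as e.g.
 *   <file path="/user/alice/data.txt" modified="..." accesstime="..."
 *       size="1024" replication="3" blocksize="134217728"
 *       permission="-rw-r--r--" owner="alice" group="users"/>
 */
static void writeListing(String parent, HdfsFileStatus[] listing,
    PrintWriter out) throws IOException {
  final XMLOutputter doc = new XMLOutputter(out, "UTF-8");
  doc.declaration();
  doc.startTag("listing");
  for (HdfsFileStatus i : listing) {
    writeInfo(parent, i, doc);
  }
  doc.endTag();
  out.flush();
}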
/** Convert an HdfsFileStatus object to a Json string. */
public static String toJsonString(final HdfsFileStatus status,
    boolean includeType) {
  if (status == null) {
    return null;
  }
  final Map<String, Object> m = new TreeMap<String, Object>();
  m.put("localName", status.getLocalName());
  m.put("isDir", status.isDir());
  m.put("len", status.getLen());
  m.put("owner", status.getOwner());
  m.put("group", status.getGroup());
  m.put("permission", toString(status.getPermission()));
  m.put("accessTime", status.getAccessTime());
  m.put("modificationTime", status.getModificationTime());
  m.put("blockSize", status.getBlockSize());
  m.put("replication", status.getReplication());
  return includeType ? toJsonString(HdfsFileStatus.class, m) : JSON.toString(m);
}
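/*
 * Round-trip sketch (not part of the original source), assuming Jetty's
 * org.mortbay.util.ajax.JSON helper that the method above serializes with.
 * The bare form (includeType == false) parses back into a Map keyed by the
 * attribute names above; integral numbers come back as Long in this parser.
 */
static long lengthFromJson(String json) {
  final Map<?, ?> m = (Map<?, ?>) JSON.parse(json);
  return (Long) m.get("len");
}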
private void copyBlocksToLostFound(String parent, HdfsFileStatus file,
    LocatedBlocks blocks) throws IOException {
  final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
  final String fullName = file.getFullName(parent);
  OutputStream fos = null;
  try {
    if (!lfInited) {
      lostFoundInit(dfs);
    }
    if (!lfInitedOk) {
      throw new IOException("failed to initialize lost+found");
    }
    String target = lostFound + fullName;
    if (hdfsPathExists(target)) {
      LOG.warn("Fsck: can't copy the remains of " + fullName + " to "
          + "lost+found, because " + target + " already exists.");
      return;
    }
    if (!namenode.getRpcServer().mkdirs(target, file.getPermission(), true)) {
      throw new IOException("failed to create directory " + target);
    }
    // Copy the salvageable blocks into numbered "chain" files under the
    // target directory. A missing block ends the current chain; the next
    // reachable block starts a new one.
    int chain = 0;
    boolean copyError = false;
    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
      LocatedBlock lblock = lBlk;
      DatanodeInfo[] locs = lblock.getLocations();
      if (locs == null || locs.length == 0) {
        // No replica is reachable: close the current chain and skip the block.
        if (fos != null) {
          fos.flush();
          fos.close();
          fos = null;
        }
        continue;
      }
      if (fos == null) {
        fos = dfs.create(target + "/" + chain, true);
        if (fos == null) {
          throw new IOException("Failed to copy " + fullName
              + " to /lost+found: could not store chain " + chain);
        }
        chain++;
      }
      // copy the block. It's a pity it's not abstracted from DFSInputStream ...
      try {
        copyBlock(dfs, lblock, fos);
      } catch (Exception e) {
        LOG.error("Fsck: could not copy block " + lblock.getBlock()
            + " to " + target, e);
        fos.flush();
        fos.close();
        fos = null;
        internalError = true;
        copyError = true;
      }
    }
    if (copyError) {
      LOG.warn("Fsck: there were errors copying the remains of the "
          + "corrupted file " + fullName + " to /lost+found");
    } else {
      LOG.info("Fsck: copied the remains of the corrupted file "
          + fullName + " to /lost+found");
    }
  } catch (Exception e) {
    LOG.error("copyBlocksToLostFound: error processing " + fullName, e);
    internalError = true;
  } finally {
    if (fos != null) {
      fos.close();
    }
    dfs.close();
  }
}
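/*
 * Recovery sketch (not part of the original source). copyBlocksToLostFound
 * writes the salvageable bytes of a corrupt file as consecutively numbered
 * "chain" files under its lost+found target directory; this hypothetical
 * helper stitches them back into one file, assuming a FileSystem handle and
 * the chain directory produced above.
 */
static void reassembleChains(FileSystem fs, Path chainDir, Path dst)
    throws IOException {
  OutputStream out = fs.create(dst, true);
  try {
    for (int chain = 0; ; chain++) {
      Path part = new Path(chainDir, String.valueOf(chain));
      if (!fs.exists(part)) {
        break; // chains are numbered consecutively from 0
      }
      InputStream in = fs.open(part);
      try {
        IOUtils.copyBytes(in, out, 4096, false);
      } finally {
        in.close();
      }
    }
  } finally {
    out.close();
  }
}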