/** Create a URI for redirecting the request. */
protected URI createRedirectUri(String servletpath, UserGroupInformation ugi,
    DatanodeID host, HttpServletRequest request, String tokenString)
    throws URISyntaxException {
  final String hostname = host instanceof DatanodeInfo
      ? ((DatanodeInfo) host).getHostName()
      : host.getHost();
  final String scheme = request.getScheme();
  final int port = "https".equals(scheme)
      ? (Integer) getServletContext().getAttribute("datanode.https.port")
      : host.getInfoPort();
  final String filename = request.getPathInfo();
  String dt = "";
  if (tokenString != null) {
    dt = JspHelper.getDelegationTokenUrlParam(tokenString);
  }
  return new URI(scheme, null, hostname, port, servletpath,
      "filename=" + filename + "&ugi=" + ugi.getShortUserName() + dt, null);
}
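// Illustrative only (not from the source): a minimal, runnable sketch of the
// multi-argument java.net.URI constructor used above, which assembles and
// percent-encodes each component. The hostname, port, path, and user below
// are made-up example values.
import java.net.URI;
import java.net.URISyntaxException;

class RedirectUriShape {
  public static void main(String[] args) throws URISyntaxException {
    URI u = new URI("http", null, "dn1.example.com", 50075, "/streamFile",
        "filename=/user/alice/f.txt&ugi=alice", null);
    // Prints: http://dn1.example.com:50075/streamFile?filename=/user/alice/f.txt&ugi=alice
    System.out.println(u);
  }
}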
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
    DatanodeID datanodeid, Configuration conf, int socketTimeout,
    boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }

  // Since we're creating a new UserGroupInformation here, we know that no
  // future RPC proxies will be able to re-use the same connection. And
  // usages of this proxy tend to be one-off calls.
  //
  // This is a temporary fix: callers should really achieve this by using
  // RPC.stopProxy() on the resulting object, but this is currently not
  // working in trunk. See the discussion on HDFS-1965.
  Configuration confWithNoIpcIdle = new Configuration(conf);
  confWithNoIpcIdle.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

  UserGroupInformation ticket = UserGroupInformation.createRemoteUser(
      locatedBlock.getBlock().getLocalBlock().toString());
  ticket.addToken(locatedBlock.getBlockToken());
  return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
/** Create a redirection URL. */
private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus status,
    UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request,
    String dt) throws IOException {
  String scheme = request.getScheme();
  final LocatedBlocks blks = nnproxy.getBlockLocations(
      status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
  final Configuration conf =
      NameNodeHttpServer.getConfFromContext(getServletContext());
  final DatanodeID host = pickSrcDatanode(blks, status, conf);
  final String hostname;
  if (host instanceof DatanodeInfo) {
    hostname = host.getHostName();
  } else {
    hostname = host.getIpAddr();
  }

  int port = "https".equals(scheme) ? host.getInfoSecurePort() : host.getInfoPort();

  String dtParam = "";
  if (dt != null) {
    dtParam = JspHelper.getDelegationTokenUrlParam(dt);
  }

  // Add namenode address to the url params
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(getServletContext());
  String addr = nn.getNameNodeAddressHostPortString();
  String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);

  return new URL(scheme, hostname, port,
      "/streamFile" + encodedPath + '?'
          + "ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName())
          + dtParam + addrParam);
}
/** {@inheritDoc} */
public void readFields(DataInput in) throws IOException {
  super.readFields(in);
  // TODO: move it to DatanodeID once HADOOP-2797 has been committed
  // Mask off the sign extension so ports above 32767 survive the round-trip:
  // writeShort keeps the low 16 bits, readShort sign-extends them.
  this.ipcPort = in.readShort() & 0x0000ffff;
  storageInfo.layoutVersion = in.readInt();
  storageInfo.namespaceID = in.readInt();
  storageInfo.cTime = in.readLong();
}
/** {@inheritDoc} */
public void write(DataOutput out) throws IOException {
  super.write(out);
  // TODO: move it to DatanodeID once HADOOP-2797 has been committed
  out.writeShort(ipcPort);
  out.writeInt(storageInfo.getLayoutVersion());
  out.writeInt(storageInfo.getNamespaceID());
  out.writeLong(storageInfo.getCTime());
}
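// Illustrative only (not from the source): a minimal, runnable sketch of why
// readFields masks with 0x0000ffff. DataOutput.writeShort keeps the low 16
// bits and DataInput.readShort sign-extends, so a port above 32767 would come
// back negative without the mask. The port value is a made-up example.
import java.io.*;

class UnsignedShortRoundTrip {
  public static void main(String[] args) throws IOException {
    int port = 50020; // does not fit in a signed short
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    new DataOutputStream(bos).writeShort(port);

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    short raw = in.readShort();      // -15516 after sign extension
    int restored = raw & 0x0000ffff; // 50020 again
    System.out.println(raw + " -> " + restored);
  }
}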
/**
 * Constructor.
 *
 * @param datanodeid Datanode to connect to.
 * @param conf Configuration.
 * @param socketTimeout Socket timeout to use.
 * @param connectToDnViaHostname connect to the Datanode using its hostname
 * @throws IOException
 */
public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid, Configuration conf,
    int socketTimeout, boolean connectToDnViaHostname) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  rpcProxy = createClientDatanodeProtocolProxy(addr,
      UserGroupInformation.getCurrentUser(), conf,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
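// Illustrative only (not from the source): how a caller might obtain a
// one-off proxy with the constructor above. The 60-second timeout and the
// connect-via-hostname choice are made-up example values, and the call only
// succeeds against a live datanode.
static ClientDatanodeProtocolTranslatorPB exampleProxy(
    DatanodeID dn, Configuration conf) throws IOException {
  // true => resolve the datanode by hostname rather than by IP address
  return new ClientDatanodeProtocolTranslatorPB(dn, conf, 60000, true);
}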
/** modified by tony */
@SuppressWarnings("deprecation")
int loadEditRecords(int logVersion, DataInputStream in, boolean closeOnExit)
    throws IOException {
  FSNamesystem.LOG.info("logversion: " + logVersion);
  FSDirectory fsDir = fsNamesys.dir;
  int numEdits = 0;
  String clientName = null;
  String clientMachine = null;
  String path = null;
  int numOpAdd = 0, numOpClose = 0, numOpDelete = 0, numOpRenameOld = 0,
      numOpSetRepl = 0, numOpMkDir = 0, numOpSetPerm = 0, numOpSetOwner = 0,
      numOpSetGenStamp = 0, numOpTimes = 0, numOpRename = 0,
      numOpConcatDelete = 0, numOpSymlink = 0, numOpGetDelegationToken = 0,
      numOpRenewDelegationToken = 0, numOpCancelDelegationToken = 0,
      numOpUpdateMasterKey = 0, numOpOther = 0;

  try {
    while (true) {
      long timestamp = 0;
      long mtime = 0;
      long atime = 0;
      long blockSize = 0;
      byte opcode = -1;
      try {
        // Peek at the next byte: mark, read, and rewind on OP_INVALID so a
        // later reader sees the stream at the same position.
        in.mark(1);
        opcode = in.readByte();
        if (opcode == Ops.OP_INVALID) {
          in.reset(); // reset back to end of file if somebody reads it again
          break;      // no more transactions
        }
      } catch (EOFException e) {
        break; // no more transactions
      }
      numEdits++;
      switch (opcode) {
      case Ops.OP_ADD:
      case Ops.OP_CLOSE: {
        // versions > 0 support per file replication
        // get name and replication
        int length = in.readInt();
        // modified by tony
        if (-7 == logVersion && length != 3
            || -17 < logVersion && logVersion < -7 && length != 4
            || logVersion <= -17 && length != 7) {
          throw new IOException("Incorrect data format."
              + " logVersion is " + logVersion
              + " but writables.length is " + length + ". ");
        }
        path = FSImageSerialization.readString(in);
        short replication = fsNamesys.adjustReplication(readShort(in));
        mtime = readLong(in);
        if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
          atime = readLong(in);
        }
        if (logVersion < -7) {
          blockSize = readLong(in);
        }
        long fileSize = readLong(in);
        byte type = (byte) readLong(in);

        // get blocks
        boolean isFileUnderConstruction = (opcode == Ops.OP_ADD);
        BlockInfo[] blocks =
            readBlocks(in, logVersion, isFileUnderConstruction, replication);

        // Older versions of HDFS do not store the block size in the inode.
        // If the file has more than one block, use the size of the
        // first block as the blocksize. Otherwise use the default
        // block size.
        if (-8 <= logVersion && blockSize == 0) {
          if (blocks.length > 1) {
            blockSize = blocks[0].getNumBytes();
          } else {
            long first = ((blocks.length == 1) ? blocks[0].getNumBytes() : 0);
            blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
          }
        }

        PermissionStatus permissions = fsNamesys.getUpgradePermission();
        if (logVersion <= -11) {
          permissions = PermissionStatus.read(in);
        }

        CodingMatrix codingMatrix = CodingMatrix.getMatrixofCertainType(type);
        codingMatrix.readFields(in);

        /* added by tony */
        LongWritable offset = new LongWritable();
        offset.readFields(in);
        long headeroffset = offset.get();

        // clientname, clientMachine and block locations of last block.
        if (opcode == Ops.OP_ADD && logVersion <= -12) {
          clientName = FSImageSerialization.readString(in);
          clientMachine = FSImageSerialization.readString(in);
          if (-13 <= logVersion) {
            readDatanodeDescriptorArray(in);
          }
        } else {
          clientName = "";
          clientMachine = "";
        }

        // The open lease transaction re-creates a file if necessary.
        // Delete the file if it already exists.
        if (FSNamesystem.LOG.isDebugEnabled()) {
          FSNamesystem.LOG.debug(opcode + ": " + path
              + " numblocks : " + blocks.length
              + " clientHolder " + clientName
              + " clientMachine " + clientMachine);
        }
        fsDir.unprotectedDelete(path, mtime);

        /* modified by tony: add to the file tree */
        INodeFile node = (INodeFile) fsDir.unprotectedAddFile(path, permissions,
            codingMatrix, headeroffset, fileSize, blocks, replication, mtime,
            atime, blockSize);
        if (isFileUnderConstruction) {
          numOpAdd++;
          //
          // Replace current node with a INodeUnderConstruction.
          // Recreate in-memory lease record.
          //
          // INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
          //     node.getLocalNameBytes(),
          //     node.getReplication(),
          //     node.getModificationTime(),
          //     node.getPreferredBlockSize(),
          //     node.getBlocks(),
          //     node.getPermissionStatus(),
          //     clientName,
          //     clientMachine,
          //     null);
          // TODO: cons stays null until the constructor above is adapted to
          // this fork; as written, the two calls below throw a
          // NullPointerException for every OP_ADD record.
          INodeFileUnderConstruction cons = null;
          fsDir.replaceNode(path, node, cons);
          fsNamesys.leaseManager.addLease(cons.getClientName(), path);
        }
        break;
      }
      case Ops.OP_SET_REPLICATION: {
        numOpSetRepl++;
        path = FSImageSerialization.readString(in);
        short replication = fsNamesys.adjustReplication(readShort(in));
        fsDir.unprotectedSetReplication(path, replication, null);
        break;
      }
      case Ops.OP_CONCAT_DELETE: {
        numOpConcatDelete++;
        int length = in.readInt();
        if (length < 3) { // trg, srcs.., timestamp
          throw new IOException("Incorrect data format. "
              + "Concat delete operation.");
        }
        String trg = FSImageSerialization.readString(in);
        int srcSize = length - 1 - 1; // trg and timestamp
        String[] srcs = new String[srcSize];
        for (int i = 0; i < srcSize; i++) {
          srcs[i] = FSImageSerialization.readString(in);
        }
        timestamp = readLong(in);
        fsDir.unprotectedConcat(trg, srcs);
        break;
      }
      case Ops.OP_RENAME_OLD: {
        numOpRenameOld++;
        int length = in.readInt();
        if (length != 3) {
          throw new IOException("Incorrect data format. "
              + "Rename operation.");
        }
        String s = FSImageSerialization.readString(in);
        String d = FSImageSerialization.readString(in);
        timestamp = readLong(in);
        HdfsFileStatus dinfo = fsDir.getFileInfo(d, false);
        fsDir.unprotectedRenameTo(s, d, timestamp);
        fsNamesys.changeLease(s, d, dinfo);
        break;
      }
      case Ops.OP_DELETE: {
        numOpDelete++;
        int length = in.readInt();
        if (length != 2) {
          throw new IOException("Incorrect data format. "
              + "delete operation.");
        }
        path = FSImageSerialization.readString(in);
        timestamp = readLong(in);
        fsDir.unprotectedDelete(path, timestamp);
        break;
      }
      case Ops.OP_MKDIR: {
        numOpMkDir++;
        PermissionStatus permissions = fsNamesys.getUpgradePermission();
        int length = in.readInt();
        if (-17 < logVersion && length != 2
            || logVersion <= -17 && length != 3) {
          throw new IOException("Incorrect data format. "
              + "Mkdir operation.");
        }
        path = FSImageSerialization.readString(in);
        timestamp = readLong(in);

        // The disk format stores atimes for directories as well.
        // However, currently this is not being updated/used because of
        // performance reasons.
        if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
          atime = readLong(in);
        }

        if (logVersion <= -11) {
          permissions = PermissionStatus.read(in);
        }
        fsDir.unprotectedMkdir(path, permissions, timestamp);
        break;
      }
      case Ops.OP_SET_GENSTAMP: {
        numOpSetGenStamp++;
        long lw = in.readLong();
        fsNamesys.setGenerationStamp(lw);
        break;
      }
      case Ops.OP_DATANODE_ADD: {
        numOpOther++;
        // Datanodes are not persistent any more.
        FSImageSerialization.DatanodeImage.skipOne(in);
        break;
      }
      case Ops.OP_DATANODE_REMOVE: {
        numOpOther++;
        DatanodeID nodeID = new DatanodeID();
        nodeID.readFields(in);
        // Datanodes are not persistent any more.
        break;
      }
      case Ops.OP_SET_PERMISSIONS: {
        numOpSetPerm++;
        fsDir.unprotectedSetPermission(
            FSImageSerialization.readString(in), FsPermission.read(in));
        break;
      }
      case Ops.OP_SET_OWNER: {
        numOpSetOwner++;
        fsDir.unprotectedSetOwner(
            FSImageSerialization.readString(in),
            FSImageSerialization.readString_EmptyAsNull(in),
            FSImageSerialization.readString_EmptyAsNull(in));
        break;
      }
      case Ops.OP_SET_NS_QUOTA: {
        fsDir.unprotectedSetQuota(
            FSImageSerialization.readString(in),
            readLongWritable(in),
            FSConstants.QUOTA_DONT_SET);
        break;
      }
      case Ops.OP_CLEAR_NS_QUOTA: {
        fsDir.unprotectedSetQuota(
            FSImageSerialization.readString(in),
            FSConstants.QUOTA_RESET,
            FSConstants.QUOTA_DONT_SET);
        break;
      }
      case Ops.OP_SET_QUOTA:
        fsDir.unprotectedSetQuota(
            FSImageSerialization.readString(in),
            readLongWritable(in),
            readLongWritable(in));
        break;
      case Ops.OP_TIMES: {
        numOpTimes++;
        int length = in.readInt();
        if (length != 3) {
          throw new IOException("Incorrect data format. "
              + "times operation.");
        }
        path = FSImageSerialization.readString(in);
        mtime = readLong(in);
        atime = readLong(in);
        fsDir.unprotectedSetTimes(path, mtime, atime, true);
        break;
      }
      case Ops.OP_SYMLINK: {
        numOpSymlink++;
        int length = in.readInt();
        if (length != 4) {
          throw new IOException("Incorrect data format. "
              + "symlink operation.");
        }
        path = FSImageSerialization.readString(in);
        String value = FSImageSerialization.readString(in);
        mtime = readLong(in);
        atime = readLong(in);
        PermissionStatus perm = PermissionStatus.read(in);
        fsDir.unprotectedSymlink(path, value, mtime, atime, perm);
        break;
      }
      case Ops.OP_RENAME: {
        numOpRename++;
        int length = in.readInt();
        if (length != 3) {
          throw new IOException("Incorrect data format. "
              + "Rename operation.");
        }
        String s = FSImageSerialization.readString(in);
        String d = FSImageSerialization.readString(in);
        timestamp = readLong(in);
        Rename[] options = readRenameOptions(in);
        HdfsFileStatus dinfo = fsDir.getFileInfo(d, false);
        fsDir.unprotectedRenameTo(s, d, timestamp, options);
        fsNamesys.changeLease(s, d, dinfo);
        break;
      }
      case Ops.OP_GET_DELEGATION_TOKEN: {
        numOpGetDelegationToken++;
        DelegationTokenIdentifier delegationTokenId =
            new DelegationTokenIdentifier();
        delegationTokenId.readFields(in);
        long expiryTime = readLong(in);
        fsNamesys.getDelegationTokenSecretManager()
            .addPersistedDelegationToken(delegationTokenId, expiryTime);
        break;
      }
      case Ops.OP_RENEW_DELEGATION_TOKEN: {
        numOpRenewDelegationToken++;
        DelegationTokenIdentifier delegationTokenId =
            new DelegationTokenIdentifier();
        delegationTokenId.readFields(in);
        long expiryTime = readLong(in);
        fsNamesys.getDelegationTokenSecretManager()
            .updatePersistedTokenRenewal(delegationTokenId, expiryTime);
        break;
      }
      case Ops.OP_CANCEL_DELEGATION_TOKEN: {
        numOpCancelDelegationToken++;
        DelegationTokenIdentifier delegationTokenId =
            new DelegationTokenIdentifier();
        delegationTokenId.readFields(in);
        fsNamesys.getDelegationTokenSecretManager()
            .updatePersistedTokenCancellation(delegationTokenId);
        break;
      }
      case Ops.OP_UPDATE_MASTER_KEY: {
        numOpUpdateMasterKey++;
        DelegationKey delegationKey = new DelegationKey();
        delegationKey.readFields(in);
        fsNamesys.getDelegationTokenSecretManager()
            .updatePersistedMasterKey(delegationKey);
        break;
      }
      default: {
        throw new IOException("Never seen opcode " + opcode);
      }
      }
    }
  } catch (IOException ex) {
    check203UpgradeFailure(logVersion, ex);
  } finally {
    if (closeOnExit) {
      in.close();
    }
  }

  if (FSImage.LOG.isDebugEnabled()) {
    FSImage.LOG.debug("numOpAdd = " + numOpAdd
        + " numOpClose = " + numOpClose
        + " numOpDelete = " + numOpDelete
        + " numOpRenameOld = " + numOpRenameOld
        + " numOpSetRepl = " + numOpSetRepl
        + " numOpMkDir = " + numOpMkDir
        + " numOpSetPerm = " + numOpSetPerm
        + " numOpSetOwner = " + numOpSetOwner
        + " numOpSetGenStamp = " + numOpSetGenStamp
        + " numOpTimes = " + numOpTimes
        + " numOpConcatDelete = " + numOpConcatDelete
        + " numOpRename = " + numOpRename
        + " numOpGetDelegationToken = " + numOpGetDelegationToken
        + " numOpRenewDelegationToken = " + numOpRenewDelegationToken
        + " numOpCancelDelegationToken = " + numOpCancelDelegationToken
        + " numOpUpdateMasterKey = " + numOpUpdateMasterKey
        + " numOpOther = " + numOpOther);
  }
  return numEdits;
}