public void streamBlockInAscii(InetSocketAddress addr, long blockId,
    Token<BlockTokenIdentifier> accessToken, long genStamp, long blockSize,
    long offsetIntoBlock, long chunkSizeToView, JspWriter out,
    Configuration conf) throws IOException {
  if (chunkSizeToView == 0) {
    return;
  }
  Socket s = new Socket();
  s.connect(addr, HdfsConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

  // Never read past the end of the block.
  long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

  // Use the block name for the file name.
  BlockReader blockReader = RemoteBlockReader.newBlockReader(s,
      addr.toString() + ":" + blockId, blockId, accessToken, genStamp,
      offsetIntoBlock, amtToRead, conf.getInt("io.file.buffer.size", 4096));

  byte[] buf = new byte[(int) amtToRead];
  int readOffset = 0;
  int retries = 2;
  try {
    while (amtToRead > 0) {
      int numRead;
      try {
        numRead = blockReader.readAll(buf, readOffset, (int) amtToRead);
      } catch (IOException e) {
        retries--;
        if (retries == 0) {
          // Preserve the underlying failure as the cause.
          throw new IOException("Could not read data from datanode", e);
        }
        continue;
      }
      amtToRead -= numRead;
      readOffset += numRead;
    }
  } finally {
    // Close the socket on all paths so a failed read does not leak it.
    s.close();
  }
  out.print(HtmlQuoting.quoteHtmlChars(new String(buf)));
}
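/*
 * A minimal caller sketch for streamBlockInAscii() above (not part of the
 * original source): it assumes a LocatedBlock and a JspWriter are already
 * in scope and streams the first 4 KB of the block's first replica. The
 * method name and all local names here are hypothetical.
 */
private void streamFirstChunk(LocatedBlock lblock, JspWriter out,
    Configuration conf) throws IOException {
  DatanodeInfo chosenNode = lblock.getLocations()[0];
  InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getName());
  Block block = lblock.getBlock();
  streamBlockInAscii(addr, block.getBlockId(), lblock.getBlockToken(),
      block.getGenerationStamp(), block.getNumBytes(),
      0L,     // offsetIntoBlock: start at the beginning of the block
      4096L,  // chunkSizeToView: show at most 4 KB
      out, conf);
}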
/**
 * Try to access a block on a datanode. Throws an IOException on failure.
 *
 * @param datanode the datanode hosting the replica
 * @param lblock the located block to read
 * @throws IOException if the block cannot be accessed
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
    throws IOException {
  ExtendedBlock block = lblock.getBlock();
  InetSocketAddress targetAddr =
      NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf))
      .setInetSocketAddress(targetAddr)
      .setBlock(block)
      .setFileName(BlockReaderFactory.getFileName(targetAddr,
          "test-blockpoolid", block.getBlockId()))
      .setBlockToken(lblock.getBlockToken())
      .setStartOffset(0)
      .setLength(-1)
      .setVerifyChecksum(true)
      .setClientName("TestDataNodeVolumeFailure")
      .setDatanodeInfo(datanode)
      .setCachingStrategy(CachingStrategy.newDefaultStrategy())
      .setClientCacheContext(ClientContext.getFromConf(conf))
      .setConfiguration(conf)
      .setTracer(FsTracer.get(conf))
      .setRemotePeerFactory(new RemotePeerFactory() {
        @Override
        public Peer newConnectedPeer(InetSocketAddress addr,
            Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
            throws IOException {
          Peer peer = null;
          Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
          try {
            sock.connect(addr, HdfsConstants.READ_TIMEOUT);
            sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
            peer = DFSUtilClient.peerFromSocket(sock);
          } finally {
            // If peer creation failed, do not leak the raw socket.
            if (peer == null) {
              IOUtils.closeSocket(sock);
            }
          }
          return peer;
        }
      })
      .build();
  blockReader.close();
}
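/*
 * A hypothetical driver for accessBlock() above: it fetches the block
 * locations of one file through a DFSClient and probes every replica of
 * every block. "client", "path" and "fileLen" are assumed to be supplied
 * by the surrounding test; any IOException simply propagates.
 */
private void accessAllReplicas(DFSClient client, String path, long fileLen)
    throws IOException {
  for (LocatedBlock lblock :
      client.getLocatedBlocks(path, 0, fileLen).getLocatedBlocks()) {
    for (DatanodeInfo datanode : lblock.getLocations()) {
      accessBlock(datanode, lblock);
    }
  }
}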
// Try reading a block using a BlockReader directly, and assert that the
// outcome matches the expectation encoded in shouldSucceed.
protected void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DfsClientConf(conf))
        .setFileName(BlockReaderFactory.getFileName(targetAddr,
            "test-blockpoolid", block.getBlockId()))
        .setBlock(block)
        .setBlockToken(lblock.getBlockToken())
        .setInetSocketAddress(targetAddr)
        .setStartOffset(0)
        .setLength(-1)
        .setVerifyChecksum(true)
        .setClientName("TestBlockTokenWithDFS")
        .setDatanodeInfo(nodes[0])
        .setCachingStrategy(CachingStrategy.newDefaultStrategy())
        .setClientCacheContext(ClientContext.getFromConf(conf))
        .setConfiguration(conf)
        .setTracer(FsTracer.get(conf))
        .setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
              peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
              // If peer creation failed, do not leak the raw socket.
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        })
        .build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
        + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token",
        ioe instanceof InvalidBlockTokenException);
  }
}
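/*
 * An illustrative use of tryRead() above (sketch only; invalidateToken()
 * is a hypothetical helper, not existing Hadoop API): a fresh block token
 * should let the read succeed, while a corrupted or expired token should
 * make the datanode reject the read with InvalidBlockTokenException.
 */
private void checkTokenEnforcement(Configuration conf, LocatedBlock lblock) {
  // With the token as handed out by the namenode, the read must succeed.
  tryRead(conf, lblock, true);
  // After tampering with the token, the same read must be rejected.
  invalidateToken(lblock.getBlockToken());
  tryRead(conf, lblock, false);
}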
/*
 * XXX (ab) Bulk of this method is copied verbatim from {@link DFSClient},
 * which is bad. Both places should be refactored to provide a method to
 * copy blocks around.
 */
private void copyBlock(DFSClient dfs, LocatedBlock lblock, OutputStream fos)
    throws Exception {
  int failures = 0;
  InetSocketAddress targetAddr = null;
  TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();

  while (blockReader == null) {
    DatanodeInfo chosenNode;
    try {
      chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
      targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
    } catch (IOException ie) {
      if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
        throw new IOException("Could not obtain block " + lblock, ie);
      }
      LOG.info("Could not obtain block from any node: " + ie);
      try {
        Thread.sleep(10000);
      } catch (InterruptedException iex) {
      }
      deadNodes.clear();
      failures++;
      continue;
    }
    try {
      String file = BlockReaderFactory.getFileName(targetAddr,
          block.getBlockPoolId(), block.getBlockId());
      blockReader = new BlockReaderFactory(dfs.getConf())
          .setFileName(file)
          .setBlock(block)
          .setBlockToken(lblock.getBlockToken())
          .setStartOffset(0)
          .setLength(-1)
          .setVerifyChecksum(true)
          .setClientName("fsck")
          .setDatanodeInfo(chosenNode)
          .setInetSocketAddress(targetAddr)
          .setCachingStrategy(CachingStrategy.newDropBehind())
          .setClientCacheContext(dfs.getClientContext())
          .setConfiguration(namenode.conf)
          .setRemotePeerFactory(new RemotePeerFactory() {
            @Override
            public Peer newConnectedPeer(InetSocketAddress addr)
                throws IOException {
              Peer peer = null;
              Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
              try {
                s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
                s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
                peer = TcpPeerServer.peerFromSocketAndKey(s,
                    namenode.getRpcServer().getDataEncryptionKey());
              } finally {
                if (peer == null) {
                  IOUtils.closeQuietly(s);
                }
              }
              return peer;
            }
          })
          .build();
    } catch (IOException ex) {
      // Put chosen node into dead list, continue
      LOG.info("Failed to connect to " + targetAddr + ":" + ex);
      deadNodes.add(chosenNode);
    }
  }

  byte[] buf = new byte[1024];
  int cnt = 0;
  boolean success = true;
  long bytesRead = 0;
  try {
    while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
      fos.write(buf, 0, cnt);
      bytesRead += cnt;
    }
    if (bytesRead != block.getNumBytes()) {
      throw new IOException("Recorded block size is " + block.getNumBytes()
          + ", but datanode returned " + bytesRead + " bytes");
    }
  } catch (Exception e) {
    LOG.error("Error reading block", e);
    success = false;
  } finally {
    blockReader.close();
  }
  if (!success) {
    throw new Exception("Could not copy block data for " + lblock.getBlock());
  }
}
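/*
 * One possible shape for the shared helper that the XXX note above asks
 * for (a sketch under stated assumptions, not existing Hadoop API): both
 * DFSClient and the fsck code could delegate their duplicated
 * pick-a-replica-and-copy loops to a single abstraction like this.
 */
interface BlockCopier {
  /**
   * Copies the bytes of the given block to the output stream, choosing a
   * live replica and retrying on failure.
   */
  void copyBlock(LocatedBlock lblock, OutputStream out) throws IOException;
}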
/*
 * XXX (ab) Bulk of this method is copied verbatim from {@link DFSClient},
 * which is bad. Both places should be refactored to provide a method to
 * copy blocks around.
 */
private void copyBlock(DFSClient dfs, LocatedBlock lblock, OutputStream fos)
    throws Exception {
  int failures = 0;
  InetSocketAddress targetAddr = null;
  TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
  Socket s = null;
  BlockReader blockReader = null;
  Block block = lblock.getBlock();

  while (s == null) {
    DatanodeInfo chosenNode;
    try {
      chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
      targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
    } catch (IOException ie) {
      if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
        throw new IOException("Could not obtain block " + lblock);
      }
      LOG.info("Could not obtain block from any node: " + ie);
      try {
        Thread.sleep(10000);
      } catch (InterruptedException iex) {
      }
      deadNodes.clear();
      failures++;
      continue;
    }
    try {
      s = new Socket();
      s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
      s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
      blockReader = BlockReader.newBlockReader(
          DataTransferProtocol.DATA_TRANSFER_VERSION,
          nn.getNamesystem().getFSImage().storage.namespaceID, s,
          targetAddr.toString() + ":" + block.getBlockId(),
          block.getBlockId(), block.getGenerationStamp(), 0, -1,
          conf.getInt("io.file.buffer.size", 4096));
    } catch (IOException ex) {
      // Put chosen node into dead list, continue
      LOG.info("Failed to connect to " + targetAddr + ":" + ex);
      deadNodes.add(chosenNode);
      if (s != null) {
        try {
          s.close();
        } catch (IOException iex) {
        }
      }
      s = null;
    }
  }

  if (blockReader == null) {
    throw new Exception("Could not open data stream for " + lblock.getBlock());
  }

  byte[] buf = new byte[1024];
  int cnt = 0;
  boolean success = true;
  long bytesRead = 0;
  try {
    while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
      fos.write(buf, 0, cnt);
      bytesRead += cnt;
    }
    if (bytesRead != block.getNumBytes()) {
      throw new IOException("Recorded block size is " + block.getNumBytes()
          + ", but datanode returned " + bytesRead + " bytes");
    }
  } catch (Exception e) {
    e.printStackTrace();
    success = false;
  } finally {
    try {
      s.close();
    } catch (Exception e1) {
    }
  }
  if (!success) {
    throw new Exception("Could not copy block data for " + lblock.getBlock());
  }
}
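/*
 * A hypothetical caller for the legacy copyBlock() above, concatenating
 * every block of a file into a single stream. "dfs" and "blocks" are
 * assumed to come from the surrounding code; note this variant predates
 * BlockReaderFactory and manages the socket by hand, which is why the
 * caller only needs to supply the client and the located blocks.
 */
private void copyAllBlocks(DFSClient dfs, List<LocatedBlock> blocks,
    OutputStream fos) throws Exception {
  for (LocatedBlock lblock : blocks) {
    copyBlock(dfs, lblock, fos);
  }
  fos.flush();
}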