/**
 * Re-caches the block into memory by streaming its bytes from the under file
 * system into a {@link BlockOutStream} opened with {@code WriteType.TRY_CACHE}.
 *
 * @param blockIndex The block index of the current file.
 * @return true if succeed, false otherwise
 * @throws IOException if the under file system path cannot be resolved or the
 *         input stream cannot be closed
 */
boolean recache(int blockIndex) throws IOException {
  String path = getUfsPath();
  UnderFileSystem underFsClient = UnderFileSystem.get(path, mTachyonConf);

  InputStream inputStream = null;
  BlockOutStream bos = null;
  try {
    inputStream = underFsClient.open(path);

    long length = getBlockSizeByte();
    long offset = blockIndex * length;
    // InputStream#skip may skip fewer bytes than requested (possibly 0), so a
    // single call can leave us positioned mid-block and we would cache the
    // wrong bytes. Loop until the whole offset is consumed; treat a
    // non-positive return as failure to reach the block start.
    long remainingToSkip = offset;
    while (remainingToSkip > 0) {
      long skipped = inputStream.skip(remainingToSkip);
      if (skipped <= 0) {
        LOG.warn("Failed to skip to offset " + offset + " for block " + blockIndex
            + " of file " + path);
        return false;
      }
      remainingToSkip -= skipped;
    }

    int bufferBytes = (int) mTachyonConf.getBytes(Constants.USER_FILE_BUFFER_BYTES, Constants.MB);
    byte[] buffer = new byte[bufferBytes];
    bos = BlockOutStream.get(this, WriteType.TRY_CACHE, blockIndex, mTachyonConf);
    int limit;
    while (length > 0 && ((limit = inputStream.read(buffer)) >= 0)) {
      if (limit != 0) {
        // Never write past the end of this block, even if the stream yields more.
        int toWrite = (int) Math.min(length, limit);
        bos.write(buffer, 0, toWrite);
        length -= toWrite;
      }
    }
    bos.close();
  } catch (IOException e) {
    // Best-effort cache: log, discard the partially written block, report failure.
    LOG.warn(e.getMessage(), e);
    if (bos != null) {
      bos.cancel();
    }
    return false;
  } finally {
    if (inputStream != null) {
      inputStream.close();
    }
  }
  return true;
}
/**
 * Reads up to {@code len} bytes of this block into {@code b} starting at
 * {@code off}, preferring worker memory ({@code mCurrentBuffer}) and falling
 * back to streaming from the under file system. While {@code mRecache} is set,
 * every byte read is mirrored into {@code mBlockOutStream} to re-cache the
 * block.
 *
 * @param b the destination buffer
 * @param off the start offset in {@code b}
 * @param len the maximum number of bytes to read
 * @return the number of bytes read, 0 if {@code len == 0}, or -1 at end of block
 * @throws IOException if reading from the checkpoint stream or writing to the
 *         recache stream fails
 */
@Override
public int read(byte[] b, int off, int len) throws IOException {
  // Standard InputStream#read(byte[], int, int) argument contract.
  if (b == null) {
    throw new NullPointerException();
  } else if (off < 0 || len < 0 || len > b.length - off) {
    throw new IndexOutOfBoundsException();
  } else if (len == 0) {
    return 0;
  } else if (mBlockPos == mBlockInfo.length) {
    // Already at the end of the block.
    return -1;
  }
  // We read at most len bytes, but if mBlockPos + len exceeds the length of the file, we only
  // read up to the end of the file
  len = (int) Math.min(len, mBlockInfo.length - mBlockPos);
  int bytesLeft = len;
  // Lazy initialization of the out stream for caching to avoid collisions with other caching
  // attempts that are invalidated later due to seek/skips
  if (bytesLeft > 0 && mBlockOutStream == null && mRecache) {
    try {
      mBlockOutStream = BlockOutStream.get(mFile, WriteType.TRY_CACHE, mBlockIndex, mTachyonConf);
      // We should only cache when we are writing to a local worker
      if (mBlockOutStream instanceof RemoteBlockOutStream) {
        LOG.info("Cannot find a local worker to write to, recache attempt cancelled.");
        // NOTE(review): presumably cancelRecache() clears mRecache/mBlockOutStream so the
        // write-through below is skipped — confirm against its definition.
        cancelRecache();
      }
    } catch (IOException ioe) {
      LOG.warn("Recache attempt failed.", ioe);
      cancelRecache();
    }
  }
  // While we still have bytes to read, make sure the buffer is set to read the byte at mBlockPos.
  // If we fail to set mCurrentBuffer, we stream the rest from the underfs
  while (bytesLeft > 0 && mAttemptReadFromWorkers && updateCurrentBuffer()) {
    int bytesToRead = Math.min(bytesLeft, mCurrentBuffer.remaining());
    mCurrentBuffer.get(b, off, bytesToRead);
    if (mRecache) {
      // Mirror the bytes just copied into b back into the recache stream.
      mBlockOutStream.write(b, off, bytesToRead);
    }
    off += bytesToRead;
    bytesLeft -= bytesToRead;
    mBlockPos += bytesToRead;
  }
  // Everything read so far came from worker memory; record it as remote reads.
  mBytesReadRemote += len - bytesLeft;
  mTachyonFS.getClientMetrics().incBytesReadRemote(len - bytesLeft);
  if (bytesLeft > 0) {
    // Unable to read from worker memory, reading this block from underfs in the future.
    mAttemptReadFromWorkers = false;
    // We failed to read everything from mCurrentBuffer, so we need to stream the rest from the
    // underfs
    if (!setupStreamFromUnderFs()) {
      LOG.error("Failed to read at position " + mBlockPos + " in block "
          + mBlockInfo.getBlockId() + " from workers or underfs");
      // Return the number of bytes we managed to read
      return len - bytesLeft;
    }
    while (bytesLeft > 0) {
      int readBytes = mCheckpointInputStream.read(b, off, bytesLeft);
      if (readBytes <= 0) {
        // readBytes can also be -1 (EOF) here, not only 0; either way we stop short.
        LOG.error("Checkpoint stream read 0 bytes, which shouldn't ever happen");
        return len - bytesLeft;
      }
      if (mRecache) {
        mBlockOutStream.write(b, off, readBytes);
      }
      off += readBytes;
      bytesLeft -= readBytes;
      mBlockPos += readBytes;
      mCheckpointPos += readBytes;
      mTachyonFS.getClientMetrics().incBytesReadUfs(readBytes);
    }
  }
  return len;
}