/**
 * @return a {@link JournalEntry} representing the state of the container id generator
 */
private JournalEntry getContainerIdJournalEntry() {
  BlockContainerIdGeneratorEntry blockContainerIdGenerator =
      BlockContainerIdGeneratorEntry.newBuilder()
          .setNextContainerId(mJournaledNextContainerId)
          .build();
  return JournalEntry.newBuilder()
      .setBlockContainerIdGenerator(blockContainerIdGenerator)
      .build();
}
/**
 * Streams the checkpoint to the journal: first the container id generator state, then a
 * {@link BlockInfoEntry} for each known block.
 */
@Override
public void streamToJournalCheckpoint(JournalOutputStream outputStream) throws IOException {
  outputStream.writeEntry(getContainerIdJournalEntry());
  for (MasterBlockInfo blockInfo : mBlocks.values()) {
    BlockInfoEntry blockInfoEntry = BlockInfoEntry.newBuilder()
        .setBlockId(blockInfo.getBlockId())
        .setLength(blockInfo.getLength())
        .build();
    outputStream.writeEntry(JournalEntry.newBuilder().setBlockInfo(blockInfoEntry).build());
  }
}
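
// Illustrative sketch of the replay side of the checkpoint written above, assuming
// JournalEntry is a protobuf message with the usual has*/get* accessors for the fields set by
// getContainerIdJournalEntry() and streamToJournalCheckpoint(). This is not the actual replay
// implementation, and the method name is made up for this example.
private void applyCheckpointEntrySketch(JournalEntry entry) {
  if (entry.hasBlockContainerIdGenerator()) {
    // Restore the container id generator state.
    mJournaledNextContainerId = entry.getBlockContainerIdGenerator().getNextContainerId();
  } else if (entry.hasBlockInfo()) {
    // Recreate the in-memory block metadata for a previously committed block.
    BlockInfoEntry blockInfo = entry.getBlockInfo();
    mBlocks.putIfAbsent(blockInfo.getBlockId(),
        new MasterBlockInfo(blockInfo.getBlockId(), blockInfo.getLength()));
  }
}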
/**
 * Marks a block as committed, but without a worker location. This means the block exists only
 * in the under file system (UFS).
 *
 * @param blockId the id of the block to commit
 * @param length the length of the block
 */
public void commitBlockInUFS(long blockId, long length) {
  LOG.debug("Commit block in ufs. blockId: {}, length: {}", blockId, length);
  if (mBlocks.get(blockId) != null) {
    // The block metadata already exists, so there is no need to create it again.
    return;
  }
  // The block has not been committed previously, so add the metadata to commit the block.
  MasterBlockInfo block = new MasterBlockInfo(blockId, length);
  long counter = AsyncJournalWriter.INVALID_FLUSH_COUNTER;
  synchronized (block) {
    if (mBlocks.putIfAbsent(blockId, block) == null) {
      // Successfully added the new block metadata. Append a journal entry for the new metadata.
      BlockInfoEntry blockInfo =
          BlockInfoEntry.newBuilder().setBlockId(blockId).setLength(length).build();
      counter = appendJournalEntry(JournalEntry.newBuilder().setBlockInfo(blockInfo).build());
    }
  }
  waitForJournalFlush(counter);
}
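
// Illustrative usage sketch, not part of the original class: a hypothetical helper that commits
// every block of a file that exists only in the UFS. The parameter (a map from block id to
// block length) is an assumption made up for this example; commitBlockInUFS above is the only
// real API it calls.
private void commitUfsOnlyBlocksSketch(java.util.Map<Long, Long> blockIdToLength) {
  for (java.util.Map.Entry<Long, Long> entry : blockIdToLength.entrySet()) {
    commitBlockInUFS(entry.getKey(), entry.getValue());
  }
}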
// TODO(binfan): check whether the logic is correct when commitBlock is a retry
/**
 * Commits a block on a specific worker and updates the worker and block metadata accordingly.
 *
 * @param workerId the id of the worker committing the block
 * @param usedBytesOnTier the updated used bytes on the worker's tier
 * @param tierAlias the alias of the tier the block is committed to
 * @param blockId the id of the block being committed
 * @param length the length of the block
 * @throws NoWorkerException if no worker with the given id is registered
 */
public void commitBlock(long workerId, long usedBytesOnTier, String tierAlias, long blockId,
    long length) throws NoWorkerException {
  LOG.debug("Commit block from workerId: {}, usedBytesOnTier: {}, blockId: {}, length: {}",
      workerId, usedBytesOnTier, blockId, length);

  long counter = AsyncJournalWriter.INVALID_FLUSH_COUNTER;

  MasterWorkerInfo worker = mWorkers.getFirstByField(ID_INDEX, workerId);
  // TODO(peis): Check lost workers as well.
  if (worker == null) {
    throw new NoWorkerException(ExceptionMessage.NO_WORKER_FOUND.getMessage(workerId));
  }

  // Lock the worker metadata first.
  synchronized (worker) {
    // Loop until the block metadata is successfully locked.
    for (;;) {
      boolean newBlock = false;
      MasterBlockInfo block = mBlocks.get(blockId);
      if (block == null) {
        // The block metadata does not exist yet.
        block = new MasterBlockInfo(blockId, length);
        newBlock = true;
      }

      // Lock the block metadata.
      synchronized (block) {
        boolean writeJournal = false;
        if (newBlock) {
          if (mBlocks.putIfAbsent(blockId, block) != null) {
            // Another thread already inserted the metadata for this block, so start the loop
            // over.
            continue;
          }
          // Successfully added the new block metadata. Append a journal entry for the new
          // metadata.
          writeJournal = true;
        } else if (block.getLength() != length && block.getLength() == Constants.UNKNOWN_SIZE) {
          // The block size was previously unknown. Update the block size with the committed
          // size, and append a journal entry.
          block.updateLength(length);
          writeJournal = true;
        }
        if (writeJournal) {
          BlockInfoEntry blockInfo =
              BlockInfoEntry.newBuilder().setBlockId(blockId).setLength(length).build();
          counter = appendJournalEntry(JournalEntry.newBuilder().setBlockInfo(blockInfo).build());
        }
        // At this point, both the worker and the block metadata are locked.

        // Update the block metadata with the new worker location.
        block.addWorker(workerId, tierAlias);
        // This worker has this block, so it is no longer lost.
        mLostBlocks.remove(blockId);

        // Update the worker information for this new block.
        // TODO(binfan): when retry commitBlock on master is expected, make sure metrics are not
        // double counted.
        worker.addBlock(blockId);
        worker.updateUsedBytes(tierAlias, usedBytesOnTier);
        worker.updateLastUpdatedTimeMs();
      }
      break;
    }
  }

  waitForJournalFlush(counter);
}
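
// The retry loop in commitBlock() follows a common "get-or-insert, then lock" pattern over a
// concurrent map: build a candidate entry, lock it, and only treat it as published if
// putIfAbsent returns null; otherwise another thread won the race and the loop retries with the
// instance that thread inserted. The following is a minimal self-contained sketch of that
// pattern with generic names; it is an illustration, not code from this class.
private static <V> V getOrCreateLockedSketch(
    java.util.concurrent.ConcurrentHashMap<Long, V> map, long key,
    java.util.function.Supplier<V> factory, java.util.function.Consumer<V> criticalSection) {
  for (;;) {
    boolean isNew = false;
    V value = map.get(key);
    if (value == null) {
      // No entry yet; build a candidate to publish.
      value = factory.get();
      isNew = true;
    }
    synchronized (value) {
      if (isNew && map.putIfAbsent(key, value) != null) {
        // Another thread inserted first; retry so we lock the instance it published.
        continue;
      }
      // The locked instance is the one visible in the map; safe to mutate it here.
      criticalSection.accept(value);
      return value;
    }
  }
}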