/**
 * Marks a block as committed on a specific worker.
 *
 * @param workerId the id of the worker committing the block
 * @param usedBytesOnTier the updated used bytes on the worker's tier
 * @param tierAlias the alias of the storage tier in which the worker is committing the block
 * @param blockId the id of the block being committed
 * @param length the length of the block
 */
public void commitBlock(long workerId, long usedBytesOnTier, String tierAlias, long blockId,
    long length) {
  LOG.debug("Commit block from worker: {}",
      FormatUtils.parametersToString(workerId, usedBytesOnTier, blockId, length));
  synchronized (mBlocks) {
    synchronized (mWorkers) {
      MasterWorkerInfo workerInfo = mWorkers.getFirstByField(mIdIndex, workerId);
      workerInfo.addBlock(blockId);
      workerInfo.updateUsedBytes(tierAlias, usedBytesOnTier);
      workerInfo.updateLastUpdatedTimeMs();

      MasterBlockInfo masterBlockInfo = mBlocks.get(blockId);
      if (masterBlockInfo == null) {
        // The block is being committed for the first time, so create and journal its metadata.
        masterBlockInfo = new MasterBlockInfo(blockId, length);
        mBlocks.put(blockId, masterBlockInfo);
        BlockInfoEntry blockInfo = BlockInfoEntry.newBuilder()
            .setBlockId(masterBlockInfo.getBlockId())
            .setLength(masterBlockInfo.getLength())
            .build();
        writeJournalEntry(JournalEntry.newBuilder().setBlockInfo(blockInfo).build());
        flushJournal();
      }
      masterBlockInfo.addWorker(workerId, tierAlias);
      mLostBlocks.remove(blockId);
    }
  }
}
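For illustration, a minimal sketch of how a caller (for example a worker RPC handler) might invoke commitBlock after a worker reports a newly written block. The blockMaster variable and all id and size values below are hypothetical examples, not taken from the surrounding code.

// Hypothetical caller, e.g. a worker heartbeat/RPC handler; all values are examples.
long workerId = 1L;                        // id assigned to the worker at registration
long usedBytesOnTier = 64L * 1024 * 1024;  // worker's updated usage on the tier, in bytes
String tierAlias = "MEM";                  // tier the block was written to
long blockId = 16777216L;                  // id of the committed block
long blockLength = 8L * 1024 * 1024;       // block length in bytes

// Records the block's location on the worker and updates the worker's usage; if the block
// has never been committed before, its metadata is also written to the journal.
blockMaster.commitBlock(workerId, usedBytesOnTier, tierAlias, blockId, blockLength);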
/**
 * Creates a journal entry that marks a partition of the store identified by {@code fileId} as
 * complete.
 *
 * @param fileId the id of the store the partition belongs to
 * @param info the information of the completed partition
 * @return the constructed journal entry
 */
private JournalEntry newCompletePartitionEntry(long fileId, PartitionInfo info) {
  CompletePartitionEntry completePartition = CompletePartitionEntry.newBuilder()
      .setStoreId(fileId)
      .setBlockId(info.blockId)
      .setKeyStartBytes(ByteString.copyFrom(info.keyStart))
      .setKeyLimitBytes(ByteString.copyFrom(info.keyLimit))
      .build();
  return JournalEntry.newBuilder().setCompletePartition(completePartition).build();
}
@Override
public void streamToJournalCheckpoint(JournalOutputStream outputStream) throws IOException {
  // Write the block container id generator state first, then an entry for every known block.
  outputStream.writeEntry(mBlockContainerIdGenerator.toJournalEntry());
  for (MasterBlockInfo blockInfo : mBlocks.values()) {
    BlockInfoEntry blockInfoEntry = BlockInfoEntry.newBuilder()
        .setBlockId(blockInfo.getBlockId())
        .setLength(blockInfo.getLength())
        .build();
    outputStream.writeEntry(JournalEntry.newBuilder().setBlockInfo(blockInfoEntry).build());
  }
}
@Override
public synchronized JournalEntry toJournalEntry() {
  InodeDirectoryEntry inodeDirectory = InodeDirectoryEntry.newBuilder()
      .setCreationTimeMs(getCreationTimeMs())
      .setId(getId())
      .setName(getName())
      .setParentId(getParentId())
      .setPersisted(isPersisted())
      .setPinned(isPinned())
      .setLastModificationTimeMs(getLastModificationTimeMs())
      .setUserName(getUserName())
      .setGroupName(getGroupName())
      .setPermission(getPermission())
      .build();
  return JournalEntry.newBuilder().setInodeDirectory(inodeDirectory).build();
}
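As a sketch of how the resulting entry might be read back, for example during journal replay: dir stands in for an instance of the enclosing directory inode class, and the getters follow directly from the protobuf fields populated above.

// Hypothetical consumer of the entry produced by toJournalEntry(); 'dir' is an instance
// of the enclosing directory inode class.
JournalEntry entry = dir.toJournalEntry();
InodeDirectoryEntry snapshot = entry.getInodeDirectory();
// The entry carries the same fields that were set above.
LOG.info("Journaled directory {} (id {}), last modified at {}",
    snapshot.getName(), snapshot.getId(), snapshot.getLastModificationTimeMs());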
/**
 * Marks a block as committed, but without a worker location. This means the block is only in
 * the under file system (UFS).
 *
 * @param blockId the id of the block to commit
 * @param length the length of the block
 */
public void commitBlockInUFS(long blockId, long length) {
  LOG.debug("Commit block to ufs: {}", FormatUtils.parametersToString(blockId, length));
  synchronized (mBlocks) {
    MasterBlockInfo masterBlockInfo = mBlocks.get(blockId);
    if (masterBlockInfo == null) {
      // The block has not been committed previously, so add the metadata to commit the block.
      masterBlockInfo = new MasterBlockInfo(blockId, length);
      mBlocks.put(blockId, masterBlockInfo);
      BlockInfoEntry blockInfo = BlockInfoEntry.newBuilder()
          .setBlockId(masterBlockInfo.getBlockId())
          .setLength(masterBlockInfo.getLength())
          .build();
      writeJournalEntry(JournalEntry.newBuilder().setBlockInfo(blockInfo).build());
      flushJournal();
    }
  }
}
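A companion sketch to the commitBlock example above: registering a block that exists only in the under file system, for instance after loading metadata for an already-persisted file. The blockMaster variable and the values are hypothetical.

// Hypothetical caller registering a UFS-only block; no worker location is recorded,
// only the block's existence and length are journaled. Values are examples.
long ufsBlockId = 33554432L;             // id of the persisted block
long ufsBlockLength = 4L * 1024 * 1024;  // block length in bytes
blockMaster.commitBlockInUFS(ufsBlockId, ufsBlockLength);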
/**
 * Creates a journal entry that marks the store identified by {@code fileId} as complete.
 *
 * @param fileId the id of the completed store
 * @return the constructed journal entry
 */
private JournalEntry newCompleteStoreEntry(long fileId) {
  CompleteStoreEntry completeStore = CompleteStoreEntry.newBuilder().setStoreId(fileId).build();
  return JournalEntry.newBuilder().setCompleteStore(completeStore).build();
}
/**
 * Creates a journal entry that records the creation of the store identified by {@code fileId}.
 *
 * @param fileId the id of the created store
 * @return the constructed journal entry
 */
private JournalEntry newCreateStoreEntry(long fileId) {
  CreateStoreEntry createStore = CreateStoreEntry.newBuilder().setStoreId(fileId).build();
  return JournalEntry.newBuilder().setCreateStore(createStore).build();
}
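A speculative sketch of how these private factories could be combined inside the enclosing master class to journal a store's lifecycle. The helper method, its parameters, and the use of writeJournalEntry/flushJournal here are assumptions modeled on the block-commit methods above, not code from the source.

// Hypothetical helper showing one possible journaling order for a store's lifecycle;
// writeJournalEntry/flushJournal are assumed to be available as in the block methods above.
private void journalStoreLifecycle(long storeFileId, PartitionInfo partitionInfo) {
  writeJournalEntry(newCreateStoreEntry(storeFileId));                       // store created
  writeJournalEntry(newCompletePartitionEntry(storeFileId, partitionInfo));  // partition completed
  writeJournalEntry(newCompleteStoreEntry(storeFileId));                     // store completed
  flushJournal();
}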