private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOException {
  LOG.info("Formatting block pool " + blockpoolID + " directory "
      + bpSdir.getCurrentDir());
  bpSdir.clearDirectory(); // create directory
  this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
  this.cTime = nsInfo.getCTime();
  this.namespaceID = nsInfo.getNamespaceID();
  this.blockpoolID = nsInfo.getBlockPoolID();
  writeProperties(bpSdir);
}
private void doTransition(DataNode datanode, StorageDirectory sd,
    NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
  if (startOpt == StartupOption.ROLLBACK) {
    throw new RuntimeException("ROLLBACK not implemented");
  }
  readProperties(sd);
  checkVersionUpgradable(this.layoutVersion);
  // Layout versions are negative; the smaller the value, the newer the layout.
  // The layoutVersion stored on disk must be greater than or equal to the
  // layout version supported by this Hadoop release.
  assert this.layoutVersion >= HdfsConstants.DATANODE_LAYOUT_VERSION
      : "Future version is not allowed";
  boolean federationSupported =
      DataNodeLayoutVersion.supports(LayoutVersion.Feature.FEDERATION, layoutVersion);
  if (!federationSupported && getNamespaceID() != nsInfo.getNamespaceID()) {
    throw new IOException("Incompatible namespaceIDs in "
        + sd.getRoot().getCanonicalPath()
        + ": namenode namespaceID = " + nsInfo.getNamespaceID()
        + "; datanode namespaceID = " + getNamespaceID());
  }
  if (federationSupported && !getClusterID().equals(nsInfo.getClusterID())) {
    throw new IOException("Incompatible clusterIDs in "
        + sd.getRoot().getCanonicalPath()
        + ": namenode clusterID = " + nsInfo.getClusterID()
        + "; datanode clusterID = " + getClusterID());
  }
  boolean haveValidStorageId = DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_DATANODE_AND_STORAGE_UUIDS, layoutVersion)
      && DatanodeStorage.isValidStorageId(sd.getStorageUuid());
  if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION) {
    createStorageID(sd, !haveValidStorageId);
    return;
  }
  throw new IOException("BUG: The stored LV = " + this.getLayoutVersion()
      + " is newer than the supported LV = " + HdfsConstants.DATANODE_LAYOUT_VERSION);
}
private void doTransition(DataNode datanode, StorageDirectory sd,
    NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
  if (startOpt == StartupOption.ROLLBACK && sd.getPreviousDir().exists()) {
    throw new RuntimeException("ROLLBACK not implemented");
  } else {
    int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
    LOG.info("Restored " + restored + " block files from trash.");
  }
  readProperties(sd);
  checkVersionUpgradable(this.layoutVersion);
  assert this.layoutVersion >= HdfsConstants.DATANODE_LAYOUT_VERSION
      : "Future version is not allowed";
  if (getNamespaceID() != nsInfo.getNamespaceID()) {
    throw new IOException("Incompatible namespaceIDs in "
        + sd.getRoot().getCanonicalPath()
        + ": namenode namespaceID = " + nsInfo.getNamespaceID()
        + "; datanode namespaceID = " + getNamespaceID());
  }
  if (!blockpoolID.equals(nsInfo.getBlockPoolID())) {
    throw new IOException("Incompatible blockpoolIDs in "
        + sd.getRoot().getCanonicalPath()
        + ": namenode blockpoolID = " + nsInfo.getBlockPoolID()
        + "; datanode blockpoolID = " + blockpoolID);
  }
  if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION
      && this.cTime == nsInfo.getCTime()) {
    return; // regular startup
  }
  throw new RuntimeException("Upgrade not implemented");
}
void format(StorageDirectory sd, NamespaceInfo nsInfo, String datanodeUuid)
    throws IOException {
  sd.clearDirectory();
  this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
  this.clusterID = nsInfo.getClusterID();
  this.namespaceID = nsInfo.getNamespaceID();
  this.cTime = 0;
  setDatanodeUuid(datanodeUuid);
  if (sd.getStorageUuid() == null) {
    // Assign a new Storage UUID.
    sd.setStorageUuid(DatanodeStorage.generateUuid());
  }
  writeProperties(sd);
}
synchronized List<StorageLocation> addStorageLocations(DataNode datanode,
    NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
    StartupOption startOpt) throws IOException {
  final String bpid = nsInfo.getBlockPoolID();
  List<StorageLocation> successVolumes = Lists.newArrayList();
  for (StorageLocation dataDir : dataDirs) {
    File root = dataDir.getFile();
    if (!containsStorageDir(root)) {
      try {
        // Load (or format, if empty) the top-level storage directory of this volume.
        StorageDirectory sd = loadStorageDirectory(datanode, nsInfo, root, startOpt);
        addStorageDir(sd);
      } catch (IOException e) {
        LOG.warn(e);
        continue;
      }
    } else {
      LOG.info("Storage directory " + dataDir + " has already been used.");
    }

    // Prepare the per-block-pool directory (<volume>/current/<bpid>) on this volume.
    List<File> bpDataDirs = new ArrayList<File>();
    bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(bpid, new File(root, STORAGE_DIR_CURRENT)));
    try {
      makeBlockPoolDataDir(bpDataDirs, null);
      BlockPoolSliceStorage bpStorage = this.bpStorageMap.get(bpid);
      if (bpStorage == null) {
        bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(), bpid,
            nsInfo.getCTime(), nsInfo.getClusterID());
      }
      bpStorage.recoverTransitionRead(datanode, nsInfo, bpDataDirs, startOpt);
      addBlockPoolStorage(bpid, bpStorage);
    } catch (IOException e) {
      LOG.warn("Failed to add storage for block pool: " + bpid + " : " + e.getMessage());
      continue;
    }
    successVolumes.add(dataDir);
  }
  return successVolumes;
}
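// For context, a minimal sketch of how addStorageLocations is typically driven during
// DataNode startup. This caller is an assumption (it is not part of the code above and
// follows the shape of DataStorage.recoverTransitionRead in Hadoop 2.6-era sources):
// every configured volume is offered to addStorageLocations, and startup fails only when
// no volume could be loaded at all.
void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
    Collection<StorageLocation> dataDirs, StartupOption startOpt) throws IOException {
  if (addStorageLocations(datanode, nsInfo, dataDirs, startOpt).isEmpty()) {
    throw new IOException("All specified directories are failed to load.");
  }
}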