private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOException { LOG.info("Formatting block pool " + blockpoolID + " directory " + bpSdir.getCurrentDir()); bpSdir.clearDirectory(); // create directory this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; this.cTime = nsInfo.getCTime(); this.namespaceID = nsInfo.getNamespaceID(); this.blockpoolID = nsInfo.getBlockPoolID(); writeProperties(bpSdir); }
void format(StorageDirectory sd, NamespaceInfo nsInfo, String datanodeUuid) throws IOException { sd.clearDirectory(); this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; this.clusterID = nsInfo.getClusterID(); this.namespaceID = nsInfo.getNamespaceID(); this.cTime = 0; setDatanodeUuid(datanodeUuid); if (sd.getStorageUuid() == null) { // 分配新的 Storage UUID. sd.setStorageUuid(DatanodeStorage.generateUuid()); } writeProperties(sd); }
private StorageDirectory loadStorageDirectory( DataNode datanode, NamespaceInfo nsInfo, File dataDir, StartupOption startOpt) throws IOException { StorageDirectory sd = new StorageDirectory(dataDir, null, false); try { StorageState curState = sd.analyzeStorage(startOpt, this); switch (curState) { case NORMAL: break; case NON_EXISTENT: LOG.info("Storage directory " + dataDir + " does not exist"); throw new IOException("Storage directory " + dataDir + " does not exist"); case NOT_FORMATTED: // format LOG.info( "Storage directory " + dataDir + " is not formatted for " + nsInfo.getBlockPoolID()); LOG.info("Formatting ..."); format(sd, nsInfo, datanode.getDatanodeUuid()); break; default: throw new RuntimeException("未实现"); } doTransition(datanode, sd, nsInfo, startOpt); writeProperties(sd); return sd; } catch (IOException ioe) { sd.unlock(); throw ioe; } }
// 分析存储目录。如果需要的话,从先前的转换中恢复过来。 void recoverTransitionRead( DataNode datanode, NamespaceInfo nsInfo, Collection<File> dataDirs, StartupOption startOpt) throws IOException { LOG.info("存储目录分析 bpid " + nsInfo.getBlockPoolID()); for (StorageDirectory sd : loadBpStorageDirectories(datanode, nsInfo, dataDirs, startOpt)) { addStorageDir(sd); } }
/**
 * Loads each given storage location at both the datanode level and the
 * block-pool level, and returns the locations that were fully set up.
 *
 * <p>Failures are per-volume: a volume whose datanode-level or block-pool
 * level setup throws is logged and skipped, and the remaining volumes are
 * still attempted. Only volumes that pass both phases appear in the result.
 *
 * @param datanode the owning datanode
 * @param nsInfo namespace info for the block pool being added
 * @param dataDirs candidate storage locations
 * @param startOpt startup option forwarded to the load/recovery logic
 * @return the subset of {@code dataDirs} that loaded successfully
 * @throws IOException declared for callers; per-volume errors are swallowed
 */
synchronized List<StorageLocation> addStorageLocations(
    DataNode datanode,
    NamespaceInfo nsInfo,
    Collection<StorageLocation> dataDirs,
    StartupOption startOpt)
    throws IOException {
  final String bpid = nsInfo.getBlockPoolID();
  List<StorageLocation> successVolumes = Lists.newArrayList();
  for (StorageLocation dataDir : dataDirs) {
    File root = dataDir.getFile();
    if (!containsStorageDir(root)) {
      try {
        // Phase 1: load (recover/format as needed) the datanode-level directory.
        StorageDirectory sd = loadStorageDirectory(datanode, nsInfo, root, startOpt);
        addStorageDir(sd);
      } catch (IOException e) {
        LOG.warn(e);
        // Skip this volume entirely; do not attempt block-pool setup on it.
        continue;
      }
    } else {
      // Already registered earlier; still ensure the block-pool slice exists below.
      LOG.info("Storage directory " + dataDir + " has already been used.");
    }
    // Phase 2: set up the per-block-pool slice directory under <root>/current.
    List<File> bpDataDirs = new ArrayList<File>();
    bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(bpid, new File(root, STORAGE_DIR_CURRENT)));
    try {
      makeBlockPoolDataDir(bpDataDirs, null);
      // Reuse the existing slice storage for this bpid, creating it on first use.
      BlockPoolSliceStorage bpStorage = this.bpStorageMap.get(bpid);
      if (bpStorage == null) {
        bpStorage =
            new BlockPoolSliceStorage(
                nsInfo.getNamespaceID(), bpid, nsInfo.getCTime(), nsInfo.getClusterID());
      }
      bpStorage.recoverTransitionRead(datanodeNsInfoFix(datanode), nsInfo, bpDataDirs, startOpt);
      addBlockPoolStorage(bpid, bpStorage);
    } catch (IOException e) {
      LOG.warn("Failed to add storage for block pool: " + bpid + " : " + e.getMessage());
      continue;
    }
    // Both phases succeeded for this volume.
    successVolumes.add(dataDir);
  }
  return successVolumes;
}
private void doTransition( DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo, StartupOption startOpt) throws IOException { if (startOpt == StartupOption.ROLLBACK) { throw new RuntimeException("未实现"); } readProperties(sd); checkVersionUpgradable(this.layoutVersion); // 版本号为负数,越小越高,文件里面的layoutVersion必须比hadoop版本大或者等于 assert this.layoutVersion >= HdfsConstants.DATANODE_LAYOUT_VERSION : "不允许将来的版本"; boolean federationSupported = DataNodeLayoutVersion.supports(LayoutVersion.Feature.FEDERATION, layoutVersion); if (!federationSupported && getNamespaceID() != nsInfo.getNamespaceID()) { throw new IOException( "Incompatible namespaceIDs in " + sd.getRoot().getCanonicalPath() + ": namenode namespaceID = " + nsInfo.getNamespaceID() + "; datanode namespaceID = " + getNamespaceID()); } if (federationSupported && !getClusterID().equals(nsInfo.getClusterID())) { throw new IOException( "Incompatible clusterIDs in " + sd.getRoot().getCanonicalPath() + ": namenode clusterID = " + nsInfo.getClusterID() + "; datanode clusterID = " + getClusterID()); } boolean haveValidStorageId = DataNodeLayoutVersion.supports( LayoutVersion.Feature.ADD_DATANODE_AND_STORAGE_UUIDS, layoutVersion) && DatanodeStorage.isValidStorageId(sd.getStorageUuid()); if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION) { createStorageID(sd, !haveValidStorageId); return; } throw new IOException( "BUG: The stored LV = " + this.getLayoutVersion() + " is newer than the supported LV = " + HdfsConstants.DATANODE_LAYOUT_VERSION); }
private void doTransition( DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo, StartupOption startOpt) throws IOException { if (startOpt == StartupOption.ROLLBACK && sd.getPreviousDir().exists()) { throw new RuntimeException("ROLLBACK 未实现"); } else { int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd)); LOG.info("从垃圾桶恢复" + restored + "块文件."); } readProperties(sd); checkVersionUpgradable(this.layoutVersion); assert this.layoutVersion >= HdfsConstants.DATANODE_LAYOUT_VERSION : "Future version is not allowed"; if (getNamespaceID() != nsInfo.getNamespaceID()) { throw new IOException( "Incompatible namespaceIDs in " + sd.getRoot().getCanonicalPath() + ": namenode namespaceID = " + nsInfo.getNamespaceID() + "; datanode namespaceID = " + getNamespaceID()); } if (!blockpoolID.equals(nsInfo.getBlockPoolID())) { throw new IOException( "Incompatible blockpoolIDs in " + sd.getRoot().getCanonicalPath() + ": namenode blockpoolID = " + nsInfo.getBlockPoolID() + "; datanode blockpoolID = " + blockpoolID); } if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION && this.cTime == nsInfo.getCTime()) { return; // 正常启动 } throw new RuntimeException("upgrade 未实现"); }
private StorageDirectory loadStorageDirectory( DataNode datanode, NamespaceInfo nsInfo, File dataDir, StartupOption startOpt) throws IOException { StorageDirectory sd = new StorageDirectory(dataDir, null, true); try { StorageState curState = sd.analyzeStorage(startOpt, this); switch (curState) { case NORMAL: break; case NON_EXISTENT: LOG.info("Block pool storage directory " + dataDir + " does not exist"); throw new IOException("Storage directory " + dataDir + " does not exist"); case NOT_FORMATTED: // format LOG.info( "Block pool storage directory " + dataDir + " is not formatted for " + nsInfo.getBlockPoolID()); LOG.info("Formatting ..."); format(sd, nsInfo); break; default: throw new RuntimeException("未实现"); } doTransition(datanode, sd, nsInfo, startOpt); if (getCTime() != nsInfo.getCTime()) { throw new IOException("Data-node and name-node CTimes 必须是相同."); } // 3. 更新成功加载存储。 setServiceLayoutVersion(getServiceLayoutVersion()); writeProperties(sd); return sd; } catch (IOException ioe) { sd.unlock(); throw ioe; } }
// 分析特定区块池的存储目录。 void recoverTransitionRead( DataNode datanode, NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs, StartupOption startOpt) throws IOException { if (this.initialized) { LOG.info( "DataNode version: " + HdfsConstants.DATANODE_LAYOUT_VERSION + " and NameNode layout version: " + nsInfo.getLayoutVersion()); this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size()); this.initialized = true; } if (addStorageLocations(datanode, nsInfo, dataDirs, startOpt).isEmpty()) { throw new IOException("所有指定目录都未能加载."); } }
/**
 * Loads every given block-pool slice directory, refusing directories that are
 * already in use by this storage object.
 *
 * <p>All-or-nothing: the first failure is logged and rethrown, and no partial
 * result is returned.
 *
 * @return the storage directories that were loaded, in input order
 * @throws IOException if any directory is already in use or fails to load
 */
List<StorageDirectory> loadBpStorageDirectories(
    DataNode datanode, NamespaceInfo nsInfo, Collection<File> dataDirs, StartupOption startOpt)
    throws IOException {
  final List<StorageDirectory> loaded = Lists.newArrayList();
  try {
    for (File dir : dataDirs) {
      // A directory already registered with this storage must not be loaded twice.
      if (containsStorageDir(dir)) {
        throw new IOException(
            "BlockPoolSliceStorage.recoverTransitionRead: "
                + "加载一个已经在使用的块存储: "
                + dir);
      }
      loaded.add(loadStorageDirectory(datanode, nsInfo, dir, startOpt));
    }
  } catch (IOException e) {
    LOG.warn(
        "Failed to analyze storage directories for block pool " + nsInfo.getBlockPoolID(), e);
    throw e;
  }
  return loaded;
}