Example #1
 /**
  * Create a block pool slice
  *
  * @param bpid Block pool Id
  * @param volume {@link FsVolumeImpl} to which this block pool belongs
  * @param bpDir directory corresponding to the BlockPool
  * @param conf configuration
  * @throws IOException if the block pool's directories cannot be created
  */
 BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir, Configuration conf)
     throws IOException {
   this.bpid = bpid;
   this.volume = volume;
   this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
   this.finalizedDir = new File(currentDir, DataStorage.STORAGE_DIR_FINALIZED);
   this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
   if (!this.finalizedDir.exists()) {
     if (!this.finalizedDir.mkdirs()) {
       throw new IOException("Failed to mkdirs " + this.finalizedDir);
     }
   }
   this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
   if (tmpDir.exists()) {
     FileUtil.fullyDelete(tmpDir);
   }
   this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
   final boolean supportAppends =
       conf.getBoolean(
           DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
   if (rbwDir.exists() && !supportAppends) {
     FileUtil.fullyDelete(rbwDir);
   }
    if (!rbwDir.mkdirs()) { // create the rbw directory if it does not exist
     if (!rbwDir.isDirectory()) {
       throw new IOException("Mkdirs failed to create " + rbwDir.toString());
     }
   }
   if (!tmpDir.mkdirs()) {
     if (!tmpDir.isDirectory()) {
       throw new IOException("Mkdirs failed to create " + tmpDir.toString());
     }
   }
    // Periodically refresh the disk usage of the block pool directory
   this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
   this.dfsUsage.start();
    // Register a JVM shutdown hook that saves dfsUsed if it has not been persisted yet
   ShutdownHookManager.get()
       .addShutdownHook(
           new Runnable() {
             @Override
             public void run() {
               if (!dfsUsedSaved) {
                 saveDfsUsed();
               }
             }
           },
           SHUTDOWN_HOOK_PRIORITY);
 }
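
A minimal, self-contained sketch of the pattern used at the end of this constructor, assuming only Hadoop's ShutdownHookManager from hadoop-common: register a JVM shutdown hook that persists a cached disk-usage value once, so the next startup can skip the expensive du scan. The class name, field, and priority value below are hypothetical, not part of BlockPoolSlice.

import org.apache.hadoop.util.ShutdownHookManager;

public class SaveOnShutdownSketch {
  // Plays the role of dfsUsedSaved in BlockPoolSlice (hypothetical field).
  private static volatile boolean usageSaved = false;

  public static void main(String[] args) {
    ShutdownHookManager.get().addShutdownHook(new Runnable() {
      @Override
      public void run() {
        if (!usageSaved) {
          // Persist the cached disk-usage figure here, e.g. to a "dfsUsed" file.
          usageSaved = true;
        }
      }
    }, 30); // priority chosen arbitrarily; BlockPoolSlice uses its own SHUTDOWN_HOOK_PRIORITY
    // Normal work would happen here; the hook fires on JVM exit.
  }
}
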
Example #2
  // Create the physical directories for a block pool on the DataNode
  static void makeBlockPoolDataDir(Collection<File> dataDirs, Configuration conf)
      throws IOException {
    if (conf == null) conf = new HdfsConfiguration();

    LocalFileSystem localFS = FileSystem.getLocal(conf);
    FsPermission permission =
        new FsPermission(
            conf.get(
                DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
                DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));

    for (File data : dataDirs) {
      try {
        DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
      } catch (IOException e) {
        LOG.warn("Invalid directory in: " + data.getCanonicalPath() + ": " + e.getMessage());
      }
    }
  }
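
A hedged usage sketch of the DiskChecker.checkDir call made inside the loop above, for a single candidate data directory; the path and permission literal are illustrative assumptions.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DiskChecker;

public class CheckSingleDataDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    LocalFileSystem localFS = FileSystem.getLocal(conf);
    File data = new File("/tmp/dn/data1");        // hypothetical data directory
    FsPermission perm = new FsPermission("700");  // matches the DataNode default
    // Creates the directory with the expected permission if missing and verifies
    // it is a usable directory; throws DiskErrorException otherwise.
    DiskChecker.checkDir(localFS, new Path(data.toURI()), perm);
  }
}
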
Example #3
 static {
   addDeprecatedKeys();
   Configuration.addDefaultResource("hdfs-default.xml");
   Configuration.addDefaultResource("hdfs-site.xml");
 }
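
A small sketch of what this static block buys: once hdfs-default.xml and hdfs-site.xml are registered as default resources, any Configuration created afterwards resolves HDFS keys from them (assuming the files are on the classpath; the key shown is just an example).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsDefaultsSketch {
  public static void main(String[] args) {
    // Referencing HdfsConfiguration triggers the static block above;
    // init() exists solely to force that class loading.
    HdfsConfiguration.init();
    Configuration conf = new Configuration();
    // Resolved from hdfs-default.xml unless overridden in hdfs-site.xml.
    System.out.println(conf.get("dfs.replication"));
  }
}
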
Example #4
 private static void addDeprecatedKeys() {
   Configuration.addDeprecations(
       new DeprecationDelta[] {
         new DeprecationDelta("dfs.backup.address", DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY),
         new DeprecationDelta(
             "dfs.backup.http.address", DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY),
         new DeprecationDelta(
             "dfs.balance.bandwidthPerSec",
             DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY),
         new DeprecationDelta("dfs.data.dir", DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY),
         new DeprecationDelta("dfs.http.address", DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY),
         new DeprecationDelta("dfs.https.address", DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY),
         new DeprecationDelta("dfs.max.objects", DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY),
         new DeprecationDelta("dfs.name.dir", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY),
         new DeprecationDelta(
             "dfs.name.dir.restore", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY),
         new DeprecationDelta("dfs.name.edits.dir", DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY),
         new DeprecationDelta(
             "dfs.read.prefetch.size", DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY),
         new DeprecationDelta(
             "dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY),
         new DeprecationDelta(
             "dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY),
         new DeprecationDelta(
             "dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY),
         new DeprecationDelta("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY),
         new DeprecationDelta("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY),
         new DeprecationDelta(
             "fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY),
         new DeprecationDelta(
             "fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY),
         new DeprecationDelta(
             "heartbeat.recheck.interval",
             DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY),
         new DeprecationDelta(
             "dfs.https.client.keystore.resource",
             DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY),
         new DeprecationDelta(
             "dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY),
         new DeprecationDelta("slave.host.name", DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY),
         new DeprecationDelta("session.id", DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
         new DeprecationDelta(
             "dfs.access.time.precision", DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY),
         new DeprecationDelta(
             "dfs.replication.considerLoad",
             DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY),
         new DeprecationDelta(
             "dfs.replication.interval", DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY),
         new DeprecationDelta(
             "dfs.replication.min", DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY),
         new DeprecationDelta(
             "dfs.replication.pending.timeout.sec",
             DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY),
         new DeprecationDelta(
             "dfs.max-repl-streams", DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY),
         new DeprecationDelta("dfs.permissions", DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY),
         new DeprecationDelta(
             "dfs.permissions.supergroup", DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
         new DeprecationDelta(
             "dfs.write.packet.size", DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY),
         new DeprecationDelta("dfs.block.size", DFSConfigKeys.DFS_BLOCK_SIZE_KEY),
         new DeprecationDelta(
             "dfs.datanode.max.xcievers", DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY),
         new DeprecationDelta("io.bytes.per.checksum", DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY),
         new DeprecationDelta("dfs.federation.nameservices", DFSConfigKeys.DFS_NAMESERVICES),
         new DeprecationDelta("dfs.federation.nameservice.id", DFSConfigKeys.DFS_NAMESERVICE_ID),
         new DeprecationDelta(
             "dfs.client.file-block-storage-locations.timeout",
             DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS),
       });
 }
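
A sketch of the effect of these DeprecationDeltas: once registered, reading or writing an old key name is transparently mapped to the new key, and a deprecation warning is typically logged. The property value below is an arbitrary example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DeprecatedKeySketch {
  public static void main(String[] args) {
    HdfsConfiguration.init();                 // registers the deltas above
    Configuration conf = new Configuration();
    conf.set("dfs.data.dir", "/data/dn");     // deprecated name
    // Both the old and the new key now yield the same value.
    System.out.println(conf.get("dfs.datanode.data.dir"));
    System.out.println(conf.get("dfs.data.dir"));
  }
}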