@Before
 public void setUp() throws Exception {
   // Bring up a two-datanode cluster for the tests in this class.
   conf = new HdfsConfiguration();
   // Each datanode has two volumes; tolerate the loss of one of them.
   conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
   conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
   MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
   cluster = builder.numDataNodes(dn_num).build();
   cluster.waitActive();
   fs = cluster.getFileSystem();
   dataDir = new File(cluster.getDataDirectory());
 }
 @Before
 public void setUp() throws Exception {
   conf = new HdfsConfiguration();
   conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
   /*
    * Lower the DN heartbeat, DF rate, and recheck interval to one second
    * so state about failures and datanode death propagates faster.
    */
   conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
   conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
   conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
   // Allow a single volume failure (there are two volumes)
   conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
   cluster.waitActive();
   fs = cluster.getFileSystem();
   dataDir = cluster.getDataDirectory();
 }