/**
   * Make sure at least one non-transient volume has a saved copy of each replica. The loop below
   * is unbounded so that the asynchronous lazy-persist tasks can finish before verification;
   * callers of ensureLazyPersistBlocksAreSaved therefore expect either a successful pass or a
   * timeout failure.
   */
  protected final void ensureLazyPersistBlocksAreSaved(LocatedBlocks locatedBlocks)
      throws IOException, InterruptedException {
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    List<? extends FsVolumeSpi> volumes = cluster.getDataNodes().get(0).getFSDataset().getVolumes();
    final Set<Long> persistedBlockIds = new HashSet<Long>();

    while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks().size()) {
      // Sleep for one second before each verification iteration
      Thread.sleep(1000);

      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
        for (FsVolumeSpi v : volumes) {
          if (v.isTransientStorage()) {
            continue;
          }

          FsVolumeImpl volume = (FsVolumeImpl) v;
          File lazyPersistDir = volume.getBlockPoolSlice(bpid).getLazypersistDir();

          long blockId = lb.getBlock().getBlockId();
          File targetDir = DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
          File blockFile = new File(targetDir, lb.getBlock().getBlockName());
          if (blockFile.exists()) {
            // Found a persisted copy of this block; record its block id in the set
            persistedBlockIds.add(blockId);
          }
        }
      }
    }

    // We should have found a persisted copy for each located block.
    assertThat(persistedBlockIds.size(), is(locatedBlocks.getLocatedBlocks().size()));
  }
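
  // Usage sketch (an assumption, not part of the original class): a caller is expected to create
  // a LAZY_PERSIST file first, fetch its LocatedBlocks, and rely on the JUnit timeout to bound the
  // polling loop above. makeTestFile, BLOCK_SIZE, and the client field are assumed helpers/fields.
  @Test(timeout = 300000)
  public void testLazyPersistBlocksAreSaved() throws Exception {
    final Path path = new Path("/testLazyPersistBlocksAreSaved.dat");
    makeTestFile(path, BLOCK_SIZE * 10, true /* isLazyPersist */); // hypothetical helper

    // Ask the NameNode for all block locations of the file, then poll until each block
    // has a copy under the lazyPersist directory of a non-transient volume.
    LocatedBlocks locatedBlocks = client.getLocatedBlocks(path.toString(), 0, BLOCK_SIZE * 10);
    ensureLazyPersistBlocksAreSaved(locatedBlocks);
  }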
  /**
   * Regression test for HDFS-2742. The issue in this bug was:
   *
   * - DN does a block report while the file is open. This BR contains the block in RBW state.
   * - Standby queues the RBW state in PendingDatanodeMessages.
   * - Standby processes edit logs during failover.
   *
   * Before this fix, the standby mistakenly applied the reported RBW state after the block had
   * been completed, causing the block to get marked corrupt. Instead, we should now apply the RBW
   * message on OP_ADD, and then the FINALIZED message on OP_CLOSE.
   */
  @Test
  public void testBlockReportsWhileFileBeingWritten() throws Exception {
    FSDataOutputStream out = fs.create(TEST_FILE_PATH);
    try {
      AppendTestUtil.write(out, 0, 10);
      out.hflush();

      // Block report will include the RBW replica, but will be
      // queued on the StandbyNode.
      cluster.triggerBlockReports();

    } finally {
      IOUtils.closeStream(out);
    }

    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);

    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
    assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
    assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());

    DFSTestUtil.readFile(fs, TEST_FILE_PATH);
  }
  @BeforeClass
  public static void setUp() throws Exception {

    Configuration conf = new Configuration();
    conf.set("hadoop.security.auth_to_local", "RULE:[2:$1]");
    dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
    jConf = new JobConf(conf);
    mrCluster =
        new MiniMRCluster(
            0,
            0,
            numSlaves,
            dfsCluster.getFileSystem().getUri().toString(),
            1,
            null,
            null,
            null,
            jConf);

    createTokenFileJson();
    verifySecretKeysInJSONFile();
    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
    FileSystem fs = dfsCluster.getFileSystem();

    p1 = new Path("file1");
    p2 = new Path("file2");

    p1 = fs.makeQualified(p1);
  }
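
  // Hedged sketch of the two helpers called above (assumptions: the file name, the key count, and
  // the use of Jackson's ObjectMapper are illustrative; the original helpers are not shown here).
  private static final int NUM_OF_KEYS = 10;
  private static final File tokenFile = new File("tokenFile.json");

  private static void createTokenFileJson() throws IOException {
    Map<String, String> map = new HashMap<String, String>();
    for (int i = 0; i < NUM_OF_KEYS; i++) {
      map.put("alias" + i, "s3cr3t-material-" + i); // placeholder secret material
    }
    new ObjectMapper().writeValue(tokenFile, map);
  }

  private static void verifySecretKeysInJSONFile() throws IOException {
    Map<?, ?> map = new ObjectMapper().readValue(tokenFile, Map.class);
    assertEquals("Wrong number of keys in the token file", NUM_OF_KEYS, map.size());
  }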
  protected void setupCluster(boolean simulated, long minFileSize, String[] racks, String[] hosts)
      throws IOException {
    conf = new Configuration();
    localFileSys = FileSystem.getLocal(conf);
    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
    conf.set("dfs.replication.pending.timeout.sec", "2");
    conf.setLong("dfs.block.size", 1L);
    conf.set(
        "dfs.block.replicator.classname",
        "org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid");
    conf.setLong("hdfs.raid.min.filesize", minFileSize);
    Utils.loadTestCodecs(conf, 5, 5, 1, 3, "/raid", "/raidrs", false, true);
    conf.setInt("io.bytes.per.checksum", 1);
    excludeFile = new Path(TEST_DIR, "exclude" + System.currentTimeMillis());
    cleanFile(excludeFile);
    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
    writeConfigFile(excludeFile, null);

    if (!simulated) {
      cluster = new MiniDFSCluster(conf, hosts.length, true, racks, hosts);
    } else {
      long[] capacities = new long[] {CAPACITY, CAPACITY, CAPACITY};
      cluster = new MiniDFSCluster(0, conf, hosts.length, true, true, null, racks, capacities);
    }
    cluster.waitActive();
    namesystem = cluster.getNameNode().getNamesystem();
    Assert.assertTrue(
        "BlockPlacementPolicy type is not correct.",
        namesystem.replicator instanceof BlockPlacementPolicyRaid);
    policy = (BlockPlacementPolicyRaid) namesystem.replicator;
    fs = cluster.getFileSystem();
    dfs = (DistributedFileSystem) fs;
    TestDirectoryRaidDfs.setupStripeStore(conf, fs);
  }
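
  // Hedged sketch of the exclude-file helpers used in setupCluster (assumed signatures; the
  // originals are not shown). writeConfigFile(excludeFile, null) should produce an empty exclude
  // file, and cleanFile should remove any stale copy left over from a previous run.
  private void cleanFile(Path name) throws IOException {
    if (localFileSys.exists(name)) {
      localFileSys.delete(name, true);
    }
  }

  private void writeConfigFile(Path name, String[] nodes) throws IOException {
    cleanFile(name);
    FSDataOutputStream stm = localFileSys.create(name);
    if (nodes != null) {
      for (String node : nodes) {
        stm.writeBytes(node + "\n");
      }
    }
    stm.close();
  }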
  /** Test that the NN re-learns of volume failures after restart. */
  @Test
  public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
    assumeTrue(!System.getProperty("os.name").startsWith("Windows"));

    // Bring up two more datanodes that can tolerate 1 failure
    cluster.startDataNodes(conf, 2, true, null, null);
    cluster.waitActive();

    final DatanodeManager dm = cluster.getNamesystem().getBlockManager().getDatanodeManager();
    long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
    long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

    // Fail the first volume on both datanodes (we have to keep the
    // third healthy so one node in the pipeline will not fail).
    File dn1Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
    File dn2Vol1 = new File(dataDir, "data" + (2 * 1 + 1));
    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));

    Path file1 = new Path("/test1");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 2);

    // The NN reports two volume failures
    DFSTestUtil.waitForDatanodeStatus(
        dm, 3, 0, 2, origCapacity - (1 * dnCapacity), WAIT_FOR_HEARTBEATS);

    // After restarting the NN, it should still see the two failures
    cluster.restartNameNode(0);
    cluster.waitActive();
    DFSTestUtil.waitForDatanodeStatus(
        dm, 3, 0, 2, origCapacity - (1 * dnCapacity), WAIT_FOR_HEARTBEATS);
  }
 @Before
 public void setUp() throws Exception {
   cluster = new MiniDFSCluster.Builder(CONF).build();
   cluster.waitActive();
   cluster.getNameNode();
   fs = (DistributedFileSystem) cluster.getFileSystem();
 }
 @BeforeClass
 public static void setUpBeforeClass() throws Exception {
   File minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
   minidfsDir.mkdirs();
   Assert.assertTrue(minidfsDir.exists());
   System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
   Configuration conf = new HdfsConfiguration();
   conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
   EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
   miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
   dir = new Path(miniDFS.getURI() + "/dir");
   FileSystem fs = miniDFS.getFileSystem();
   fs.mkdirs(dir);
   writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
   dummyEtc = new File(minidfsDir, "dummy-etc");
   dummyEtc.mkdirs();
   Assert.assertTrue(dummyEtc.exists());
   Configuration dummyConf = new Configuration(false);
   for (String file : new String[] {"core", "hdfs", "mapred", "yarn"}) {
     File siteXml = new File(dummyEtc, file + "-site.xml");
     FileOutputStream out = new FileOutputStream(siteXml);
     dummyConf.writeXml(out);
     out.close();
   }
   resourcesDir = minidfsDir.getAbsolutePath();
   hadoopConfDir = dummyEtc.getName();
   System.setProperty("sdc.resources.dir", resourcesDir);
 }
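
  // Minimal sketch of the writeFile helper referenced above (an assumption; the original is not
  // shown): create the file and fill it with `size` bytes of placeholder data.
  private static void writeFile(FileSystem fs, Path path, int size) throws IOException {
    FSDataOutputStream out = fs.create(path, true);
    try {
      byte[] data = new byte[size];
      Arrays.fill(data, (byte) 'x');
      out.write(data);
    } finally {
      out.close();
    }
  }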
  /** Copy an empty directory on the DFS file system. */
  public void testEmptyDir() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      final FileSystem hdfs = cluster.getFileSystem();
      namenode = FileSystem.getDefaultUri(conf).toString();
      if (namenode.startsWith("hdfs://")) {

        FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
        fs.mkdirs(new Path("/empty"));

        ToolRunner.run(
            new DistCpV1(conf),
            new String[] {"-log", namenode + "/logs", namenode + "/empty", namenode + "/dest"});
        fs = FileSystem.get(URI.create(namenode + "/destdat"), conf);
        assertTrue(
            "Destination directory does not exist.", fs.exists(new Path(namenode + "/dest")));
        deldir(hdfs, "/dest");
        deldir(hdfs, "/empty");
        deldir(hdfs, "/logs");
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
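
  // Sketch of the deldir helper used by these DistCp tests (an assumption; the original is not
  // shown): recursively delete the given directory on the supplied file system.
  private static void deldir(FileSystem fs, String topdir) throws IOException {
    fs.delete(new Path(topdir), true);
  }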
 /** Copy files from the DFS file system to the local file system. */
 public void testCopyFromDfsToLocal() throws Exception {
   MiniDFSCluster cluster = null;
   try {
     Configuration conf = new Configuration();
     final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
     cluster = new MiniDFSCluster(conf, 1, true, null);
     final FileSystem hdfs = cluster.getFileSystem();
     final String namenode = FileSystem.getDefaultUri(conf).toString();
     if (namenode.startsWith("hdfs://")) {
       MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
       ToolRunner.run(
           new DistCpV1(conf),
           new String[] {
             "-log", "/logs", namenode + "/srcdat", "file:///" + TEST_ROOT_DIR + "/destdat"
           });
       assertTrue(
           "Source and destination directories do not match.",
           checkFiles(localfs, TEST_ROOT_DIR + "/destdat", files));
       assertTrue("Log directory does not exist.", hdfs.exists(new Path("/logs")));
       deldir(localfs, TEST_ROOT_DIR + "/destdat");
       deldir(hdfs, "/logs");
       deldir(hdfs, "/srcdat");
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
  /** Test that truncate over quota does not mark the file as UC or create a lease. */
  @Test(timeout = 60000)
  public void testTruncateOverQuota() throws Exception {
    final Path dir = new Path("/TestTruncateOverquota");
    final Path file = new Path(dir, "file");

    // create partial block file
    dfs.mkdirs(dir);
    DFSTestUtil.createFile(dfs, file, BLOCKSIZE / 2, REPLICATION, seed);

    // lower quota to cause an exception when truncating the partial block
    dfs.setQuota(dir, Long.MAX_VALUE - 1, 1);
    final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
    final long spaceUsed =
        dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getStorageSpace();
    try {
      dfs.truncate(file, BLOCKSIZE / 2 - 1);
      Assert.fail("truncate didn't fail");
    } catch (RemoteException e) {
      assertTrue(e.getClassName().contains("DSQuotaExceededException"));
    }

    // check that the file exists, isn't UC, and has no dangling lease
    LeaseManager lm = cluster.getNamesystem().getLeaseManager();
    INodeFile inode = fsdir.getINode(file.toString()).asFile();
    Assert.assertNotNull(inode);
    Assert.assertFalse("should not be UC", inode.isUnderConstruction());
    Assert.assertNull("should not have a lease", lm.getLease(inode));
    // make sure the quota usage is unchanged
    final long newSpaceUsed =
        dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getStorageSpace();
    assertEquals(spaceUsed, newSpaceUsed);
    // make sure edits aren't corrupted
    dfs.recoverLease(file);
    cluster.restartNameNodes();
  }
  private void corruptBlock(
      MiniDFSCluster cluster, FileSystem fs, final Path fileName, int dnIndex, Block block)
      throws IOException {
    // Corrupt the block on datanode dnIndex. The datanode indexes change once the nodes are
    // restarted, but the data directories do not.
    assertTrue(cluster.corruptReplica(block.getBlockName(), dnIndex));

    DataNodeProperties dnProps = cluster.stopDataNode(0);

    // Each datanode has multiple data dirs, check each
    for (int dn = dnIndex * 2; dn < dnIndex * 2 + 2; dn++) {
      File dataDir = new File(MiniDFSCluster.getBaseDirectory() + "data");
      File scanLogFile =
          new File(
              dataDir,
              "data"
                  + (dn + 1)
                  + MiniDFSCluster.FINALIZED_DIR_NAME
                  + "dncp_block_verification.log.curr");
      if (scanLogFile.exists()) {
        // wait up to one minute for the deletion to succeed
        for (int i = 0; !scanLogFile.delete(); i++) {
          assertTrue("Could not delete log file in one minute", i < 60);
          try {
            Thread.sleep(1000);
          } catch (InterruptedException ignored) {
          }
        }
      }
    }

    // restart the datanode so the corrupt replica will be detected
    cluster.restartDataNode(dnProps);
  }
 @AfterClass
 public static void afterClass() throws Exception {
   if (cluster == null) return;
   FileSystem fs = cluster.getFileSystem();
   bench.cleanup(fs);
   cluster.shutdown();
 }
  @After
  public void shutDownCluster() throws Exception {

    // Dump all RamDisk JMX metrics before shutting down the cluster
    printRamDiskJMXMetrics();

    if (fs != null) {
      fs.close();
      fs = null;
      client = null;
    }

    if (cluster != null) {
      cluster.shutdownDataNodes();
      cluster.shutdown();
      cluster = null;
    }

    jmx = null;

    IOUtils.closeQuietly(sockDir);
    sockDir = null;
  }
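
  // Hedged sketch of printRamDiskJMXMetrics (an assumption, not the original helper): dump any
  // DataNode MBean attributes whose name mentions "RamDisk", using the platform MBeanServer
  // directly rather than a Hadoop-specific JMX wrapper.
  protected void printRamDiskJMXMetrics() throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName pattern = new ObjectName("Hadoop:service=DataNode,name=*");
    for (ObjectName name : mbs.queryNames(pattern, null)) {
      for (MBeanAttributeInfo attr : mbs.getMBeanInfo(name).getAttributes()) {
        if (attr.getName().contains("RamDisk")) {
          LOG.info(name + " " + attr.getName() + " = " + mbs.getAttribute(name, attr.getName()));
        }
      }
    }
  }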
  protected final boolean verifyDeletedBlocks(LocatedBlocks locatedBlocks)
      throws IOException, InterruptedException {

    LOG.info("Verifying replica has no saved copy after deletion.");
    triggerBlockReport();

    while (DataNodeTestUtils.getPendingAsyncDeletions(cluster.getDataNodes().get(0)) > 0L) {
      Thread.sleep(1000);
    }

    final String bpid = cluster.getNamesystem().getBlockPoolId();
    List<? extends FsVolumeSpi> volumes = cluster.getDataNodes().get(0).getFSDataset().getVolumes();

    // Make sure the deleted replica does not have a copy in either the finalized dir of a
    // transient volume or the lazy-persist dir of a non-transient volume
    for (FsVolumeSpi v : volumes) {
      FsVolumeImpl volume = (FsVolumeImpl) v;
      File targetDir =
          (v.isTransientStorage())
              ? volume.getBlockPoolSlice(bpid).getFinalizedDir()
              : volume.getBlockPoolSlice(bpid).getLazypersistDir();
      if (!verifyBlockDeletedFromDir(targetDir, locatedBlocks)) {
        return false;
      }
    }
    return true;
  }
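
  // Hedged sketch of verifyBlockDeletedFromDir (an assumption; the original is not shown): return
  // false if any located block still has its block file or metadata file under the given dir.
  protected final boolean verifyBlockDeletedFromDir(File dir, LocatedBlocks locatedBlocks) {
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      File targetDir = DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());

      File blockFile = new File(targetDir, lb.getBlock().getBlockName());
      if (blockFile.exists()) {
        LOG.warn("blockFile: " + blockFile.getAbsolutePath() + " exists after deletion.");
        return false;
      }

      File metaFile =
          new File(
              targetDir,
              DatanodeUtil.getMetaName(
                  lb.getBlock().getBlockName(), lb.getBlock().getGenerationStamp()));
      if (metaFile.exists()) {
        LOG.warn("metaFile: " + metaFile.getAbsolutePath() + " exists after deletion.");
        return false;
      }
    }
    return true;
  }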
  @BeforeClass
  public static void clusterSetupAtBegining()
      throws IOException, LoginException, URISyntaxException {
    SupportsBlocks = true;
    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

    cluster =
        new MiniDFSCluster.Builder(CONF)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
            .numDataNodes(2)
            .build();
    cluster.waitClusterUp();

    fHdfs = cluster.getFileSystem(0);
    fHdfs2 = cluster.getFileSystem(1);
    fHdfs
        .getConf()
        .set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());
    fHdfs2
        .getConf()
        .set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());

    defaultWorkingDirectory =
        fHdfs.makeQualified(
            new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
    defaultWorkingDirectory2 =
        fHdfs2.makeQualified(
            new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));

    fHdfs.mkdirs(defaultWorkingDirectory);
    fHdfs2.mkdirs(defaultWorkingDirectory2);
  }
 /** Tests the -basedir option when copying files from one DFS location to another. */
 public void testBasedir() throws Exception {
   String namenode = null;
   MiniDFSCluster cluster = null;
   try {
     Configuration conf = new Configuration();
     cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem hdfs = cluster.getFileSystem();
     namenode = FileSystem.getDefaultUri(conf).toString();
     if (namenode.startsWith("hdfs://")) {
       MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat");
       ToolRunner.run(
           new DistCpV1(conf),
           new String[] {
             "-basedir", "/basedir", namenode + "/basedir/middle/srcdat", namenode + "/destdat"
           });
       assertTrue(
           "Source and destination directories do not match.",
           checkFiles(hdfs, "/destdat/middle/srcdat", files));
       deldir(hdfs, "/destdat");
       deldir(hdfs, "/basedir");
       deldir(hdfs, "/logs");
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
  /** Test that there are under-replicated blocks after volume failures. */
  @Test
  public void testUnderReplicationAfterVolFailure() throws Exception {
    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
    // volume failures which is currently not supported on Windows.
    assumeTrue(!Path.WINDOWS);

    // Bring up one more datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();

    final BlockManager bm = cluster.getNamesystem().getBlockManager();

    Path file1 = new Path("/test1");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 3);

    // Fail the first volume on both datanodes
    File dn1Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
    File dn2Vol1 = new File(dataDir, "data" + (2 * 1 + 1));
    DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);

    Path file2 = new Path("/test2");
    DFSTestUtil.createFile(fs, file2, 1024, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, file2, (short) 3);

    // The under-replicated blocks are due to the failed volumes
    int underReplicatedBlocks =
        BlockManagerTestUtil.checkHeartbeatAndGetUnderReplicatedBlocksCount(
            cluster.getNamesystem(), bm);
    assertTrue(
        "There is no under replicated block after volume failure", underReplicatedBlocks > 0);
  }
  /* We first start a cluster and fill it up to a certain size, then redistribute the blocks
   * according to the required distribution. Afterwards, the balancer is run to balance the
   * cluster.
   */
  private void testUnevenDistribution(long distribution[], long capacities[], String[] racks)
      throws Exception {
    int numDatanodes = distribution.length;
    if (capacities.length != numDatanodes || racks.length != numDatanodes) {
      throw new IllegalArgumentException("Array length is not the same");
    }

    // calculate the total space that needs to be filled
    long totalUsedSpace = 0L;
    for (int i = 0; i < distribution.length; i++) {
      totalUsedSpace += distribution[i];
    }

    // fill the cluster
    Block[] blocks = generateBlocks(totalUsedSpace, (short) numDatanodes);

    // redistribute blocks
    Block[][] blocksDN = distributeBlocks(blocks, (short) (numDatanodes - 1), distribution);

    // restart the cluster: do NOT format the cluster
    CONF.set("dfs.safemode.threshold.pct", "0.0f");
    cluster = new MiniDFSCluster(0, CONF, numDatanodes, false, true, null, racks, capacities);
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    cluster.injectBlocks(blocksDN);

    long totalCapacity = 0L;
    for (long capacity : capacities) {
      totalCapacity += capacity;
    }
    runBalancer(totalUsedSpace, totalCapacity);
  }
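
  // Hedged sketch of distributeBlocks (an assumption; the original is not shown): place each block
  // replicationFactor times, always choosing the datanodes with the most unfilled space left in
  // the requested distribution, and return the per-datanode block lists for injectBlocks().
  private static Block[][] distributeBlocks(
      Block[] blocks, short replicationFactor, long[] distribution) {
    long[] remaining = distribution.clone();
    List<List<Block>> perNode = new ArrayList<List<Block>>();
    for (int i = 0; i < distribution.length; i++) {
      perNode.add(new ArrayList<Block>());
    }
    for (Block block : blocks) {
      for (int r = 0; r < replicationFactor; r++) {
        // Pick the datanode with the largest remaining quota that does not yet hold this block.
        int chosen = -1;
        for (int i = 0; i < remaining.length; i++) {
          if (!perNode.get(i).contains(block) && (chosen < 0 || remaining[i] > remaining[chosen])) {
            chosen = i;
          }
        }
        perNode.get(chosen).add(block);
        remaining[chosen] -= block.getNumBytes();
      }
    }
    Block[][] result = new Block[perNode.size()][];
    for (int i = 0; i < perNode.size(); i++) {
      result[i] = perNode.get(i).toArray(new Block[perNode.get(i).size()]);
    }
    return result;
  }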
  @Test(timeout = 60000)
  public void testSymlinkHdfsDisable() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // disable symlink resolution
    conf.setBoolean(CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY, false);
    // spin up minicluster, get dfs and filecontext
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
    // Create test files/links
    FileContextTestHelper helper = new FileContextTestHelper("/tmp/TestSymlinkHdfsDisable");
    Path root = helper.getTestRootPath(fc);
    Path target = new Path(root, "target");
    Path link = new Path(root, "link");
    DFSTestUtil.createFile(dfs, target, 4096, (short) 1, 0xDEADDEAD);
    fc.createSymlink(target, link, false);

    // Try to resolve links with FileSystem and FileContext
    try {
      fc.open(link);
      fail("Expected error when attempting to resolve link");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("resolution is disabled", e);
    }
    try {
      dfs.open(link);
      fail("Expected error when attempting to resolve link");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("resolution is disabled", e);
    }
  }
  /* This test starts a cluster with the given capacities and racks, fills it to be 30% full,
   * then adds an empty node and starts balancing.
   * @param newCapacity the new node's capacity
   * @param newRack the new node's rack
   */
  private void test(long[] capacities, String[] racks, long newCapacity, String newRack)
      throws Exception {
    int numOfDatanodes = capacities.length;
    assertEquals(numOfDatanodes, racks.length);
    cluster = new MiniDFSCluster(0, CONF, capacities.length, true, true, null, racks, capacities);
    try {
      cluster.waitActive();
      client = DFSClient.createNamenode(CONF);

      long totalCapacity = 0L;
      for (long capacity : capacities) {
        totalCapacity += capacity;
      }
      // fill up the cluster to be 30% full
      long totalUsedSpace = totalCapacity * 3 / 10;
      createFile(totalUsedSpace / numOfDatanodes, (short) numOfDatanodes);
      // start up an empty node with the same capacity and on the same rack
      cluster.startDataNodes(CONF, 1, true, null, new String[] {newRack}, new long[] {newCapacity});

      totalCapacity += newCapacity;

      // run balancer and validate results
      runBalancer(totalUsedSpace, totalCapacity);
    } finally {
      cluster.shutdown();
    }
  }
 @Test(timeout = 120000)
 public void testSeekAfterSetDropBehind() throws Exception {
   // start a cluster
   LOG.info("testSeekAfterSetDropBehind");
   Configuration conf = new HdfsConfiguration();
   MiniDFSCluster cluster = null;
   String TEST_PATH = "/test";
   int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
   try {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
     // verify that we can seek after setDropBehind
     FSDataInputStream fis = fs.open(new Path(TEST_PATH));
     try {
       Assert.assertTrue(fis.read() != -1); // create BlockReader
       fis.setDropBehind(false); // clear BlockReader
       fis.seek(2); // seek
     } finally {
       fis.close();
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
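
 // Hedged sketch of createHdfsFile (signature inferred from the call above; the original is not
 // shown): write `length` bytes to the path, optionally setting the drop-behind hint first.
 static void createHdfsFile(FileSystem fs, Path p, long length, Boolean dropBehind)
     throws Exception {
   FSDataOutputStream fos = null;
   try {
     fos = fs.create(p);
     if (dropBehind != null) {
       fos.setDropBehind(dropBehind);
     }
     byte[] buf = new byte[8192];
     while (length > 0) {
       int amt = (length > buf.length) ? buf.length : (int) length;
       fos.write(buf, 0, amt);
       length -= amt;
     }
   } finally {
     if (fos != null) {
       fos.close();
     }
   }
 }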
  /* Fill up a cluster with <code>numNodes</code> datanodes
   * whose used space is <code>size</code>.
   */
  private Block[] generateBlocks(long size, short numNodes) throws IOException {
    cluster = new MiniDFSCluster(CONF, numNodes, true, null);
    try {
      cluster.waitActive();
      client = DFSClient.createNamenode(CONF);

      short replicationFactor = (short) (numNodes - 1);
      long fileLen = size / replicationFactor;
      createFile(fileLen, replicationFactor);

      List<LocatedBlock> locatedBlocks =
          client.getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();

      int numOfBlocks = locatedBlocks.size();
      Block[] blocks = new Block[numOfBlocks];
      for (int i = 0; i < numOfBlocks; i++) {
        Block b = locatedBlocks.get(i).getBlock();
        blocks[i] = new Block(b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
      }

      return blocks;
    } finally {
      cluster.shutdown();
    }
  }
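
  // Hedged sketch of the createFile helper used by generateBlocks and test(...) above (an
  // assumption; fileName is the class-level test file path already referenced by generateBlocks,
  // and the checked exceptions of waitReplication vary between Hadoop versions).
  private void createFile(long fileLen, short replicationFactor) throws IOException {
    FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, new Path(fileName), fileLen, replicationFactor, 0L /* seed */);
    DFSTestUtil.waitReplication(fs, new Path(fileName), replicationFactor);
  }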
  /** Test DFS Raid */
  public void testBlockMissingException() throws Exception {
    LOG.info("Test testBlockMissingException started.");
    long blockSize = 1024L;
    int numBlocks = 4;
    conf = new Configuration();
    try {
      dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
      dfs.waitActive();
      fileSys = (DistributedFileSystem) dfs.getFileSystem();
      Path file1 = new Path("/user/dhruba/raidtest/file1");
      createOldFile(fileSys, file1, 1, numBlocks, blockSize);

      // Extract block locations from the file system; wait till the file is closed.
      LocatedBlocks locations = null;
      locations =
          fileSys.dfs.namenode.getBlockLocations(file1.toString(), 0, numBlocks * blockSize);
      // remove block of file
      LOG.info("Remove first block of file");
      corruptBlock(file1, locations.get(0).getBlock());

      // validate that the system throws BlockMissingException
      validateFile(fileSys, file1);
    } finally {
      if (fileSys != null) fileSys.close();
      if (dfs != null) dfs.shutdown();
    }
    LOG.info("Test testBlockMissingException completed.");
  }
  @Before
  public void createHDFS() {
    try {
      Configuration hdConf = new Configuration();

      File baseDir = new File("./target/hdfs/hdfsTest").getAbsoluteFile();
      FileUtil.fullyDelete(baseDir);
      hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
      MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
      hdfsCluster = builder.build();

      hdfsURI =
          "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";

      hdPath = new org.apache.hadoop.fs.Path("/test");
      hdfs = hdPath.getFileSystem(hdConf);
      FSDataOutputStream stream = hdfs.create(hdPath);
      for (int i = 0; i < 10; i++) {
        stream.write("Hello HDFS\n".getBytes());
      }
      stream.close();

    } catch (Throwable e) {
      e.printStackTrace();
      Assert.fail("Test failed " + e.getMessage());
    }
  }
  /*
   * Since the NameNode does not persist any block locations, an addBlock() retry call after a
   * NameNode restart should re-select the locations and return them to the client. See HDFS-5257.
   */
  @Test
  public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
    final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
    // create file
    nameNodeRpc.create(
        src,
        FsPermission.getFileDefault(),
        "clientName",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
        true,
        (short) 3,
        1024,
        null);
    // start first addBlock()
    LOG.info("Starting first addBlock for " + src);
    LocatedBlock lb1 =
        nameNodeRpc.addBlock(src, "clientName", null, null, INodeId.GRANDFATHER_INODE_ID, null);
    assertTrue("Block locations should be present", lb1.getLocations().length > 0);

    cluster.restartNameNode();
    nameNodeRpc = cluster.getNameNodeRpc();
    LocatedBlock lb2 =
        nameNodeRpc.addBlock(src, "clientName", null, null, INodeId.GRANDFATHER_INODE_ID, null);
    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
    assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
  }
 /**
  * Initialize the cluster, wait for it to become active, and get FileSystem instances for our test
  * users.
  *
  * @param format if true, format the NameNode and DataNodes before starting up
  * @throws Exception if any step fails
  */
 private static void initCluster(boolean format) throws Exception {
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format).build();
   cluster.waitActive();
   hdfs = cluster.getFileSystem();
   fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
   fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
 }
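
 // A plausible definition of the two test users referenced above (an assumption; the real names
 // and groups may differ): ordinary users created for testing, whose FileSystem instances are
 // obtained via DFSTestUtil.getFileSystemAs in initCluster.
 private static final UserGroupInformation BRUCE =
     UserGroupInformation.createUserForTesting("bruce", new String[] {});
 private static final UserGroupInformation DIANA =
     UserGroupInformation.createUserForTesting("diana", new String[] {});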
  @Before
  public void setupCluster() throws Exception {
    conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK);
    // Bump up replication interval so that we only run replication
    // checks explicitly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 600);
    // Increase max streams so that we re-replicate quickly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 1000);
    // See RandomDeleterPolicy javadoc.
    conf.setClass(
        DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        RandomDeleterPolicy.class,
        BlockPlacementPolicy.class);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    cluster =
        new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleHATopology())
            .numDataNodes(3)
            .build();
    nn1 = cluster.getNameNode(0);
    nn2 = cluster.getNameNode(1);

    cluster.waitActive();
    cluster.transitionToActive(0);
    // Trigger block reports so that the first NN trusts all
    // of the DNs, and will issue deletions
    cluster.triggerBlockReports();
    fs = HATestUtil.configureFailoverFs(cluster, conf);
  }
  @Before
  @Override
  public void setUp() throws Exception {
    super.setUp();
    conf.setClass(
        PolicyProvider.POLICY_PROVIDER_CONFIG, HDFSPolicyProvider.class, PolicyProvider.class);

    // Many of the tests expect a replication value of 1 in the output
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

    // Build racks and hosts configuration to test dfsAdmin -printTopology
    String[] racks = {
      "/rack1", "/rack1", "/rack2", "/rack2",
      "/rack2", "/rack3", "/rack4", "/rack4"
    };
    String[] hosts = {
      "host1", "host2", "host3", "host4",
      "host5", "host6", "host7", "host8"
    };
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8).racks(racks).hosts(hosts).build();
    dfsCluster.waitClusterUp();
    namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

    username = System.getProperty("user.name");

    fs = dfsCluster.getFileSystem();
    assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
  }
  /**
   * Verify that the NameNode is able to still use <tt>READ_ONLY_SHARED</tt> replicas even when the
   * single NORMAL replica is offline (and the effective replication count is 0).
   */
  @Test
  public void testNormalReplicaOffline() throws Exception {
    // Stop the datanode hosting the NORMAL replica
    cluster.stopDataNode(normalDataNode.getXferAddr());

    // Force NameNode to detect that the datanode is down
    BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(), normalDataNode.getXferAddr());

    // The live replica count should now be zero (since the NORMAL replica is offline)
    NumberReplicas numberReplicas = blockManager.countNodes(block);
    assertThat(numberReplicas.liveReplicas(), is(0));

    // The block should be reported as under-replicated
    BlockManagerTestUtil.updateState(blockManager);
    assertThat(blockManager.getUnderReplicatedBlocksCount(), is(1L));

    // The BlockManager should be able to heal the replication count back to 1
    // by triggering an inter-datanode replication from one of the READ_ONLY_SHARED replicas
    BlockManagerTestUtil.computeAllPendingWork(blockManager);

    DFSTestUtil.waitForReplication(cluster, extendedBlock, 1, 1, 0);

    // There should now be 2 *locations* for the block, and 1 *replica*
    assertThat(getLocatedBlock().getLocations().length, is(2));
    validateNumberReplicas(1);
  }
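
  // Hedged sketches of the two helpers used above (assumptions; the originals are not shown, and
  // PATH and BLOCK_SIZE are assumed class-level fields): re-fetch the single block of the test
  // file, and check the replica counters the BlockManager reports for it.
  private LocatedBlock getLocatedBlock() throws IOException {
    LocatedBlocks locatedBlocks =
        cluster.getFileSystem().getClient().getLocatedBlocks(PATH.toString(), 0, BLOCK_SIZE);
    assertThat(locatedBlocks.getLocatedBlocks().size(), is(1));
    return locatedBlocks.getLocatedBlocks().get(0);
  }

  private void validateNumberReplicas(int expectedReplicas) {
    NumberReplicas numberReplicas = blockManager.countNodes(block);
    assertThat(numberReplicas.liveReplicas(), is(expectedReplicas));
    assertThat(numberReplicas.excessReplicas(), is(0));
    assertThat(numberReplicas.corruptReplicas(), is(0));
  }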
  /**
   * Test if fsck can return -1 in case of failure
   *
   * @throws Exception
   */
  public void testFsckError() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      // bring up a one-node cluster
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 1, true, null);
      String fileName = "/test.txt";
      Path filePath = new Path(fileName);
      FileSystem fs = cluster.getFileSystem();

      // create a one-block file
      DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
      DFSTestUtil.waitReplication(fs, filePath, (short) 1);

      // intentionally corrupt NN data structure
      INodeFile node = (INodeFile) cluster.getNameNode().namesystem.dir.rootDir.getNode(fileName);
      assertEquals(node.blocks.length, 1);
      node.blocks[0].setNumBytes(-1L); // set the block length to be negative

      // run fsck and expect a failure with -1 as the error code
      String outStr = runFsck(conf, -1, true, fileName);
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));

      // clean up file system
      fs.delete(filePath, true);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
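
  // Hedged sketch of the runFsck helper (an assumption; the real helper may differ): run the DFSck
  // tool on the given paths, capture its output, and optionally assert on the exit code.
  static String runFsck(Configuration conf, int expectedErrCode, boolean checkErrorCode,
      String... path) throws Exception {
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    int errCode = ToolRunner.run(new DFSck(conf, out), path);
    if (checkErrorCode) {
      assertEquals(expectedErrCode, errCode);
    }
    out.close();
    return bStream.toString();
  }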