/** Test that the NN re-learns about volume failures after a restart. */
  @Test
  public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
    assumeTrue(!System.getProperty("os.name").startsWith("Windows"));

    // Bring up two more datanodes that can tolerate 1 failure
    cluster.startDataNodes(conf, 2, true, null, null);
    cluster.waitActive();

    final DatanodeManager dm = cluster.getNamesystem().getBlockManager().getDatanodeManager();
    long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
    long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

    // Fail the first volume on both datanodes (keep the third datanode
    // healthy so at least one node in the write pipeline stays up).
    File dn1Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
    File dn2Vol1 = new File(dataDir, "data" + (2 * 1 + 1));
    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));

    Path file1 = new Path("/test1");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 2);

    // The NN reports two volume failures
    DFSTestUtil.waitForDatanodeStatus(
        dm, 3, 0, 2, origCapacity - (1 * dnCapacity), WAIT_FOR_HEARTBEATS);

    // After restarting, the NN still sees the two failures
    cluster.restartNameNode(0);
    cluster.waitActive();
    DFSTestUtil.waitForDatanodeStatus(
        dm, 3, 0, 2, origCapacity - (1 * dnCapacity), WAIT_FOR_HEARTBEATS);
  }
  /** Test saveNamespace in the middle of a checkpoint. */
  @Test
  public void testCheckpointWithSavenamespace() throws Exception {
    Configuration conf = getConf();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();
    FSNamesystem fsn = FSNamesystem.getFSNamesystem();

    // Keep a reference to the original FSImage so we can close it later
    final FSImage originalImage = fsn.dir.fsImage;

    try {
      doAnEdit(fsn, 1);
      CheckpointSignature sig = fsn.rollEditLog();
      LOG.warn("Checkpoint signature: " + sig);

      // Do another edit
      doAnEdit(fsn, 2);

      // Save namespace
      fsn.saveNamespace(true, false);

      // Try to do a rollFSImage; this should fail because saveNamespace
      // has already occurred after the call to rollEditLog
      try {
        fsn.rollFSImage(sig);
        fail("The rollFSImage immediately following the saveNamespace command should fail.");
      } catch (IOException e) {
        LOG.info(
            "Expected exception while invoking rollFSImage"
                + " after a successful call to saveNamespace: "
                + e);
      }

      // Now shut down and restart the NN
      originalImage.close();
      fsn.close();
      cluster.shutdown();
      fsn = null;

      // Start a new namesystem, which should be able to recover
      // the namespace from the previous incarnation.
      cluster = new MiniDFSCluster(conf, 1, false, null);
      cluster.waitActive();
      fsn = FSNamesystem.getFSNamesystem();

      // Make sure the loaded image includes our edits.
      checkEditExists(cluster, 1);
      checkEditExists(cluster, 2);
    } finally {
      if (fsn != null) {
        fsn.close();
        cluster.shutdown();
      }
    }
  }
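  // Minimal sketches (assumptions, not the original helpers) of doAnEdit and
  // checkEditExists used above: doAnEdit performs any mutation that is recorded
  // in the edit log, and checkEditExists verifies it survived the restart. The
  // exact FSNamesystem#mkdirs signature (and the PermissionStatus/FsPermission
  // classes from org.apache.hadoop.fs.permission) vary across Hadoop versions.
  private void doAnEdit(FSNamesystem fsn, int id) throws IOException {
    fsn.mkdirs(
        "/test" + id,
        new PermissionStatus("test", "testgroup", FsPermission.getDefault()),
        true);
  }

  private void checkEditExists(MiniDFSCluster cluster, int id) throws IOException {
    Path p = new Path("/test" + id);
    assertTrue(
        "/test" + id + " should exist after the NN restart",
        cluster.getFileSystem().exists(p));
  }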
  private void testSaveWhileEditsRolled(boolean dosafemode, boolean force, boolean uncompressed)
      throws Exception {
    Configuration conf = getConf();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();
    FSNamesystem fsn = FSNamesystem.getFSNamesystem();

    // Replace the FSImage with a spy
    FSImage originalImage = fsn.dir.fsImage;
    FSImage spyImage = spy(originalImage);
    spyImage.setStorageDirectories(
        FSNamesystem.getNamespaceDirs(conf), FSNamesystem.getNamespaceEditsDirs(conf));
    fsn.dir.fsImage = spyImage;

    try {
      doAnEdit(fsn, 1);
      CheckpointSignature sig = fsn.rollEditLog();
      LOG.warn("Checkpoint signature: " + sig);
      // Do another edit
      doAnEdit(fsn, 2);

      // Save namespace
      if (dosafemode) {
        fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      }
      fsn.saveNamespace(force, uncompressed);

      // Now shut down and restart the NN
      originalImage.close();
      originalImage = null;
      fsn.close();
      fsn = null;
      cluster.shutdown();

      // Start a new namesystem, which should be able to recover
      // the namespace from the previous incarnation.
      cluster = new MiniDFSCluster(conf, 1, false, null);
      cluster.waitActive();
      fsn = FSNamesystem.getFSNamesystem();

      // Make sure the loaded image includes our edits.
      checkEditExists(cluster, 1);
      checkEditExists(cluster, 2);
    } finally {
      if (originalImage != null) {
        originalImage.close();
      }
      if (fsn != null) {
        fsn.close();
        cluster.shutdown();
      }
    }
  }
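  // Hypothetical entry points for the parameterized helper above (a sketch;
  // the flag combinations exercised by the real suite may differ). Note that
  // saveNamespace without force only succeeds while the NN is in safe mode.
  @Test
  public void testSaveNamespaceWithSafeMode() throws Exception {
    testSaveWhileEditsRolled(true, false, false);
  }

  @Test
  public void testForcedSaveNamespaceUncompressed() throws Exception {
    testSaveWhileEditsRolled(false, true, true);
  }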
  /** Test that there are under-replicated blocks after volume failures. */
  @Test
  public void testUnderReplicationAfterVolFailure() throws Exception {
    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
    // volume failures, which is currently not supported on Windows.
    assumeTrue(!Path.WINDOWS);

    // Bring up one more datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();

    final BlockManager bm = cluster.getNamesystem().getBlockManager();

    Path file1 = new Path("/test1");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 3);

    // Fail the first volume on both datanodes
    File dn1Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
    File dn2Vol1 = new File(dataDir, "data" + (2 * 1 + 1));
    DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);

    Path file2 = new Path("/test2");
    DFSTestUtil.createFile(fs, file2, 1024, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, file2, (short) 3);

    // The under-replicated blocks are due to the failed volumes
    int underReplicatedBlocks =
        BlockManagerTestUtil.checkHeartbeatAndGetUnderReplicatedBlocksCount(
            cluster.getNamesystem(), bm);
    assertTrue(
        "There is no under replicated block after volume failure", underReplicatedBlocks > 0);
  }
  @Before
  public void setupCluster() throws Exception {
    conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK);
    // Bump up replication interval so that we only run replication
    // checks explicitly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 600);
    // Increase max streams so that we re-replicate quickly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 1000);
    // See RandomDeleterPolicy javadoc.
    conf.setClass(
        DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        RandomDeleterPolicy.class,
        BlockPlacementPolicy.class);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    cluster =
        new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleHATopology())
            .numDataNodes(3)
            .build();
    nn1 = cluster.getNameNode(0);
    nn2 = cluster.getNameNode(1);

    cluster.waitActive();
    cluster.transitionToActive(0);
    // Trigger block reports so that the first NN trusts all
    // of the DNs, and will issue deletions
    cluster.triggerBlockReports();
    fs = HATestUtil.configureFailoverFs(cluster, conf);
  }
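  // A sketch of the matching teardown for the @Before above (assumed names):
  // close the failover FileSystem and shut the MiniDFSCluster down so each
  // test starts from a clean HA topology.
  @After
  public void shutdownCluster() throws Exception {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }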
 @Test(timeout = 120000)
 public void testSeekAfterSetDropBehind() throws Exception {
   // start a cluster
   LOG.info("testSeekAfterSetDropBehind");
   Configuration conf = new HdfsConfiguration();
   MiniDFSCluster cluster = null;
   String TEST_PATH = "/test";
   int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
   try {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
     // verify that we can seek after setDropBehind
     FSDataInputStream fis = fs.open(new Path(TEST_PATH));
     try {
       Assert.assertTrue(fis.read() != -1); // create BlockReader
       fis.setDropBehind(false); // clear BlockReader
       fis.seek(2); // seek
     } finally {
       fis.close();
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
 /**
  * Initialize the cluster, wait for it to become active, and get FileSystem instances for our test
  * users.
  *
  * @param format if true, format the NameNode and DataNodes before starting up
  * @throws Exception if any step fails
  */
 private static void initCluster(boolean format) throws Exception {
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format).build();
   cluster.waitActive();
   hdfs = cluster.getFileSystem();
   fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
   fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
 }
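 // Hypothetical wiring around the helper above (a sketch; the original suite's
 // @BeforeClass/@AfterClass may differ): build the shared conf once, start the
 // cluster formatted, and close every FileSystem handle on the way out.
 @BeforeClass
 public static void init() throws Exception {
   conf = new Configuration();
   initCluster(true);
 }

 @AfterClass
 public static void shutdown() throws Exception {
   IOUtils.cleanup(null, hdfs, fsAsBruce, fsAsDiana);
   if (cluster != null) {
     cluster.shutdown();
   }
 }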
  /**
   * Test plan:
   * 1. create a DFS cluster with 3 storage directories - 2 EDITS_IMAGE, 1 EDITS
   * 2. create a cluster and write a file
   * 3. corrupt/disable one storage (or two) by removing it
   * 4. run doCheckpoint - it will fail on the removed dirs (which will invalidate the storages)
   * 5. write another file
   * 6. check that edits and fsimage differ
   * 7. run doCheckpoint
   * 8. verify that all the image and edits files are the same
   */
  @Test
  public void testStorageRestore() throws Exception {
    int numDatanodes = 2;
    cluster =
        new MiniDFSCluster(0, config, numDatanodes, true, false, true, null, null, null, null);
    cluster.waitActive();

    SecondaryNameNode secondary = new SecondaryNameNode(config);

    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/", "test");
    writeFile(fs, path, 2);

    invalidateStorage(cluster.getNameNode().getFSImage());

    path = new Path("/", "test1");
    writeFile(fs, path, 2);

    checkFiles(false);

    secondary.doCheckpoint();

    checkFiles(true);
    secondary.shutdown();
    cluster.shutdown();
  }
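  // A sketch of the writeFile helper used above (hypothetical; BLOCK_SIZE and
  // FILE_SIZE are assumed constants of the test class): create a file with the
  // requested replication, write a little data, and close it.
  private void writeFile(FileSystem fs, Path name, int repl) throws IOException {
    FSDataOutputStream stm = fs.create(name, true, 4096, (short) repl, BLOCK_SIZE);
    stm.write(new byte[FILE_SIZE]);
    stm.close();
  }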
  protected void setupCluster(boolean simulated, long minFileSize, String[] racks, String[] hosts)
      throws IOException {
    conf = new Configuration();
    localFileSys = FileSystem.getLocal(conf);
    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
    conf.set("dfs.replication.pending.timeout.sec", "2");
    conf.setLong("dfs.block.size", 1L);
    conf.set(
        "dfs.block.replicator.classname",
        "org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid");
    conf.setLong("hdfs.raid.min.filesize", minFileSize);
    Utils.loadTestCodecs(conf, 5, 5, 1, 3, "/raid", "/raidrs", false, true);
    conf.setInt("io.bytes.per.checksum", 1);
    excludeFile = new Path(TEST_DIR, "exclude" + System.currentTimeMillis());
    cleanFile(excludeFile);
    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
    writeConfigFile(excludeFile, null);

    if (!simulated) {
      cluster = new MiniDFSCluster(conf, hosts.length, true, racks, hosts);
    } else {
      long[] capacities = new long[] {CAPACITY, CAPACITY, CAPACITY};
      cluster = new MiniDFSCluster(0, conf, hosts.length, true, true, null, racks, capacities);
    }
    cluster.waitActive();
    namesystem = cluster.getNameNode().getNamesystem();
    Assert.assertTrue(
        "BlockPlacementPolicy type is not correct.",
        namesystem.replicator instanceof BlockPlacementPolicyRaid);
    policy = (BlockPlacementPolicyRaid) namesystem.replicator;
    fs = cluster.getFileSystem();
    dfs = (DistributedFileSystem) fs;
    TestDirectoryRaidDfs.setupStripeStore(conf, fs);
  }
  /* This test starts a cluster with the given datanode capacities and racks,
   * fills it to be 30% full, then adds an empty node and starts balancing.
   * @param capacities capacities of the initial datanodes
   * @param racks racks of the initial datanodes
   * @param newCapacity the new node's capacity
   * @param newRack the new node's rack
   */
  private void test(long[] capacities, String[] racks, long newCapacity, String newRack)
      throws Exception {
    int numOfDatanodes = capacities.length;
    assertEquals(numOfDatanodes, racks.length);
    cluster = new MiniDFSCluster(0, CONF, capacities.length, true, true, null, racks, capacities);
    try {
      cluster.waitActive();
      client = DFSClient.createNamenode(CONF);

      long totalCapacity = 0L;
      for (long capacity : capacities) {
        totalCapacity += capacity;
      }
      // fill up the cluster to be 30% full
      long totalUsedSpace = totalCapacity * 3 / 10;
      createFile(totalUsedSpace / numOfDatanodes, (short) numOfDatanodes);
      // start up an empty node with the same capacity and on the same rack
      cluster.startDataNodes(CONF, 1, true, null, new String[] {newRack}, new long[] {newCapacity});

      totalCapacity += newCapacity;

      // run balancer and validate results
      runBalancer(totalUsedSpace, totalCapacity);
    } finally {
      cluster.shutdown();
    }
  }
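  // A hypothetical invocation of the helper above: two equally sized nodes on
  // one rack, then add an equally sized empty node on the same rack and
  // rebalance (the capacity value and rack names here are assumptions).
  @Test
  public void testBalancerTwoNodesOneRack() throws Exception {
    final long capacity = 500L * 1024 * 1024;
    test(new long[] {capacity, capacity}, new String[] {"/rack0", "/rack0"}, capacity, "/rack0");
  }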
  /* fill up a cluster with <code>numNodes</code> datanodes
   * whose used space is <code>size</code>
   */
  private Block[] generateBlocks(long size, short numNodes) throws IOException {
    cluster = new MiniDFSCluster(CONF, numNodes, true, null);
    try {
      cluster.waitActive();
      client = DFSClient.createNamenode(CONF);

      short replicationFactor = (short) (numNodes - 1);
      long fileLen = size / replicationFactor;
      createFile(fileLen, replicationFactor);

      List<LocatedBlock> locatedBlocks =
          client.getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();

      int numOfBlocks = locatedBlocks.size();
      Block[] blocks = new Block[numOfBlocks];
      for (int i = 0; i < numOfBlocks; i++) {
        Block b = locatedBlocks.get(i).getBlock();
        blocks[i] = new Block(b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
      }

      return blocks;
    } finally {
      cluster.shutdown();
    }
  }
  /* First start a cluster and fill it up to a certain size. Then redistribute
   * the blocks according to the required distribution. Afterwards, run the
   * balancer to balance the cluster.
   */
  private void testUnevenDistribution(long distribution[], long capacities[], String[] racks)
      throws Exception {
    int numDatanodes = distribution.length;
    if (capacities.length != numDatanodes || racks.length != numDatanodes) {
      throw new IllegalArgumentException("Array length is not the same");
    }

    // calculate total space that need to be filled
    long totalUsedSpace = 0L;
    for (int i = 0; i < distribution.length; i++) {
      totalUsedSpace += distribution[i];
    }

    // fill the cluster
    Block[] blocks = generateBlocks(totalUsedSpace, (short) numDatanodes);

    // redistribute blocks
    Block[][] blocksDN = distributeBlocks(blocks, (short) (numDatanodes - 1), distribution);

    // restart the cluster: do NOT format the cluster
    CONF.set("dfs.safemode.threshold.pct", "0.0f");
    cluster = new MiniDFSCluster(0, CONF, numDatanodes, false, true, null, racks, capacities);
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    cluster.injectBlocks(blocksDN);

    long totalCapacity = 0L;
    for (long capacity : capacities) {
      totalCapacity += capacity;
    }
    runBalancer(totalUsedSpace, totalCapacity);
  }
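  // A sketch of the distributeBlocks helper used above (an assumption about
  // its behavior, not the original implementation): greedily hand out up to
  // replicationFactor replicas of each block, always to distinct datanodes
  // with the most unfilled space, until each node holds roughly the number of
  // bytes requested in the distribution array. Uses java.util.ArrayList/List.
  private static Block[][] distributeBlocks(
      Block[] blocks, short replicationFactor, long[] distribution) {
    long[] remaining = distribution.clone();
    List<List<Block>> perNode = new ArrayList<List<Block>>();
    for (int i = 0; i < distribution.length; i++) {
      perNode.add(new ArrayList<Block>());
    }
    for (Block block : blocks) {
      boolean[] used = new boolean[remaining.length];
      for (int r = 0; r < replicationFactor; r++) {
        // pick the unused node with the most space left to fill
        int target = -1;
        for (int i = 0; i < remaining.length; i++) {
          if (!used[i] && remaining[i] > 0 && (target < 0 || remaining[i] > remaining[target])) {
            target = i;
          }
        }
        if (target < 0) {
          break;
        }
        used[target] = true;
        perNode.get(target).add(block);
        remaining[target] -= block.getNumBytes();
      }
    }
    Block[][] result = new Block[perNode.size()][];
    for (int i = 0; i < perNode.size(); i++) {
      result[i] = perNode.get(i).toArray(new Block[0]);
    }
    return result;
  }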
  /** Test that reading a DFS Raid file with a missing block throws BlockMissingException. */
  public void testBlockMissingException() throws Exception {
    LOG.info("Test testBlockMissingException started.");
    long blockSize = 1024L;
    int numBlocks = 4;
    conf = new Configuration();
    try {
      dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
      dfs.waitActive();
      fileSys = (DistributedFileSystem) dfs.getFileSystem();
      Path file1 = new Path("/user/dhruba/raidtest/file1");
      createOldFile(fileSys, file1, 1, numBlocks, blockSize);

      // extract block locations from File system. Wait till file is closed.
      LocatedBlocks locations = null;
      locations =
          fileSys.dfs.namenode.getBlockLocations(file1.toString(), 0, numBlocks * blockSize);
      // remove block of file
      LOG.info("Remove first block of file");
      corruptBlock(file1, locations.get(0).getBlock());

      // validate that the system throws BlockMissingException
      validateFile(fileSys, file1);
    } finally {
      if (fileSys != null) fileSys.close();
      if (dfs != null) dfs.shutdown();
    }
    LOG.info("Test testBlockMissingException completed.");
  }
 @Before
 public void setUp() throws Exception {
   cluster = new MiniDFSCluster.Builder(CONF).build();
   cluster.waitActive();
   cluster.getNameNode();
   fs = (DistributedFileSystem) cluster.getFileSystem();
 }
 @Override
 protected void setUp() throws Exception {
   cluster = new MiniDFSCluster(CONF, 1, true, null);
   cluster.waitActive();
   fs = (DistributedFileSystem) cluster.getFileSystem();
   metrics = fs.getClient().getDFSClientMetrics();
 }
 @Before
 public void setUp() throws Exception {
   conf = new HdfsConfiguration();
   conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
   cluster.waitActive();
   fs = cluster.getFileSystem();
 }
 @Override
 protected void setUp() throws Exception {
   cluster = new MiniDFSCluster(CONF, 1, true, null);
   cluster.waitActive();
   cluster.getNameNode();
   nnMetrics = NameNode.getNameNodeMetrics();
   fs = (DistributedFileSystem) cluster.getFileSystem();
 }
  @Before
  public void setUp() throws Exception {
    conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
    cluster.waitActive();

    fsdir = cluster.getNamesystem().getFSDirectory();
    dfs = cluster.getFileSystem();
  }
 private static MiniDFSCluster startMini(String testName) throws IOException {
   File baseDir = new File("./target/hdfs/" + testName).getAbsoluteFile();
   FileUtil.fullyDelete(baseDir);
   Configuration conf = new Configuration();
   conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
   MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
   MiniDFSCluster hdfsCluster = builder.clusterId(testName).build();
   hdfsCluster.waitActive();
   return hdfsCluster;
 }
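 // Hypothetical usage of startMini above: one cluster per test, always shut
 // down in a finally block.
 @Test
 public void testStartMiniUsage() throws IOException {
   MiniDFSCluster hdfs = startMini("testStartMiniUsage");
   try {
     FileSystem fs = hdfs.getFileSystem();
     fs.mkdirs(new Path("/smoke"));
   } finally {
     hdfs.shutdown();
   }
 }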
  public void testRPCInterrupted() throws IOException, InterruptedException {
    final MiniDFSCluster cluster;
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(conf, 3, true, null);
    final AtomicBoolean passed = new AtomicBoolean(false);
    try {
      cluster.waitActive();
      Thread rpcThread =
          new Thread(
              new Runnable() {
                @Override
                public void run() {
                  FileSystem fs = null;
                  try {
                    fs = cluster.getUniqueFileSystem();
                    int i = 0;
                    while (true) {
                      fs.create(new Path(String.format("/file-%09d", i++)));
                      fs.listStatus(new Path("/"));
                    }
                  } catch (IOException e) {
                    if (e.getCause() instanceof InterruptedException) {
                      // the underlying InterruptedException should be wrapped in an
                      // IOException and end up here
                      passed.set(true);
                    } else {
                      passed.set(false);
                      fail(e.getMessage());
                    }
                  } finally {
                    if (fs != null) {
                      try {
                        fs.close();
                      } catch (IOException e) {
                        passed.set(false);
                        LOG.error(e);
                        fail(e.toString());
                      }
                    }
                  }
                }
              });
      FileSystem fs2 = cluster.getUniqueFileSystem();

      rpcThread.start();
      Thread.sleep(1000);
      rpcThread.interrupt();
      rpcThread.join();
      FileStatus[] statuses = fs2.listStatus(new Path("/"));
      assertTrue("expect at least 1 file created", statuses.length > 0);
      assertTrue("error in writing thread, see above", passed.get());
    } finally {
      cluster.shutdown();
    }
  }
  /**
   * Test that the APPEND operation can handle block token expiration when re-establishing the
   * pipeline is needed.
   */
  @Test
  public void testAppend() throws Exception {
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = getConf(numDataNodes);

    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
      cluster.waitActive();
      assertEquals(numDataNodes, cluster.getDataNodes().size());

      final NameNode nn = cluster.getNameNode();
      final BlockManager bm = nn.getNamesystem().getBlockManager();
      final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

      // set a short token lifetime (1 second)
      SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
      Path fileToAppend = new Path(FILE_TO_APPEND);
      FileSystem fs = cluster.getFileSystem();
      byte[] expected = generateBytes(FILE_SIZE);
      // write a one-byte file
      FSDataOutputStream stm = writeFile(fs, fileToAppend, (short) numDataNodes, BLOCK_SIZE);
      stm.write(expected, 0, 1);
      stm.close();
      // open the file again for append
      stm = fs.append(fileToAppend);
      int mid = expected.length - 1;
      stm.write(expected, 1, mid - 1);
      stm.hflush();

      /*
       * wait till token used in stm expires
       */
      Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
      while (!SecurityTestUtil.isBlockTokenExpired(token)) {
        try {
          Thread.sleep(10);
        } catch (InterruptedException ignored) {
        }
      }

      // remove a datanode to force re-establishing pipeline
      cluster.stopDataNode(0);
      // append the rest of the file
      stm.write(expected, mid, expected.length - mid);
      stm.close();
      // check if append is successful
      FSDataInputStream in5 = fs.open(fileToAppend);
      assertTrue(checkFile1(in5, expected));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
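  // Sketches of the small helpers referenced above (assumptions; the originals
  // belong to the append/block-token test class): generateBytes produces
  // deterministic file contents, checkFile1 reads the whole file back and
  // compares it byte for byte. Uses java.util.Random and java.util.Arrays.
  private static byte[] generateBytes(int size) {
    byte[] data = new byte[size];
    new Random(0xDEADBEEFL).nextBytes(data);
    return data;
  }

  private static boolean checkFile1(FSDataInputStream in, byte[] expected) throws IOException {
    byte[] actual = new byte[expected.length];
    in.readFully(0, actual);
    in.close();
    return Arrays.equals(expected, actual);
  }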
  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
    cluster.waitActive();

    FSNamesystem fsn = cluster.getNamesystem();
    fsdir = fsn.getFSDirectory();

    hdfs = cluster.getFileSystem();
  }
  /**
   * The following test first creates a file. It verifies the block information from a datanode.
   * Then, it updates the block with new information and verifies again.
   */
  @Test
  public void testBlockMetaDataInfo() throws Exception {
    MiniDFSCluster cluster = null;

    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
      cluster.waitActive();

      // create a file
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      String filestr = "/foo";
      Path filepath = new Path(filestr);
      DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
      assertTrue(dfs.exists(filepath));

      // get block info
      LocatedBlock locatedblock =
          getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
      DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
      assertTrue(datanodeinfo.length > 0);

      // connect to a data node
      DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
      InterDatanodeProtocol idp =
          DataNodeTestUtils.createInterDatanodeProtocolProxy(datanode, datanodeinfo[0], conf);

      // stop block scanner, so we could compare lastScanTime
      DataNodeTestUtils.shutdownBlockScanner(datanode);

      // verify BlockMetaDataInfo
      ExtendedBlock b = locatedblock.getBlock();
      InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
      checkMetaInfo(b, datanode);
      long recoveryId = b.getGenerationStamp() + 1;
      idp.initReplicaRecovery(new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));

      // verify updateBlock
      ExtendedBlock newblock =
          new ExtendedBlock(
              b.getBlockPoolId(), b.getBlockId(), b.getNumBytes() / 2, b.getGenerationStamp() + 1);
      idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
      checkMetaInfo(newblock, datanode);

      // Verify correct null response trying to init recovery for a missing block
      ExtendedBlock badBlock = new ExtendedBlock("fake-pool", b.getBlockId(), 0, 0);
      assertNull(
          idp.initReplicaRecovery(
              new RecoveringBlock(badBlock, locatedblock.getLocations(), recoveryId)));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
 private void setupCluster() throws IOException {
   setupConf();
   // start the cluster with six datanodes
   cluster = new MiniDFSCluster(conf, 6, true, racks, hosts);
   cluster.waitActive();
   fs = cluster.getFileSystem();
   placementMonitor = new PlacementMonitor(conf);
   placementMonitor.start();
   blockMover = placementMonitor.blockMover;
   namenode = cluster.getNameNode();
   datanodes = namenode.getDatanodeReport(DatanodeReportType.LIVE);
 }
 @Before
 public void setUp() throws Exception {
   // bring up a cluster of 2
   conf = new HdfsConfiguration();
   conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
   // Allow a single volume failure (there are two volumes)
   conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dn_num).build();
   cluster.waitActive();
   fs = cluster.getFileSystem();
   dataDir = new File(cluster.getDataDirectory());
 }
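 // A sketch of the matching teardown (names taken from the setUp above):
 // restore execute permission on any data directories a test may have
 // chmod-ed to simulate a volume failure, then shut the cluster down.
 @After
 public void tearDown() throws Exception {
   for (int i = 0; i < dn_num; i++) {
     FileUtil.setExecutable(new File(dataDir, "data" + (2 * i + 1)), true);
     FileUtil.setExecutable(new File(dataDir, "data" + (2 * i + 2)), true);
   }
   if (cluster != null) {
     cluster.shutdown();
   }
 }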
  /**
   * Test whether we can delay the deletion of unknown blocks in DataNode's first several block
   * reports.
   */
  @Test
  public void testPendingDeleteUnknownBlocks() throws Exception {
    final int fileNum = 5; // 5 files
    final Path[] files = new Path[fileNum];
    final DataNodeProperties[] dnprops = new DataNodeProperties[REPLICATION];
    // create a group of files, each file contains 1 block
    for (int i = 0; i < fileNum; i++) {
      files[i] = new Path("/file" + i);
      DFSTestUtil.createFile(dfs, files[i], BLOCKSIZE, REPLICATION, i);
    }
    // wait until all DataNodes have replicas
    waitForReplication();
    for (int i = REPLICATION - 1; i >= 0; i--) {
      dnprops[i] = cluster.stopDataNode(i);
    }
    Thread.sleep(2000);
    // delete 2 files, we still have 3 files remaining so that we can cover
    // every DN storage
    for (int i = 0; i < 2; i++) {
      dfs.delete(files[i], true);
    }

    // restart NameNode
    cluster.restartNameNode(false);
    InvalidateBlocks invalidateBlocks =
        (InvalidateBlocks)
            Whitebox.getInternalState(
                cluster.getNamesystem().getBlockManager(), "invalidateBlocks");
    InvalidateBlocks mockIb = Mockito.spy(invalidateBlocks);
    Mockito.doReturn(1L).when(mockIb).getInvalidationDelay();
    Whitebox.setInternalState(
        cluster.getNamesystem().getBlockManager(), "invalidateBlocks", mockIb);

    Assert.assertEquals(0L, cluster.getNamesystem().getPendingDeletionBlocks());
    // restart DataNodes
    for (int i = 0; i < REPLICATION; i++) {
      cluster.restartDataNode(dnprops[i], true);
    }
    cluster.waitActive();

    for (int i = 0; i < REPLICATION; i++) {
      DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(i));
    }
    Thread.sleep(2000);
    // make sure we have received block reports by checking the total block #
    Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
    Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());

    cluster.restartNameNode(true);
    Thread.sleep(6000);
    Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
    Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
  }
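  // A sketch of the waitForReplication helper used above (an assumption, not
  // the original): poll the NameNode until it reports no under-replicated
  // blocks, so every DataNode holds a replica of each file before we start
  // stopping nodes. Uses GenericTestUtils and Guava's Supplier.
  private void waitForReplication() throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return cluster.getNamesystem().getUnderReplicatedBlocks() == 0;
      }
    }, 500, 30000);
  }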
 @Before
 public void setUp() throws IOException {
   conf = new HdfsConfiguration();
   conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
   conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 100);
   cluster =
       new MiniDFSCluster.Builder(conf)
           .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
           .build();
   for (int i = 0; i < 3; i++) {
     cluster.waitActive(i);
   }
 }
  @Before
  public void setUp() throws Exception {
    config = new Configuration();
    config.setClass(
        "hadoop.security.group.mapping",
        TestRefreshUserMappings.MockUnixGroupsMapping.class,
        GroupMappingServiceProvider.class);
    config.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
    Groups.getUserToGroupsMappingService(config);

    FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
    cluster = new MiniDFSCluster(0, config, 1, true, true, true, null, null, null, null);
    cluster.waitActive();
  }
  private void mySetup(int stripeLength) throws Exception {
    if (System.getProperty("hadoop.log.dir") == null) {
      String base = new File(".").getAbsolutePath();
      System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
    }

    new File(TEST_DIR).mkdirs(); // Make sure data directory exists
    conf = new Configuration();

    conf.set("raid.config.file", CONFIG_FILE);
    conf.setBoolean("raid.config.reload", true);
    conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

    // scan all policies once every 5 seconds
    conf.setLong("raid.policy.rescan.interval", 5000);

    // do not use map-reduce cluster for Raiding
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    conf.set("raid.server.address", "localhost:" + MiniDFSCluster.getFreePort());
    conf.set("mapred.raid.http.address", "localhost:0");

    Utils.loadTestCodecs(
        conf, stripeLength, stripeLength, 1, 3, "/destraid", "/destraidrs", false, true);

    conf.setBoolean("dfs.permissions", false);
    // Make sure initial repl is smaller than NUM_DATANODES
    conf.setInt(RaidNode.RAID_PARITY_INITIAL_REPL_KEY, 1);

    dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfsCluster.waitActive();
    fileSys = dfsCluster.getFileSystem();
    namenode = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);
    mr = new MiniMRCluster(4, namenode, 3);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

    FileSystem.setDefaultUri(conf, namenode);
    conf.set("mapred.job.tracker", jobTrackerName);
    conf.set(RaidNode.RAID_CHECKSUM_STORE_CLASS_KEY, "org.apache.hadoop.raid.LocalChecksumStore");
    conf.setBoolean(RaidNode.RAID_CHECKSUM_STORE_REQUIRED_KEY, true);
    conf.set(LocalChecksumStore.LOCAL_CHECK_STORE_DIR_KEY, CHECKSUM_STORE_DIR);
    conf.set(RaidNode.RAID_STRIPE_STORE_CLASS_KEY, "org.apache.hadoop.raid.LocalStripeStore");
    conf.set(LocalStripeStore.LOCAL_STRIPE_STORE_DIR_KEY, STRIPE_STORE_DIR);
    ConfigBuilder cb = new ConfigBuilder(CONFIG_FILE);
    cb.addPolicy("RaidTest1", "/user/dhruba/raidtest", 1, 1);
    cb.addPolicy("RaidTest2", "/user/dhruba/raidtestrs", 1, 1, "rs");
    cb.persist();
  }
  @Test(timeout = 120000)
  public void testFadviseSkippedForSmallReads() throws Exception {
    // start a cluster
    LOG.info("testFadviseSkippedForSmallReads");
    tracker.clear();
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY, true);
    MiniDFSCluster cluster = null;
    String TEST_PATH = "/test";
    int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
    FSDataInputStream fis = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create new file
      createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, null);
      // Since the DataNode was configured with drop-behind, and we didn't
      // specify any policy, we should have done drop-behind.
      ExtendedBlock block =
          cluster
              .getNameNode()
              .getRpcServer()
              .getBlockLocations(TEST_PATH, 0, Long.MAX_VALUE)
              .get(0)
              .getBlock();
      String fadvisedFileName = cluster.getBlockFile(0, block).getName();
      Stats stats = tracker.getStats(fadvisedFileName);
      stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
      stats.clear();
      stats.assertNotDroppedInRange(0, TEST_PATH_LEN);

      // read file
      fis = fs.open(new Path(TEST_PATH));
      byte[] buf = new byte[17];
      fis.readFully(4096, buf, 0, buf.length);

      // we should not have dropped anything because of the small read.
      stats = tracker.getStats(fadvisedFileName);
      stats.assertNotDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
    } finally {
      IOUtils.cleanup(null, fis);
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }