Code Example #1
  /**
   * RamDisk eviction should not happen on blocks that are not yet persisted on disk.
   *
   * @throws IOException
   * @throws InterruptedException
   */
  @Test
  public void testRamDiskEvictionBeforePersist() throws IOException, InterruptedException {
    startUpCluster(true, 1);
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
    Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
    final int SEED = 0XFADED;

    // Stop lazy writer to ensure block for path1 is not persisted to disk.
    FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));

    makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
    ensureFileReplicasOnStorageType(path1, RAM_DISK);

    // Create second file with a replica on RAM_DISK.
    makeTestFile(path2, BLOCK_SIZE, true);

    // Eviction should not happen for block of the first file that is not
    // persisted yet.
    ensureFileReplicasOnStorageType(path1, RAM_DISK);
    ensureFileReplicasOnStorageType(path2, DEFAULT);

    assertTrue(fs.exists(path1));
    assertTrue(fs.exists(path2));
    assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
  }
Code Example #2
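  /**
   * Verify that a replica which has not yet been persisted to disk is still reported on
   * transient storage (RAM_DISK) after the DataNode is restarted.
   *
   * @throws IOException
   * @throws InterruptedException
   */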
  @Test
  public void testDnRestartWithUnsavedReplicas() throws IOException, InterruptedException {

    startUpCluster(true, 1);
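    // Stop the lazy writer so that no replica is persisted to disk before the restart.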
    FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));

    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
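    // Create a file with a replica on RAM_DISK.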
    makeTestFile(path1, BLOCK_SIZE, true);
    ensureFileReplicasOnStorageType(path1, RAM_DISK);

    LOG.info("Restarting the DataNode");
    cluster.restartDataNode(0, true);
    cluster.waitActive();

    // Ensure that the replica is still on transient storage.
    ensureFileReplicasOnStorageType(path1, RAM_DISK);
  }
Code Example #3
  /**
   * Delete a lazy-persist file that has not been persisted to disk. Verify that the memory is
   * freed up and the file is gone.
   *
   * @throws IOException
   */
  @Test
  public void testDeleteBeforePersist() throws Exception {
    startUpCluster(true, -1);
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));

    Path path = new Path("/" + METHOD_NAME + ".dat");
    makeTestFile(path, BLOCK_SIZE, true);
    LocatedBlocks locatedBlocks = ensureFileReplicasOnStorageType(path, RAM_DISK);

    // Delete before persist
    client.delete(path.toString(), false);
    Assert.assertFalse(fs.exists(path));

    assertThat(verifyDeletedBlocks(locatedBlocks), is(true));

    verifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 1);
  }
Code Example #4
  /** Test for {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)} */
  @Test
  public void testUpdateReplicaUnderRecovery() throws IOException {
    MiniDFSCluster cluster = null;

    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
      cluster.waitActive();
      String bpid = cluster.getNamesystem().getBlockPoolId();

      // create a file
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      String filestr = "/foo";
      Path filepath = new Path(filestr);
      DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);

      // get block info
      final LocatedBlock locatedblock =
          getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
      final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
      Assert.assertTrue(datanodeinfo.length > 0);

      // get DataNode and FSDataset objects
      final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
      Assert.assertNotNull(datanode);

      // initReplicaRecovery
      final ExtendedBlock b = locatedblock.getBlock();
      final long recoveryid = b.getGenerationStamp() + 1;
      final long newlength = b.getNumBytes() - 1;
      final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
      final ReplicaRecoveryInfo rri =
          fsdataset.initReplicaRecovery(new RecoveringBlock(b, null, recoveryid));

      // check replica
      final ReplicaInfo replica =
          FsDatasetTestUtil.fetchReplicaInfo(fsdataset, bpid, b.getBlockId());
      Assert.assertEquals(ReplicaState.RUR, replica.getState());

      // check meta data before update
      FsDatasetImpl.checkReplicaFiles(replica);

      // case "THIS IS NOT SUPPOSED TO HAPPEN"
      // with (block length) != (stored replica's on disk length).
      {
        // create a block with same id and gs but different length.
        final ExtendedBlock tmp =
            new ExtendedBlock(
                b.getBlockPoolId(),
                rri.getBlockId(),
                rri.getNumBytes() - 1,
                rri.getGenerationStamp());
        try {
          // update should fail
          fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
          Assert.fail();
        } catch (IOException ioe) {
          System.out.println("GOOD: getting " + ioe);
        }
      }

      // update
      final String storageID =
          fsdataset.updateReplicaUnderRecovery(
              new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, newlength);
      Assert.assertNotNull(storageID);

    } finally {
      if (cluster != null) cluster.shutdown();
    }
  }