Example #1
  @Test(timeout = 120000)
  public void testFadviseSkippedForSmallReads() throws Exception {
    // start a cluster
    LOG.info("testFadviseSkippedForSmallReads");
    tracker.clear();
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY, true);
    MiniDFSCluster cluster = null;
    String TEST_PATH = "/test";
    int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
    FSDataInputStream fis = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create new file
      createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, null);
      // Since the DataNode was configured with drop-behind, and we didn't
      // specify any policy, we should have done drop-behind.
      ExtendedBlock block =
          cluster
              .getNameNode()
              .getRpcServer()
              .getBlockLocations(TEST_PATH, 0, Long.MAX_VALUE)
              .get(0)
              .getBlock();
      String fadvisedFileName = cluster.getBlockFile(0, block).getName();
      Stats stats = tracker.getStats(fadvisedFileName);
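      // The writer drops cache behind it one packet at a time, so the final
      // WRITE_PACKET_SIZE bytes may not have been dropped yet; clearing the
      // stats afterwards establishes a clean baseline for the read below.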
      stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
      stats.clear();
      stats.assertNotDroppedInRange(0, TEST_PATH_LEN);

      // read file
      fis = fs.open(new Path(TEST_PATH));
      byte[] buf = new byte[17];
      fis.readFully(4096, buf, 0, buf.length);

      // we should not have dropped anything because of the small read.
      stats = tracker.getStats(fadvisedFileName);
      stats.assertNotDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
    } finally {
      IOUtils.cleanup(null, fis);
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
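The createHdfsFile helper is defined elsewhere in the test class and is not part of this example. Below is a minimal sketch of what it plausibly looks like, assuming a Boolean dropBehind argument where null means "leave the caching policy unspecified" and forwarding to the real FSDataOutputStream.setDropBehind API; the 8192-byte buffer and the replication factor of 1 are assumptions:

  // Hypothetical reconstruction of the createHdfsFile helper used above.
  static void createHdfsFile(FileSystem fs, Path p, long length,
      Boolean dropBehind) throws Exception {
    FSDataOutputStream fos = null;
    try {
      // Replication factor of 1 keeps all data on the single DataNode.
      fos = fs.create(p, (short) 1);
      if (dropBehind != null) {
        // setDropBehind is the public client API behind
        // dfs.client.cache.drop.behind.writes.
        fos.setDropBehind(dropBehind);
      }
      byte[] buf = new byte[8192];
      while (length > 0) {
        int amt = (length > buf.length) ? buf.length : (int) length;
        fos.write(buf, 0, amt);
        length -= amt;
      }
    } finally {
      if (fos != null) {
        fos.close();
      }
    }
  }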
Example #2
  /**
   * Test the scenario where the DataNode defaults to not dropping the cache,
   * but our client defaults are set.
   */
  @Test(timeout = 120000)
  public void testClientDefaults() throws Exception {
    // start a cluster
    LOG.info("testClientDefaults");
    tracker.clear();
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY, false);
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY, false);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS, true);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, true);
    MiniDFSCluster cluster = null;
    String TEST_PATH = "/test";
    int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create new file
      createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, null);
      // verify that we dropped everything from the cache during file creation.
      ExtendedBlock block =
          cluster
              .getNameNode()
              .getRpcServer()
              .getBlockLocations(TEST_PATH, 0, Long.MAX_VALUE)
              .get(0)
              .getBlock();
      String fadvisedFileName = cluster.getBlockFile(0, block).getName();
      Stats stats = tracker.getStats(fadvisedFileName);
      stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
      stats.clear();

      // read file
      readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, null);
      // verify that we dropped everything from the cache.
      stats = tracker.getStats(fadvisedFileName);
      Assert.assertNotNull(stats);
      stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
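A matching sketch of the readHdfsFile helper used here, under the same assumptions as the createHdfsFile sketch above; passing null for dropBehind leaves the stream on the client defaults that this test configures:

  // Hypothetical reconstruction of the readHdfsFile helper used above.
  static long readHdfsFile(FileSystem fs, Path p, long length,
      Boolean dropBehind) throws Exception {
    FSDataInputStream fis = null;
    long totalRead = 0;
    try {
      fis = fs.open(p);
      if (dropBehind != null) {
        // setDropBehind on the input stream is the read-side counterpart
        // of the write-side API.
        fis.setDropBehind(dropBehind);
      }
      byte[] buf = new byte[8192];
      while (length > 0) {
        int amt = (length > buf.length) ? buf.length : (int) length;
        int ret = fis.read(buf, 0, amt);
        if (ret == -1) {
          return totalRead; // end of file
        }
        totalRead += ret;
        length -= ret;
      }
      return totalRead;
    } finally {
      if (fis != null) {
        fis.close();
      }
    }
  }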
Example #3
  @Test(timeout = 120000)
  public void testNoFadviseAfterWriteThenRead() throws Exception {
    // start a cluster
    LOG.info("testNoFadviseAfterWriteThenRead");
    tracker.clear();
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    String TEST_PATH = "/test";
    int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create new file
      createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
      // verify that we did not drop anything from the cache during file creation.
      ExtendedBlock block =
          cluster
              .getNameNode()
              .getRpcServer()
              .getBlockLocations(TEST_PATH, 0, Long.MAX_VALUE)
              .get(0)
              .getBlock();
      String fadvisedFileName = cluster.getBlockFile(0, block).getName();
      Stats stats = tracker.getStats(fadvisedFileName);
      Assert.assertNull(stats);

      // read file
      readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, false);
      // verify that we still did not drop anything from the cache.
      stats = tracker.getStats(fadvisedFileName);
      Assert.assertNull(stats);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
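All three examples assert against a shared tracker that records posix_fadvise calls per block file. In Hadoop's actual test suite this is done by swapping a recording CacheManipulator into NativeIO.POSIX; the sketch below only illustrates the plausible shape of the Stats object the assertions rely on. Everything except the method names used above (assertDroppedInRange, assertNotDroppedInRange, clear) is an assumption:

  // Hypothetical sketch of the Stats fixture; the PAGE_SIZE granularity and
  // the boolean-per-page representation are assumptions. MAX_TEST_FILE_LEN is
  // the same test-class constant used by the examples above.
  static class Stats {
    private static final int PAGE_SIZE = 4096;
    private final String fileName;
    private final boolean[] dropped =
        new boolean[MAX_TEST_FILE_LEN / PAGE_SIZE];

    Stats(String fileName) {
      this.fileName = fileName;
    }

    // Called by the recording hook whenever the DataNode issues
    // posix_fadvise(POSIX_FADV_DONTNEED) against this block file.
    synchronized void recordDropped(long offset, long len) {
      for (long i = offset / PAGE_SIZE; i < (offset + len) / PAGE_SIZE; i++) {
        dropped[(int) i] = true;
      }
    }

    synchronized void assertDroppedInRange(int start, int end) {
      for (int i = start / PAGE_SIZE; i < end / PAGE_SIZE; i++) {
        if (!dropped[i]) {
          throw new RuntimeException("expected page " + i +
              " of " + fileName + " to have been dropped");
        }
      }
    }

    synchronized void assertNotDroppedInRange(int start, int end) {
      for (int i = start / PAGE_SIZE; i < end / PAGE_SIZE; i++) {
        if (dropped[i]) {
          throw new RuntimeException("page " + i +
              " of " + fileName + " was unexpectedly dropped");
        }
      }
    }

    synchronized void clear() {
      java.util.Arrays.fill(dropped, false);
    }
  }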