/** Test deleteOnExit */
  public void testDeleteOnExit() throws IOException {
    Configuration conf = new Configuration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    FileSystem localfs = FileSystem.getLocal(conf);

    try {

      // Creates files in HDFS and local file system.
      //
      Path file1 = new Path("filestatus.dat");
      Path file2 = new Path("filestatus2.dat");
      Path file3 = new Path("filestatus3.dat");
      FSDataOutputStream stm1 = createFile(fs, file1, 1);
      FSDataOutputStream stm2 = createFile(fs, file2, 1);
      FSDataOutputStream stm3 = createFile(localfs, file3, 1);
      System.out.println("DeleteOnExit: Created files.");

      // write to the files and close them all. Purposely, do not write to file2.
      writeFile(stm1);
      writeFile(stm3);
      stm1.close();
      stm2.close();
      stm3.close();

      // set delete on exit flag on files.
      fs.deleteOnExit(file1);
      fs.deleteOnExit(file2);
      localfs.deleteOnExit(file3);

      // close the file system. This should make the above files
      // disappear.
      fs.close();
      localfs.close();
      fs = null;
      localfs = null;

      // reopen the file systems and verify that the files no longer exist.
      fs = cluster.getFileSystem();
      localfs = FileSystem.getLocal(conf);

      assertTrue(file1 + " still exists inspite of deletOnExit set.", !fs.exists(file1));
      assertTrue(file2 + " still exists inspite of deletOnExit set.", !fs.exists(file2));
      assertTrue(file3 + " still exists inspite of deletOnExit set.", !localfs.exists(file3));
      System.out.println("DeleteOnExit successful.");

    } finally {
      IOUtils.closeStream(fs);
      IOUtils.closeStream(localfs);
      cluster.shutdown();
    }
  }
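
  /*
   * Several tests in this listing (testDeleteOnExit, testDFSClientDeath, runTest) call
   * createFile(...) and writeFile(...) helpers that are defined elsewhere in their test
   * classes. The following is a minimal sketch of what such helpers might look like; the
   * exact signatures, the ASSUMED_* constants, and the random-data strategy are
   * assumptions for illustration, not the original definitions.
   */
  static final int ASSUMED_BLOCK_SIZE = 8192; // hypothetical value
  static final int ASSUMED_FILE_SIZE = 16384; // hypothetical value

  // Create a file with the given replication factor.
  static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
      throws IOException {
    return fileSys.create(
        name,
        true,
        fileSys.getConf().getInt("io.file.buffer.size", 4096),
        (short) repl,
        ASSUMED_BLOCK_SIZE);
  }

  // Fill the stream with random data.
  static void writeFile(FSDataOutputStream stm) throws IOException {
    byte[] buffer = new byte[ASSUMED_FILE_SIZE];
    new Random().nextBytes(buffer);
    stm.write(buffer);
  }

  // Sized variant used by the large-block test: writes exactly 'size' bytes in chunks.
  static void writeFile(FSDataOutputStream stm, long size) throws IOException {
    byte[] buffer = new byte[4096];
    new Random().nextBytes(buffer);
    for (long written = 0; written < size; ) {
      int toWrite = (int) Math.min(buffer.length, size - written);
      stm.write(buffer, 0, toWrite);
      written += toWrite;
    }
  }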
  /** Test the update of neededReplications for the appended block */
  @Test(timeout = 60000)
  public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    DistributedFileSystem fileSystem = null;
    try {
      // create a file.
      fileSystem = cluster.getFileSystem();
      Path f = new Path("/testAppend");
      FSDataOutputStream create = fileSystem.create(f, (short) 2);
      create.write("/testAppend".getBytes());
      create.close();

      // Append to the file.
      FSDataOutputStream append = fileSystem.append(f);
      append.write("/testAppend".getBytes());
      append.close();

      // Start a new datanode
      cluster.startDataNodes(conf, 1, true, null, null);

      // Check for replications
      DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
    } finally {
      if (null != fileSystem) {
        fileSystem.close();
      }
      cluster.shutdown();
    }
  }
  // test closing file system before all file handles are closed.
  public void testFsClose() throws Exception {
    System.out.println("test file system close start");
    final int DATANODE_NUM = 3;

    Configuration conf = new Configuration();

    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
    DistributedFileSystem dfs = null;
    try {
      cluster.waitActive();
      dfs = (DistributedFileSystem) cluster.getFileSystem();

      // create a new file.
      final String f = DIR + "foofs";
      final Path fpath = new Path(f);
      FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
      out.write("something".getBytes());

      // close file system without closing file
      dfs.close();
    } finally {
      System.out.println("testFsClose successful");
      cluster.shutdown();
    }
  }
 @Test
 public void testNonDefaultFS() throws IOException {
   FileSystem fs = cluster.getFileSystem();
   Configuration conf = fs.getConf();
   conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
   TestTrash.trashNonDefaultFS(conf);
 }
  public void testLease() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    try {
      FileSystem fs = cluster.getFileSystem();
      assertTrue(fs.mkdirs(dir));

      Path a = new Path(dir, "a");
      Path b = new Path(dir, "b");

      DataOutputStream a_out = fs.create(a);
      a_out.writeBytes("something");

      assertTrue(hasLease(cluster, a));
      assertTrue(!hasLease(cluster, b));

      DataOutputStream b_out = fs.create(b);
      b_out.writeBytes("something");

      assertTrue(hasLease(cluster, a));
      assertTrue(hasLease(cluster, b));

      a_out.close();
      b_out.close();

      assertTrue(!hasLease(cluster, a));
      assertTrue(!hasLease(cluster, b));

      fs.delete(dir, true);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
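
  /*
   * hasLease(...) above is a helper from the surrounding test class. A sketch of one way
   * it can be implemented, assuming the NameNodeAdapter test utility and the
   * LeaseManager#getLeaseByPath accessor are available in this Hadoop version:
   */
  static boolean hasLease(MiniDFSCluster cluster, Path src) {
    // Ask the namenode's lease manager whether an open-file lease exists for the path.
    return NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
            .getLeaseByPath(src.toString())
        != null;
  }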
  /** Test that all open files are closed when client dies abnormally. */
  public void testDFSClientDeath() throws IOException {
    Configuration conf = new Configuration();
    System.out.println("Testing adbornal client death.");
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSClient dfsclient = dfs.dfs;
    try {

      // create a new file in home directory. Do not close it.
      //
      Path file1 = new Path("/clienttest.dat");
      FSDataOutputStream stm = createFile(fs, file1, 1);
      System.out.println("Created file clienttest.dat");

      // write to file
      writeFile(stm);

      // close the dfsclient before closing the output stream.
      // This should close all open files.
      dfsclient.close();

      // reopen file system and verify that file exists.
      assertTrue(
          file1 + " does not exist.",
          AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
    } finally {
      cluster.shutdown();
    }
  }
 /** check if DFS can handle corrupted blocks properly */
 @Test
 public void testFileCorruption() throws Exception {
   MiniDFSCluster cluster = null;
   DFSTestUtil util =
       new DFSTestUtil.Builder().setName("TestFileCorruption").setNumFiles(20).build();
   try {
     Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     FileSystem fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
     // Now deliberately remove the blocks
     File storageDir = cluster.getInstanceStorageDir(2, 0);
     String bpid = cluster.getNamesystem().getBlockPoolId();
     File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
     assertTrue("data directory does not exist", data_dir.exists());
     File[] blocks = data_dir.listFiles();
     assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
     for (int idx = 0; idx < blocks.length; idx++) {
       if (!blocks[idx].getName().startsWith("blk_")) {
         continue;
       }
       System.out.println("Deliberately removing file " + blocks[idx].getName());
       assertTrue("Cannot remove file.", blocks[idx].delete());
     }
     assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat"));
     util.cleanup(fs, "/srcdat");
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
 private void init(Configuration conf) throws IOException {
   if (cluster != null) {
     cluster.shutdown();
   }
   cluster = new MiniDFSCluster.Builder(conf).build();
   cluster.waitClusterUp();
   fileSystem = cluster.getFileSystem();
 }
 @Override
 protected void setUp() throws Exception {
   Configuration conf = new HdfsConfiguration();
   conf.set(
       CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, FileSystemContractBaseTest.TEST_UMASK);
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
   fs = cluster.getFileSystem();
   defaultWorkingDirectory = "/user/" + UserGroupInformation.getCurrentUser().getShortUserName();
 }
  /**
   * Test that an append with no locations fails with an exception showing insufficient locations.
   */
  @Test(timeout = 60000)
  public void testAppendInsufficientLocations() throws Exception {
    Configuration conf = new Configuration();

    // lower heartbeat interval for fast recognition of DN
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    DistributedFileSystem fileSystem = null;
    try {
      // create a file with replication 2
      fileSystem = cluster.getFileSystem();
      Path f = new Path("/testAppend");
      FSDataOutputStream create = fileSystem.create(f, (short) 2);
      create.write("/testAppend".getBytes());
      create.close();

      // Check for replications
      DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

      // Shut down all DNs that have the last block location for the file
      LocatedBlocks lbs =
          fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
      List<DataNode> dnsOfCluster = cluster.getDataNodes();
      DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
      for (DataNode dn : dnsOfCluster) {
        for (DatanodeInfo loc : dnsWithLocations) {
          if (dn.getDatanodeId().equals(loc)) {
            dn.shutdown();
            DFSTestUtil.waitForDatanodeDeath(dn);
          }
        }
      }

      // Wait till 0 replication is recognized
      DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

      // Append to the file. At this point there are 2 live DNs but neither of
      // them has the block.
      try {
        fileSystem.append(f);
        fail("Append should fail because insufficient locations");
      } catch (IOException e) {
        LOG.info("Expected exception: ", e);
      }
      FSDirectory dir = cluster.getNamesystem().getFSDirectory();
      final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
      assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
      if (null != fileSystem) {
        fileSystem.close();
      }
      cluster.shutdown();
    }
  }
  /**
   * Create a file of the given size filled with random data.
   *
   * @return File data.
   */
  public byte[] writeFile(Path filepath, int sizeKB) throws IOException {
    FileSystem fs = cluster.getFileSystem();

    // Write a file with the specified amount of data
    DataOutputStream os = fs.create(filepath);
    byte[] data = new byte[1024 * sizeKB];
    new Random().nextBytes(data);
    os.write(data);
    os.close();
    return data;
  }
  /**
   * Tests appending to files at random offsets.
   *
   * @throws IOException an exception might be thrown
   */
  public void testComplexAppend() throws IOException {
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
    conf.setInt("dfs.heartbeat.interval", 2);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
    conf.setInt("dfs.datanode.socket.write.timeout", 30000);
    conf.setInt("dfs.datanode.handler.count", 50);
    conf.setBoolean("dfs.support.append", true);

    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    try {
      // create a bunch of test files with random replication factors.
      // Insert them into a linked list.
      //
      for (int i = 0; i < numberOfFiles; i++) {
        short replication = (short) (AppendTestUtil.nextInt(numDatanodes) + 1);
        Path testFile = new Path("/" + i + ".dat");
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, testFile, replication);
        stm.close();
        testFiles.add(testFile);
      }

      // Create threads and make them run workload concurrently.
      workload = new Workload[numThreads];
      for (int i = 0; i < numThreads; i++) {
        workload[i] = new Workload(cluster, i);
        workload[i].start();
      }

      // wait for all threads to finish
      for (int i = 0; i < numThreads; i++) {
        try {
          System.out.println("Waiting for thread " + i + " to complete...");
          workload[i].join();
          System.out.println("Waiting for thread " + i + " complete.");
        } catch (InterruptedException e) {
          i--; // retry
        }
      }
    } finally {
      fs.close();
      cluster.shutdown();
    }

    // If any of the worker threads failed, mark this test as failed.
    //
    assertTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
  }
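
  /*
   * The Workload thread driven by testComplexAppend is defined elsewhere in the class.
   * Based on how the test uses it, a simplified sketch might look like the following;
   * the loop count, locking scheme, and append sizes are assumptions for illustration.
   */
  class Workload extends Thread {
    private final MiniDFSCluster cluster;
    private final int id;

    Workload(MiniDFSCluster cluster, int threadIndex) {
      this.cluster = cluster;
      this.id = threadIndex;
    }

    @Override
    public void run() {
      try {
        FileSystem fs = cluster.getFileSystem();
        // Repeatedly pick a random test file and append a random-sized chunk to it.
        for (int i = 0; i < 20; i++) {
          Path testFile = testFiles.get(AppendTestUtil.nextInt(testFiles.size()));
          synchronized (testFile) {
            FSDataOutputStream stm = fs.append(testFile);
            int len = AppendTestUtil.nextInt(AppendTestUtil.FILE_SIZE);
            stm.write(fileContents, 0, len);
            stm.close();
          }
        }
      } catch (Throwable t) {
        // Record the failure so that testComplexAppend can assert on globalStatus.
        System.out.println("Workload " + id + " failed: " + t);
        globalStatus = false;
      }
    }
  }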
  /**
   * Test a simple flush on a simple HDFS file.
   *
   * @throws IOException an exception might be thrown
   */
  @Test
  public void testSimpleFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {

      // create a new file.
      Path file1 = new Path("/simpleFlush.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file simpleFlush.dat");

      // write to file
      int mid = AppendTestUtil.FILE_SIZE / 2;
      stm.write(fileContents, 0, mid);
      stm.hflush();
      System.out.println("Wrote and Flushed first part of file.");

      // write the remainder of the file
      stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
      System.out.println("Written second part of file");
      stm.hflush();
      stm.hflush();
      System.out.println("Wrote and Flushed second part of file.");

      // verify that full blocks are sane
      checkFile(fs, file1, 1);

      stm.close();
      System.out.println("Closed file.");

      // verify that entire file is good
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");

    } catch (IOException e) {
      System.out.println("Exception :" + e);
      throw e;
    } catch (Throwable e) {
      System.out.println("Throwable :" + e);
      e.printStackTrace();
      throw new IOException("Throwable : " + e);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
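
  /*
   * checkFile(...) above is a verification helper from the surrounding class. A minimal
   * sketch, assuming it simply re-reads whatever portion of the file is visible so far
   * and compares it against the expected contents buffer:
   */
  private void checkFile(FileSystem fs, Path name, int repl) throws IOException {
    // Wait until the file reaches the expected replication, then re-read and verify.
    DFSTestUtil.waitReplication(fs, name, (short) repl);
    long len = fs.getFileStatus(name).getLen();
    byte[] actual = new byte[(int) len];
    FSDataInputStream in = fs.open(name);
    in.readFully(0, actual);
    in.close();
    for (int i = 0; i < len; i++) {
      assertEquals("byte " + i + " differs from what was written", fileContents[i], actual[i]);
    }
  }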
  @Before
  public void startUpCluster() throws IOException {
    if (System.getProperty("test.build.data") == null) { // to allow test to be
      // run outside of Ant
      System.setProperty("test.build.data", "build/test/data");
    }
    // disable block scanner
    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    dfs = (DistributedFileSystem) cluster.getFileSystem();
    buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
  }
  @Test
  public void testAppend() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final short REPLICATION = (short) 3;

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

    try {
      final DistributedFileSystem fs = cluster.getFileSystem();
      final Path f = new Path(DIR, "testAppend");

      {
        LOG.info("create an empty file " + f);
        fs.create(f, REPLICATION).close();
        final FileStatus status = fs.getFileStatus(f);
        Assert.assertEquals(REPLICATION, status.getReplication());
        Assert.assertEquals(0L, status.getLen());
      }

      final byte[] bytes = new byte[1000];
      {
        LOG.info("append " + bytes.length + " bytes to " + f);
        final FSDataOutputStream out = fs.append(f);
        out.write(bytes);
        out.close();

        final FileStatus status = fs.getFileStatus(f);
        Assert.assertEquals(REPLICATION, status.getReplication());
        Assert.assertEquals(bytes.length, status.getLen());
      }

      {
        LOG.info("append another " + bytes.length + " bytes to " + f);
        try {
          final FSDataOutputStream out = fs.append(f);
          out.write(bytes);
          out.close();

          Assert.fail();
        } catch (IOException ioe) {
          LOG.info("This exception is expected", ioe);
        }
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
  @BeforeClass
  public static void setUp() throws IOException {
    ((Log4JLogger) HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);

    final long seed = RAN.nextLong();
    System.out.println("seed=" + seed);
    RAN.setSeed(seed);

    config = new Configuration();
    config.set("slave.host.name", "localhost");

    cluster = new MiniDFSCluster(config, 2, true, null);
    hdfs = cluster.getFileSystem();
    final String hftpuri = "hftp://" + config.get("dfs.http.address");
    hftpFs = (HftpFileSystem) new Path(hftpuri).getFileSystem(config);
  }
 /** Test if the seek bug exists in FSDataInputStream in DFS. */
 @Test
 public void testSeekBugDFS() throws IOException {
   Configuration conf = new HdfsConfiguration();
   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   FileSystem fileSys = cluster.getFileSystem();
   try {
     Path file1 = new Path("seektest.dat");
     writeFile(fileSys, file1);
     seekReadFile(fileSys, file1);
     smallReadSeek(fileSys, file1);
     cleanupFile(fileSys, file1);
   } finally {
     fileSys.close();
     cluster.shutdown();
   }
 }
 /**
  * FileNotFoundException is expected when appending to a non-existent file.
  *
  * @throws FileNotFoundException as expected
  */
 @Test(expected = FileNotFoundException.class)
 public void testFileNotFound() throws IOException {
   Configuration conf = new HdfsConfiguration();
   if (simulatedStorage) {
     conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
   }
   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   FileSystem fs = cluster.getFileSystem();
   try {
     Path file1 = new Path("/nonexistingfile.dat");
     fs.append(file1);
   } finally {
     fs.close();
     cluster.shutdown();
   }
 }
 @Test(timeout = 100000)
 public void testIsClusterUpAfterShutdown() throws Throwable {
   Configuration conf = new HdfsConfiguration();
   File testDataCluster4 = new File(testDataPath, CLUSTER_4);
   String c4Path = testDataCluster4.getAbsolutePath();
   conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
   MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
   try {
     DistributedFileSystem dfs = (DistributedFileSystem) cluster4.getFileSystem();
     dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
     cluster4.shutdown();
   } finally {
     while (cluster4.isClusterUp()) {
       Thread.sleep(1000);
     }
   }
 }
  /**
   * This test creates three empty files and lets their leases expire, which triggers lease
   * release. The empty files should then be closed without causing a
   * ConcurrentModificationException.
   */
  public void testLeaseExpireEmptyFiles() throws Exception {
    final Thread.UncaughtExceptionHandler oldUEH = Thread.getDefaultUncaughtExceptionHandler();
    Thread.setDefaultUncaughtExceptionHandler(
        new Thread.UncaughtExceptionHandler() {
          public void uncaughtException(Thread t, Throwable e) {
            if (e instanceof ConcurrentModificationException) {
              FSNamesystem.LOG.error("t=" + t, e);
              isConcurrentModificationException = true;
            }
          }
        });

    System.out.println("testLeaseExpireEmptyFiles start");
    final long leasePeriod = 1000;
    final int DATANODE_NUM = 3;

    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt("dfs.heartbeat.interval", 1);

    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();

      // create three new files.
      TestFileCreation.createFile(dfs, new Path("/foo"), DATANODE_NUM);
      TestFileCreation.createFile(dfs, new Path("/foo2"), DATANODE_NUM);
      TestFileCreation.createFile(dfs, new Path("/foo3"), DATANODE_NUM);

      // set the soft and hard limit to be 1 second so that the
      // namenode triggers lease recovery
      cluster.setLeasePeriod(leasePeriod, leasePeriod);
      // wait for the lease to expire
      try {
        Thread.sleep(5 * leasePeriod);
      } catch (InterruptedException e) {
      }

      assertFalse(isConcurrentModificationException);
    } finally {
      Thread.setDefaultUncaughtExceptionHandler(oldUEH);
      cluster.shutdown();
    }
  }
  @Before
  public void initJunitModeTest() throws Exception {
    LOG.info("initJunitModeTest");

    conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); // 100K blocksize

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();

    mfs = cluster.getFileSystem();
    mfc = FileContext.getFileContext();

    Path rootdir = new Path(ROOT_DIR);
    mfs.mkdirs(rootdir);
  }
  // test closing a file after the cluster is shut down
  public void testFsCloseAfterClusterShutdown() throws IOException {
    System.out.println("test testFsCloseAfterClusterShutdown start");
    final int DATANODE_NUM = 3;

    Configuration conf = new Configuration();
    conf.setInt("dfs.replication.min", 3);
    conf.setBoolean("ipc.client.ping", false); // hdfs timeout is default 60 seconds
    conf.setInt("ipc.ping.interval", 10000); // hdfs timeout is now 10 second

    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
    DistributedFileSystem dfs = null;
    try {
      cluster.waitActive();
      dfs = (DistributedFileSystem) cluster.getFileSystem();

      // create a new file.
      final String f = DIR + "dhrubashutdown";
      final Path fpath = new Path(f);
      FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
      out.write("something_dhruba".getBytes());
      out.sync(); // ensure that block is allocated

      // shutdown last datanode in pipeline.
      cluster.stopDataNode(2);

      // close file. Since we have set the minReplication to 3 but have killed one
      // of the three datanodes, the close call will loop until the hdfsTimeout is
      // encountered.
      boolean hasException = false;
      try {
        out.close();
        System.out.println("testFsCloseAfterClusterShutdown: Error here");
      } catch (IOException e) {
        hasException = true;
      }
      assertTrue("Failed to close file after cluster shutdown", hasException);
    } finally {
      System.out.println("testFsCloseAfterClusterShutdown successful");
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
  /** Test that we can write to and read from large blocks */
  public void runTest(final long blockSize) throws IOException {

    // write a file that is slightly larger than 1 block
    final long fileSize = blockSize + 1L;

    Configuration conf = new Configuration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    try {

      // create a new file in test data directory
      Path file1 =
          new Path(System.getProperty("test.build.data") + "/" + Long.toString(blockSize) + ".dat");
      FSDataOutputStream stm = createFile(fs, file1, 1, blockSize);
      System.out.println(
          "File " + file1 + " created with file size " + fileSize + " blocksize " + blockSize);

      // verify that file exists in FS namespace
      assertTrue(file1 + " should be a file and not a dir", !fs.getFileStatus(file1).isDir());

      // write to file
      writeFile(stm, fileSize);
      System.out.println("File " + file1 + " written to.");

      // close file
      stm.close();
      System.out.println("File " + file1 + " closed.");

      // Make sure a client can read it
      checkFullFile(fs, file1, fileSize);

      // verify that file size has changed
      long len = fs.getFileStatus(file1).getLen();
      assertTrue(
          file1 + " should be of size " + fileSize + " but found to be of size " + len,
          len == fileSize);

    } finally {
      cluster.shutdown();
    }
  }
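
  /*
   * checkFullFile(fs, file1, fileSize) above is the local helper of the large-block
   * test, not the AppendTestUtil variant used elsewhere in this listing. A sketch that
   * only verifies the file is fully readable end to end and has the expected length
   * (the original may also verify content, which is assumed away here):
   */
  private static void checkFullFile(FileSystem fs, Path name, long fileSize)
      throws IOException {
    FSDataInputStream in = fs.open(name);
    byte[] buffer = new byte[64 * 1024];
    long totalRead = 0;
    int bytesRead;
    // Stream through the whole file so that every block is actually fetched.
    while ((bytesRead = in.read(buffer)) > 0) {
      totalRead += bytesRead;
    }
    in.close();
    assertEquals("read size does not match file size", fileSize, totalRead);
  }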
  /**
   * Test that file data can be flushed.
   *
   * @throws IOException an exception might be thrown
   */
  @Test
  public void testComplexFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {

      // create a new file.
      Path file1 = new Path("/complexFlush.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file complexFlush.dat");

      int start = 0;
      while (start + 29 < AppendTestUtil.FILE_SIZE) {
        stm.write(fileContents, start, 29);
        stm.hflush();
        start += 29;
      }
      stm.write(fileContents, start, AppendTestUtil.FILE_SIZE - start);

      // verify that full blocks are sane
      checkFile(fs, file1, 1);
      stm.close();

      // verify that entire file is good
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
    } catch (IOException e) {
      System.out.println("Exception :" + e);
      throw e;
    } catch (Throwable e) {
      System.out.println("Throwable :" + e);
      e.printStackTrace();
      throw new IOException("Throwable : " + e);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
  public void testAbandonBlock() throws IOException {
    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
    FileSystem fs = cluster.getFileSystem();

    String src = FILE_NAME_PREFIX + "foo";
    FSDataOutputStream fout = null;
    try {
      // start writing a file but do not close it
      fout = fs.create(new Path(src), true, 4096, (short) 1, 512L);
      for (int i = 0; i < 1024; i++) {
        fout.write(123);
      }
      fout.sync();

      // try reading the block by someone
      final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
      LocatedBlocks blocks = dfsclient.namenode.getBlockLocations(src, 0, 1);
      LocatedBlock b = blocks.get(0);
      try {
        dfsclient.namenode.abandonBlock(b.getBlock(), src, "someone");
        // the previous line should have thrown an exception
        fail("abandonBlock() did not throw the expected IOException");
      } catch (IOException ioe) {
        LOG.info("GREAT! " + StringUtils.stringifyException(ioe));
      }
    } finally {
      try {
        fout.close();
      } catch (Exception e) {
      }
      try {
        fs.close();
      } catch (Exception e) {
      }
      try {
        cluster.shutdown();
      } catch (Exception e) {
      }
    }
  }
  /** Make sure that the quota is decremented correctly when a block is abandoned */
  public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
    FileSystem fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    try {
      // Setting diskspace quota to 3MB
      dfs.setQuota(new Path("/"), FSConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);

      // Start writing a file with 2 replicas to ensure each datanode has one.
      // Block Size is 1MB.
      String src = FILE_NAME_PREFIX + "test_quota1";
      FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short) 2, 1024 * 1024);
      for (int i = 0; i < 1024; i++) {
        fout.writeByte(123);
      }

      // Shutdown one datanode, causing the block abandonment.
      cluster.getDataNodes().get(0).shutdown();

      // Close the file; a new block will be allocated with 2MB pending size.
      try {
        fout.close();
      } catch (QuotaExceededException e) {
        fail("Unexpected quota exception when closing fout");
      }
    } finally {
      try {
        fs.close();
      } catch (Exception e) {
      }
      try {
        cluster.shutdown();
      } catch (Exception e) {
      }
    }
  }
  @Test
  public void testBestEffort() throws Exception {
    final Configuration conf = new HdfsConfiguration();

    // always replace a datanode but do not throw exception
    ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

    try {
      final DistributedFileSystem fs = cluster.getFileSystem();
      final Path f = new Path(DIR, "testIgnoreReplaceFailure");

      final byte[] bytes = new byte[1000];
      {
        LOG.info("write " + bytes.length + " bytes to " + f);
        final FSDataOutputStream out = fs.create(f, REPLICATION);
        out.write(bytes);
        out.close();

        final FileStatus status = fs.getFileStatus(f);
        Assert.assertEquals(REPLICATION, status.getReplication());
        Assert.assertEquals(bytes.length, status.getLen());
      }

      {
        LOG.info("append another " + bytes.length + " bytes to " + f);
        final FSDataOutputStream out = fs.append(f);
        out.write(bytes);
        out.close();
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
  public void testBlocksScheduledCounter() throws IOException {

    MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    // open a file and write a few bytes
    FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
    for (int i = 0; i < 1024; i++) {
      out.write(i);
    }
    // flush to make sure a block is allocated.
    ((DFSOutputStream) (out.getWrappedStream())).sync();

    ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
    cluster.getNameNode().namesystem.DFSNodesStatus(dnList, dnList);
    DatanodeDescriptor dn = dnList.get(0);

    assertEquals(1, dn.getBlocksScheduled());

    // close the file and the counter should go to zero.
    out.close();
    assertEquals(0, dn.getBlocksScheduled());
  }
  /** Test creating two files at the same time. */
  public void testConcurrentFileCreation() throws IOException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);

    try {
      FileSystem fs = cluster.getFileSystem();

      Path[] p = {new Path("/foo"), new Path("/bar")};

      // write 2 files at the same time
      FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
      int i = 0;
      for (; i < 100; i++) {
        out[0].write(i);
        out[1].write(i);
      }
      out[0].close();
      for (; i < 200; i++) {
        out[1].write(i);
      }
      out[1].close();

      // verify
      FSDataInputStream[] in = {fs.open(p[0]), fs.open(p[1])};
      for (i = 0; i < 100; i++) {
        assertEquals(i, in[0].read());
      }
      for (i = 0; i < 200; i++) {
        assertEquals(i, in[1].read());
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
  /**
   * Test case that stops a writer after finalizing a block but before calling completeFile,
   * recovers a file from another writer, starts writing from that writer, and then has the old
   * lease holder call completeFile
   */
  @Test(timeout = 60000)
  public void testCompleteOtherLeaseHoldersFile() throws Throwable {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

    try {
      cluster.waitActive();
      NameNode preSpyNN = cluster.getNameNode();
      NameNode spyNN = spy(preSpyNN);

      // Delay completeFile
      DelayAnswer delayer = new DelayAnswer();
      doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), (Block) anyObject());

      DFSClient client = new DFSClient(null, spyNN, conf, null);
      file1 = new Path("/testCompleteOtherLease");
      final OutputStream stm = client.create("/testCompleteOtherLease", true);

      // write 1/2 block
      AppendTestUtil.write(stm, 0, 4096);
      final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
      Thread t =
          new Thread() {
            public void run() {
              try {
                stm.close();
              } catch (Throwable t) {
                err.set(t);
              }
            }
          };
      t.start();
      LOG.info("Waiting for close to get to latch...");
      delayer.waitForCall();

      // At this point, the block is finalized on the DNs, but the file
      // has not been completed in the NN.
      // Lose the leases
      LOG.info("Killing lease checker");
      client.leasechecker.interruptAndJoin();

      FileSystem fs1 = cluster.getFileSystem();
      FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());

      LOG.info("Recovering file");
      recoverFile(fs2);

      LOG.info("Opening file for append from new fs");
      FSDataOutputStream appenderStream = fs2.append(file1);

      LOG.info("Writing some data from new appender");
      AppendTestUtil.write(appenderStream, 0, 4096);

      LOG.info("Telling old close to proceed.");
      delayer.proceed();
      LOG.info("Waiting for close to finish.");
      t.join();
      LOG.info("Close finished.");

      // We expect that close will get a "Lease mismatch"
      // error.
      Throwable thrownByClose = err.get();
      assertNotNull(thrownByClose);
      assertTrue(thrownByClose instanceof IOException);
      if (!thrownByClose.getMessage().contains("Lease mismatch")) throw thrownByClose;

      // The appender should be able to close properly
      appenderStream.close();
    } finally {
      cluster.shutdown();
    }
  }
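
  /*
   * recoverFile(...) above forces lease recovery of file1 from the second client. A
   * sketch of one way to do this, assuming cluster.setLeasePeriod is available (it is
   * used the same way in testLeaseExpireEmptyFiles earlier in this listing); the retry
   * budget of one minute is an assumption:
   */
  private void recoverFile(final FileSystem fs) throws Exception {
    LOG.info("Recovering file lease");
    // Shrink the soft lease limit so the namenode hands the lease to the new writer.
    cluster.setLeasePeriod(1000, FSConstants.LEASE_HARDLIMIT_PERIOD);

    // Retry append until the namenode has recovered the old lease.
    boolean recovered = false;
    for (int tries = 60; !recovered && tries > 0; tries--) {
      try {
        fs.append(file1).close();
        recovered = true;
      } catch (IOException e) {
        LOG.info("Failed open for append, waiting on lease recovery", e);
        Thread.sleep(1000);
      }
    }
    assertTrue("lease was not recovered within the retry budget", recovered);
  }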