Example #1
  /** This test attempts to finalize the NameNode and DataNode. */
  public void testFinalize() throws Exception {
    UpgradeUtilities.initialize();

    for (int numDirs = 1; numDirs <= 2; numDirs++) {
      /* This test requires that the "current" directory not change after
       * the upgrade. Strictly speaking it would be OK for the contents to
       * change; for now we disable block verification so that they do not.
       */
      conf = new Configuration();
      conf.setInt("dfs.datanode.scan.period.hours", -1);
      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");

      log("Finalize with existing previous dir", numDirs);
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
      cluster = new MiniDFSCluster(conf, 1, StartupOption.REGULAR);
      cluster.finalizeCluster(conf);
      checkResult(nameNodeDirs, dataNodeDirs);

      log("Finalize without existing previous dir", numDirs);
      cluster.finalizeCluster(conf);
      checkResult(nameNodeDirs, dataNodeDirs);

      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    } // end numDir loop
  }
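The checkResult(nameNodeDirs, dataNodeDirs) helper called above is not part of this excerpt. Below is a minimal, hypothetical sketch, assuming finalize is only required to remove the "previous" directories on both node types; the real helper may verify more.

  /* Hypothetical sketch of the checkResult helper used by testFinalize:
   * after finalization, no "previous" directory should remain. */
  private void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
    for (String dir : nameNodeDirs) {
      assertFalse(new File(dir, "previous").isDirectory());
    }
    for (String dir : dataNodeDirs) {
      assertFalse(new File(dir, "previous").isDirectory());
    }
  }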
  /** Test the update of neededReplications for the appended block. */
  @Test(timeout = 60000)
  public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    DistributedFileSystem fileSystem = null;
    try {
      // create a file.
      fileSystem = cluster.getFileSystem();
      Path f = new Path("/testAppend");
      FSDataOutputStream create = fileSystem.create(f, (short) 2);
      create.write("/testAppend".getBytes());
      create.close();

      // Append to the file.
      FSDataOutputStream append = fileSystem.append(f);
      append.write("/testAppend".getBytes());
      append.close();

      // Start a new datanode
      cluster.startDataNodes(conf, 1, true, null, null);

      // Check for replications
      DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
    } finally {
      if (null != fileSystem) {
        fileSystem.close();
      }
      cluster.shutdown();
    }
  }
  /** Test that all open files are closed when client dies abnormally. */
  public void testDFSClientDeath() throws IOException {
    Configuration conf = new Configuration();
    System.out.println("Testing adbornal client death.");
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSClient dfsclient = dfs.dfs;
    try {

      // create a new file. Do not close it.
      //
      Path file1 = new Path("/clienttest.dat");
      FSDataOutputStream stm = createFile(fs, file1, 1);
      System.out.println("Created file clienttest.dat");

      // write to file
      writeFile(stm);

      // close the dfsclient before closing the output stream.
      // This should close all open files.
      dfsclient.close();

      // reopen file system and verify that file exists.
      assertTrue(
          file1 + " does not exist.",
          AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
    } finally {
      cluster.shutdown();
    }
  }
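The createFile and writeFile helpers referenced above (and again in testDeleteOnExit) are not shown in this excerpt. A hedged sketch follows, assuming a fixed-size random payload and a small block size; the constants are illustrative, not the original values.

  static final int fileSize = 16384;   // hypothetical payload size
  static final long blockSize = 8192;  // hypothetical block size

  // Hypothetical helper: create a file with the given replication factor.
  static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl) throws IOException {
    return fileSys.create(name, true,
        fileSys.getConf().getInt("io.file.buffer.size", 4096),
        (short) repl, blockSize);
  }

  // Hypothetical helper: fill the stream with fileSize random bytes.
  static void writeFile(FSDataOutputStream stm) throws IOException {
    byte[] buffer = new byte[fileSize];
    new java.util.Random().nextBytes(buffer);
    stm.write(buffer);
  }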
  // test closing file system before all file handles are closed.
  public void testFsClose() throws Exception {
    System.out.println("test file system close start");
    final int DATANODE_NUM = 3;

    Configuration conf = new Configuration();

    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
    DistributedFileSystem dfs = null;
    try {
      cluster.waitActive();
      dfs = (DistributedFileSystem) cluster.getFileSystem();

      // create a new file.
      final String f = DIR + "foofs";
      final Path fpath = new Path(f);
      FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
      out.write("something".getBytes());

      // close file system without closing file
      dfs.close();
    } finally {
      System.out.println("testFsClose successful");
      cluster.shutdown();
    }
  }
Example #5
  public void testLease() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    try {
      FileSystem fs = cluster.getFileSystem();
      assertTrue(fs.mkdirs(dir));

      Path a = new Path(dir, "a");
      Path b = new Path(dir, "b");

      DataOutputStream a_out = fs.create(a);
      a_out.writeBytes("something");

      assertTrue(hasLease(cluster, a));
      assertTrue(!hasLease(cluster, b));

      DataOutputStream b_out = fs.create(b);
      b_out.writeBytes("something");

      assertTrue(hasLease(cluster, a));
      assertTrue(hasLease(cluster, b));

      a_out.close();
      b_out.close();

      assertTrue(!hasLease(cluster, a));
      assertTrue(!hasLease(cluster, b));

      fs.delete(dir, true);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
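The hasLease predicate used by testLease is defined elsewhere. One plausible implementation, assuming the test can reach the NameNode's LeaseManager through NameNodeAdapter (an assumption of this sketch), is:

  // Hypothetical sketch: a path has a lease iff the NameNode's LeaseManager tracks it.
  static boolean hasLease(MiniDFSCluster cluster, Path src) {
    return NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
        .getLeaseByPath(src.toString()) != null;
  }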
Example #6
 /*
  * This test attempts to upgrade the DataNode from federation
  * layout version -35 to a newer version.
  * It runs against a non-federated cluster with a single NameNode.
  */
 public void testNonFederationClusterUpgradeAfterFederationVersion() throws Exception {
   File[] baseDirs;
   UpgradeUtilities.initialize();
   for (int numDirs = 1; numDirs <= 2; numDirs++) {
     conf = new Configuration();
     conf.setInt("dfs.datanode.scan.period.hours", -1);
     conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
     String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
     String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
     log("DataNode upgrade with federation layout version in current", numDirs);
     UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
     try {
       cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createVersionFile(
           DATA_NODE,
           baseDirs,
           new StorageInfo(
               FSConstants.FEDERATION_VERSION,
               UpgradeUtilities.getCurrentNamespaceID(cluster),
               UpgradeUtilities.getCurrentFsscTime(cluster)),
           cluster.getNameNode().getNamespaceID());
       cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
       checkResult(DATA_NODE, dataNodeDirs, 0, false);
     } finally {
       if (cluster != null) cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
     }
   }
 }
 private void init(Configuration conf) throws IOException {
   if (cluster != null) {
     cluster.shutdown();
   }
   cluster = new MiniDFSCluster.Builder(conf).build();
   cluster.waitClusterUp();
   fileSystem = cluster.getFileSystem();
 }
  /**
   * Test that an append with no locations fails with an exception showing insufficient locations.
   */
  @Test(timeout = 60000)
  public void testAppendInsufficientLocations() throws Exception {
    Configuration conf = new Configuration();

    // lower heartbeat interval for fast recognition of DN death
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    DistributedFileSystem fileSystem = null;
    try {
      // create a file with replication 2
      fileSystem = cluster.getFileSystem();
      Path f = new Path("/testAppend");
      FSDataOutputStream create = fileSystem.create(f, (short) 2);
      create.write("/testAppend".getBytes());
      create.close();

      // Check for replications
      DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

      // Shut down all DNs that have the last block location for the file
      LocatedBlocks lbs =
          fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
      List<DataNode> dnsOfCluster = cluster.getDataNodes();
      DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
      for (DataNode dn : dnsOfCluster) {
        for (DatanodeInfo loc : dnsWithLocations) {
          if (dn.getDatanodeId().equals(loc)) {
            dn.shutdown();
            DFSTestUtil.waitForDatanodeDeath(dn);
          }
        }
      }

      // Wait till 0 replication is recognized
      DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

      // Append to the file. At this point there are still live DNs in the
      // cluster, but none of them has the block.
      try {
        fileSystem.append(f);
        fail("Append should fail because insufficient locations");
      } catch (IOException e) {
        LOG.info("Expected exception: ", e);
      }
      FSDirectory dir = cluster.getNamesystem().getFSDirectory();
      final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
      assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
      if (null != fileSystem) {
        fileSystem.close();
      }
      cluster.shutdown();
    }
  }
Example #9
 /**
  * Attempts to start a DataNode with the given operation. Starting the DataNode should throw an
  * exception.
  */
 void startDataNodeShouldFail(StartupOption operation) {
   try {
     cluster.startDataNodes(conf, 1, false, operation, null); // should fail
     throw new AssertionError("DataNode should have failed to start");
   } catch (Exception expected) {
     // expected
     assertFalse(cluster.isDataNodeUp());
   }
 }
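A hedged usage sketch: callers typically set up an intentionally invalid storage layout and then expect the DataNode start to be rejected. The startup option, directory setup, and the dataNodeDirs variable below are illustrative, not taken from the original test.

     // Hypothetical usage: rolling back a DataNode that has no "previous"
     // state should be rejected.
     UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
     startDataNodeShouldFail(StartupOption.ROLLBACK);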
 /*
  * Returns the index of the first datanode which has a copy
  * of the given block, or -1 if no such datanode exists.
  */
 public static int firstDnWithBlock(MiniDFSCluster cluster, ExtendedBlock b) throws IOException {
   int numDatanodes = cluster.getDataNodes().size();
   for (int i = 0; i < numDatanodes; i++) {
     String blockContent = cluster.readBlockOnDataNode(i, b);
     if (blockContent != null) {
       return i;
     }
   }
   return -1;
 }
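A hedged usage sketch: a test might locate the DataNode holding a file's first block and then stop it. The path and the DFSTestUtil.getFirstBlock call are assumptions of this sketch.

    // Hypothetical usage: find which DataNode holds the first block of a file,
    // then shut that DataNode down.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/testfile.dat"));
    int dnIndex = firstDnWithBlock(cluster, block);
    assertTrue("expected at least one replica", dnIndex != -1);
    cluster.getDataNodes().get(dnIndex).shutdown();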
  /** Test deleteOnExit */
  public void testDeleteOnExit() throws IOException {
    Configuration conf = new Configuration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    FileSystem localfs = FileSystem.getLocal(conf);

    try {

      // Creates files in HDFS and local file system.
      //
      Path file1 = new Path("filestatus.dat");
      Path file2 = new Path("filestatus2.dat");
      Path file3 = new Path("filestatus3.dat");
      FSDataOutputStream stm1 = createFile(fs, file1, 1);
      FSDataOutputStream stm2 = createFile(fs, file2, 1);
      FSDataOutputStream stm3 = createFile(localfs, file3, 1);
      System.out.println("DeleteOnExit: Created files.");

      // write to the files and close the streams. Purposely, do not write to file2.
      writeFile(stm1);
      writeFile(stm3);
      stm1.close();
      stm2.close();
      stm3.close();

      // set delete on exit flag on files.
      fs.deleteOnExit(file1);
      fs.deleteOnExit(file2);
      localfs.deleteOnExit(file3);

      // close the file system. This should make the above files
      // disappear.
      fs.close();
      localfs.close();
      fs = null;
      localfs = null;

      // reopen the file systems and verify that the files no longer exist.
      fs = cluster.getFileSystem();
      localfs = FileSystem.getLocal(conf);

      assertTrue(file1 + " still exists inspite of deletOnExit set.", !fs.exists(file1));
      assertTrue(file2 + " still exists inspite of deletOnExit set.", !fs.exists(file2));
      assertTrue(file3 + " still exists inspite of deletOnExit set.", !localfs.exists(file3));
      System.out.println("DeleteOnExit successful.");

    } finally {
      IOUtils.closeStream(fs);
      IOUtils.closeStream(localfs);
      cluster.shutdown();
    }
  }
Example #12
  /**
   * Test that appends to files at random offsets.
   *
   * @throws IOException an exception might be thrown
   */
  public void testComplexAppend() throws IOException {
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
    conf.setInt("dfs.heartbeat.interval", 2);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
    conf.setInt("dfs.datanode.socket.write.timeout", 30000);
    conf.setInt("dfs.datanode.handler.count", 50);
    conf.setBoolean("dfs.support.append", true);

    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    try {
      // create a bunch of test files with random replication factors.
      // Insert them into a linked list.
      //
      for (int i = 0; i < numberOfFiles; i++) {
        short replication = (short) (AppendTestUtil.nextInt(numDatanodes) + 1);
        Path testFile = new Path("/" + i + ".dat");
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, testFile, replication);
        stm.close();
        testFiles.add(testFile);
      }

      // Create threads and make them run workload concurrently.
      workload = new Workload[numThreads];
      for (int i = 0; i < numThreads; i++) {
        workload[i] = new Workload(cluster, i);
        workload[i].start();
      }

      // wait for all worker threads to finish
      for (int i = 0; i < numThreads; i++) {
        try {
          System.out.println("Waiting for thread " + i + " to complete...");
          workload[i].join();
          System.out.println("Waiting for thread " + i + " complete.");
        } catch (InterruptedException e) {
          i--; // retry
        }
      }
    } finally {
      fs.close();
      cluster.shutdown();
    }

    // If any of the worker threads failed, mark this test as failed.
    //
    assertTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
  }
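The Workload thread class that drives testComplexAppend is not included in this excerpt. The sketch below is hypothetical: it assumes each thread appends only to the file matching its own id (to avoid lease conflicts) and flags any failure through globalStatus; the real workload may be considerably richer.

  // Hypothetical sketch of a Workload thread used by testComplexAppend.
  class Workload extends Thread {
    private final MiniDFSCluster cluster;
    private final int id;

    Workload(MiniDFSCluster cluster, int id) {
      this.cluster = cluster;
      this.id = id;
    }

    @Override
    public void run() {
      try {
        FileSystem fs = cluster.getFileSystem();
        // Each thread appends to its own file to avoid lease conflicts.
        Path testFile = testFiles.get(id % testFiles.size());
        for (int i = 0; i < 20; i++) {
          FSDataOutputStream stm = fs.append(testFile);
          int len = AppendTestUtil.nextInt(AppendTestUtil.FILE_SIZE);
          stm.write(fileContents, 0, len);
          stm.close();
        }
      } catch (Throwable t) {
        System.out.println("Workload " + id + " failed: " + t);
        globalStatus = false; // any worker failure fails the whole test
      }
    }
  }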
  /**
   * TC11: Racing rename
   *
   * @throws IOException an exception might be thrown
   */
  public void testTC11() throws Exception {
    final Path p = new Path("/TC11/foo");
    System.out.println("p=" + p);

    // a. Create file and write one block of data. Close file.
    final int len1 = (int) BLOCK_SIZE;
    {
      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
      AppendTestUtil.write(out, 0, len1);
      out.close();
    }

    // b. Reopen file in "append" mode. Append half block of data.
    FSDataOutputStream out = fs.append(p);
    final int len2 = (int) BLOCK_SIZE / 2;
    AppendTestUtil.write(out, len1, len2);
    out.hflush();

    // c. Rename file to file.new.
    final Path pnew = new Path(p + ".new");
    assertTrue(fs.rename(p, pnew));

    // d. Close file handle that was opened in (b).
    try {
      out.close();
      fail("close() should throw an exception");
    } catch (Exception e) {
      AppendTestUtil.LOG.info("GOOD!", e);
    }

    // wait for the lease recovery
    cluster.setLeasePeriod(1000, 1000);
    AppendTestUtil.sleep(5000);

    // check block sizes
    final long len = fs.getFileStatus(pnew).getLen();
    final LocatedBlocks locatedblocks =
        fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
    final int numblock = locatedblocks.locatedBlockCount();
    for (int i = 0; i < numblock; i++) {
      final LocatedBlock lb = locatedblocks.get(i);
      final Block blk = lb.getBlock();
      final long size = lb.getBlockSize();
      if (i < numblock - 1) {
        assertEquals(BLOCK_SIZE, size);
      }
      for (DatanodeInfo datanodeinfo : lb.getLocations()) {
        final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
        final Block metainfo = dn.data.getStoredBlock(blk.getBlockId());
        assertEquals(size, metainfo.getNumBytes());
      }
    }
  }
Example #14
  @Before
  public void startUpCluster() throws IOException {
    // Allow the test to be run outside of Ant.
    if (System.getProperty("test.build.data") == null) {
      System.setProperty("test.build.data", "build/test/data");
    }
    // disable block scanner
    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    dfs = (DistributedFileSystem) cluster.getFileSystem();
    buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
  }
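A matching @After method presumably releases these resources; a minimal sketch:

  // Hypothetical counterpart to startUpCluster: close the client and stop the cluster.
  @After
  public void shutDownCluster() throws IOException {
    if (dfs != null) {
      dfs.close();
      dfs = null;
    }
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }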
Example #15
 /**
  * Verify that the current and previous directories exist. Verify that previous hasn't been
  * modified by comparing the checksums of all its contained files with their original checksums.
  * It is assumed that the server has recovered and upgraded. nsLevelUpgrade specifies whether the
  * upgrade happened at the top level or at the namespace level: if nsLevelUpgrade=true, we check
  * basedir/current/NS-id/previous; if false, we check basedir/previous.
  */
 void checkResult(NodeType nodeType, String[] baseDirs, int nnIndex, boolean simulatedPrevious)
     throws IOException {
   switch (nodeType) {
     case NAME_NODE:
       for (int i = 0; i < baseDirs.length; i++) {
         assertTrue(new File(baseDirs[i], "current").isDirectory());
         assertTrue(new File(baseDirs[i], "current/VERSION").isFile());
         assertTrue(new File(baseDirs[i], "current/edits").isFile());
         assertTrue(new File(baseDirs[i], "current/fsimage").isFile());
         assertTrue(new File(baseDirs[i], "current/fstime").isFile());
       }
       break;
     case DATA_NODE:
       for (int i = 0; i < baseDirs.length; i++) {
         assertEquals(
             UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
             UpgradeUtilities.checksumMasterContents(nodeType));
         File nsBaseDir =
             NameSpaceSliceStorage.getNsRoot(
                 cluster.getNameNode(nnIndex).getNamespaceID(), new File(baseDirs[i], "current"));
         assertEquals(
             UpgradeUtilities.checksumContents(
                 nodeType, new File(nsBaseDir, MiniDFSCluster.FINALIZED_DIR_NAME)),
             UpgradeUtilities.checksumDatanodeNSStorageContents(nnIndex));
       }
       break;
   }
   for (int i = 0; i < baseDirs.length; i++) {
     switch (nodeType) {
       case NAME_NODE:
         assertTrue(new File(baseDirs[i], "previous").isDirectory());
         assertEquals(
             UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "previous")),
             UpgradeUtilities.checksumMasterContents(nodeType));
         break;
       case DATA_NODE:
         File nsBaseDir = null;
         nsBaseDir =
             NameSpaceSliceStorage.getNsRoot(
                 cluster.getNameNode(nnIndex).getNamespaceID(), new File(baseDirs[i], "current"));
          // The top-level previous directory should not exist (unless previous was simulated).
         assertFalse(new File(baseDirs[i], "previous").isDirectory() && !simulatedPrevious);
         assertTrue(new File(nsBaseDir, "previous").isDirectory());
         assertEquals(
             UpgradeUtilities.checksumContents(
                 nodeType, new File(nsBaseDir, "previous/finalized")),
             UpgradeUtilities.checksumDatanodeNSStorageContents(nnIndex));
     }
   }
 }
 /*
  * Check whether all replicas of the given block of the given file are marked corrupt.
  */
 public static boolean allBlockReplicasCorrupt(MiniDFSCluster cluster, Path file, int blockNo)
     throws IOException {
   DFSClient client =
       new DFSClient(
           new InetSocketAddress("localhost", cluster.getNameNodePort()),
           cluster.getConfiguration(0));
   LocatedBlocks blocks;
   try {
     blocks = client.getNamenode().getBlockLocations(file.toString(), 0, Long.MAX_VALUE);
   } finally {
     client.close();
   }
   return blocks.get(blockNo).isCorrupt();
 }
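A hedged usage sketch: after deliberately corrupting on-disk replicas, a test could poll this helper until the NameNode marks the block corrupt. The path and the timeout are illustrative, and the snippet assumes it runs inside a test method declared to throw Exception.

    // Hypothetical usage: wait until block 0 of the file is reported corrupt.
    Path file = new Path("/corrupt-test.dat");
    long deadline = System.currentTimeMillis() + 30000;
    while (!allBlockReplicasCorrupt(cluster, file, 0)) {
      assertTrue("block was never marked corrupt", System.currentTimeMillis() < deadline);
      Thread.sleep(1000);
    }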
 /**
  * Verify that the cluster still comes up without system properties, provided the base
  * directory is set in the configuration.
  *
  * @throws Throwable on a failure
  */
 @Test
 public void testClusterWithoutSystemProperties() throws Throwable {
   System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
   Configuration conf = new HdfsConfiguration();
   File testDataCluster1 = new File(testDataPath, CLUSTER_1);
   String c1Path = testDataCluster1.getAbsolutePath();
   conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   try {
     Assert.assertEquals(c1Path + "/data", cluster.getDataDirectory());
   } finally {
     cluster.shutdown();
   }
 }
  /**
   * Test a simple flush on a simple HDFS file.
   *
   * @throws IOException an exception might be thrown
   */
  @Test
  public void testSimpleFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {

      // create a new file.
      Path file1 = new Path("/simpleFlush.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file simpleFlush.dat");

      // write to file
      int mid = AppendTestUtil.FILE_SIZE / 2;
      stm.write(fileContents, 0, mid);
      stm.hflush();
      System.out.println("Wrote and Flushed first part of file.");

      // write the remainder of the file
      stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
      System.out.println("Written second part of file");
      stm.hflush();
      stm.hflush();
      System.out.println("Wrote and Flushed second part of file.");

      // verify that full blocks are sane
      checkFile(fs, file1, 1);

      stm.close();
      System.out.println("Closed file.");

      // verify that entire file is good
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");

    } catch (IOException e) {
      System.out.println("Exception :" + e);
      throw e;
    } catch (Throwable e) {
      System.out.println("Throwable :" + e);
      e.printStackTrace();
      throw new IOException("Throwable : " + e);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
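The checkFile helper invoked above is not shown. A hedged sketch follows, assuming it only needs to re-read the length currently reported by getFileStatus and compare those bytes against the expected prefix of fileContents.

  // Hypothetical sketch of checkFile: re-read the visible part of the file and
  // verify it matches the expected prefix of fileContents.
  private void checkFile(FileSystem fs, Path name, int repl) throws IOException {
    int len = (int) fs.getFileStatus(name).getLen();
    byte[] actual = new byte[len];
    FSDataInputStream in = fs.open(name);
    try {
      in.readFully(0, actual);
    } finally {
      in.close();
    }
    for (int i = 0; i < len; i++) {
      assertEquals("byte " + i + " differs", fileContents[i], actual[i]);
    }
  }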
  @Test
  public void testAppend() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final short REPLICATION = (short) 3;

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

    try {
      final DistributedFileSystem fs = cluster.getFileSystem();
      final Path f = new Path(DIR, "testAppend");

      {
        LOG.info("create an empty file " + f);
        fs.create(f, REPLICATION).close();
        final FileStatus status = fs.getFileStatus(f);
        Assert.assertEquals(REPLICATION, status.getReplication());
        Assert.assertEquals(0L, status.getLen());
      }

      final byte[] bytes = new byte[1000];
      {
        LOG.info("append " + bytes.length + " bytes to " + f);
        final FSDataOutputStream out = fs.append(f);
        out.write(bytes);
        out.close();

        final FileStatus status = fs.getFileStatus(f);
        Assert.assertEquals(REPLICATION, status.getReplication());
        Assert.assertEquals(bytes.length, status.getLen());
      }

      {
        LOG.info("append another " + bytes.length + " bytes to " + f);
        try {
          final FSDataOutputStream out = fs.append(f);
          out.write(bytes);
          out.close();

          Assert.fail();
        } catch (IOException ioe) {
          LOG.info("This exception is expected", ioe);
        }
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Example #20
 /** Test if the seek bug exists in FSDataInputStream in DFS. */
 @Test
 public void testSeekBugDFS() throws IOException {
   Configuration conf = new HdfsConfiguration();
   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   FileSystem fileSys = cluster.getFileSystem();
   try {
     Path file1 = new Path("seektest.dat");
     writeFile(fileSys, file1);
     seekReadFile(fileSys, file1);
     smallReadSeek(fileSys, file1);
     cleanupFile(fileSys, file1);
   } finally {
     fileSys.close();
     cluster.shutdown();
   }
 }
 /**
  * FileNotFoundException is expected when appending to a non-existent file.
  *
  * @throws FileNotFoundException as the expected result
  */
 @Test(expected = FileNotFoundException.class)
 public void testFileNotFound() throws IOException {
   Configuration conf = new HdfsConfiguration();
   if (simulatedStorage) {
     conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
   }
   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   FileSystem fs = cluster.getFileSystem();
   try {
     Path file1 = new Path("/nonexistingfile.dat");
     fs.append(file1);
   } finally {
     fs.close();
     cluster.shutdown();
   }
 }
  @Override
  protected void tearDown() throws Exception {
    cluster.shutdown();
    cluster = null;

    super.tearDown();
  }
Example #23
  /*
   * Recover the file lease.
   * Try to open the file in append mode. Doing this, we get hold of the file
   * that the crashed writer was writing to. Once we have it, close it. This
   * will allow a subsequent reader to see everything up to the last sync.
   * NOTE: This is the same algorithm that HBase uses for file recovery.
   * @param fs
   * @throws Exception
   */
  private void recoverFile(final FileSystem fs) throws Exception {
    LOG.info("Recovering File Lease");

    // set the soft limit to be 1 second so that the
    // namenode triggers lease recovery upon append request
    cluster.setLeasePeriod(1000, FSConstants.LEASE_HARDLIMIT_PERIOD);

    // Trying recovery
    int tries = 60;
    boolean recovered = false;
    FSDataOutputStream out = null;
    while (!recovered && tries-- > 0) {
      try {
        out = fs.append(file1);
        LOG.info("Successfully opened for appends");
        recovered = true;
      } catch (IOException e) {
        LOG.info("Failed open for append, waiting on lease recovery");
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ex) {
          // ignore it and try again
        }
      }
    }
    if (out != null) {
      out.close();
    }
    if (!recovered) {
      fail("Recovery should take < 1 min");
    }
    LOG.info("Past out lease recovery");
  }
Example #24
 @Test
 public void testNonDefaultFS() throws IOException {
   FileSystem fs = cluster.getFileSystem();
   Configuration conf = fs.getConf();
   conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
   TestTrash.trashNonDefaultFS(conf);
 }
 @After
 public void shutdown() {
   if (cluster != null) {
     cluster.shutdown();
     cluster = null;
   }
 }
  @Test
  public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0");
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();

      long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();

      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn("fake-storage-id").when(mockDnReg).getStorageID();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();

      // Should succeed when software versions are the same and CTimes are the
      // same.
      doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
      rpcServer.registerDatanode(mockDnReg);

      // Should succeed when software versions are the same and CTimes are
      // different.
      doReturn(nnCTime + 1).when(mockStorageInfo).getCTime();
      rpcServer.registerDatanode(mockDnReg);

      // Should fail when software version of DN is different from NN and CTimes
      // are different.
      doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion();
      try {
        rpcServer.registerDatanode(mockDnReg);
        fail(
            "Should not have been able to register DN with different software"
                + " versions and CTimes");
      } catch (IncorrectVersionException ive) {
        GenericTestUtils.assertExceptionContains("does not match CTime of NN", ive);
        LOG.info("Got expected exception", ive);
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
 void waitLeaseRecovery(MiniDFSCluster cluster) {
   cluster.setLeasePeriod(LEASE_PERIOD, LEASE_PERIOD);
   // wait for the lease to expire
   try {
     Thread.sleep(2 * 3000); // 2 heartbeat intervals
   } catch (InterruptedException e) {
   }
 }
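A hedged usage sketch: after a writer dies without closing its stream, a test can shrink the lease period via waitLeaseRecovery and then expect the synced data to be readable. The path, expected length, and the AppendTestUtil.check call are assumptions of this sketch.

    // Hypothetical usage: force lease expiry, then verify the synced data is visible.
    waitLeaseRecovery(cluster);
    long expectedLength = 1024; // bytes known to have been hflush()ed before the crash
    AppendTestUtil.check(cluster.getFileSystem(), new Path("/crashed-writer.dat"), expectedLength);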
  @Before
  public void initJunitModeTest() throws Exception {
    LOG.info("initJunitModeTest");

    conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); // 100K block size

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();

    mfs = cluster.getFileSystem();
    mfc = FileContext.getFileContext();

    Path rootdir = new Path(ROOT_DIR);
    mfs.mkdirs(rootdir);
  }
 @Override
 protected void tearDown() throws Exception {
   super.tearDown();
   if (cluster != null) {
     cluster.shutdown();
     cluster = null;
   }
 }
 @Test(timeout = 100000)
 public void testIsClusterUpAfterShutdown() throws Throwable {
   Configuration conf = new HdfsConfiguration();
   File testDataCluster4 = new File(testDataPath, CLUSTER_4);
   String c4Path = testDataCluster4.getAbsolutePath();
   conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
   MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
   try {
     DistributedFileSystem dfs = (DistributedFileSystem) cluster4.getFileSystem();
     dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
     cluster4.shutdown();
   } finally {
     while (cluster4.isClusterUp()) {
       Thread.sleep(1000);
     }
   }
 }