@After
public void shutdown() {
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
}
/** Check if DFS can handle corrupted blocks properly. */
@Test
public void testFileCorruption() throws Exception {
  MiniDFSCluster cluster = null;
  DFSTestUtil util =
      new DFSTestUtil.Builder().setName("TestFileCorruption").setNumFiles(20).build();
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");

    // Now deliberately remove the blocks.
    File storageDir = cluster.getInstanceStorageDir(2, 0);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    assertTrue("data directory does not exist", data_dir.exists());
    File[] blocks = data_dir.listFiles();
    assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
    for (int idx = 0; idx < blocks.length; idx++) {
      if (!blocks[idx].getName().startsWith("blk_")) {
        continue;
      }
      System.out.println("Deliberately removing file " + blocks[idx].getName());
      assertTrue("Cannot remove file.", blocks[idx].delete());
    }
    assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat"));
    util.cleanup(fs, "/srcdat");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
@Override
protected void tearDown() throws Exception {
  // Guard against an NPE when setUp failed before the cluster was created.
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
  super.tearDown();
}
/** Test that all open files are closed when the client dies abnormally. */
public void testDFSClientDeath() throws IOException {
  Configuration conf = new Configuration();
  System.out.println("Testing abnormal client death.");
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fs = cluster.getFileSystem();
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  DFSClient dfsclient = dfs.dfs;
  try {
    // Create a new file in the home directory. Do not close it.
    Path file1 = new Path("/clienttest.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);
    System.out.println("Created file clienttest.dat");

    // write to the file
    writeFile(stm);

    // Close the dfsclient before closing the output stream.
    // This should close all open files.
    dfsclient.close();

    // Reopen the file system and verify that the file exists.
    assertTrue(
        file1 + " does not exist.",
        AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
  } finally {
    cluster.shutdown();
  }
}
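/*
 * The createFile and writeFile helpers used above are not shown in this
 * section. The following is a minimal sketch of what they plausibly look
 * like, modeled on common HDFS test utilities; the buffer size, block size,
 * and FILE_SIZE_ASSUMED constant below are assumptions, not the suite's
 * actual values.
 */
static final int FILE_SIZE_ASSUMED = 8192; // hypothetical constant

// Sketch: create a file with the given replication and leave it open.
static FSDataOutputStream createFile(FileSystem fs, Path name, int repl) throws IOException {
  return fs.create(
      name,
      true,
      fs.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl,
      64 * 1024 * 1024L); // assumed block size
}

// Sketch: fill the open stream with pseudo-random data.
static void writeFile(FSDataOutputStream stm) throws IOException {
  byte[] buffer = new byte[FILE_SIZE_ASSUMED];
  new java.util.Random().nextBytes(buffer);
  stm.write(buffer);
}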
/** This test attempts to finalize the NameNode and DataNode. */
public void testFinalize() throws Exception {
  UpgradeUtilities.initialize();
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    /* This test requires that the "current" directory not change after
     * the upgrade. Actually it is ok for those contents to change.
     * For now, disable block verification so that the contents are
     * not changed. */
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");

    log("Finalize with existing previous dir", numDirs);
    UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
    UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
    UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
    UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
    cluster = new MiniDFSCluster(conf, 1, StartupOption.REGULAR);
    cluster.finalizeCluster(conf);
    checkResult(nameNodeDirs, dataNodeDirs);

    log("Finalize without existing previous dir", numDirs);
    cluster.finalizeCluster(conf);
    checkResult(nameNodeDirs, dataNodeDirs);

    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
  } // end numDirs loop
}
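/*
 * The log helper used by the upgrade/finalize tests is not shown in this
 * section. A minimal sketch, assuming it simply banner-prints the test case
 * label and the number of storage directories under test:
 */
static void log(String label, int numDirs) {
  System.out.println("============================================================");
  System.out.println(label + " (numDirs = " + numDirs + ")");
  System.out.println("============================================================");
}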
public void testLease() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  try {
    FileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdirs(dir));
    Path a = new Path(dir, "a");
    Path b = new Path(dir, "b");

    DataOutputStream a_out = fs.create(a);
    a_out.writeBytes("something");
    assertTrue(hasLease(cluster, a));
    assertTrue(!hasLease(cluster, b));

    DataOutputStream b_out = fs.create(b);
    b_out.writeBytes("something");
    assertTrue(hasLease(cluster, a));
    assertTrue(hasLease(cluster, b));

    a_out.close();
    b_out.close();
    assertTrue(!hasLease(cluster, a));
    assertTrue(!hasLease(cluster, b));

    fs.delete(dir, true);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
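/*
 * hasLease is not defined in this section. A minimal sketch of one plausible
 * implementation, assuming the namenode's LeaseManager is reachable from the
 * test (the exact accessor varies between Hadoop versions):
 */
static boolean hasLease(MiniDFSCluster cluster, Path src) {
  return cluster.getNamesystem().leaseManager.getLeaseByPath(src.toString()) != null;
}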
/*
 * This test attempts to upgrade the datanode from federation layout
 * version -35 to a later version. It is for a non-federation cluster
 * with a single namenode.
 */
public void testNonFederationClusterUpgradeAfterFederationVersion() throws Exception {
  File[] baseDirs;
  UpgradeUtilities.initialize();
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
    log("DataNode upgrade with federation layout version in current", numDirs);
    UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
    try {
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createVersionFile(
          DATA_NODE,
          baseDirs,
          new StorageInfo(
              FSConstants.FEDERATION_VERSION,
              UpgradeUtilities.getCurrentNamespaceID(cluster),
              UpgradeUtilities.getCurrentFsscTime(cluster)),
          cluster.getNameNode().getNamespaceID());
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      checkResult(DATA_NODE, dataNodeDirs, 0, false);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    }
  }
}
// Test closing the file system before all file handles are closed.
public void testFsClose() throws Exception {
  System.out.println("test file system close start");
  final int DATANODE_NUM = 3;

  Configuration conf = new Configuration();

  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = (DistributedFileSystem) cluster.getFileSystem();

    // create a new file
    final String f = DIR + "foofs";
    final Path fpath = new Path(f);
    FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
    out.write("something".getBytes());

    // close the file system without closing the file
    dfs.close();
  } finally {
    System.out.println("testFsClose successful");
    cluster.shutdown();
  }
}
/** Test the update of neededReplications for an appended block. */
@Test(timeout = 60000)
public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // append to the file
    FSDataOutputStream append = fileSystem.append(f);
    append.write("/testAppend".getBytes());
    append.close();

    // start a new datanode
    cluster.startDataNodes(conf, 1, true, null, null);

    // check replication
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
@Override
protected void tearDown() throws Exception {
  super.tearDown();
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
}
private void init(Configuration conf) throws IOException {
  if (cluster != null) {
    cluster.shutdown();
  }
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitClusterUp();
  fileSystem = cluster.getFileSystem();
}
/**
 * Test that an append with no locations fails with an exception showing
 * insufficient locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();

  // lower the heartbeat interval for fast recognition of DN death
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // check replication
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

    // shut down all DNs that hold a location of the last block of the file
    LocatedBlocks lbs =
        fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
    for (DataNode dn : dnsOfCluster) {
      for (DatanodeInfo loc : dnsWithLocations) {
        if (dn.getDatanodeId().equals(loc)) {
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }

    // wait until replication 0 is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

    // Append to the file; at this point there are 3 live DNs but none of
    // them has the block.
    try {
      fileSystem.append(f);
      fail("Append should fail because of insufficient locations");
    } catch (IOException e) {
      LOG.info("Expected exception: ", e);
    }
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
/** Test deleteOnExit. */
public void testDeleteOnExit() throws IOException {
  Configuration conf = new Configuration();
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);
  try {
    // Create files in HDFS and the local file system.
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");

    // Write to file1 and file3, then close all three streams.
    // Purposely, do not write to file2.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();

    // set the delete-on-exit flag on the files
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);

    // Close the file systems. This should make the above files disappear.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;

    // Reopen the file systems and verify that the files no longer exist.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);
    assertTrue(file1 + " still exists in spite of deleteOnExit being set.", !fs.exists(file1));
    assertTrue(file2 + " still exists in spite of deleteOnExit being set.", !fs.exists(file2));
    assertTrue(
        file3 + " still exists in spite of deleteOnExit being set.", !localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");
  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
/**
 * Test appends to files at random offsets.
 *
 * @throws IOException an exception might be thrown
 */
public void testComplexAppend() throws IOException {
  fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
  conf.setInt("dfs.heartbeat.interval", 2);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
  conf.setInt("dfs.datanode.socket.write.timeout", 30000);
  conf.setInt("dfs.datanode.handler.count", 50);
  conf.setBoolean("dfs.support.append", true);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();

  try {
    // Create a bunch of test files with random replication factors and
    // insert them into a list.
    for (int i = 0; i < numberOfFiles; i++) {
      short replication = (short) (AppendTestUtil.nextInt(numDatanodes) + 1);
      Path testFile = new Path("/" + i + ".dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, testFile, replication);
      stm.close();
      testFiles.add(testFile);
    }

    // Create threads and make them run the workload concurrently.
    workload = new Workload[numThreads];
    for (int i = 0; i < numThreads; i++) {
      workload[i] = new Workload(cluster, i);
      workload[i].start();
    }

    // wait for all threads to complete
    for (int i = 0; i < numThreads; i++) {
      try {
        System.out.println("Waiting for thread " + i + " to complete...");
        workload[i].join();
        System.out.println("Thread " + i + " completed.");
      } catch (InterruptedException e) {
        i--; // retry the join for this thread
      }
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }

  // If any of the worker threads failed, fail the test.
  assertTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
}
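/*
 * The Workload thread class is not shown in this section. A minimal sketch
 * of a plausible shape, assuming each worker picks a random test file,
 * appends a random slice of fileContents to it, and clears globalStatus on
 * any failure. Coordination between workers (e.g. avoiding concurrent
 * appends to the same file) is omitted here; the real class in the original
 * suite is more elaborate.
 */
class Workload extends Thread {
  private final int id;
  private final MiniDFSCluster cluster;

  Workload(MiniDFSCluster cluster, int threadIndex) {
    this.cluster = cluster;
    this.id = threadIndex;
  }

  @Override
  public void run() {
    try {
      FileSystem fs = cluster.getFileSystem();
      java.util.Random random = new java.util.Random(id);

      // pick a random file and append a random-sized slice to it
      Path testFile = testFiles.get(random.nextInt(testFiles.size()));
      int len = random.nextInt(AppendTestUtil.FILE_SIZE);
      FSDataOutputStream out = fs.append(testFile);
      out.write(fileContents, 0, len);
      out.close();
    } catch (Throwable t) {
      globalStatus = false; // signal failure to the main test thread
    }
  }
}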
/**
 * Test a simple flush on a simple HDFS file.
 *
 * @throws IOException an exception might be thrown
 */
@Test
public void testSimpleFlush() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // create a new file
    Path file1 = new Path("/simpleFlush.dat");
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
    System.out.println("Created file simpleFlush.dat");

    // write the first half of the file and flush
    int mid = AppendTestUtil.FILE_SIZE / 2;
    stm.write(fileContents, 0, mid);
    stm.hflush();
    System.out.println("Wrote and flushed first part of file.");

    // write the remainder of the file and flush
    stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
    stm.hflush();
    stm.hflush(); // a second hflush with no new data should be harmless
    System.out.println("Wrote and flushed second part of file.");

    // verify that full blocks are sane
    checkFile(fs, file1, 1);

    stm.close();
    System.out.println("Closed file.");

    // verify that the entire file is good
    AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
  } catch (IOException e) {
    System.out.println("Exception: " + e);
    throw e;
  } catch (Throwable e) {
    System.out.println("Throwable: " + e);
    e.printStackTrace();
    throw new IOException("Throwable: " + e);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
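/*
 * checkFile is not defined in this section. A minimal sketch, assuming it
 * re-reads the portion of the file written so far and compares it against
 * fileContents (the replication argument is accepted but unused here; the
 * original helper may also verify block-level state):
 */
static void checkFile(FileSystem fs, Path name, int repl) throws IOException {
  FSDataInputStream in = fs.open(name);
  try {
    long len = fs.getFileStatus(name).getLen();
    byte[] actual = new byte[(int) len];
    in.readFully(0, actual);
    for (int i = 0; i < actual.length; i++) {
      assertEquals("byte mismatch at offset " + i, fileContents[i], actual[i]);
    }
  } finally {
    in.close();
  }
}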
/**
 * Verify that the cluster still comes up without system properties, provided
 * the configuration sets the base directory.
 *
 * @throws Throwable on a failure
 */
@Test
public void testClusterWithoutSystemProperties() throws Throwable {
  System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
  Configuration conf = new HdfsConfiguration();
  File testDataCluster1 = new File(testDataPath, CLUSTER_1);
  String c1Path = testDataCluster1.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    Assert.assertEquals(c1Path + "/data", cluster.getDataDirectory());
  } finally {
    cluster.shutdown();
  }
}
@Test
public void testAppend() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final short REPLICATION = (short) 3;

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path f = new Path(DIR, "testAppend");

    {
      LOG.info("create an empty file " + f);
      fs.create(f, REPLICATION).close();
      final FileStatus status = fs.getFileStatus(f);
      Assert.assertEquals(REPLICATION, status.getReplication());
      Assert.assertEquals(0L, status.getLen());
    }

    final byte[] bytes = new byte[1000];
    {
      LOG.info("append " + bytes.length + " bytes to " + f);
      final FSDataOutputStream out = fs.append(f);
      out.write(bytes);
      out.close();

      final FileStatus status = fs.getFileStatus(f);
      Assert.assertEquals(REPLICATION, status.getReplication());
      Assert.assertEquals(bytes.length, status.getLen());
    }

    {
      LOG.info("append another " + bytes.length + " bytes to " + f);
      try {
        final FSDataOutputStream out = fs.append(f);
        out.write(bytes);
        out.close();
        Assert.fail();
      } catch (IOException ioe) {
        LOG.info("This exception is expected", ioe);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/** Test if the seek bug exists in FSDataInputStream in DFS. */
@Test
public void testSeekBugDFS() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("seektest.dat");
    writeFile(fileSys, file1);
    seekReadFile(fileSys, file1);
    smallReadSeek(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
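/*
 * The helpers called by testSeekBugDFS are not shown in this section. A
 * minimal sketch of seekReadFile, assuming writeFile filled the file with
 * known data: seek back to the start of the file and verify that a read
 * returns the same bytes as a positioned read of the same range.
 */
static void seekReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  try {
    byte[] expected = new byte[128];
    stm.readFully(0, expected); // positioned read; does not move the stream position

    // read a bit, then seek back to the beginning
    stm.read();
    stm.seek(0);
    assertEquals(0, stm.getPos());

    byte[] actual = new byte[128];
    stm.readFully(actual);
    assertTrue("bytes after seek(0) differ", java.util.Arrays.equals(expected, actual));
  } finally {
    stm.close();
  }
}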
/**
 * FileNotFoundException is expected when appending to a non-existing file.
 *
 * @throws FileNotFoundException as the result
 */
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path file1 = new Path("/nonexistingfile.dat");
    fs.append(file1);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
@Test
public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

    NamenodeProtocols rpcServer = cluster.getNameNodeRpc();

    long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
    StorageInfo mockStorageInfo = mock(StorageInfo.class);
    doReturn(nnCTime).when(mockStorageInfo).getCTime();

    DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
    doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
    doReturn("fake-storage-id").when(mockDnReg).getStorageID();
    doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();

    // Should succeed when software versions are the same and CTimes are the same.
    doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
    rpcServer.registerDatanode(mockDnReg);

    // Should succeed when software versions are the same and CTimes are different.
    doReturn(nnCTime + 1).when(mockStorageInfo).getCTime();
    rpcServer.registerDatanode(mockDnReg);

    // Should fail when the software version of the DN is different from the
    // NN's and the CTimes are different.
    doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion();
    try {
      rpcServer.registerDatanode(mockDnReg);
      fail("Should not have been able to register DN with different software versions and CTimes");
    } catch (IncorrectVersionException ive) {
      GenericTestUtils.assertExceptionContains("does not match CTime of NN", ive);
      LOG.info("Got expected exception", ive);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Initialize the data structures used by this class. IMPORTANT NOTE: This
 * method must be called once before calling any other public method on this
 * class.
 *
 * <p>Creates a singleton master populated storage directory for a Namenode
 * (contains edits, fsimage, version, and time files) and a Datanode (contains
 * version and block files). This can be a lengthy operation.
 */
public static void initialize() throws Exception {
  createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
  Configuration config = new Configuration();
  config.set("dfs.name.dir", namenodeStorage.toString());
  config.set("dfs.data.dir", datanodeStorage.toString());
  MiniDFSCluster cluster = null;
  try {
    // format the data-node storage
    createEmptyDirs(new String[] {datanodeStorage.toString()});

    // format and start the NameNode, then start the DataNode
    NameNode.format(config);
    cluster = new MiniDFSCluster(config, 1, StartupOption.REGULAR);

    NameNode namenode = cluster.getNameNode();
    namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
    namenodeStorageFsscTime = namenode.versionRequest().getCTime();

    FileSystem fs = FileSystem.get(config);
    Path baseDir = new Path("/TestUpgrade");
    fs.mkdirs(baseDir);

    // write some files
    int bufferSize = 4096;
    byte[] buffer = new byte[bufferSize];
    for (int i = 0; i < bufferSize; i++) {
      buffer[i] = (byte) ('0' + i % 50);
    }
    writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);

    // save the image
    namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    namenode.saveNamespace();
    namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

    // write more files
    writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
  } finally {
    // shutdown
    if (cluster != null) {
      cluster.shutdown();
    }
    FileUtil.fullyDelete(new File(namenodeStorage, "in_use.lock"));
    FileUtil.fullyDelete(new File(datanodeStorage, "in_use.lock"));
  }
  namenodeStorageChecksum = checksumContents(NAME_NODE, new File(namenodeStorage, "current"));
  datanodeStorageChecksum = checksumContents(DATA_NODE, new File(datanodeStorage, "current"));
}
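/*
 * The four-argument writeFile helper used by initialize() is not shown in
 * this section. A minimal sketch, assuming it creates the file with
 * replication 1 and writes the first bufferSize bytes of the buffer (the
 * block size below is an assumption):
 */
static void writeFile(FileSystem fs, Path path, byte[] buffer, int bufferSize)
    throws IOException {
  FSDataOutputStream out = fs.create(path, true, bufferSize, (short) 1, 8192L);
  try {
    out.write(buffer, 0, bufferSize);
  } finally {
    out.close();
  }
}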
@Test(timeout = 100000)
public void testIsClusterUpAfterShutdown() throws Throwable {
  Configuration conf = new HdfsConfiguration();
  File testDataCluster4 = new File(testDataPath, CLUSTER_4);
  String c4Path = testDataCluster4.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
  MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
  try {
    DistributedFileSystem dfs = (DistributedFileSystem) cluster4.getFileSystem();
    dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
    cluster4.shutdown();
  } finally {
    while (cluster4.isClusterUp()) {
      Thread.sleep(1000);
    }
  }
}
/**
 * This test creates three empty files and lets their leases expire. This
 * triggers lease release, which is expected to close the empty files without
 * causing a ConcurrentModificationException.
 */
public void testLeaseExpireEmptyFiles() throws Exception {
  final Thread.UncaughtExceptionHandler oldUEH = Thread.getDefaultUncaughtExceptionHandler();
  Thread.setDefaultUncaughtExceptionHandler(
      new Thread.UncaughtExceptionHandler() {
        public void uncaughtException(Thread t, Throwable e) {
          if (e instanceof ConcurrentModificationException) {
            FSNamesystem.LOG.error("t=" + t, e);
            isConcurrentModificationException = true;
          }
        }
      });
  System.out.println("testLeaseExpireEmptyFiles start");
  final long leasePeriod = 1000;
  final int DATANODE_NUM = 3;

  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt("dfs.heartbeat.interval", 1);

  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
  try {
    cluster.waitActive();
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();

    // create the empty files
    TestFileCreation.createFile(dfs, new Path("/foo"), DATANODE_NUM);
    TestFileCreation.createFile(dfs, new Path("/foo2"), DATANODE_NUM);
    TestFileCreation.createFile(dfs, new Path("/foo3"), DATANODE_NUM);

    // Set the soft and hard limits to 1 second so that the namenode
    // triggers lease recovery.
    cluster.setLeasePeriod(leasePeriod, leasePeriod);

    // wait for the leases to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
    }
    assertFalse(isConcurrentModificationException);
  } finally {
    Thread.setDefaultUncaughtExceptionHandler(oldUEH);
    cluster.shutdown();
  }
}
// Test closing a file after the cluster is shut down.
public void testFsCloseAfterClusterShutdown() throws IOException {
  System.out.println("test testFsCloseAfterClusterShutdown start");
  final int DATANODE_NUM = 3;

  Configuration conf = new Configuration();
  conf.setInt("dfs.replication.min", 3);
  conf.setBoolean("ipc.client.ping", false); // hdfs timeout is default 60 seconds
  conf.setInt("ipc.ping.interval", 10000); // hdfs timeout is now 10 seconds

  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = (DistributedFileSystem) cluster.getFileSystem();

    // create a new file
    final String f = DIR + "dhrubashutdown";
    final Path fpath = new Path(f);
    FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
    out.write("something_dhruba".getBytes());
    out.sync(); // ensure that a block is allocated

    // shut down the last datanode in the pipeline
    cluster.stopDataNode(2);

    // Close the file. Since we have set minReplication to 3 but have killed
    // one of the three datanodes, the close call will loop until the
    // hdfsTimeout is encountered.
    boolean hasException = false;
    try {
      out.close();
      System.out.println("testFsCloseAfterClusterShutdown: out.close() unexpectedly succeeded");
    } catch (IOException e) {
      hasException = true;
    }
    assertTrue("Failed to close file after cluster shutdown", hasException);
  } finally {
    System.out.println("testFsCloseAfterClusterShutdown successful");
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/** Test that we can write to and read from large blocks. */
public void runTest(final long blockSize) throws IOException {

  // write a file that is slightly larger than 1 block
  final long fileSize = blockSize + 1L;

  Configuration conf = new Configuration();
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fs = cluster.getFileSystem();
  try {
    // create a new file in the test data directory
    Path file1 =
        new Path(System.getProperty("test.build.data") + "/" + Long.toString(blockSize) + ".dat");
    FSDataOutputStream stm = createFile(fs, file1, 1, blockSize);
    System.out.println(
        "File " + file1 + " created with file size " + fileSize + " blocksize " + blockSize);

    // verify that the file exists in the FS namespace
    assertTrue(file1 + " should be a file and not a dir", !fs.getFileStatus(file1).isDir());

    // write to the file
    writeFile(stm, fileSize);
    System.out.println("File " + file1 + " written to.");

    // close the file
    stm.close();
    System.out.println("File " + file1 + " closed.");

    // make sure a client can read it
    checkFullFile(fs, file1, fileSize);

    // verify the file size
    long len = fs.getFileStatus(file1).getLen();
    assertTrue(
        file1 + " should be of size " + fileSize + " but found to be of size " + len,
        len == fileSize);
  } finally {
    cluster.shutdown();
  }
}
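/*
 * checkFullFile is not defined in this section. A minimal sketch, assuming
 * writeFile(stm, fileSize) wrote fileSize bytes of a deterministic pattern
 * (here, the low byte of the offset — an assumption about the pattern, not
 * necessarily the suite's actual one):
 */
static void checkFullFile(FileSystem fs, Path name, long fileSize) throws IOException {
  FSDataInputStream in = fs.open(name);
  try {
    byte[] buffer = new byte[64 * 1024];
    long offset = 0;
    int read;
    while ((read = in.read(buffer)) > 0) {
      for (int i = 0; i < read; i++) {
        assertEquals("mismatch at offset " + (offset + i), (byte) (offset + i), buffer[i]);
      }
      offset += read;
    }
    assertEquals("unexpected file length", fileSize, offset);
  } finally {
    in.close();
  }
}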
@Test
public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, "3.0.0");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

    NamenodeProtocols rpcServer = cluster.getNameNodeRpc();

    long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
    StorageInfo mockStorageInfo = mock(StorageInfo.class);
    doReturn(nnCTime).when(mockStorageInfo).getCTime();

    DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
    doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
    doReturn("fake-storage-id").when(mockDnReg).getStorageID();
    doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();

    // Should succeed when software versions are the same.
    doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
    rpcServer.registerDatanode(mockDnReg);

    // Should succeed when the software version of the DN is above the
    // minimum required by the NN.
    doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
    rpcServer.registerDatanode(mockDnReg);

    // Should fail when the software version of the DN is below the minimum
    // required by the NN.
    doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
    try {
      rpcServer.registerDatanode(mockDnReg);
      fail("Should not have been able to register DN with too-low version.");
    } catch (IncorrectVersionException ive) {
      GenericTestUtils.assertExceptionContains("The reported DataNode version is too low", ive);
      LOG.info("Got expected exception", ive);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Test that file data can be flushed.
 *
 * @throws IOException an exception might be thrown
 */
@Test
public void testComplexFlush() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // create a new file
    Path file1 = new Path("/complexFlush.dat");
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
    System.out.println("Created file complexFlush.dat");

    // write the file in 29-byte chunks, flushing after each chunk
    int start = 0;
    while (start + 29 < AppendTestUtil.FILE_SIZE) {
      stm.write(fileContents, start, 29);
      stm.hflush();
      start += 29;
    }
    stm.write(fileContents, start, AppendTestUtil.FILE_SIZE - start);

    // verify that full blocks are sane
    checkFile(fs, file1, 1);
    stm.close();

    // verify that the entire file is good
    AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
  } catch (IOException e) {
    System.out.println("Exception: " + e);
    throw e;
  } catch (Throwable e) {
    System.out.println("Throwable: " + e);
    e.printStackTrace();
    throw new IOException("Throwable: " + e);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
public void testAbandonBlock() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
  FileSystem fs = cluster.getFileSystem();

  String src = FILE_NAME_PREFIX + "foo";
  FSDataOutputStream fout = null;
  try {
    // start writing a file but do not close it
    fout = fs.create(new Path(src), true, 4096, (short) 1, 512L);
    for (int i = 0; i < 1024; i++) {
      fout.write(123);
    }
    fout.sync();

    // Try to abandon the block as a client ("someone") that does not hold
    // the lease on the file; abandonBlock should throw an exception.
    final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
    LocatedBlocks blocks = dfsclient.namenode.getBlockLocations(src, 0, 1);
    LocatedBlock b = blocks.get(0);
    try {
      dfsclient.namenode.abandonBlock(b.getBlock(), src, "someone");
      fail("abandonBlock should have thrown an exception");
    } catch (IOException ioe) {
      LOG.info("GREAT! " + StringUtils.stringifyException(ioe));
    }
  } finally {
    try {
      fout.close();
    } catch (Exception e) {
    }
    try {
      fs.close();
    } catch (Exception e) {
    }
    try {
      cluster.shutdown();
    } catch (Exception e) {
    }
  }
}
/*
 * This test attempts to upgrade the datanode from federation layout version
 * -35 to a later version. It is for a federation cluster with 2 namenodes,
 * and it changes the layout version and ctime.
 */
public void testFederationClusterUpgradeAfterFederationVersionWithCTimeChange() throws Exception {
  File[] baseDirs;
  Configuration baseConf = new Configuration();
  UpgradeUtilities.initialize(2, baseConf, true);
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
    log("DataNode upgrade with federation layout version in current and ctime change", numDirs);
    UpgradeUtilities.createFederatedNameNodeStorageDirs(nameNodeDirs);
    conf.set(
        FSConstants.DFS_FEDERATION_NAMESERVICES,
        baseConf.get(FSConstants.DFS_FEDERATION_NAMESERVICES));
    try {
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE, false, 2);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      for (int i = 0; i < 2; i++) {
        UpgradeUtilities.createVersionFile(
            DATA_NODE,
            baseDirs,
            new StorageInfo(
                FSConstants.FEDERATION_VERSION,
                cluster.getNameNode(i).getNamespaceID(),
                cluster.getNameNode(i).versionRequest().getCTime() - 1),
            cluster.getNameNode(i).getNamespaceID());
      }
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      for (int i = 0; i < 2; i++) {
        checkResult(DATA_NODE, dataNodeDirs, i, false);
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    }
  }
}
/**
 * Regression test for HDFS-894: ensures that, when datanodes are restarted,
 * the new IPC port is registered with the namenode.
 */
@Test
public void testChangeIpcPort() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);

    // restart the datanodes
    cluster.restartDataNodes();

    // wait until we get a heartbeat from the restarted datanode
    DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
    long firstUpdateAfterRestart = report[0].getLastUpdate();

    boolean gotHeartbeat = false;
    for (int i = 0; i < 10 && !gotHeartbeat; i++) {
      try {
        Thread.sleep(i * 1000);
      } catch (InterruptedException ie) {
      }
      report = client.datanodeReport(DatanodeReportType.ALL);
      gotHeartbeat = (report[0].getLastUpdate() > firstUpdateAfterRestart);
    }
    if (!gotHeartbeat) {
      fail("Never got a heartbeat from restarted datanode.");
    }

    int realIpcPort = cluster.getDataNodes().get(0).getIpcPort();
    // Now make sure the reported IPC port is the correct one.
    assertEquals(realIpcPort, report[0].getIpcPort());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}