/**
 * TC5: Only one simultaneous append.
 *
 * @throws IOException an exception might be thrown
 */
public void testTC5() throws Exception {
  final Path p = new Path("/TC5/foo");
  System.out.println("p=" + p);

  // a. Create file on Machine M1. Write half block to it. Close file.
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, (int) (BLOCK_SIZE / 2));
    out.close();
  }

  // b. Reopen file in "append" mode on Machine M1.
  FSDataOutputStream out = fs.append(p);

  // c. On Machine M2, reopen file in "append" mode. This should fail.
  try {
    AppendTestUtil.createHdfsWithDifferentUsername(conf).append(p);
    fail("This should fail.");
  } catch (IOException ioe) {
    AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
  }

  // d. On Machine M1, close file.
  out.close();
}
/**
 * TC2: Append on non-block boundary.
 *
 * @throws IOException an exception might be thrown
 */
public void testTC2() throws Exception {
  final Path p = new Path("/TC2/foo");
  System.out.println("p=" + p);

  // a. Create file with one and a half blocks of data. Close file.
  final int len1 = (int) (BLOCK_SIZE + BLOCK_SIZE / 2);
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }
  AppendTestUtil.check(fs, p, len1);

  // Reopen file to append a quarter block of data. Close file.
  final int len2 = (int) BLOCK_SIZE / 4;
  {
    FSDataOutputStream out = fs.append(p);
    AppendTestUtil.write(out, len1, len2);
    out.close();
  }

  // b. Reopen file and read 1.75 blocks of data. Close file.
  AppendTestUtil.check(fs, p, len1 + len2);
}
/**
 * TC12: Append to partial CRC chunk.
 *
 * @throws IOException an exception might be thrown
 */
public void testTC12() throws Exception {
  final Path p = new Path("/TC12/foo");
  System.out.println("p=" + p);

  // a. Create file with a block size of 64KB
  //    and a default io.bytes.per.checksum of 512 bytes.
  //    Write 25687 bytes of data. Close file.
  final int len1 = 25687;
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }

  // b. Reopen file in "append" mode. Append another 5877 bytes of data. Close file.
  final int len2 = 5877;
  {
    FSDataOutputStream out = fs.append(p);
    AppendTestUtil.write(out, len1, len2);
    out.close();
  }

  // c. Reopen file and read 25687+5877 bytes of data from file. Close file.
  AppendTestUtil.check(fs, p, len1 + len2);
}
/** Test that neededReplications is updated for the appended block. */
@Test(timeout = 60000)
public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2 on a single-datanode cluster.
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // Append to the file.
    FSDataOutputStream append = fileSystem.append(f);
    append.write("/testAppend".getBytes());
    append.close();

    // Start a new datanode.
    cluster.startDataNodes(conf, 1, true, null, null);

    // Check that the appended block reaches replication 2.
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
// test closing the file system before all file handles are closed.
public void testFsClose() throws Exception {
  System.out.println("test file system close start");
  final int DATANODE_NUM = 3;

  Configuration conf = new Configuration();

  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = (DistributedFileSystem) cluster.getFileSystem();

    // create a new file.
    final String f = DIR + "foofs";
    final Path fpath = new Path(f);
    FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
    out.write("something".getBytes());

    // close file system without closing file
    dfs.close();
  } finally {
    System.out.println("testFsClose successful");
    cluster.shutdown();
  }
}
/**
 * Test that an append with no locations fails with an exception showing insufficient
 * locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();

  // lower the heartbeat interval for fast recognition of DN death
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // Check for replication
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs =
        fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
    for (DataNode dn : dnsOfCluster) {
      for (DatanodeInfo loc : dnsWithLocations) {
        if (dn.getDatanodeId().equals(loc)) {
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }

    // Wait till 0 replication is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

    // Append to the file. At this point the remaining DNs are live, but none of
    // them has the block.
    try {
      fileSystem.append(f);
      fail("Append should fail because of insufficient locations");
    } catch (IOException e) {
      LOG.info("Expected exception: ", e);
    }

    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
/**
 * TC11: Racing rename.
 *
 * @throws IOException an exception might be thrown
 */
public void testTC11() throws Exception {
  final Path p = new Path("/TC11/foo");
  System.out.println("p=" + p);

  // a. Create file and write one block of data. Close file.
  final int len1 = (int) BLOCK_SIZE;
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }

  // b. Reopen file in "append" mode. Append half block of data.
  FSDataOutputStream out = fs.append(p);
  final int len2 = (int) BLOCK_SIZE / 2;
  AppendTestUtil.write(out, len1, len2);
  out.hflush();

  // c. Rename file to file.new.
  final Path pnew = new Path(p + ".new");
  assertTrue(fs.rename(p, pnew));

  // d. Close file handle that was opened in (b).
  try {
    out.close();
    fail("close() should throw an exception");
  } catch (Exception e) {
    AppendTestUtil.LOG.info("GOOD!", e);
  }

  // wait for the lease recovery
  cluster.setLeasePeriod(1000, 1000);
  AppendTestUtil.sleep(5000);

  // check block sizes
  final long len = fs.getFileStatus(pnew).getLen();
  final LocatedBlocks locatedblocks =
      fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
  final int numblock = locatedblocks.locatedBlockCount();
  for (int i = 0; i < numblock; i++) {
    final LocatedBlock lb = locatedblocks.get(i);
    final Block blk = lb.getBlock();
    final long size = lb.getBlockSize();
    if (i < numblock - 1) {
      assertEquals(BLOCK_SIZE, size);
    }
    for (DatanodeInfo datanodeinfo : lb.getLocations()) {
      final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
      final Block metainfo = dn.data.getStoredBlock(blk.getBlockId());
      assertEquals(size, metainfo.getNumBytes());
    }
  }
}
/**
 * Append to a partial CRC chunk when the first write does not fill up the partial
 * CRC chunk.
 *
 * @throws IOException an exception might be thrown
 */
public void testAppendToPartialChunk() throws IOException {
  final Path p = new Path("/partialChunk/foo");
  final int fileLen = 513;
  System.out.println("p=" + p);

  byte[] fileContents = AppendTestUtil.initBuffer(fileLen);

  // create a new file.
  FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, 1);

  // create 1 byte file
  stm.write(fileContents, 0, 1);
  stm.close();
  System.out.println("Wrote 1 byte and closed the file " + p);

  // append to file
  stm = fs.append(p);
  // Append to a partial CRC chunk
  stm.write(fileContents, 1, 1);
  stm.hflush();
  // The partial CRC chunk is not full yet; close the file
  stm.close();
  System.out.println("Appended 1 byte and closed the file " + p);

  // write the remainder of the file
  stm = fs.append(p);

  // ensure getPos is set to reflect the existing size of the file
  assertEquals(2, stm.getPos());

  // append to a partial CRC chunk
  stm.write(fileContents, 2, 1);

  // The partial chunk is not full yet; force a packet to the DN
  stm.hflush();
  System.out.println("Appended and flushed 1 byte");

  // The partial chunk is still not full; force another packet to the DN
  stm.write(fileContents, 3, 2);
  stm.hflush();
  System.out.println("Appended and flushed 2 bytes");

  // fill up the partial chunk and close the file
  stm.write(fileContents, 5, fileLen - 5);
  stm.close();
  System.out.println("Flushed 508 bytes and closed the file " + p);

  // verify that the entire file is good
  AppendTestUtil.checkFullFile(fs, p, fileLen, fileContents,
      "Failed to append to a partial chunk");
}
@Override
protected void setUp() throws Exception {
  cluster = new MiniDFSCluster(CONF, 1, true, null);
  cluster.waitActive();
  fs = (DistributedFileSystem) cluster.getFileSystem();
  metrics = fs.getClient().getDFSClientMetrics();
}
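// The setUp above has no matching teardown in this excerpt. Below is a minimal
// JUnit 3-style sketch, assuming the class keeps only the cluster, fs, and metrics
// fields shown in setUp; the original class may release resources differently.
@Override
protected void tearDown() throws Exception {
  if (fs != null) {
    fs.close();
  }
  if (cluster != null) {
    cluster.shutdown();
  }
  super.tearDown();
}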
@Test(timeout = 100000)
public void testIsClusterUpAfterShutdown() throws Throwable {
  Configuration conf = new HdfsConfiguration();
  File testDataCluster4 = new File(testDataPath, CLUSTER_4);
  String c4Path = testDataCluster4.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
  MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
  try {
    DistributedFileSystem dfs = (DistributedFileSystem) cluster4.getFileSystem();
    dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
    cluster4.shutdown();
  } finally {
    while (cluster4.isClusterUp()) {
      Thread.sleep(1000);
    }
  }
}
public void testCreateWriteDelete() throws Exception {
  for (int i = 0; i < TEST_DIR_NUM; ++i) {
    String path = "testDirectory" + i;
    fs.mkdirs(getTestPath(path));
    fs.delete(getTestPath(path), true);
  }
  assertEquals(TEST_DIR_NUM, metrics.numCreateDirOps.getCurrentIntervalValue());

  for (int i = 0; i < TEST_FILE_NUM; ++i) {
    String file = "/tmp" + i + ".txt";
    DFSTestUtil.createFile(fs, new Path(file), FILE_LEN, (short) 1, 1L);
    fs.delete(new Path(file), false);
  }
  assertEquals(TEST_FILE_NUM, metrics.writeOps.getCurrentIntervalValue());
  assertEquals(FILE_LEN * TEST_FILE_NUM, metrics.writeSize.getCurrentIntervalValue());
  assertEquals(TEST_FILE_NUM, metrics.numCreateFileOps.getCurrentIntervalValue());
}
/**
 * TC7: Corrupted replicas are present.
 *
 * @throws IOException an exception might be thrown
 */
public void testTC7() throws Exception {
  final short repl = 2;
  final Path p = new Path("/TC7/foo");
  System.out.println("p=" + p);

  // a. Create file with replication factor of 2. Write half block of data. Close file.
  final int len1 = (int) (BLOCK_SIZE / 2);
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, repl, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }
  DFSTestUtil.waitReplication(fs, p, repl);

  // b. Log into one datanode that has one replica of this block.
  //    Find the block file on this datanode and truncate it to zero size.
  final LocatedBlocks locatedblocks =
      fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
  assertEquals(1, locatedblocks.locatedBlockCount());
  final LocatedBlock lb = locatedblocks.get(0);
  final Block blk = lb.getBlock();
  assertEquals(len1, lb.getBlockSize());
  DatanodeInfo[] datanodeinfos = lb.getLocations();
  assertEquals(repl, datanodeinfos.length);

  final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
  final FSDataset data = (FSDataset) dn.getFSDataset();
  final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk), "rw");
  AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
  assertEquals(len1, raf.length());
  raf.setLength(0);
  raf.close();

  // c. Open file in "append mode". Append a new block worth of data. Close file.
  final int len2 = (int) BLOCK_SIZE;
  {
    FSDataOutputStream out = fs.append(p);
    AppendTestUtil.write(out, len1, len2);
    out.close();
  }

  // d. Reopen file and read two blocks worth of data.
  AppendTestUtil.check(fs, p, len1 + len2);
}
/** Wait up to 20s for the given DN (IP:port) to be decommissioned. */
public static void waitForDecommission(FileSystem fs, String name)
    throws IOException, InterruptedException, TimeoutException {
  DatanodeInfo dn = null;
  int count = 0;
  final int ATTEMPTS = 20;

  do {
    Thread.sleep(1000);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (DatanodeInfo info : dfs.getDataNodeStats()) {
      if (name.equals(info.getXferAddr())) {
        dn = info;
      }
    }
    count++;
  } while ((dn == null || dn.isDecommissionInProgress() || !dn.isDecommissioned())
      && count < ATTEMPTS);

  if (count == ATTEMPTS) {
    throw new TimeoutException("Timed out waiting for datanode " + name
        + " to decommission.");
  }
}
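// A hedged usage sketch for waitForDecommission: a caller would typically start
// decommissioning first (e.g. via the dfs.hosts.exclude file) and then block on the
// helper. addToExcludesFile below is a hypothetical helper, not part of this class;
// getXferAddr, refreshNodes, and getConfiguration are existing APIs.
void decommissionAndWait(MiniDFSCluster cluster, FileSystem fs, int dnIndex)
    throws Exception {
  final String xferAddr =
      cluster.getDataNodes().get(dnIndex).getDatanodeId().getXferAddr();
  // hypothetical helper: append the IP:port to the configured excludes file
  addToExcludesFile(xferAddr);
  // ask the namenode to re-read the include/exclude lists
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .refreshNodes(cluster.getConfiguration(0));
  waitForDecommission(fs, xferAddr);
}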
public void testRead() throws Exception {
  for (int i = 0; i < TEST_FILE_NUM; ++i) {
    String file = "/tmp" + i + ".txt";
    DFSTestUtil.createFile(fs, new Path(file), FILE_LEN, (short) 5, 1L);

    DFSDataInputStream in = (DFSDataInputStream) fs.open(new Path(file));
    int numOfRead = 0;
    // read() returns -1 at end of stream; comparing with "> 0" would stop
    // reading early at the first zero byte.
    while (in.read() != -1) {
      numOfRead++;
    }
    assertEquals(FILE_LEN * (i + 1), metrics.readSize.getCurrentIntervalValue());
    assertEquals(numOfRead * (i + 1), metrics.readOps.getCurrentIntervalValue());
  }
}
@Test
public void testBestEffort() throws Exception {
  final Configuration conf = new HdfsConfiguration();

  // always replace a datanode but do not throw an exception on failure
  ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path f = new Path(DIR, "testIgnoreReplaceFailure");

    final byte[] bytes = new byte[1000];
    {
      LOG.info("write " + bytes.length + " bytes to " + f);
      final FSDataOutputStream out = fs.create(f, REPLICATION);
      out.write(bytes);
      out.close();

      final FileStatus status = fs.getFileStatus(f);
      Assert.assertEquals(REPLICATION, status.getReplication());
      Assert.assertEquals(bytes.length, status.getLen());
    }

    {
      LOG.info("append another " + bytes.length + " bytes to " + f);
      final FSDataOutputStream out = fs.append(f);
      out.write(bytes);
      out.close();
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/** Make sure that the quota is decremented correctly when a block is abandoned. */
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
  FileSystem fs = cluster.getFileSystem();
  DistributedFileSystem dfs = (DistributedFileSystem) fs;

  try {
    // Set the diskspace quota to 3MB
    dfs.setQuota(new Path("/"), FSConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);

    // Start writing a file with 2 replicas to ensure each datanode has one.
    // Block size is 1MB.
    String src = FILE_NAME_PREFIX + "test_quota1";
    FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short) 2, 1024 * 1024);
    for (int i = 0; i < 1024; i++) {
      fout.writeByte(123);
    }

    // Shutdown one datanode, causing the block abandonment.
    cluster.getDataNodes().get(0).shutdown();

    // Close the file; a new block will be allocated with 2MB pending size.
    try {
      fout.close();
    } catch (QuotaExceededException e) {
      fail("Unexpected quota exception when closing fout");
    }
  } finally {
    try {
      fs.close();
    } catch (Exception e) {
      // ignore cleanup failures
    }
    try {
      cluster.shutdown();
    } catch (Exception e) {
      // ignore cleanup failures
    }
  }
}
@Test
public void testAppend() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final short REPLICATION = (short) 3;

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  try {
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path f = new Path(DIR, "testAppend");

    {
      LOG.info("create an empty file " + f);
      fs.create(f, REPLICATION).close();
      final FileStatus status = fs.getFileStatus(f);
      Assert.assertEquals(REPLICATION, status.getReplication());
      Assert.assertEquals(0L, status.getLen());
    }

    final byte[] bytes = new byte[1000];
    {
      LOG.info("append " + bytes.length + " bytes to " + f);
      final FSDataOutputStream out = fs.append(f);
      out.write(bytes);
      out.close();

      final FileStatus status = fs.getFileStatus(f);
      Assert.assertEquals(REPLICATION, status.getReplication());
      Assert.assertEquals(bytes.length, status.getLen());
    }

    {
      LOG.info("append another " + bytes.length + " bytes to " + f);
      try {
        final FSDataOutputStream out = fs.append(f);
        out.write(bytes);
        out.close();

        Assert.fail();
      } catch (IOException ioe) {
        LOG.info("This exception is expected", ioe);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/** Test replace datanode on failure. */
@Test
public void testReplaceDatanodeOnFailure() throws Exception {
  final Configuration conf = new HdfsConfiguration();

  // do not consider load factor when selecting a datanode
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);

  // always replace a datanode
  ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);

  final String[] racks = new String[REPLICATION];
  Arrays.fill(racks, RACK0);
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).racks(racks).numDataNodes(REPLICATION).build();

  try {
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path dir = new Path(DIR);
    final int NUM_WRITERS = 10;
    final int FIRST_BATCH = 5;
    final SlowWriter[] slowwriters = new SlowWriter[NUM_WRITERS];
    for (int i = 1; i <= slowwriters.length; i++) {
      // create slow writers at different speeds
      slowwriters[i - 1] = new SlowWriter(fs, new Path(dir, "file" + i), i * 200L);
    }

    for (int i = 0; i < FIRST_BATCH; i++) {
      slowwriters[i].start();
    }

    // Let slow writers write something.
    // Some of them are so slow that they will not have written anything yet.
    sleepSeconds(3);

    // start new datanodes
    cluster.startDataNodes(conf, 2, true, null, new String[] {RACK1, RACK1});
    cluster.waitActive();
    // wait for first block reports for up to 10 seconds
    cluster.waitFirstBRCompleted(0, 10000);

    // stop an old datanode
    MiniDFSCluster.DataNodeProperties dnprop =
        cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));

    for (int i = FIRST_BATCH; i < slowwriters.length; i++) {
      slowwriters[i].start();
    }

    waitForBlockReplication(slowwriters);

    // check replication and interrupt.
    for (SlowWriter s : slowwriters) {
      s.checkReplication();
      s.interruptRunning();
    }

    // close files
    for (SlowWriter s : slowwriters) {
      s.joinAndClose();
    }

    // Verify the files
    LOG.info("Verify the files");
    for (int i = 0; i < slowwriters.length; i++) {
      LOG.info(slowwriters[i].filepath + ": length="
          + fs.getFileStatus(slowwriters[i].filepath).getLen());
      FSDataInputStream in = null;
      try {
        in = fs.open(slowwriters[i].filepath);
        for (int j = 0, x; (x = in.read()) != -1; j++) {
          Assert.assertEquals(j, x);
        }
      } finally {
        IOUtils.closeStream(in);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
SlowWriter(DistributedFileSystem fs, Path filepath, final long sleepms) throws IOException {
  super(SlowWriter.class.getSimpleName() + ":" + filepath);
  this.filepath = filepath;
  this.out = (HdfsDataOutputStream) fs.create(filepath, REPLICATION);
  this.sleepms = sleepms;
}
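// The constructor above implies a writer loop. Below is a plausible run() body,
// consistent with the interruptRunning()/joinAndClose() calls and the
// byte-by-byte verification in testReplaceDatanodeOnFailure; the volatile
// "running" flag is an assumption about the rest of the class, which is not
// shown in this excerpt.
@Override
public void run() {
  int i = 0;
  try {
    sleep(sleepms);
    for (; running; i++) {
      LOG.info(getName() + " writes " + i);
      out.write(i); // write one byte per iteration so replication can be checked
      out.hflush();
      sleep(sleepms);
    }
  } catch (InterruptedException e) {
    LOG.info(getName() + " interrupted.");
  } catch (IOException e) {
    throw new RuntimeException(getName(), e);
  }
}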
public void testBlockSynchronization() throws Exception {
  final long softLease = 1000;
  final long hardLease = 60 * 60 * 1000;
  final short repl = 3;
  final Configuration conf = new Configuration();
  final int bufferSize = conf.getInt("io.file.buffer.size", 4096);
  conf.setLong("dfs.block.size", BLOCK_SIZE);
  conf.setInt("dfs.heartbeat.interval", 1);
  // conf.setInt("io.bytes.per.checksum", 16);

  MiniDFSCluster cluster = null;
  byte[] actual = new byte[FILE_SIZE];

  try {
    cluster = new MiniDFSCluster(conf, 5, true, null);
    cluster.waitActive();

    // create a file
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();

    // create a random file name
    String filestr = "/foo" + AppendTestUtil.nextInt();
    System.out.println("filestr=" + filestr);
    Path filepath = new Path(filestr);
    FSDataOutputStream stm = dfs.create(filepath, true, bufferSize, repl, BLOCK_SIZE);
    assertTrue(dfs.dfs.exists(filestr));

    // write a random number of bytes into it.
    int size = AppendTestUtil.nextInt(FILE_SIZE);
    System.out.println("size=" + size);
    stm.write(buffer, 0, size);

    // sync file
    AppendTestUtil.LOG.info("sync");
    stm.sync();
    AppendTestUtil.LOG.info("leasechecker.interrupt()");
    dfs.dfs.leaseChecker.interrupt();

    // set the soft limit to 1 second so that the namenode triggers lease
    // recovery on the next attempt to open the file for write.
    cluster.setLeasePeriod(softLease, hardLease);

    // try to re-open the file before closing the previous handle. This
    // should fail but will trigger lease recovery.
    {
      Configuration conf2 = new Configuration(conf);
      String username = UserGroupInformation.getCurrentUGI().getUserName() + "_1";
      UnixUserGroupInformation.saveToConf(conf2,
          UnixUserGroupInformation.UGI_PROPERTY_NAME,
          new UnixUserGroupInformation(username, new String[] {"supergroup"}));
      FileSystem dfs2 = FileSystem.get(conf2);

      boolean done = false;
      for (int i = 0; i < 10 && !done; i++) {
        AppendTestUtil.LOG.info("i=" + i);
        try {
          dfs2.create(filepath, false, bufferSize, repl, BLOCK_SIZE);
          fail("Creation of an existing file should never succeed.");
        } catch (IOException ioe) {
          final String message = ioe.getMessage();
          if (message.contains("file exists")) {
            AppendTestUtil.LOG.info("done", ioe);
            done = true;
          } else if (message.contains(AlreadyBeingCreatedException.class.getSimpleName())) {
            AppendTestUtil.LOG.info("GOOD! got " + message);
          } else {
            AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
          }
        }

        if (!done) {
          AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
          try {
            Thread.sleep(5000);
          } catch (InterruptedException e) {
          }
        }
      }
      assertTrue(done);
    }

    AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "
        + "Validating its contents now...");

    // verify that the file size matches
    assertTrue("File should be " + size + " bytes, but is actually found to be "
            + dfs.getFileStatus(filepath).getLen() + " bytes",
        dfs.getFileStatus(filepath).getLen() == size);

    // verify that there is enough data to read.
    System.out.println("File size is good. Now validating sizes from datanodes...");
    FSDataInputStream stmin = dfs.open(filepath);
    stmin.readFully(0, actual, 0, size);
    stmin.close();
  } finally {
    try {
      if (cluster != null) {
        cluster.shutdown();
      }
    } catch (Exception e) {
      // ignore
    }
  }
}
/**
 * The following test first creates a file with a few blocks. It randomly truncates
 * the replica of the last block stored in each datanode. Finally, it triggers block
 * synchronization to synchronize all stored blocks.
 */
public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000;
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
    cluster.waitActive();

    // create a file
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
    assertTrue(dfs.exists(filepath));
    DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

    // get block info for the last block
    LocatedBlock locatedblock =
        TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(), filestr);
    DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
    assertEquals(REPLICATION_NUM, datanodeinfos.length);

    // connect to data nodes
    DataNode[] datanodes = new DataNode[REPLICATION_NUM];
    for (int i = 0; i < REPLICATION_NUM; i++) {
      datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
      assertTrue(datanodes[i] != null);
    }

    // verify block info
    ExtendedBlock lastblock = locatedblock.getBlock();
    DataNode.LOG.info("newblocks=" + lastblock);
    for (int i = 0; i < REPLICATION_NUM; i++) {
      checkMetaInfo(lastblock, datanodes[i]);
    }

    DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
    cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);

    // expire the lease to trigger block recovery.
    waitLeaseRecovery(cluster);

    Block[] updatedmetainfo = new Block[REPLICATION_NUM];
    long oldSize = lastblock.getNumBytes();
    lastblock = TestInterDatanodeProtocol
        .getLastLocatedBlock(dfs.dfs.getNamenode(), filestr).getBlock();
    long currentGS = lastblock.getGenerationStamp();
    for (int i = 0; i < REPLICATION_NUM; i++) {
      updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i])
          .getStoredBlock(lastblock.getBlockPoolId(), lastblock.getBlockId());
      assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
      assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
      assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
    }

    // verify that lease recovery does not occur when the namenode is in safe mode
    System.out.println("Testing that lease recovery cannot happen during safemode.");
    filestr = "/foo.safemode";
    filepath = new Path(filestr);
    dfs.create(filepath, (short) 1);
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    assertTrue(dfs.dfs.exists(filestr));
    DFSTestUtil.waitReplication(dfs, filepath, (short) 1);
    waitLeaseRecovery(cluster);
    // verify that we still cannot recover the lease
    LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
    assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
@After
public void shutDownCluster() throws IOException {
  dfs.close();
  cluster.shutdown();
}