/** * TC5: Only one simultaneous append. * * @throws IOException an exception might be thrown */ public void testTC5() throws Exception { final Path p = new Path("/TC5/foo"); System.out.println("p=" + p); // a. Create file on Machine M1. Write half block to it. Close file. { FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE); AppendTestUtil.write(out, 0, (int) (BLOCK_SIZE / 2)); out.close(); } // b. Reopen file in "append" mode on Machine M1. FSDataOutputStream out = fs.append(p); // c. On Machine M2, reopen file in "append" mode. This should fail. try { AppendTestUtil.createHdfsWithDifferentUsername(conf).append(p); fail("This should fail."); } catch (IOException ioe) { AppendTestUtil.LOG.info("GOOD: got an exception", ioe); } // d. On Machine M1, close file. out.close(); }
/** * TC12: Append to partial CRC chunk * * @throws IOException an exception might be thrown */ public void testTC12() throws Exception { final Path p = new Path("/TC12/foo"); System.out.println("p=" + p); // a. Create file with a block size of 64KB // and a default io.bytes.per.checksum of 512 bytes. // Write 25687 bytes of data. Close file. final int len1 = 25687; { FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE); AppendTestUtil.write(out, 0, len1); out.close(); } // b. Reopen file in "append" mode. Append another 5877 bytes of data. Close file. final int len2 = 5877; { FSDataOutputStream out = fs.append(p); AppendTestUtil.write(out, len1, len2); out.close(); } // c. Reopen file and read 25687+5877 bytes of data from file. Close file. AppendTestUtil.check(fs, p, len1 + len2); }
/** Test the updation of NeededReplications for the Appended Block */ @Test(timeout = 60000) public void testUpdateNeededReplicationsForAppendedFile() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); DistributedFileSystem fileSystem = null; try { // create a file. fileSystem = cluster.getFileSystem(); Path f = new Path("/testAppend"); FSDataOutputStream create = fileSystem.create(f, (short) 2); create.write("/testAppend".getBytes()); create.close(); // Append to the file. FSDataOutputStream append = fileSystem.append(f); append.write("/testAppend".getBytes()); append.close(); // Start a new datanode cluster.startDataNodes(conf, 1, true, null, null); // Check for replications DFSTestUtil.waitReplication(fileSystem, f, (short) 2); } finally { if (null != fileSystem) { fileSystem.close(); } cluster.shutdown(); } }
/** * TC2: Append on non-block boundary. * * @throws IOException an exception might be thrown */ public void testTC2() throws Exception { final Path p = new Path("/TC2/foo"); System.out.println("p=" + p); // a. Create file with one and a half block of data. Close file. final int len1 = (int) (BLOCK_SIZE + BLOCK_SIZE / 2); { FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE); AppendTestUtil.write(out, 0, len1); out.close(); } AppendTestUtil.check(fs, p, len1); // Reopen file to append quarter block of data. Close file. final int len2 = (int) BLOCK_SIZE / 4; { FSDataOutputStream out = fs.append(p); AppendTestUtil.write(out, len1, len2); out.close(); } // b. Reopen file and read 1.75 blocks of data. Close file. AppendTestUtil.check(fs, p, len1 + len2); }
/**
 * Test that an append with no locations fails with an exception showing insufficient locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();

  // lower heartbeat interval for fast recognition of DN
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2 (4 DNs total in the cluster)
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // Check for replications
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs =
        fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
    for (DataNode dn : dnsOfCluster) {
      for (DatanodeInfo loc : dnsWithLocations) {
        if (dn.getDatanodeId().equals(loc)) {
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }

    // Wait till 0 replication is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

    // Append to the file. At this point the DNs still alive do not hold the
    // last block, so the append must fail for lack of locations.
    try {
      fileSystem.append(f);
      fail("Append should fail because insufficient locations");
    } catch (IOException e) {
      LOG.info("Expected exception: ", e);
    }

    // The failed append must not leave the file open for write: verify the
    // INode is no longer marked under construction.
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
/**
 * TC11: Racing rename
 *
 * <p>Renames a file while a client still holds an open append stream on it, then
 * checks that closing the stale handle fails and that after lease recovery the
 * block sizes recorded by the namenode match the replicas on the datanodes.
 *
 * @throws IOException an exception might be thrown
 */
public void testTC11() throws Exception {
  final Path p = new Path("/TC11/foo");
  System.out.println("p=" + p);

  // a. Create file and write one block of data. Close file.
  final int len1 = (int) BLOCK_SIZE;
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }

  // b. Reopen file in "append" mode. Append half block of data.
  FSDataOutputStream out = fs.append(p);
  final int len2 = (int) BLOCK_SIZE / 2;
  AppendTestUtil.write(out, len1, len2);
  out.hflush();

  // c. Rename file to file.new.
  final Path pnew = new Path(p + ".new");
  assertTrue(fs.rename(p, pnew));

  // d. Close file handle that was opened in (b). The test expects this close
  // to fail because the file was renamed underneath the open stream.
  try {
    out.close();
    fail("close() should throw an exception");
  } catch (Exception e) {
    AppendTestUtil.LOG.info("GOOD!", e);
  }

  // Shorten lease periods so the namenode recovers the dangling lease quickly,
  // then wait for the lease recovery to run.
  cluster.setLeasePeriod(1000, 1000);
  AppendTestUtil.sleep(5000);

  // Check block sizes: every block except possibly the last must be a full
  // BLOCK_SIZE, and each datanode's stored replica length must match the
  // namenode's view of that block.
  final long len = fs.getFileStatus(pnew).getLen();
  final LocatedBlocks locatedblocks =
      fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
  final int numblock = locatedblocks.locatedBlockCount();
  for (int i = 0; i < numblock; i++) {
    final LocatedBlock lb = locatedblocks.get(i);
    final Block blk = lb.getBlock();
    final long size = lb.getBlockSize();
    if (i < numblock - 1) {
      assertEquals(BLOCK_SIZE, size);
    }
    for (DatanodeInfo datanodeinfo : lb.getLocations()) {
      final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
      final Block metainfo = dn.data.getStoredBlock(blk.getBlockId());
      assertEquals(size, metainfo.getNumBytes());
    }
  }
}
@Test
public void testAppend() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final short REPLICATION = (short) 3;

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path f = new Path(DIR, "testAppend");

    // Step 1: create an empty file and verify its initial status.
    {
      LOG.info("create an empty file " + f);
      fs.create(f, REPLICATION).close();
      final FileStatus st = fs.getFileStatus(f);
      Assert.assertEquals(REPLICATION, st.getReplication());
      Assert.assertEquals(0L, st.getLen());
    }

    final byte[] data = new byte[1000];

    // Step 2: the first append must succeed and grow the file.
    {
      LOG.info("append " + data.length + " bytes to " + f);
      final FSDataOutputStream appendStream = fs.append(f);
      appendStream.write(data);
      appendStream.close();

      final FileStatus st = fs.getFileStatus(f);
      Assert.assertEquals(REPLICATION, st.getReplication());
      Assert.assertEquals(data.length, st.getLen());
    }

    // Step 3: a second append is expected to fail with an IOException.
    {
      LOG.info("append another " + data.length + " bytes to " + f);
      try {
        final FSDataOutputStream appendStream = fs.append(f);
        appendStream.write(data);
        appendStream.close();
        Assert.fail();
      } catch (IOException ioe) {
        LOG.info("This exception is expected", ioe);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * TC7: Corrupted replicas are present.
 *
 * <p>Corrupts one of the two replicas of a half-written block by truncating its
 * on-disk block file, then verifies that append and a subsequent full read still
 * succeed using the surviving good replica.
 *
 * @throws IOException an exception might be thrown
 */
public void testTC7() throws Exception {
  final short repl = 2;
  final Path p = new Path("/TC7/foo");
  System.out.println("p=" + p);

  // a. Create file with replication factor of 2. Write half block of data. Close file.
  final int len1 = (int) (BLOCK_SIZE / 2);
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, repl, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }
  DFSTestUtil.waitReplication(fs, p, repl);

  // b. Log into one datanode that has one replica of this block.
  //    Find the block file on this datanode and truncate it to zero size.
  final LocatedBlocks locatedblocks =
      fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
  assertEquals(1, locatedblocks.locatedBlockCount());
  final LocatedBlock lb = locatedblocks.get(0);
  final Block blk = lb.getBlock();
  assertEquals(len1, lb.getBlockSize());
  DatanodeInfo[] datanodeinfos = lb.getLocations();
  assertEquals(repl, datanodeinfos.length);
  final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
  final FSDataset data = (FSDataset) dn.getFSDataset();
  // Corrupt the first replica by truncating its block file to length 0.
  final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk), "rw");
  AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
  assertEquals(len1, raf.length());
  raf.setLength(0);
  raf.close();

  // c. Open file in "append mode". Append a new block worth of data. Close file.
  final int len2 = (int) BLOCK_SIZE;
  {
    FSDataOutputStream out = fs.append(p);
    AppendTestUtil.write(out, len1, len2);
    out.close();
  }

  // d. Reopen file and read two blocks worth of data; the corrupted replica
  // must not affect correctness.
  AppendTestUtil.check(fs, p, len1 + len2);
}
@Test public void testBestEffort() throws Exception { final Configuration conf = new HdfsConfiguration(); // always replace a datanode but do not throw exception ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); try { final DistributedFileSystem fs = cluster.getFileSystem(); final Path f = new Path(DIR, "testIgnoreReplaceFailure"); final byte[] bytes = new byte[1000]; { LOG.info("write " + bytes.length + " bytes to " + f); final FSDataOutputStream out = fs.create(f, REPLICATION); out.write(bytes); out.close(); final FileStatus status = fs.getFileStatus(f); Assert.assertEquals(REPLICATION, status.getReplication()); Assert.assertEquals(bytes.length, status.getLen()); } { LOG.info("append another " + bytes.length + " bytes to " + f); final FSDataOutputStream out = fs.append(f); out.write(bytes); out.close(); } } finally { if (cluster != null) { cluster.shutdown(); } } }
/**
 * The following test first creates a file with a few blocks. It randomly truncates the replica of
 * the last block stored in each datanode. Finally, it triggers block synchronization to
 * synchronize all stored block.
 *
 * <p>NOTE(review): the javadoc above mentions random truncation of replicas, but
 * this version of the test does not truncate anything — confirm against history
 * whether the doc or the code drifted.
 */
public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000;
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
    cluster.waitActive();

    // create a file
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
    assertTrue(dfs.exists(filepath));
    DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

    // get block info for the last block
    LocatedBlock locatedblock =
        TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(), filestr);
    DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
    assertEquals(REPLICATION_NUM, datanodeinfos.length);

    // connect to data nodes
    DataNode[] datanodes = new DataNode[REPLICATION_NUM];
    for (int i = 0; i < REPLICATION_NUM; i++) {
      datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
      assertTrue(datanodes[i] != null);
    }

    // verify Block Info
    ExtendedBlock lastblock = locatedblock.getBlock();
    DataNode.LOG.info("newblocks=" + lastblock);
    for (int i = 0; i < REPLICATION_NUM; i++) {
      checkMetaInfo(lastblock, datanodes[i]);
    }

    // Re-open the file for append so it is under construction again.
    DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
    cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);

    // expire lease to trigger block recovery.
    waitLeaseRecovery(cluster);

    // After recovery every replica of the last block must agree with the
    // namenode on block id, length, and generation stamp.
    Block[] updatedmetainfo = new Block[REPLICATION_NUM];
    long oldSize = lastblock.getNumBytes();
    lastblock =
        TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(), filestr).getBlock();
    long currentGS = lastblock.getGenerationStamp();
    for (int i = 0; i < REPLICATION_NUM; i++) {
      updatedmetainfo[i] =
          DataNodeTestUtils.getFSDataset(datanodes[i])
              .getStoredBlock(lastblock.getBlockPoolId(), lastblock.getBlockId());
      assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
      assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
      assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
    }

    // verify that lease recovery does not occur when namenode is in safemode
    System.out.println("Testing that lease recovery cannot happen during safemode.");
    filestr = "/foo.safemode";
    filepath = new Path(filestr);
    // Stream intentionally left open so the file stays under construction.
    dfs.create(filepath, (short) 1);
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    assertTrue(dfs.dfs.exists(filestr));
    DFSTestUtil.waitReplication(dfs, filepath, (short) 1);
    waitLeaseRecovery(cluster);

    // verify that we still cannot recover the lease
    LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
    assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Exercises lease recovery on a file left open after a sync: a second user's
 * create attempt on the same path triggers recovery, after which the file's
 * length and replica data must match what was written before the client died.
 *
 * <p>NOTE(review): uses deprecated pre-security APIs (UnixUserGroupInformation,
 * {@code leaseChecker}, {@code sync()}); kept as-is for this legacy branch.
 */
public void testBlockSynchronization() throws Exception {
  final long softLease = 1000;
  final long hardLease = 60 * 60 * 1000;
  final short repl = 3;
  final Configuration conf = new Configuration();
  final int bufferSize = conf.getInt("io.file.buffer.size", 4096);
  conf.setLong("dfs.block.size", BLOCK_SIZE);
  conf.setInt("dfs.heartbeat.interval", 1);
  // conf.setInt("io.bytes.per.checksum", 16);

  MiniDFSCluster cluster = null;
  byte[] actual = new byte[FILE_SIZE];
  try {
    cluster = new MiniDFSCluster(conf, 5, true, null);
    cluster.waitActive();

    // create a file
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();

    // create a random file name
    String filestr = "/foo" + AppendTestUtil.nextInt();
    System.out.println("filestr=" + filestr);
    Path filepath = new Path(filestr);
    FSDataOutputStream stm = dfs.create(filepath, true, bufferSize, repl, BLOCK_SIZE);
    assertTrue(dfs.dfs.exists(filestr));

    // write random number of bytes into it.
    int size = AppendTestUtil.nextInt(FILE_SIZE);
    System.out.println("size=" + size);
    stm.write(buffer, 0, size);

    // sync file
    AppendTestUtil.LOG.info("sync");
    stm.sync();
    // Kill the lease renewer so the client behaves like a dead writer.
    AppendTestUtil.LOG.info("leasechecker.interrupt()");
    dfs.dfs.leaseChecker.interrupt();

    // set the soft limit to be 1 second so that the
    // namenode triggers lease recovery on next attempt to write-for-open.
    cluster.setLeasePeriod(softLease, hardLease);

    // try to re-open the file before closing the previous handle. This
    // should fail but will trigger lease recovery.
    {
      Configuration conf2 = new Configuration(conf);
      String username = UserGroupInformation.getCurrentUGI().getUserName() + "_1";
      UnixUserGroupInformation.saveToConf(
          conf2,
          UnixUserGroupInformation.UGI_PROPERTY_NAME,
          new UnixUserGroupInformation(username, new String[] {"supergroup"}));
      FileSystem dfs2 = FileSystem.get(conf2);

      // Retry up to 10 times: "file exists" means recovery completed;
      // AlreadyBeingCreatedException means recovery is still in progress.
      boolean done = false;
      for (int i = 0; i < 10 && !done; i++) {
        AppendTestUtil.LOG.info("i=" + i);
        try {
          dfs2.create(filepath, false, bufferSize, repl, BLOCK_SIZE);
          fail("Creation of an existing file should never succeed.");
        } catch (IOException ioe) {
          final String message = ioe.getMessage();
          if (message.contains("file exists")) {
            AppendTestUtil.LOG.info("done", ioe);
            done = true;
          } else if (message.contains(AlreadyBeingCreatedException.class.getSimpleName())) {
            AppendTestUtil.LOG.info("GOOD! got " + message);
          } else {
            AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
          }
        }
        if (!done) {
          AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
          try {
            Thread.sleep(5000);
          } catch (InterruptedException e) {
            // best-effort delay; the retry loop handles the early wake-up
          }
        }
      }
      assertTrue(done);
    }

    AppendTestUtil.LOG.info(
        "Lease for file " + filepath + " is recovered. " + "Validating its contents now...");

    // verify that file-size matches
    assertTrue(
        "File should be "
            + size
            + " bytes, but is actually "
            + " found to be "
            + dfs.getFileStatus(filepath).getLen()
            + " bytes",
        dfs.getFileStatus(filepath).getLen() == size);

    // verify that there is enough data to read.
    System.out.println("File size is good. Now validating sizes from datanodes...");
    FSDataInputStream stmin = dfs.open(filepath);
    stmin.readFully(0, actual, 0, size);
    stmin.close();
  } finally {
    try {
      if (cluster != null) {
        cluster.shutdown();
      }
    } catch (Exception e) {
      // ignore shutdown failures; the test verdict was decided above
    }
  }
}
/**
 * Creates the output stream for {@code filepath} and records the pause interval
 * (milliseconds) this writer thread uses between writes.
 */
SlowWriter(DistributedFileSystem fs, Path filepath, final long sleepms) throws IOException {
  // Thread name encodes the target path for easier log correlation.
  super(SlowWriter.class.getSimpleName() + ":" + filepath);
  this.sleepms = sleepms;
  this.filepath = filepath;
  this.out = (HdfsDataOutputStream) fs.create(filepath, REPLICATION);
}