private void corruptBlock(MiniDFSCluster cluster, FileSystem fs,
    final Path fileName, int dnIndex, Block block) throws IOException {
  // Corrupt the block on datanode dnIndex.
  // The indexes change once the nodes are restarted,
  // but the data directory will not change.
  assertTrue(cluster.corruptReplica(block.getBlockName(), dnIndex));
  DataNodeProperties dnProps = cluster.stopDataNode(0);

  // Each datanode has multiple data dirs; check each one.
  for (int dn = dnIndex * 2; dn < dnIndex * 2 + 2; dn++) {
    File dataDir = new File(MiniDFSCluster.getBaseDirectory() + "data");
    File scanLogFile = new File(dataDir,
        "data" + (dn + 1) + MiniDFSCluster.FINALIZED_DIR_NAME
            + "dncp_block_verification.log.curr");
    if (scanLogFile.exists()) {
      // Wait up to one minute for the deletion to succeed.
      for (int i = 0; !scanLogFile.delete(); i++) {
        assertTrue("Could not delete log file in one minute", i < 60);
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ignored) {
        }
      }
    }
  }

  // Restart the datanode so the corrupt replica will be detected.
  cluster.restartDataNode(dnProps);
}
public class TestBestEffortLongFile {

  private static final File FILE = new File(MiniDFSCluster.getBaseDirectory()
      + File.separatorChar + "TestBestEffortLongFile");

  @Before
  public void cleanup() {
    if (FILE.exists()) {
      assertTrue(FILE.delete());
    }
    FILE.getParentFile().mkdirs();
  }

  @Test
  public void testGetSet() throws IOException {
    BestEffortLongFile f = new BestEffortLongFile(FILE, 12345L);
    try {
      // Before the file exists, should return default.
      assertEquals(12345L, f.get());
      // And first access should open it.
      assertTrue(FILE.exists());

      Random r = new Random();
      for (int i = 0; i < 100; i++) {
        long newVal = r.nextLong();
        // Changing the value should be reflected in the next get() call.
        f.set(newVal);
        assertEquals(newVal, f.get());

        // And should be reflected in a new instance (i.e. it actually got
        // written to the file).
        BestEffortLongFile f2 = new BestEffortLongFile(FILE, 999L);
        try {
          assertEquals(newVal, f2.get());
        } finally {
          IOUtils.closeStream(f2);
        }
      }
    } finally {
      IOUtils.closeStream(f);
    }
  }

  @Test
  public void testTruncatedFileReturnsDefault() throws IOException {
    assertTrue(FILE.createNewFile());
    assertEquals(0, FILE.length());
    BestEffortLongFile f = new BestEffortLongFile(FILE, 12345L);
    try {
      assertEquals(12345L, f.get());
    } finally {
      f.close();
    }
  }
}
/** Test that FSNamesystem#clear clears all leases. */
@Test
public void testFSNamespaceClearLeases() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
  LeaseManager leaseMan = fsn.getLeaseManager();
  leaseMan.addLease("client1", "importantFile");
  assertEquals(1, leaseMan.countLease());
  fsn.clear();
  leaseMan = fsn.getLeaseManager();
  assertEquals(0, leaseMan.countLease());
}
@Before
public void setup() throws Exception {
  File editsDir = new File(MiniDFSCluster.getBaseDirectory(null)
      + File.separator + "TestJournalNode");
  FileUtil.fullyDelete(editsDir);

  conf.set(JournalConfigKeys.DFS_JOURNALNODE_DIR_KEY,
      editsDir.getAbsolutePath());
  conf.set(JournalConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "0.0.0.0:0");
  int port = MiniJournalCluster.getFreeHttpPortAndUpdateConf(conf, true);
  httpAddress = "http://localhost:" + port;

  jn = new JournalNode();
  jn.setConf(conf);
  jn.start();
  journalId = "test-journalid-" + QJMTestUtil.uniqueSequenceId();
  journal = jn.getOrCreateJournal(
      QuorumJournalManager.journalIdStringToBytes(journalId));
  journal.transitionJournal(FAKE_NSINFO, Transition.FORMAT, null);
  journal.transitionImage(FAKE_NSINFO, Transition.FORMAT, null);
}
private MiniJournalCluster(Builder b) throws IOException {
  LOG.info("Starting MiniJournalCluster with " + b.numJournalNodes
      + " journal nodes");

  if (b.baseDir != null) {
    this.baseDir = new File(b.baseDir);
  } else {
    this.baseDir = new File(MiniDFSCluster.getBaseDirectory());
  }

  nodes = new JNInfo[b.numJournalNodes];

  for (int i = 0; i < b.numJournalNodes; i++) {
    if (b.format) {
      File dir = getStorageDir(i);
      LOG.debug("Fully deleting JN directory " + dir);
      FileUtil.fullyDelete(dir);
    }
    JournalNode jn = new JournalNode();
    jn.setConf(createConfForNode(b, i));
    jn.start();
    nodes[i] = new JNInfo(jn);
  }
}
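// A minimal usage sketch for reaching the private constructor above (an
// assumption, not part of the original file): in Apache Hadoop the cluster is
// built through MiniJournalCluster.Builder; numJournalNodes(int), build(), and
// shutdown() are assumed to behave the same way in this codebase.
//
//   Configuration conf = new Configuration();
//   MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
//       .numJournalNodes(3)
//       .build();
//   try {
//     // exercise the journal nodes here
//   } finally {
//     cluster.shutdown();
//   }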
@After
public void cleanUp() {
  FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
}