/**
 * Per-test fixture setup: wires up the cache, HDFS store, partitioned HDFS
 * region, compaction manager, region director, and the stats / block-cache
 * handles that individual test methods assert against.
 */
@Override protected void setUp() throws Exception {
  super.setUp();
  // Allow the HDFS store to run against a standalone filesystem (what the
  // local/mini-cluster test environment provides) instead of a full cluster.
  System.setProperty(HDFSStoreImpl.ALLOW_STANDALONE_HDFS_FILESYSTEM_PROP, "true");
  // This is logged by HDFS when it is stopped.
  TestUtils.addExpectedException("sleep interrupted");
  TestUtils.addExpectedException("java.io.InterruptedIOException");
  testDataDir = new Path("test-case");
  cache = createCache();
  configureHdfsStoreFactory();
  hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);
  regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
  regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
  // Region is named after the test method, so each test gets its own region.
  region = regionfactory.create(getName());
  // disable compaction by default and clear existing queues
  HDFSCompactionManager compactionManager = HDFSCompactionManager.getInstance(hdfsStore);
  compactionManager.reset();
  director = HDFSRegionDirector.getInstance();
  director.setCache(cache);
  regionManager = ((LocalRegion) region).getHdfsRegionManager();
  stats = director.getHdfsRegionStats("/" + getName());
  storeStats = hdfsStore.getStats();
  blockCache = hdfsStore.getBlockCache();
  // NOTE(review): presumably switches hoplog organizer code onto test-only
  // paths — confirm against AbstractHoplogOrganizer usage.
  AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
}
/**
 * Starts an in-process HDFS mini cluster for tests.
 *
 * @param port  name-node port the cluster listens on
 * @param numDN number of data nodes to start
 * @param map   extra configuration entries applied before start-up
 * @return the running {@link MiniDFSCluster}
 * @throws Exception if the cluster fails to start
 */
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
  // Keep the cluster's on-disk data under a test-scoped directory.
  System.setProperty("test.build.data", "hdfs-test-cluster");
  Configuration clusterConf = new HdfsConfiguration();
  map.forEach(clusterConf::set);
  // Permit the small block sizes these tests use.
  clusterConf.set("dfs.namenode.fs-limits.min-block-size", "1024");
  return new MiniDFSCluster.Builder(clusterConf)
      .numDataNodes(numDN)
      .nameNodePort(port)
      .build();
}