  @Override
  protected void setUp() throws Exception {
    super.setUp();
    System.setProperty(HDFSStoreImpl.ALLOW_STANDALONE_HDFS_FILESYSTEM_PROP, "true");

    // These exceptions are logged by HDFS when it is stopped.
    TestUtils.addExpectedException("sleep interrupted");
    TestUtils.addExpectedException("java.io.InterruptedIOException");

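    // HDFS directory under which this test writes its data; deleted in tearDown()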
    testDataDir = new Path("test-case");

    cache = createCache();

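    // Configure and create the HDFS store that backs the test region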
    configureHdfsStoreFactory();
    hdfsStore = (HDFSStoreImpl) hsf.create(HDFS_STORE_NAME);

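    // Create a partitioned region persisted to the HDFS store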
    regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION_HDFS);
    regionfactory.setHDFSStoreName(HDFS_STORE_NAME);
    region = regionfactory.create(getName());

    // Disable compaction by default and clear existing queues
    HDFSCompactionManager compactionManager = HDFSCompactionManager.getInstance(hdfsStore);
    compactionManager.reset();

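    // Wire the region director to the cache and capture handles to the region
    // manager, region stats, store stats, and block cache for use by the tests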
    director = HDFSRegionDirector.getInstance();
    director.setCache(cache);
    regionManager = ((LocalRegion) region).getHdfsRegionManager();
    stats = director.getHdfsRegionStats("/" + getName());
    storeStats = hdfsStore.getStats();
    blockCache = hdfsStore.getBlockCache();
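    // Signal to the hoplog organizer that this is a JUnit test run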
    AbstractHoplogOrganizer.JUNIT_TEST_RUN = true;
  }

  @Override
  protected void tearDown() throws Exception {
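    // Clean up in reverse order of creation: region, HDFS store data, then cache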
    if (region != null) {
      region.destroyRegion();
    }

    if (hdfsStore != null) {
      hdfsStore.getFileSystem().delete(testDataDir, true);
      hdfsStore.destroy();
    }

    if (cache != null) {
      cache.close();
    }
    super.tearDown();
    TestUtils.removeExpectedException("sleep interrupted");
    TestUtils.removeExpectedException("java.io.InterruptedIOException");
  }