Code example #1
  /** Test saveNamespace in the middle of a checkpoint. */
  @Test
  public void testCheckpointWithSavenamespace() throws Exception {
    Configuration conf = getConf();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();
    FSNamesystem fsn = FSNamesystem.getFSNamesystem();

    // Hang on to the current FSImage so it can be closed explicitly before restart
    final FSImage originalImage = fsn.dir.fsImage;

    try {
      doAnEdit(fsn, 1);
      CheckpointSignature sig = fsn.rollEditLog();
      LOG.warn("Checkpoint signature: " + sig);

      // Do another edit
      doAnEdit(fsn, 2);

      // Save namespace
      fsn.saveNamespace(true, false);

      // Try to roll the FSImage; this should fail because saveNamespace
      // has already occurred after the call to rollEditLog
      try {
        fsn.rollFSImage(sig);
        fail("The rollFSImage immediately following the saveNamespace command should fail.");
      } catch (IOException e) {
        LOG.info(
            "Expected exception while invoking rollFSImage"
                + " after a successful call to saveNamespace: "
                + e);
      }

      // Now shut down and restart the NN
      originalImage.close();
      fsn.close();
      cluster.shutdown();
      fsn = null;

      // Start a new namesystem, which should be able to recover
      // the namespace from the previous incarnation.
      cluster = new MiniDFSCluster(conf, 1, false, null);
      cluster.waitActive();
      fsn = FSNamesystem.getFSNamesystem();

      // Make sure the loaded image includes our edits.
      checkEditExists(cluster, 1);
      checkEditExists(cluster, 2);
    } finally {
      if (fsn != null) {
        fsn.close();
        cluster.shutdown();
      }
    }
  }
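Example #1 assumes two helpers, doAnEdit and checkEditExists, that are not shown in the snippet. A minimal sketch of what they plausibly look like (the bodies below are an assumption, not the file's exact code):

  // Hedged sketch: make one recognizable edit, a mkdir tagged with an id.
  private void doAnEdit(FSNamesystem fsn, int id) throws IOException {
    fsn.mkdirs(
        "/test" + id,
        new PermissionStatus("test", "Test", new FsPermission((short) 0777)),
        true);
  }

  // Hedged sketch: verify an edit survived by checking that its directory exists.
  private void checkEditExists(MiniDFSCluster cluster, int id) throws IOException {
    assertNotNull(cluster.getFileSystem().getFileStatus(new Path("/test" + id)));
  }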
Code example #2
File: TestCopyFiles.java Project: Jude7/bc-hadoop2.0
  public void testMapCount() throws Exception {
    String namenode = null;
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    try {
      Configuration conf = new Configuration();

      dfs = new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).build();

      FileSystem fs = dfs.getFileSystem();
      final FsShell shell = new FsShell(conf);
      namenode = fs.getUri().toString();
      MyFile[] files = createFiles(fs.getUri(), "/srcdat");
      long totsize = 0;
      for (MyFile f : files) {
        totsize += f.getSize();
      }

      Configuration job = new JobConf(conf);
      job.setLong("distcp.bytes.per.map", totsize / 3);
      ToolRunner.run(
          new DistCpV1(job),
          new String[] {
            "-m", "100", "-log", namenode + "/logs", namenode + "/srcdat", namenode + "/destdat"
          });
      assertTrue(
          "Source and destination directories do not match.", checkFiles(fs, "/destdat", files));

      String logdir = namenode + "/logs";
      System.out.println(execCmd(shell, "-lsr", logdir));
      FileStatus[] logs = fs.listStatus(new Path(logdir));
      // rare case where splits are exact, logs.length can be 4
      assertTrue("Unexpected map count, logs.length=" + logs.length, logs.length == 2);

      deldir(fs, "/destdat");
      deldir(fs, "/logs");
      ToolRunner.run(
          new DistCpV1(job),
          new String[] {
            "-m", "1", "-log", namenode + "/logs", namenode + "/srcdat", namenode + "/destdat"
          });

      System.out.println(execCmd(shell, "-lsr", logdir));
      logs = fs.globStatus(new Path(namenode + "/logs/part*"));
      assertTrue("Unexpected map count, logs.length=" + logs.length, logs.length == 1);
    } finally {
      if (dfs != null) {
        dfs.shutdown();
      }
      if (mr != null) {
        mr.shutdown();
      }
    }
  }
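Examples #2 and #4-#6 lean on a few small helpers from TestCopyFiles. Two of them are simple enough to sketch here; treat the bodies as plausible reconstructions rather than the file's exact code:

  // Hedged sketch: recursively delete a directory tree on the given file system.
  private static void deldir(FileSystem fs, String topdir) throws IOException {
    fs.delete(new Path(topdir), true);
  }

  // Hedged sketch: run an FsShell command and capture its stdout as a String.
  private static String execCmd(FsShell shell, String... args) throws Exception {
    ByteArrayOutputStream baout = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(baout, true);
    PrintStream old = System.out;
    System.setOut(out); // redirect stdout into the buffer
    try {
      shell.run(args);
    } finally {
      System.setOut(old); // always restore stdout
    }
    return baout.toString();
  }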
Code example #3
  /**
   * Roll the edit log, make another edit, save the namespace, then restart and
   * verify that both edits survive.
   *
   * @param dosafemode enter safe mode before calling saveNamespace
   * @param force forwarded to FSNamesystem#saveNamespace
   * @param uncompressed forwarded to FSNamesystem#saveNamespace
   */
  private void testSaveWhileEditsRolled(boolean dosafemode, boolean force, boolean uncompressed)
      throws Exception {
    Configuration conf = getConf();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();
    FSNamesystem fsn = FSNamesystem.getFSNamesystem();

    // Replace the FSImage with a spy
    FSImage originalImage = fsn.dir.fsImage;
    FSImage spyImage = spy(originalImage);
    spyImage.setStorageDirectories(
        FSNamesystem.getNamespaceDirs(conf), FSNamesystem.getNamespaceEditsDirs(conf));
    fsn.dir.fsImage = spyImage;

    try {
      doAnEdit(fsn, 1);
      CheckpointSignature sig = fsn.rollEditLog();
      LOG.warn("Checkpoint signature: " + sig);
      // Do another edit
      doAnEdit(fsn, 2);

      // Save namespace
      if (dosafemode) {
        fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      }
      fsn.saveNamespace(force, uncompressed);

      // Now shut down and restart the NN
      originalImage.close();
      originalImage = null;
      fsn.close();
      fsn = null;
      cluster.shutdown();

      // Start a new namesystem, which should be able to recover
      // the namespace from the previous incarnation.
      cluster = new MiniDFSCluster(conf, 1, false, null);
      cluster.waitActive();
      fsn = FSNamesystem.getFSNamesystem();

      // Make sure the loaded image includes our edits.
      checkEditExists(cluster, 1);
      checkEditExists(cluster, 2);
    } finally {
      if (originalImage != null) {
        originalImage.close();
      }
      if (fsn != null) {
        fsn.close();
        cluster.shutdown();
      }
    }
  }
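Since testSaveWhileEditsRolled is a private helper, the test class presumably drives it through small @Test wrappers that pin down the three flags. A plausible (assumed, not verbatim) pair of callers:

  @Test
  public void testSaveWhileEditsRolledInSafeMode() throws Exception {
    // Enter safe mode first; no force, compressed image.
    testSaveWhileEditsRolled(true, false, false);
  }

  @Test
  public void testForcedSaveWhileEditsRolled() throws Exception {
    // Skip safe mode and force the save instead.
    testSaveWhileEditsRolled(false, true, false);
  }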
Code example #4
File: TestCopyFiles.java Project: Jude7/bc-hadoop2.0
 /** Tests the -basedir option: copy files from one DFS file system to another. */
 public void testBasedir() throws Exception {
   String namenode = null;
   MiniDFSCluster cluster = null;
   try {
     Configuration conf = new Configuration();
     cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem hdfs = cluster.getFileSystem();
     namenode = FileSystem.getDefaultUri(conf).toString();
     if (namenode.startsWith("hdfs://")) {
       MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat");
       ToolRunner.run(
           new DistCpV1(conf),
           new String[] {
             "-basedir", "/basedir", namenode + "/basedir/middle/srcdat", namenode + "/destdat"
           });
       assertTrue(
           "Source and destination directories do not match.",
           checkFiles(hdfs, "/destdat/middle/srcdat", files));
       deldir(hdfs, "/destdat");
       deldir(hdfs, "/basedir");
       deldir(hdfs, "/logs");
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
Code example #5
File: TestCopyFiles.java Project: Jude7/bc-hadoop2.0
 /** Copy files from a DFS file system to the local file system. */
 public void testCopyFromDfsToLocal() throws Exception {
   MiniDFSCluster cluster = null;
   try {
     Configuration conf = new Configuration();
     final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
     cluster = new MiniDFSCluster(conf, 1, true, null);
     final FileSystem hdfs = cluster.getFileSystem();
     final String namenode = FileSystem.getDefaultUri(conf).toString();
     if (namenode.startsWith("hdfs://")) {
       MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
       ToolRunner.run(
           new DistCpV1(conf),
           new String[] {
             "-log", "/logs", namenode + "/srcdat", "file:///" + TEST_ROOT_DIR + "/destdat"
           });
       assertTrue(
           "Source and destination directories do not match.",
           checkFiles(localfs, TEST_ROOT_DIR + "/destdat", files));
       assertTrue("Log directory does not exist.", hdfs.exists(new Path("/logs")));
       deldir(localfs, TEST_ROOT_DIR + "/destdat");
       deldir(hdfs, "/logs");
       deldir(hdfs, "/srcdat");
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
Code example #6
File: TestCopyFiles.java Project: Jude7/bc-hadoop2.0
  /** Copy an empty directory on a DFS file system. */
  public void testEmptyDir() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      final FileSystem hdfs = cluster.getFileSystem();
      namenode = FileSystem.getDefaultUri(conf).toString();
      if (namenode.startsWith("hdfs://")) {

        FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
        fs.mkdirs(new Path("/empty"));

        ToolRunner.run(
            new DistCpV1(conf),
            new String[] {"-log", namenode + "/logs", namenode + "/empty", namenode + "/dest"});
        fs = FileSystem.get(URI.create(namenode + "/destdat"), conf);
        assertTrue(
            "Destination directory does not exist.", fs.exists(new Path(namenode + "/dest")));
        deldir(hdfs, "/dest");
        deldir(hdfs, "/empty");
        deldir(hdfs, "/logs");
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Code example #7
  /**
   * The corrupt block has to be removed when the number of valid replicas
   * matches the replication factor for the file. In this test, that condition
   * is achieved by increasing the number of good replicas by replicating on a
   * new datanode.
   *
   * The test strategy:
   *   1. Bring up a cluster with 3 DataNodes.
   *   2. Create a file with replication factor 3.
   *   3. Corrupt one replica of a block of the file.
   *   4. Verify that there are still 2 good replicas and 1 corrupt replica
   *      (the corrupt replica should not be removed, since the number of good
   *      replicas (2) is less than the replication factor (3)).
   *   5. Start a new datanode.
   *   6. Verify that a new replica is created and the corrupt replica is removed.
   */
  @Test
  public void testByAddingAnExtraDataNode() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    FileSystem fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);

    try {
      final Path fileName = new Path("/foo1");
      DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
      DFSTestUtil.waitReplication(fs, fileName, (short) 3);

      Block block = DFSTestUtil.getFirstBlock(fs, fileName);
      corruptBlock(cluster, fs, fileName, 0, block);

      DFSTestUtil.waitReplication(fs, fileName, (short) 2);

      assertEquals(2, namesystem.blockManager.countNodes(block).liveReplicas());
      assertEquals(1, namesystem.blockManager.countNodes(block).corruptReplicas());

      cluster.restartDataNode(dnPropsFourth);

      DFSTestUtil.waitReplication(fs, fileName, (short) 3);

      assertEquals(3, namesystem.blockManager.countNodes(block).liveReplicas());
      assertEquals(0, namesystem.blockManager.countNodes(block).corruptReplicas());
    } finally {
      cluster.shutdown();
    }
  }
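The corruptBlock(...) helper in example #7 is not shown. Such a helper typically locates one on-disk replica of the block and overwrites part of it, so the datanode reports the replica as corrupt on its next scan. How the replica file is found varies across MiniDFSCluster versions, so this sketch takes the file as a parameter and shows only the corruption step:

  // Hedged sketch: clobber the first bytes of a replica file on disk so the
  // next block scan flags the replica as corrupt.
  private void corruptReplicaFile(File replicaFile) throws IOException {
    RandomAccessFile raf = new RandomAccessFile(replicaFile, "rw");
    try {
      raf.seek(0);
      raf.writeBytes("corrupt replica"); // overwrite real block data
    } finally {
      raf.close();
    }
  }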
Code example #8
 @After
 public void tearDown() throws Exception {
   IOUtils.cleanup(null, dfs);
   if (cluster != null) {
     cluster.shutdown();
   }
 }
Code example #9
 @After
 public void tearDown() throws Exception {
   if (cluster != null) {
     cluster.shutdown();
     cluster = null;
   }
 }
Code example #10
 @AfterClass
 public static void shutdown() throws Exception {
   IOUtils.cleanup(null, hdfs, hdfsAsUser1, hdfsAsUser2);
   if (cluster != null) {
     cluster.shutdown();
   }
 }
Code example #11
 @AfterClass
 public static void afterClass() throws Exception {
   if (cluster == null) return;
   FileSystem fs = cluster.getFileSystem();
   bench.cleanup(fs);
   cluster.shutdown();
 }
Code example #12
  @After
  public void shutDownCluster() throws Exception {

    // Dump all RamDisk JMX metrics before shutting down the cluster
    printRamDiskJMXMetrics();

    if (fs != null) {
      fs.close();
      fs = null;
      client = null;
    }

    if (cluster != null) {
      cluster.shutdownDataNodes();
      cluster.shutdown();
      cluster = null;
    }

    jmx = null;

    IOUtils.closeQuietly(sockDir);
    sockDir = null;
  }
Code example #13
  /**
   * Test if fsck can return -1 in case of failure
   *
   * @throws Exception
   */
  public void testFsckError() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      // bring up a one-node cluster
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 1, true, null);
      String fileName = "/test.txt";
      Path filePath = new Path(fileName);
      FileSystem fs = cluster.getFileSystem();

      // create a one-block file
      DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
      DFSTestUtil.waitReplication(fs, filePath, (short) 1);

      // intentionally corrupt NN data structure
      INodeFile node = (INodeFile) cluster.getNameNode().namesystem.dir.rootDir.getNode(fileName);
      assertEquals(node.blocks.length, 1);
      node.blocks[0].setNumBytes(-1L); // set the block length to be negative

      // run fsck and expect a failure with -1 as the error code
      String outStr = runFsck(conf, -1, true, fileName);
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));

      // clean up file system
      fs.delete(filePath, true);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
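The fsck examples (#13, #14, and #23 below) all go through a runFsck(...) helper. Its general shape - run DFSck through ToolRunner, capture stdout, and optionally assert on the exit code - is sketched here; the exact body in TestFsck may differ:

  // Hedged sketch: run fsck programmatically and return its console output.
  static String runFsck(Configuration conf, int expectedErrCode,
      boolean checkErrorCode, String... path) throws Exception {
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream oldOut = System.out;
    System.setOut(new PrintStream(bStream, true)); // capture fsck output
    try {
      int errCode = ToolRunner.run(new DFSck(conf), path);
      if (checkErrorCode) {
        assertEquals(expectedErrCode, errCode);
      }
    } finally {
      System.setOut(oldOut); // always restore stdout
    }
    return bStream.toString();
  }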
Code example #14
 public void testFsckNonExistent() throws Exception {
   DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8 * 1024);
   MiniDFSCluster cluster = null;
   FileSystem fs = null;
   try {
     Configuration conf = new Configuration();
     conf.setLong("dfs.blockreport.intervalMsec", 10000L);
     cluster = new MiniDFSCluster(conf, 4, true, null);
     fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
     util.waitReplication(fs, "/srcdat", (short) 3);
     String outStr = runFsck(conf, 0, true, "/non-existent");
     assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
     System.out.println(outStr);
     util.cleanup(fs, "/srcdat");
   } finally {
     if (fs != null) {
       try {
         fs.close();
        } catch (Exception e) {
          // ignore failures while closing
        }
     }
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
Code example #15
 /**
  * Test that {@link PlacementMonitor} can choose a correct datanode
  *
  * @throws Exception
  */
 @Test
 public void testChooseDatanode() throws Exception {
   setupCluster();
   try {
     Set<DatanodeInfo> excluded = new HashSet<DatanodeInfo>();
     for (int i = 0; i < 3; ++i) {
       excluded.add(datanodes[i]);
     }
     final int NUM_TESTS = 10;
     for (int i = 0; i < NUM_TESTS; ) {
       DatanodeInfo target = blockMover.cluster.getRandomNode(excluded);
       if (target == null) {
         continue;
       }
       Assert.assertFalse(excluded.contains(target));
       ++i;
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
     if (placementMonitor != null) {
       placementMonitor.stop();
     }
   }
 }
Code example #16
  /* Fill up a cluster with <code>numNodes</code> datanodes
   * whose used space is <code>size</code>.
   */
  private Block[] generateBlocks(long size, short numNodes) throws IOException {
    cluster = new MiniDFSCluster(CONF, numNodes, true, null);
    try {
      cluster.waitActive();
      client = DFSClient.createNamenode(CONF);

      short replicationFactor = (short) (numNodes - 1);
      long fileLen = size / replicationFactor;
      createFile(fileLen, replicationFactor);

      List<LocatedBlock> locatedBlocks =
          client.getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();

      int numOfBlocks = locatedBlocks.size();
      Block[] blocks = new Block[numOfBlocks];
      for (int i = 0; i < numOfBlocks; i++) {
        Block b = locatedBlocks.get(i).getBlock();
        blocks[i] = new Block(b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
      }

      return blocks;
    } finally {
      cluster.shutdown();
    }
  }
Code example #17
  /* This test starts a cluster with the given datanode capacities and racks,
   * fills the cluster to be 30% full, then adds an empty node and starts
   * balancing.
   * @param newCapacity new node's capacity
   * @param newRack new node's rack
   */
  private void test(long[] capacities, String[] racks, long newCapacity, String newRack)
      throws Exception {
    int numOfDatanodes = capacities.length;
    assertEquals(numOfDatanodes, racks.length);
    cluster = new MiniDFSCluster(0, CONF, capacities.length, true, true, null, racks, capacities);
    try {
      cluster.waitActive();
      client = DFSClient.createNamenode(CONF);

      long totalCapacity = 0L;
      for (long capacity : capacities) {
        totalCapacity += capacity;
      }
      // fill up the cluster to be 30% full
      long totalUsedSpace = totalCapacity * 3 / 10;
      createFile(totalUsedSpace / numOfDatanodes, (short) numOfDatanodes);
      // start up an empty node with the same capacity and on the same rack
      cluster.startDataNodes(CONF, 1, true, null, new String[] {newRack}, new long[] {newCapacity});

      totalCapacity += newCapacity;

      // run balancer and validate results
      runBalancer(totalUsedSpace, totalCapacity);
    } finally {
      cluster.shutdown();
    }
  }
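A plausible caller for this helper (assumed for illustration; the real test class defines its own capacity and rack constants):

  // Hypothetical per-node capacity used by the caller below.
  private static final long CAPACITY = 500L * 1024 * 1024;

  /** Start one node at CAPACITY, then add an equally sized empty node on the same rack. */
  public void testBalancerSameRack() throws Exception {
    test(new long[] {CAPACITY}, new String[] {"/rack0"}, CAPACITY, "/rack0");
  }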
Code example #18
File: TestStorageRestore.java Project: Shmuma/hadoop
  /**
   * Test:
   *   1. create a DFS cluster with 3 storage directories - 2 EDITS_IMAGE, 1 EDITS
   *   2. create a cluster and write a file
   *   3. corrupt/disable one storage (or two) by removing
   *   4. run doCheckpoint - it will fail on removed dirs (which will
   *      invalidate the storages)
   *   5. write another file
   *   6. check that edits and fsimage differ
   *   7. run doCheckpoint
   *   8. verify that all the image and edits files are the same
   */
  @Test
  public void testStorageRestore() throws Exception {
    int numDatanodes = 2;
    cluster =
        new MiniDFSCluster(0, config, numDatanodes, true, false, true, null, null, null, null);
    cluster.waitActive();

    SecondaryNameNode secondary = new SecondaryNameNode(config);

    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/", "test");
    writeFile(fs, path, 2);

    invalidateStorage(cluster.getNameNode().getFSImage());

    path = new Path("/", "test1");
    writeFile(fs, path, 2);

    checkFiles(false);

    secondary.doCheckpoint();

    checkFiles(true);
    secondary.shutdown();
    cluster.shutdown();
  }
Code example #19
 @AfterClass
 public static void shutdown() throws Exception {
   IOUtils.cleanup(null, hdfs, fsAsBruce, fsAsDiana);
   if (cluster != null) {
     cluster.shutdown();
   }
 }
Code example #20
 @AfterClass
 public static void tearDown() throws Exception {
   if (mrCluster != null) mrCluster.shutdown();
   mrCluster = null;
   if (dfsCluster != null) dfsCluster.shutdown();
   dfsCluster = null;
 }
Code example #21
  /** Test DFS Raid */
  public void testBlockMissingException() throws Exception {
    LOG.info("Test testBlockMissingException started.");
    long blockSize = 1024L;
    int numBlocks = 4;
    conf = new Configuration();
    try {
      dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
      dfs.waitActive();
      fileSys = (DistributedFileSystem) dfs.getFileSystem();
      Path file1 = new Path("/user/dhruba/raidtest/file1");
      createOldFile(fileSys, file1, 1, numBlocks, blockSize);

      // Extract block locations from the file system; wait till the file is closed.
      LocatedBlocks locations =
          fileSys.dfs.namenode.getBlockLocations(file1.toString(), 0, numBlocks * blockSize);
      // remove block of file
      LOG.info("Remove first block of file");
      corruptBlock(file1, locations.get(0).getBlock());

      // validate that the system throws BlockMissingException
      validateFile(fileSys, file1);
    } finally {
      if (fileSys != null) fileSys.close();
      if (dfs != null) dfs.shutdown();
    }
    LOG.info("Test testBlockMissingException completed.");
  }
Code example #22
 @Test(timeout = 120000)
 public void testSeekAfterSetDropBehind() throws Exception {
   // start a cluster
   LOG.info("testSeekAfterSetDropBehind");
   Configuration conf = new HdfsConfiguration();
   MiniDFSCluster cluster = null;
   String TEST_PATH = "/test";
   int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
   try {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
     // verify that we can seek after setDropBehind
     FSDataInputStream fis = fs.open(new Path(TEST_PATH));
     try {
       Assert.assertTrue(fis.read() != -1); // create BlockReader
       fis.setDropBehind(false); // clear BlockReader
       fis.seek(2); // seek
     } finally {
       fis.close();
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
Code example #23
  /** Run fsck on a healthy cluster, then again after restarting with no datanodes. */
  public void testFsck() throws Exception {
    DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8 * 1024);
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new Configuration();
      conf.setLong("dfs.blockreport.intervalMsec", 10000L);
      cluster = new MiniDFSCluster(conf, 4, true, null);
      fs = cluster.getFileSystem();
      util.createFiles(fs, "/srcdat");
      util.waitReplication(fs, "/srcdat", (short) 3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      System.out.println(outStr);
      if (fs != null) {
        try {
          fs.close();
        } catch (Exception e) {
          // ignore failures while closing
        }
      }
      cluster.shutdown();

      // restart the cluster; bring up namenode but not the data nodes
      cluster = new MiniDFSCluster(conf, 0, false, null);
      outStr = runFsck(conf, 1, true, "/");
      // expect the result is corrupt
      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
      System.out.println(outStr);

      // bring up data nodes & cleanup cluster
      cluster.startDataNodes(conf, 4, true, null, null);
      cluster.waitActive();
      cluster.waitClusterUp();
      fs = cluster.getFileSystem();
      util.cleanup(fs, "/srcdat");
    } finally {
      if (fs != null) {
        try {
          fs.close();
        } catch (Exception e) {
          // ignore failures while closing
        }
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Code example #24
 /**
  * Shut down the MiniDFSCluster in the teardown phase.
  *
  * @throws IOException
  */
 @Override
 public void shutdown() throws IOException {
   if (mIsStarted) {
     mDfsClient.close();
     mDfsCluster.shutdown();
     mIsStarted = false;
   }
 }
Code example #25
 @After
 public void tearDown() throws Exception {
   for (int i = 0; i < 3; i++) {
     FileUtil.setExecutable(new File(dataDir, "data" + (2 * i + 1)), true);
     FileUtil.setExecutable(new File(dataDir, "data" + (2 * i + 2)), true);
   }
   cluster.shutdown();
 }
Code example #26
 protected void closeCluster() throws IOException {
   if (null != fs) {
     fs.close();
   }
   if (null != cluster) {
     cluster.shutdown();
   }
 }
Code example #27
 @AfterClass
 public static void cleanUpClass() throws IOException {
   System.clearProperty("sdc.resources.dir");
   if (miniDFS != null) {
     miniDFS.shutdown();
     miniDFS = null;
   }
 }
Code example #28
File: TestHDFSCLI.java Project: nourlcn/yarn-comment
 @After
 @Override
 public void tearDown() throws Exception {
   if (null != fs) fs.close();
   dfsCluster.shutdown();
   Thread.sleep(2000);
   super.tearDown();
 }
Code example #29
 public void myShutDown() throws Exception {
   if (fileSys != null) {
     fileSys.close();
   }
   if (dfs != null) {
     dfs.shutdown();
   }
 }
Code example #30
File: HDFSTest.java Project: lizhiyong/flink
 @After
 public void destroyHDFS() {
   try {
     hdfs.delete(hdPath, false);
     hdfsCluster.shutdown();
   } catch (IOException e) {
     throw new RuntimeException(e);
   }
 }
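A teardown like the one in example #30 pairs with a @Before that boots the mini cluster. A minimal sketch, assuming the field names hdfs, hdPath, and hdfsCluster from the snippet above (the base directory and test path are hypothetical):

  @Before
  public void createHDFS() throws IOException {
    Configuration conf = new Configuration();
    // Keep the cluster's storage under the build directory (assumed location).
    File baseDir = new File("./target/hdfs/hdfsTest").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

    hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    hdfs = hdfsCluster.getFileSystem();
    hdPath = new Path("/test");
  }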