@BeforeClass
  public static void clusterSetupAtBeginning()
      throws IOException, LoginException, URISyntaxException {
    SupportsBlocks = true;
    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

    cluster =
        new MiniDFSCluster.Builder(CONF)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
            .numDataNodes(2)
            .build();
    cluster.waitClusterUp();

    fHdfs = cluster.getFileSystem(0);
    fHdfs2 = cluster.getFileSystem(1);
    fHdfs
        .getConf()
        .set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());
    fHdfs2
        .getConf()
        .set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());

    defaultWorkingDirectory =
        fHdfs.makeQualified(
            new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
    defaultWorkingDirectory2 =
        fHdfs2.makeQualified(
            new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));

    fHdfs.mkdirs(defaultWorkingDirectory);
    fHdfs2.mkdirs(defaultWorkingDirectory2);
  }
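The fixture above points fs.defaultFS at ViewFS but defines no mount points; tests built on it typically link paths into the two federated namespaces before fetching a ViewFS handle. A minimal sketch, assuming ConfigUtil from org.apache.hadoop.fs.viewfs (the /user and /data mount paths are illustrative, not part of the original fixture):

  // Hypothetical helper: link one directory into each namespace, then
  // return a FileSystem backed by the resulting ViewFS mount table.
  private static FileSystem createViewFs(Configuration conf) throws Exception {
    ConfigUtil.addLink(conf, "/user", new Path(fHdfs.getUri() + "/user").toUri());
    ConfigUtil.addLink(conf, "/data", new Path(fHdfs2.getUri() + "/data").toUri());
    return FileSystem.get(FsConstants.VIEWFS_URI, conf);
  }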
Example #2
 /** Copy files from the local file system to the DFS file system. */
 public void testCopyFromLocalToDfs() throws Exception {
   MiniDFSCluster cluster = null;
   try {
     Configuration conf = new Configuration();
     cluster = new MiniDFSCluster(conf, 1, true, null);
     final FileSystem hdfs = cluster.getFileSystem();
     final String namenode = hdfs.getUri().toString();
     if (namenode.startsWith("hdfs://")) {
       MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR + "/srcdat");
       ToolRunner.run(
           new DistCpV1(conf),
           new String[] {
             "-log",
             namenode + "/logs",
             "file:///" + TEST_ROOT_DIR + "/srcdat",
             namenode + "/destdat"
           });
       assertTrue(
           "Source and destination directories do not match.",
           checkFiles(cluster.getFileSystem(), "/destdat", files));
       assertTrue("Log directory does not exist.", hdfs.exists(new Path(namenode + "/logs")));
       deldir(hdfs, "/destdat");
       deldir(hdfs, "/logs");
       deldir(FileSystem.get(LOCAL_FS, conf), TEST_ROOT_DIR + "/srcdat");
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
  @BeforeClass
  public static void setUp() throws Exception {

    Configuration conf = new Configuration();
    conf.set("hadoop.security.auth_to_local", "RULE:[2:$1]");
    dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
    jConf = new JobConf(conf);
    mrCluster =
        new MiniMRCluster(
            0,
            0,
            numSlaves,
            dfsCluster.getFileSystem().getUri().toString(),
            1,
            null,
            null,
            null,
            jConf);

    createTokenFileJson();
    verifySecretKeysInJSONFile();
    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
    FileSystem fs = dfsCluster.getFileSystem();

    p1 = new Path("file1");
    p2 = new Path("file2");

    p1 = fs.makeQualified(p1);
  }
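createTokenFileJson() and verifySecretKeysInJSONFile() are defined elsewhere in this test class. A plausible sketch of the first, assuming Jackson's ObjectMapper, commons-codec's Base64, and javax.crypto.KeyGenerator; the file name and key count are illustrative:

  private static void createTokenFileJson() throws IOException {
    Map<String, String> map = new HashMap<String, String>();
    try {
      KeyGenerator kg = KeyGenerator.getInstance("HmacSHA1");
      for (int i = 0; i < 10; i++) { // illustrative key count
        byte[] encodedKey = kg.generateKey().getEncoded();
        map.put("alias" + i, Base64.encodeBase64String(encodedKey));
      }
    } catch (NoSuchAlgorithmException e) {
      throw new IOException(e);
    }
    // persist the alias -> secret-key map as JSON for the job to load
    new ObjectMapper().writeValue(new File("tokenFile.json"), map);
  }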
  @BeforeClass
  public static void clusterSetupAtBeginning() throws IOException {
    cluster =
        new MiniDFSCluster.Builder(clusterConf)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
            .numDataNodes(2)
            .build();
    cluster.waitClusterUp();

    fHdfs = cluster.getFileSystem(0);
    fHdfs2 = cluster.getFileSystem(1);
  }
Example #5
 @After
 public void cleanupMetaFolder() {
   Path meta = new Path("/meta");
   try {
     // The job under test is expected to have removed /meta already; if it
     // still exists, delete it so later tests start clean, then fail this one.
     if (cluster.getFileSystem().exists(meta)) {
       cluster.getFileSystem().delete(meta, true);
       Assert.fail("Expected meta folder to be deleted");
     }
   } catch (IOException e) {
     LOG.error("Exception encountered while cleaning up folder", e);
     Assert.fail("Unable to clean up meta folder");
   }
 }
Example #6
 /**
  * Initialize the cluster, wait for it to become active, and get FileSystem instances for our test
  * users.
  *
  * @param format if true, format the NameNode and DataNodes before starting up
  * @throws Exception if any step fails
  */
 private static void initCluster(boolean format) throws Exception {
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format).build();
   cluster.waitActive();
   hdfs = cluster.getFileSystem();
   fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
   fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
 }
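BRUCE and DIANA are test principals declared elsewhere in the class; they are typically created along these lines (the user and group names here are illustrative):

  private static final UserGroupInformation BRUCE =
      UserGroupInformation.createUserForTesting("bruce", new String[] {"testgroup"});
  private static final UserGroupInformation DIANA =
      UserGroupInformation.createUserForTesting("diana", new String[] {"testgroup"});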
Example #7
  /**
   * Test plan:
   * 1. create a DFS cluster with 3 storage directories - 2 EDITS_IMAGE, 1 EDITS
   * 2. create a cluster and write a file
   * 3. corrupt/disable one storage (or two) by removing it
   * 4. run doCheckpoint - it fails on the removed dirs (which invalidates the storages)
   * 5. write another file
   * 6. check that edits and fsimage differ
   * 7. run doCheckpoint
   * 8. verify that all the image and edits files are the same
   */
  @Test
  public void testStorageRestore() throws Exception {
    int numDatanodes = 2;
    cluster =
        new MiniDFSCluster(0, config, numDatanodes, true, false, true, null, null, null, null);
    cluster.waitActive();

    SecondaryNameNode secondary = new SecondaryNameNode(config);

    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/", "test");
    writeFile(fs, path, 2);

    invalidateStorage(cluster.getNameNode().getFSImage());

    path = new Path("/", "test1");
    writeFile(fs, path, 2);

    checkFiles(false);

    secondary.doCheckpoint();

    checkFiles(true);
    secondary.shutdown();
    cluster.shutdown();
  }
Example #8
 /** Tests the -basedir option, copying files from one DFS file system to another. */
 public void testBasedir() throws Exception {
   String namenode = null;
   MiniDFSCluster cluster = null;
   try {
     Configuration conf = new Configuration();
     cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem hdfs = cluster.getFileSystem();
     namenode = FileSystem.getDefaultUri(conf).toString();
     if (namenode.startsWith("hdfs://")) {
       MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat");
       ToolRunner.run(
           new DistCpV1(conf),
           new String[] {
             "-basedir", "/basedir", namenode + "/basedir/middle/srcdat", namenode + "/destdat"
           });
       assertTrue(
           "Source and destination directories do not match.",
           checkFiles(hdfs, "/destdat/middle/srcdat", files));
       deldir(hdfs, "/destdat");
       deldir(hdfs, "/basedir");
       deldir(hdfs, "/logs");
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
  /** Test that reading a file with a missing block raises BlockMissingException. */
  public void testBlockMissingException() throws Exception {
    LOG.info("Test testBlockMissingException started.");
    long blockSize = 1024L;
    int numBlocks = 4;
    conf = new Configuration();
    try {
      dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
      dfs.waitActive();
      fileSys = (DistributedFileSystem) dfs.getFileSystem();
      Path file1 = new Path("/user/dhruba/raidtest/file1");
      createOldFile(fileSys, file1, 1, numBlocks, blockSize);

      // extract block locations from the file system; wait till the file is closed
      LocatedBlocks locations =
          fileSys.dfs.namenode.getBlockLocations(file1.toString(), 0, numBlocks * blockSize);
      // remove block of file
      LOG.info("Remove first block of file");
      corruptBlock(file1, locations.get(0).getBlock());

      // validate that the system throws BlockMissingException
      validateFile(fileSys, file1);
    } finally {
      if (fileSys != null) fileSys.close();
      if (dfs != null) dfs.shutdown();
    }
    LOG.info("Test testBlockMissingException completed.");
  }
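corruptBlock(...) is a helper defined elsewhere in this class; given the "Remove first block" comment, it "removes" a block by deleting its replica files on disk. A hedged sketch of that approach (the recursive walk under test.build.data is illustrative; real data-directory layouts vary by Hadoop version):

  void corruptBlock(Path file, Block blockNum) throws IOException {
    // Replica files are named after the block, e.g. blk_<id>.
    deleteReplicas(
        new File(System.getProperty("test.build.data", "build/test/data")),
        blockNum.getBlockName());
  }

  private void deleteReplicas(File dir, String blockName) {
    File[] children = dir.listFiles();
    if (children == null) return;
    for (File f : children) {
      if (f.isDirectory()) {
        deleteReplicas(f, blockName);
      } else if (f.getName().startsWith(blockName)) {
        assertTrue("could not delete replica " + f, f.delete());
      }
    }
  }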
Example #10
  /** Copy an empty directory on the DFS file system. */
  public void testEmptyDir() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      final FileSystem hdfs = cluster.getFileSystem();
      namenode = FileSystem.getDefaultUri(conf).toString();
      if (namenode.startsWith("hdfs://")) {

        FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
        fs.mkdirs(new Path("/empty"));

        ToolRunner.run(
            new DistCpV1(conf),
            new String[] {"-log", namenode + "/logs", namenode + "/empty", namenode + "/dest"});
        fs = FileSystem.get(URI.create(namenode + "/destdat"), conf);
        assertTrue(
            "Destination directory does not exist.", fs.exists(new Path(namenode + "/dest")));
        deldir(hdfs, "/dest");
        deldir(hdfs, "/empty");
        deldir(hdfs, "/logs");
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Example #11
  /**
   * The corrupt block has to be removed when the number of valid replicas matches the replication
   * factor for the file. In this test, that condition is reached by increasing the number of good
   * replicas: the block is replicated onto a new DataNode.
   *
   * Test strategy:
   * - Bring up a cluster with 3 DataNodes.
   * - Create a file with replication factor 3.
   * - Corrupt one replica of a block of the file.
   * - Verify that there are still 2 good replicas and 1 corrupt replica (the corrupt replica is
   *   not removed since the number of good replicas (2) is less than the replication factor (3)).
   * - Start a new DataNode.
   * - Verify that a new replica is created and the corrupt replica is removed.
   */
  @Test
  public void testByAddingAnExtraDataNode() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    FileSystem fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);

    try {
      final Path fileName = new Path("/foo1");
      DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
      DFSTestUtil.waitReplication(fs, fileName, (short) 3);

      Block block = DFSTestUtil.getFirstBlock(fs, fileName);
      corruptBlock(cluster, fs, fileName, 0, block);

      DFSTestUtil.waitReplication(fs, fileName, (short) 2);

      assertEquals(2, namesystem.blockManager.countNodes(block).liveReplicas());
      assertEquals(1, namesystem.blockManager.countNodes(block).corruptReplicas());

      cluster.restartDataNode(dnPropsFourth);

      DFSTestUtil.waitReplication(fs, fileName, (short) 3);

      assertEquals(3, namesystem.blockManager.countNodes(block).liveReplicas());
      assertEquals(0, namesystem.blockManager.countNodes(block).corruptReplicas());
    } finally {
      cluster.shutdown();
    }
  }
Example #12
  @Before
  @Override
  public void setUp() throws Exception {
    super.setUp();
    conf.setClass(
        PolicyProvider.POLICY_PROVIDER_CONFIG, HDFSPolicyProvider.class, PolicyProvider.class);

    // Many of the tests expect a replication value of 1 in the output
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

    // Build racks and hosts configuration to test dfsAdmin -printTopology
    String[] racks = {
      "/rack1", "/rack1", "/rack2", "/rack2",
      "/rack2", "/rack3", "/rack4", "/rack4"
    };
    String[] hosts = {
      "host1", "host2", "host3", "host4",
      "host5", "host6", "host7", "host8"
    };
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8).racks(racks).hosts(hosts).build();
    dfsCluster.waitClusterUp();
    namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

    username = System.getProperty("user.name");

    fs = dfsCluster.getFileSystem();
    assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
  }
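The racks/hosts layout above exists so later tests can exercise dfsadmin -printTopology. A hedged usage sketch, assuming org.apache.hadoop.hdfs.tools.DFSAdmin (the test name and assertions are illustrative):

  @Test
  public void testPrintTopology() throws Exception {
    // Capture stdout while running "dfsadmin -printTopology".
    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    PrintStream oldOut = System.out;
    System.setOut(new PrintStream(captured, true));
    try {
      assertEquals(0, ToolRunner.run(new DFSAdmin(conf), new String[] {"-printTopology"}));
    } finally {
      System.setOut(oldOut);
    }
    assertTrue(captured.toString().contains("/rack1")); // each configured rack is listed
  }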
Example #13
  @Test
  public void testGetTokensForViewFS() throws IOException, URISyntaxException {
    Configuration conf = new Configuration(jConf);
    FileSystem dfs = dfsCluster.getFileSystem();
    String serviceName = dfs.getCanonicalServiceName();

    Path p1 = new Path("/mount1");
    Path p2 = new Path("/mount2");
    p1 = dfs.makeQualified(p1);
    p2 = dfs.makeQualified(p2);

    conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
    conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
    Credentials credentials = new Credentials();
    Path lp1 = new Path("viewfs:///dir1");
    Path lp2 = new Path("viewfs:///dir2");
    Path[] paths = new Path[2];
    paths[0] = lp1;
    paths[1] = lp2;
    TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);

    Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
    assertEquals("number of tokens is not 1", 1, tns.size());

    boolean found = false;
    for (Token<? extends TokenIdentifier> tt : tns) {
      System.out.println("token=" + tt);
      if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
          && tt.getService().equals(new Text(serviceName))) {
        found = true;
      }
    }
    assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
  }
 public static void testWrite() throws Exception {
   FileSystem fs = cluster.getFileSystem();
   long tStart = System.currentTimeMillis();
   bench.writeTest(fs);
   long execTime = System.currentTimeMillis() - tStart;
   bench.analyzeResult(fs, TestType.TEST_TYPE_WRITE, execTime);
 }
 @AfterClass
 public static void afterClass() throws Exception {
   if (cluster == null) return;
   FileSystem fs = cluster.getFileSystem();
   bench.cleanup(fs);
   cluster.shutdown();
 }
Example #16
  /**
   * Test if fsck can return -1 in case of failure
   *
   * @throws Exception
   */
  public void testFsckError() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      // bring up a one-node cluster
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 1, true, null);
      String fileName = "/test.txt";
      Path filePath = new Path(fileName);
      FileSystem fs = cluster.getFileSystem();

      // create a one-block file
      DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
      DFSTestUtil.waitReplication(fs, filePath, (short) 1);

      // intentionally corrupt NN data structure
      INodeFile node = (INodeFile) cluster.getNameNode().namesystem.dir.rootDir.getNode(fileName);
      assertEquals(1, node.blocks.length);
      node.blocks[0].setNumBytes(-1L); // set the block length to be negative

      // run fsck and expect a failure with -1 as the error code
      String outStr = runFsck(conf, -1, true, fileName);
      System.out.println(outStr);
      assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));

      // clean up file system
      fs.delete(filePath, true);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
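runFsck(...) is shared by the fsck tests in these examples; a sketch of the customary implementation, which drives DFSck through ToolRunner and captures its output (the signature is inferred from the call sites):

  static String runFsck(Configuration conf, int expectedErrCode,
                        boolean checkErrorCode, String... path) throws Exception {
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    int errCode = ToolRunner.run(new DFSck(conf, out), path);
    if (checkErrorCode) {
      assertEquals(expectedErrCode, errCode);
    }
    return bStream.toString(); // fsck's report, for callers to inspect
  }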
Example #17
 public void testFsckNonExistent() throws Exception {
   DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8 * 1024);
   MiniDFSCluster cluster = null;
   FileSystem fs = null;
   try {
     Configuration conf = new Configuration();
     conf.setLong("dfs.blockreport.intervalMsec", 10000L);
     cluster = new MiniDFSCluster(conf, 4, true, null);
     fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
     util.waitReplication(fs, "/srcdat", (short) 3);
     String outStr = runFsck(conf, 0, true, "/non-existent");
     assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
     System.out.println(outStr);
     util.cleanup(fs, "/srcdat");
   } finally {
     if (fs != null) {
       try {
         fs.close();
      } catch (Exception e) {
        // ignore close failures during cleanup
      }
     }
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
  protected void setupCluster(boolean simulated, long minFileSize, String[] racks, String[] hosts)
      throws IOException {
    conf = new Configuration();
    localFileSys = FileSystem.getLocal(conf);
    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
    conf.set("dfs.replication.pending.timeout.sec", "2");
    conf.setLong("dfs.block.size", 1L);
    conf.set(
        "dfs.block.replicator.classname",
        "org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid");
    conf.setLong("hdfs.raid.min.filesize", minFileSize);
    Utils.loadTestCodecs(conf, 5, 5, 1, 3, "/raid", "/raidrs", false, true);
    conf.setInt("io.bytes.per.checksum", 1);
    excludeFile = new Path(TEST_DIR, "exclude" + System.currentTimeMillis());
    cleanFile(excludeFile);
    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
    writeConfigFile(excludeFile, null);

    if (!simulated) {
      cluster = new MiniDFSCluster(conf, hosts.length, true, racks, hosts);
    } else {
      long[] capacities = new long[] {CAPACITY, CAPACITY, CAPACITY};
      cluster = new MiniDFSCluster(0, conf, hosts.length, true, true, null, racks, capacities);
    }
    cluster.waitActive();
    namesystem = cluster.getNameNode().getNamesystem();
    Assert.assertTrue(
        "BlockPlacementPolicy type is not correct.",
        namesystem.replicator instanceof BlockPlacementPolicyRaid);
    policy = (BlockPlacementPolicyRaid) namesystem.replicator;
    fs = cluster.getFileSystem();
    dfs = (DistributedFileSystem) fs;
    TestDirectoryRaidDfs.setupStripeStore(conf, fs);
  }
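cleanFile(...) and writeConfigFile(...) manage the decommission exclude file; a sketch of the latter in the style of Hadoop's decommission tests (passing null writes an empty file, matching the call above):

  private void writeConfigFile(Path name, List<String> nodes) throws IOException {
    // recreate the file from scratch on each call
    if (localFileSys.exists(name)) {
      localFileSys.delete(name, true);
    }
    FSDataOutputStream stm = localFileSys.create(name);
    if (nodes != null) {
      for (String node : nodes) {
        stm.writeBytes(node);
        stm.writeBytes("\n");
      }
    }
    stm.close();
  }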
 @Override
 protected void setUp() throws Exception {
   cluster = new MiniDFSCluster(CONF, 1, true, null);
   cluster.waitActive();
   fs = (DistributedFileSystem) cluster.getFileSystem();
   metrics = fs.getClient().getDFSClientMetrics();
 }
 @BeforeClass
 public static void setUpBeforeClass() throws Exception {
   File minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
   minidfsDir.mkdirs();
   Assert.assertTrue(minidfsDir.exists());
   System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
   Configuration conf = new HdfsConfiguration();
   conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
   EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
   miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
   dir = new Path(miniDFS.getURI() + "/dir");
   FileSystem fs = miniDFS.getFileSystem();
   fs.mkdirs(dir);
   writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
   dummyEtc = new File(minidfsDir, "dummy-etc");
   dummyEtc.mkdirs();
   Assert.assertTrue(dummyEtc.exists());
   Configuration dummyConf = new Configuration(false);
   for (String file : new String[] {"core", "hdfs", "mapred", "yarn"}) {
     File siteXml = new File(dummyEtc, file + "-site.xml");
     FileOutputStream out = new FileOutputStream(siteXml);
     dummyConf.writeXml(out);
     out.close();
   }
   resourcesDir = minidfsDir.getAbsolutePath();
   hadoopConfDir = dummyEtc.getName();
   System.setProperty("sdc.resources.dir", resourcesDir);
   ;
 }
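A fixture like this usually has a matching @AfterClass that shuts the cluster down and clears the properties it set; a minimal sketch (the method name is illustrative):

  @AfterClass
  public static void tearDownAfterClass() {
    if (miniDFS != null) {
      miniDFS.shutdown();
      miniDFS = null;
    }
    System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
    System.clearProperty("sdc.resources.dir");
  }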
Example #21
 @Test(timeout = 120000)
 public void testSeekAfterSetDropBehind() throws Exception {
   // start a cluster
   LOG.info("testSeekAfterSetDropBehind");
   Configuration conf = new HdfsConfiguration();
   MiniDFSCluster cluster = null;
   String TEST_PATH = "/test";
   int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
   try {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
     // verify that we can seek after setDropBehind
     FSDataInputStream fis = fs.open(new Path(TEST_PATH));
     try {
       Assert.assertTrue(fis.read() != -1); // create BlockReader
       fis.setDropBehind(false); // clear BlockReader
       fis.seek(2); // seek
     } finally {
       fis.close();
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
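createHdfsFile(...) is declared elsewhere in this test class; judging from the call site it writes TEST_PATH_LEN bytes and optionally sets the drop-behind hint on the output stream. A sketch under that assumption:

  static void createHdfsFile(FileSystem fs, Path p, long length, Boolean dropBehind)
      throws Exception {
    FSDataOutputStream fos = null;
    try {
      fos = fs.create(p);
      if (dropBehind != null) {
        fos.setDropBehind(dropBehind); // hint the cache policy before writing
      }
      byte[] buf = new byte[8192];
      while (length > 0) {
        int amt = (length > buf.length) ? buf.length : (int) length;
        fos.write(buf, 0, amt);
        length -= amt;
      }
    } finally {
      if (fos != null) fos.close();
    }
  }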
Example #22
  @Test(timeout = 60000)
  public void testSymlinkHdfsDisable() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // disable symlink resolution
    conf.setBoolean(CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY, false);
    // spin up minicluster, get dfs and filecontext
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
    // Create test files/links
    FileContextTestHelper helper = new FileContextTestHelper("/tmp/TestSymlinkHdfsDisable");
    Path root = helper.getTestRootPath(fc);
    Path target = new Path(root, "target");
    Path link = new Path(root, "link");
    DFSTestUtil.createFile(dfs, target, 4096, (short) 1, 0xDEADDEAD);
    fc.createSymlink(target, link, false);

    // Try to resolve links with FileSystem and FileContext
    try {
      fc.open(link);
      fail("Expected error when attempting to resolve link");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("resolution is disabled", e);
    }
    try {
      dfs.open(link);
      fail("Expected error when attempting to resolve link");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("resolution is disabled", e);
    }
  }
 @Before
 public void setUp() throws Exception {
   cluster = new MiniDFSCluster.Builder(CONF).build();
   cluster.waitActive();
   cluster.getNameNode();
   fs = (DistributedFileSystem) cluster.getFileSystem();
 }
Example #24
  /** Run fsck on a healthy cluster, then again after restarting with no DataNodes. */
  public void testFsck() throws Exception {
    DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8 * 1024);
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new Configuration();
      conf.setLong("dfs.blockreport.intervalMsec", 10000L);
      cluster = new MiniDFSCluster(conf, 4, true, null);
      fs = cluster.getFileSystem();
      util.createFiles(fs, "/srcdat");
      util.waitReplication(fs, "/srcdat", (short) 3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      System.out.println(outStr);
      if (fs != null) {
        try {
          fs.close();
        } catch (Exception e) {
          // ignore close failures; the cluster is about to be restarted
        }
      }
      cluster.shutdown();

      // restart the cluster; bring up namenode but not the data nodes
      cluster = new MiniDFSCluster(conf, 0, false, null);
      outStr = runFsck(conf, 1, true, "/");
      // expect the result is corrupt
      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
      System.out.println(outStr);

      // bring up data nodes & cleanup cluster
      cluster.startDataNodes(conf, 4, true, null, null);
      cluster.waitActive();
      cluster.waitClusterUp();
      fs = cluster.getFileSystem();
      util.cleanup(fs, "/srcdat");
    } finally {
      if (fs != null) {
        try {
          fs.close();
        } catch (Exception e) {
          // ignore close failures during cleanup
        }
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Example #25
 @Before
 public void setUp() throws Exception {
   conf = new HdfsConfiguration();
   conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
   cluster.waitActive();
   fs = cluster.getFileSystem();
 }
 @Test(timeout = 6000)
 public void testAppend() throws Exception {
   FileSystem fs = cluster.getFileSystem();
   long tStart = System.currentTimeMillis();
   bench.appendTest(fs);
   long execTime = System.currentTimeMillis() - tStart;
   bench.analyzeResult(fs, TestType.TEST_TYPE_APPEND, execTime);
 }
 @Override
 protected void setUp() throws Exception {
   cluster = new MiniDFSCluster(CONF, 1, true, null);
   cluster.waitActive();
   cluster.getNameNode();
   nnMetrics = NameNode.getNameNodeMetrics();
   fs = (DistributedFileSystem) cluster.getFileSystem();
 }
  /**
   * Setup a {@link MiniDFSCluster}. Create a block with both {@link State#NORMAL} and {@link
   * State#READ_ONLY_SHARED} replicas.
   */
  @Before
  public void setup() throws IOException, InterruptedException {
    conf = new HdfsConfiguration();
    SimulatedFSDataset.setFactory(conf);

    Configuration[] overlays = new Configuration[NUM_DATANODES];
    for (int i = 0; i < overlays.length; i++) {
      overlays[i] = new Configuration();
      if (i == RO_NODE_INDEX) {
        // only the designated node is backed by READ_ONLY_SHARED storage
        overlays[i].setEnum(SimulatedFSDataset.CONFIG_PROPERTY_STATE, READ_ONLY_SHARED);
      }
    }

    cluster =
        new MiniDFSCluster.Builder(conf)
            .numDataNodes(NUM_DATANODES)
            .dataNodeConfOverlays(overlays)
            .build();
    fs = cluster.getFileSystem();
    blockManager = cluster.getNameNode().getNamesystem().getBlockManager();
    datanodeManager = blockManager.getDatanodeManager();
    client =
        new DFSClient(
            new InetSocketAddress("localhost", cluster.getNameNodePort()),
            cluster.getConfiguration(0));

    for (int i = 0; i < NUM_DATANODES; i++) {
      DataNode dataNode = cluster.getDataNodes().get(i);
      validateStorageState(
          BlockManagerTestUtil.getStorageReportsForDatanode(
              datanodeManager.getDatanode(dataNode.getDatanodeId())),
          i == RO_NODE_INDEX ? READ_ONLY_SHARED : NORMAL);
    }

    // Create a 1 block file
    DFSTestUtil.createFile(fs, PATH, BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE, (short) 1, seed);

    LocatedBlock locatedBlock = getLocatedBlock();
    extendedBlock = locatedBlock.getBlock();
    block = extendedBlock.getLocalBlock();

    assertThat(locatedBlock.getLocations().length, is(1));
    normalDataNode = locatedBlock.getLocations()[0];
    readOnlyDataNode =
        datanodeManager.getDatanode(cluster.getDataNodes().get(RO_NODE_INDEX).getDatanodeId());
    assertThat(normalDataNode, is(not(readOnlyDataNode)));

    validateNumberReplicas(1);

    // Inject the block into the datanode with READ_ONLY_SHARED storage
    cluster.injectBlocks(0, RO_NODE_INDEX, Collections.singleton(block));

    // There should now be 2 *locations* for the block
    // Must wait until the NameNode has processed the block report for the injected blocks
    waitForLocations(2);
  }
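waitForLocations(...) and getLocatedBlock() are helpers defined elsewhere; a sketch consistent with the call sites (the retry count and sleep interval are illustrative):

  private LocatedBlock getLocatedBlock() throws IOException {
    LocatedBlocks locatedBlocks = client.getLocatedBlocks(PATH.toString(), 0, BLOCK_SIZE);
    assertThat(locatedBlocks.getLocatedBlocks().size(), is(1));
    return locatedBlocks.getLocatedBlocks().get(0);
  }

  private void waitForLocations(int locations) throws IOException, InterruptedException {
    // poll until the NameNode reports the expected number of locations
    for (int tries = 0; tries < 10; tries++) {
      if (getLocatedBlock().getLocations().length == locations) {
        return;
      }
      Thread.sleep(1000);
    }
    assertThat(getLocatedBlock().getLocations().length, is(locations));
  }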
 @Test(timeout = 3000)
 public void testReadSkip() throws Exception {
   FileSystem fs = cluster.getFileSystem();
   long tStart = System.currentTimeMillis();
   bench.getConf().setLong("test.io.skip.size", 1);
   bench.randomReadTest(fs);
   long execTime = System.currentTimeMillis() - tStart;
   bench.analyzeResult(fs, TestType.TEST_TYPE_READ_SKIP, execTime);
 }
 private static void initCluster(boolean format) throws Exception {
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format).build();
   hdfs = cluster.getFileSystem();
   assertTrue(hdfs instanceof DistributedFileSystem);
   hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
   assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
   hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
   assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
 }