@Before
  public void setupCluster() throws Exception {
    conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK);
    // Bump up replication interval so that we only run replication
    // checks explicitly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 600);
    // Increase max streams so that we re-replicate quickly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 1000);
    // See RandomDeleterPolicy javadoc.
    conf.setClass(
        DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        RandomDeleterPolicy.class,
        BlockPlacementPolicy.class);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    cluster =
        new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleHATopology())
            .numDataNodes(3)
            .build();
    nn1 = cluster.getNameNode(0);
    nn2 = cluster.getNameNode(1);

    cluster.waitActive();
    cluster.transitionToActive(0);
    // Trigger block reports so that the first NN trusts all
    // of the DNs, and will issue deletions
    cluster.triggerBlockReports();
    fs = HATestUtil.configureFailoverFs(cluster, conf);
  }
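
For context, a minimal sketch of how a test built on this HA setup might exercise a failover; the test name, path, and ordering are assumptions, while fs, cluster, and SMALL_BLOCK come from the setup above.

  @Test
  public void testFileSurvivesFailover() throws Exception {
    // Hypothetical test path, written through the failover-aware FileSystem from setupCluster().
    Path testFile = new Path("/failover-test");
    DFSTestUtil.createFile(fs, testFile, SMALL_BLOCK, (short) 3, 0L);

    // Fail over from the first namenode to the second.
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);

    // The file should still be visible through the failover proxy.
    assertTrue(fs.exists(testFile));
  }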
  @BeforeClass
  public static void clusterSetupAtBegining()
      throws IOException, LoginException, URISyntaxException {
    SupportsBlocks = true;
    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

    cluster =
        new MiniDFSCluster.Builder(CONF)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
            .numDataNodes(2)
            .build();
    cluster.waitClusterUp();

    fHdfs = cluster.getFileSystem(0);
    fHdfs2 = cluster.getFileSystem(1);
    fHdfs
        .getConf()
        .set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());
    fHdfs2
        .getConf()
        .set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());

    defaultWorkingDirectory =
        fHdfs.makeQualified(
            new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
    defaultWorkingDirectory2 =
        fHdfs2.makeQualified(
            new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));

    fHdfs.mkdirs(defaultWorkingDirectory);
    fHdfs2.mkdirs(defaultWorkingDirectory2);
  }
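
The setup above points fs.defaultFS at the viewfs scheme but does not show the mount table; a hedged sketch of how the two namespaces might be linked into a single ViewFileSystem follows. The helper name and the mount points "/nn0" and "/nn1" are assumptions; ConfigUtil is org.apache.hadoop.fs.viewfs.ConfigUtil.

  private static FileSystem buildViewFs(Configuration conf) throws IOException {
    // Mount each HDFS namespace under its own (hypothetical) viewfs path.
    ConfigUtil.addLink(conf, "/nn0", fHdfs.getUri());
    ConfigUtil.addLink(conf, "/nn1", fHdfs2.getUri());
    return FileSystem.get(FsConstants.VIEWFS_URI, conf);
  }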
  @BeforeClass
  public static void clusterSetupAtBeginning() throws IOException {
    // Bring up a two-namespace federated mini cluster shared by every test in the class.
    cluster =
        new MiniDFSCluster.Builder(clusterConf)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
            .numDataNodes(2)
            .build();
    cluster.waitClusterUp();

    // One DistributedFileSystem handle per namespace.
    fHdfs = cluster.getFileSystem(0);
    fHdfs2 = cluster.getFileSystem(1);
  }
  @Before
  public void setUp() throws IOException {
    conf = new HdfsConfiguration();
    // Use a tiny block size (and matching checksum chunk) so small files span multiple blocks.
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 100);
    cluster =
        new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
            .build();
    // Wait for each of the three federated namenodes to become active.
    for (int i = 0; i < 3; i++) {
      cluster.waitActive(i);
    }
  }
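
A hedged sketch of how a test might use this three-namespace setup, addressing each namenode by index as the loop above already does; the test name and paths are illustrative only.

  @Test
  public void testWriteToEachNamespace() throws IOException {
    for (int i = 0; i < 3; i++) {
      // Each index selects an independent namespace of the federated cluster.
      FileSystem fs = cluster.getFileSystem(i);
      Path p = new Path("/ns-" + i + "/sample");
      assertTrue(fs.mkdirs(p));
      assertTrue(fs.exists(p));
    }
  }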
Example #5
  @Test
  public void testHighAvailability() throws IOException {
    Configuration conf = new HdfsConfiguration();

    // Create cluster with 3 readers and 1 writer
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleHOPSTopology(4))
            .numDataNodes(2)
            .format(true)
            .build();
    cluster.waitActive();

    try {

      // Get the filesystem for the first namenode
      FileSystem fs = cluster.getFileSystem(0);

      // Write operation should work since we have one writer
      assertTrue(fs.mkdirs(dir));

      // Write operation - Create a file and write something to it
      Path file1 = new Path(dir, "file1");
      createFile(fs, file1);

      // Read operation - The file should exist.
      assertTrue(fs.exists(file1));

      // Read operation - List files in this directory
      assertEquals(1, list(fs));

      // Read operation - Get file status
      FileStatus fileStatus = fs.listStatus(dir)[0];

      // Read operation - Get block locations
      assertNotSame(0, fs.getFileBlockLocations(file1, 0, 1).length);

      // Now we kill all namenodes except the last two
      cluster.getNameNode(0).stop();
      cluster.getNameNode(1).stop();

      // Now let's read again - these operations should still be possible
      assertTrue(fs.exists(file1));

      // Write operation - create more files
      Path file2 = new Path(dir, "file2");
      createFile(fs, file2);
      assertTrue(fs.exists(file2));
      Path file3 = new Path(dir, "file3");
      createFile(fs, file3);
      assertTrue(fs.exists(file3));
      Path file4 = new Path(dir, "file4");

      // Read operation - list files (3 files created now under this directory)
      assertEquals(3, list(fs));

      // Write operation - rename
      // [S] commented out because rename is not yet supported
      // ((DistributedFileSystem) fs).rename(file1, file4);

      // Kill another namenode
      cluster.getNameNode(2).stop();

      // Read operation - File status
      fs.getFileStatus(file2);

      // Write operation - Delete
      assertTrue(fs.delete(dir, true));

    } catch (IOException ex) {
      // Any IOException here indicates a real problem: connectivity hiccups caused
      // by the namenode shutdowns above should be handled transparently by the client.
      LOG.error(ex);
      ex.printStackTrace();
      assertFalse("Cannot be any connectivity issues", ex instanceof ConnectException);
      fail();
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
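
The test above calls two helpers, createFile(fs, path) and list(fs), whose bodies are not included in the snippet. A sketch of what they plausibly do, inferred only from the call sites (the data written and the use of the dir field are assumptions):

  private void createFile(FileSystem fs, Path path) throws IOException {
    // Create a small file so that exists() and listStatus() have something to see.
    FSDataOutputStream out = fs.create(path);
    out.writeBytes("test data");
    out.close();
  }

  private int list(FileSystem fs) throws IOException {
    // Number of entries directly under the shared test directory.
    return fs.listStatus(dir).length;
  }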