static {
    try {
      UnixUserGroupInformation ugi = UnixUserGroupInformation.login(DEFAULT_CONFIGURATION);
      DEFAULT_USERNAME = ugi.getUserName();
      // Do not use default groups, as these are pretty much useless
    } catch (Exception e) {
      // Should never happen but default to a reasonable value if it does
      DEFAULT_USERNAME = System.getProperty("user.name");
    }

    DEFAULT_GROUP = DEFAULT_CONFIGURATION.get("dfs.permissions.supergroup", "supergroup");
  }
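 /**
  * Logs in the current Unix user from the given configuration, wrapping any
  * LoginException in an IOException.
  */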
 private static UnixUserGroupInformation getUGI(Configuration conf) throws IOException {
   UnixUserGroupInformation ugi = null;
   try {
     ugi = UnixUserGroupInformation.login(conf, true);
   } catch (LoginException e) {
     throw (IOException)
         (new IOException("Failed to get the current user's information.").initCause(e));
   }
   return ugi;
 }
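 /**
  * Brings up a four-datanode MiniDFSCluster with the default RAID codecs
  * loaded, and opens a second DistributedFileSystem handle logged in as USER1.
  */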
 @Before
 public void startUpCluster() throws IOException {
   RaidCodecBuilder.loadDefaultFullBlocksCodecs(conf, numRSParityBlocks, numDataBlocks);
   cluster = new MiniDFSCluster(conf, 4, true, null);
   assertNotNull("Failed Cluster Creation", cluster);
   cluster.waitClusterUp();
   dfs = (DistributedFileSystem) cluster.getFileSystem();
   assertNotNull("Failed to get FileSystem", dfs);
   nn = cluster.getNameNode();
   assertNotNull("Failed to get NameNode", nn);
   Configuration newConf = new Configuration(conf);
   USER1 = new UnixUserGroupInformation("foo", new String[] {"bar"});
   UnixUserGroupInformation.saveToConf(newConf, UnixUserGroupInformation.UGI_PROPERTY_NAME, USER1);
   userdfs = (DistributedFileSystem) FileSystem.get(newConf); // FileSystem handle logged in as USER1
   InjectionHandler h = new FakeBlockGeneratorInjectionHandler();
   InjectionHandler.set(h);
   rand.nextBytes(bytes);
 }
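  /**
   * Writes and syncs part of a file, shortens the soft lease, and re-opens the
   * file from a second client to force lease recovery, then verifies the
   * recovered file's length and reads back its contents.
   */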
  public void testBlockSynchronization() throws Exception {
    final long softLease = 1000;
    final long hardLease = 60 * 60 * 1000;
    final short repl = 3;
    final Configuration conf = new Configuration();
    final int bufferSize = conf.getInt("io.file.buffer.size", 4096);
    conf.setLong("dfs.block.size", BLOCK_SIZE);
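    // datanodes send heartbeats to the namenode every second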
    conf.setInt("dfs.heartbeat.interval", 1);
    //  conf.setInt("io.bytes.per.checksum", 16);

    MiniDFSCluster cluster = null;
    byte[] actual = new byte[FILE_SIZE];

    try {
      cluster = new MiniDFSCluster(conf, 5, true, null);
      cluster.waitActive();

      // create a file
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      // create a random file name
      String filestr = "/foo" + AppendTestUtil.nextInt();
      System.out.println("filestr=" + filestr);
      Path filepath = new Path(filestr);
      FSDataOutputStream stm = dfs.create(filepath, true, bufferSize, repl, BLOCK_SIZE);
      assertTrue(dfs.dfs.exists(filestr));

      // write random number of bytes into it.
      int size = AppendTestUtil.nextInt(FILE_SIZE);
      System.out.println("size=" + size);
      stm.write(buffer, 0, size);

      // sync file
      AppendTestUtil.LOG.info("sync");
      stm.sync();
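      // stop this client's lease renewal thread so the lease can be recovered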
      AppendTestUtil.LOG.info("leasechecker.interrupt()");
      dfs.dfs.leaseChecker.interrupt();

      // set the soft limit to be 1 second so that the
      // namenode triggers lease recovery on next attempt to write-for-open.
      cluster.setLeasePeriod(softLease, hardLease);

      // try to re-open the file before closing the previous handle. This
      // should fail but will trigger lease recovery.
      {
        Configuration conf2 = new Configuration(conf);
        String username = UserGroupInformation.getCurrentUGI().getUserName() + "_1";
        UnixUserGroupInformation.saveToConf(
            conf2,
            UnixUserGroupInformation.UGI_PROPERTY_NAME,
            new UnixUserGroupInformation(username, new String[] {"supergroup"}));
        FileSystem dfs2 = FileSystem.get(conf2);

        boolean done = false;
        for (int i = 0; i < 10 && !done; i++) {
          AppendTestUtil.LOG.info("i=" + i);
          try {
            dfs2.create(filepath, false, bufferSize, repl, BLOCK_SIZE);
            fail("Creation of an existing file should never succeed.");
          } catch (IOException ioe) {
            final String message = ioe.getMessage();
            if (message.contains("file exists")) {
              AppendTestUtil.LOG.info("done", ioe);
              done = true;
            } else if (message.contains(AlreadyBeingCreatedException.class.getSimpleName())) {
              AppendTestUtil.LOG.info("GOOD! got " + message);
            } else {
              AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
            }
          }

          if (!done) {
            AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
            try {
              Thread.sleep(5000);
            } catch (InterruptedException e) {
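              // ignore the interrupt and keep polling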
            }
          }
        }
        assertTrue(done);
      }

      AppendTestUtil.LOG.info(
          "Lease for file " + filepath + " is recovered. " + "Validating its contents now...");

      // verify that file-size matches
      final long len = dfs.getFileStatus(filepath).getLen();
      assertTrue(
          "File should be " + size + " bytes, but was found to be " + len + " bytes",
          len == size);

      // verify that there is enough data to read.
      System.out.println("File size is good. Now validating sizes from datanodes...");
      FSDataInputStream stmin = dfs.open(filepath);
      stmin.readFully(0, actual, 0, size);
      stmin.close();
    } finally {
      try {
        if (cluster != null) {
          cluster.shutdown();
        }
      } catch (Exception e) {
        // ignore
      }
    }
  }