Example #1
  /**
   * Recover the file's lease.
   * Try to open the file in append mode. Doing this, we take over the file
   * that the crashed writer was writing to. Once we have it, close it; this
   * allows a subsequent reader to see everything up to the last sync.
   * NOTE: This is the same algorithm that HBase uses for file recovery.
   * @param fs the filesystem on which to reopen the file
   * @throws Exception if the file cannot be reopened
   */
  private void recoverFile(final FileSystem fs) throws Exception {
    LOG.info("Recovering File Lease");

    // set the soft limit to be 1 second so that the
    // namenode triggers lease recovery upon append request
    cluster.setLeasePeriod(1000, FSConstants.LEASE_HARDLIMIT_PERIOD);

    // Trying recovery
    int tries = 60;
    boolean recovered = false;
    FSDataOutputStream out = null;
    while (!recovered && tries-- > 0) {
      try {
        out = fs.append(file1);
        LOG.info("Successfully opened for appends");
        recovered = true;
      } catch (IOException e) {
        LOG.info("Failed open for append, waiting on lease recovery");
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ex) {
          // ignore it and try again
        }
      }
    }
    if (out != null) {
      out.close();
    }
    if (!recovered) {
      fail("Recovery should take < 1 min");
    }
    LOG.info("Past out lease recovery");
  }
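Newer Hadoop releases also expose lease recovery directly through DistributedFileSystem#recoverLease, which avoids the append-probe loop above. A minimal sketch, assuming the cluster's filesystem really is a DistributedFileSystem and that this API exists in the Hadoop version in use:

  // Sketch: explicit lease recovery via DistributedFileSystem#recoverLease
  // (assumes org.apache.hadoop.hdfs.DistributedFileSystem is available).
  private void recoverFileViaRecoverLease(final FileSystem fs, final Path path)
      throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    boolean recovered = false;
    for (int tries = 60; !recovered && tries > 0; tries--) {
      recovered = dfs.recoverLease(path); // true once the old lease is released
      if (!recovered) {
        Thread.sleep(1000); // give the NameNode time to finish block recovery
      }
    }
    assertTrue("Lease recovery should complete within a minute", recovered);
  }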
 protected static void saveRemainder(
     ArrayList<String[]> remainders, FileSystem fs, String outputPath) throws IOException {
   if (remainders.size() > 0) {
     System.out.println("Save remainder starts");
     FSDataOutputStream outputStrm = fs.append(new Path(outputPath + "/fusionkey-r-00000"));
     for (String[] keys : remainders) {
       if (keys.length > 1 && keys[1].length() > 0) {
         System.out.println(
             "\tStart writing remainder "
                 + keys[0]
                 + " ("
                 + keys[0].length()
                 + ") - "
                 + keys[1]
                 + "("
                 + keys[1].length()
                 + ")");
         outputStrm.write(Text.encode(keys[0] + '\t').array());
         outputStrm.write(Text.encode(keys[1] + '\n').array());
       } else {
         System.out.println(
             "\tStart writing remainder " + keys[0] + " (" + keys[0].length() + ")");
         outputStrm.write(Text.encode(keys[0] + '\n').array());
       }
     }
     outputStrm.close();
     System.out.println("Save remainder starts");
   }
 }
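An illustrative call site (hypothetical paths and keys). Note that FileSystem.append requires fusionkey-r-00000 to already exist, so this only works against output a previous job produced:

   // Hypothetical usage of saveRemainder; the output file must already exist.
   ArrayList<String[]> remainders = new ArrayList<String[]>();
   remainders.add(new String[] {"key1", "value1"}); // key/value pair
   remainders.add(new String[] {"key2", ""});       // key-only entry
   saveRemainder(remainders, FileSystem.get(new Configuration()), "/fusion/output");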
  /**
   * Test that the APPEND operation can handle block token expiration when re-establishing the
   * pipeline is needed.
   */
  @Test
  public void testAppend() throws Exception {
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = getConf(numDataNodes);

    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
      cluster.waitActive();
      assertEquals(numDataNodes, cluster.getDataNodes().size());

      final NameNode nn = cluster.getNameNode();
      final BlockManager bm = nn.getNamesystem().getBlockManager();
      final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

      // set a short token lifetime (1 second)
      SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
      Path fileToAppend = new Path(FILE_TO_APPEND);
      FileSystem fs = cluster.getFileSystem();
      byte[] expected = generateBytes(FILE_SIZE);
      // write a one-byte file
      FSDataOutputStream stm = writeFile(fs, fileToAppend, (short) numDataNodes, BLOCK_SIZE);
      stm.write(expected, 0, 1);
      stm.close();
      // open the file again for append
      stm = fs.append(fileToAppend);
      int mid = expected.length - 1;
      stm.write(expected, 1, mid - 1);
      stm.hflush();

      /*
       * wait till token used in stm expires
       */
      Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
      while (!SecurityTestUtil.isBlockTokenExpired(token)) {
        try {
          Thread.sleep(10);
        } catch (InterruptedException ignored) {
        }
      }

      // remove a datanode to force re-establishing pipeline
      cluster.stopDataNode(0);
      // append the rest of the file
      stm.write(expected, mid, expected.length - mid);
      stm.close();
      // check if append is successful
      FSDataInputStream in5 = fs.open(fileToAppend);
      assertTrue(checkFile1(in5, expected));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
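For reference, the block token lifetime that SecurityTestUtil shortens at runtime can also be configured before the cluster starts. A sketch, assuming conf is the Configuration passed to the MiniDFSCluster builder, with key names taken from DFSConfigKeys and the lifetime value assumed to be in minutes:

      // Sketch: configure block tokens before building the MiniDFSCluster.
      // Key names from DFSConfigKeys; lifetime units assumed to be minutes.
      conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
      conf.setLong(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, 1L);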
  /**
   * Test that, when a block is re-opened for append, the related datanode messages are correctly
   * queued by the SBN because they have future states and genstamps.
   */
  @Test
  public void testQueueingWithAppend() throws Exception {
    int numQueued = 0;
    int numDN = cluster.getDataNodes().size();

    FSDataOutputStream out = fs.create(TEST_FILE_PATH);
    try {
      AppendTestUtil.write(out, 0, 10);
      out.hflush();

      // Opening the file reports RBW replicas; those reports are
      // queued on the StandbyNode.
      numQueued += numDN; // RBW messages
    } finally {
      IOUtils.closeStream(out);
      numQueued += numDN; // blockReceived messages
    }

    cluster.triggerBlockReports();
    numQueued += numDN;

    try {
      out = fs.append(TEST_FILE_PATH);
      AppendTestUtil.write(out, 10, 10);
      // RBW replicas are reported again once the file is opened for append
      numQueued += numDN;

    } finally {
      IOUtils.closeStream(out);
      numQueued += numDN; // blockReceived
    }

    cluster.triggerBlockReports();
    numQueued += numDN;

    assertEquals(
        numQueued, cluster.getNameNode(1).getNamesystem().getPendingDataNodeMessageCount());

    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);

    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
    assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
    assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());

    AppendTestUtil.check(fs, TEST_FILE_PATH, 20);
  }
 /**
  * FileNotFoundException is expected when appending to a non-existent file
  *
  * @throws FileNotFoundException when the append is attempted
  */
 @Test(expected = FileNotFoundException.class)
 public void testFileNotFound() throws IOException {
   Configuration conf = new HdfsConfiguration();
   if (simulatedStorage) {
     conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
   }
   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   FileSystem fs = cluster.getFileSystem();
   try {
     Path file1 = new Path("/nonexistingfile.dat");
     fs.append(file1);
   } finally {
     fs.close();
     cluster.shutdown();
   }
 }
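Because append fails with FileNotFoundException on a missing path, callers that want append-or-create semantics have to branch themselves. A minimal sketch; note the exists/append pair is not atomic, so concurrent writers can still race:

  // Minimal append-or-create helper (sketch): FileSystem.append throws
  // FileNotFoundException for a missing path, so fall back to create.
  public static FSDataOutputStream appendOrCreate(FileSystem fs, Path p) throws IOException {
    return fs.exists(p) ? fs.append(p) : fs.create(p);
  }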
  /**
   * Ensure that even if a file is in a directory with the sticky bit on, another user can write to
   * that file (assuming correct permissions).
   */
  private void confirmCanAppend(Configuration conf, Path p) throws Exception {
    // Write a file to the new tmp directory as a regular user
    Path file = new Path(p, "foo");
    writeFile(hdfsAsUser1, file);
    hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));

    // Log onto cluster as another user and attempt to append to file
    Path file2 = new Path(p, "foo");
    FSDataOutputStream h = null;
    try {
      h = hdfsAsUser2.append(file2);
      h.write("Some more data".getBytes());
      h.close();
      h = null;
    } finally {
      IOUtils.cleanup(null, h);
    }
  }
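For context, the sticky bit mentioned in the javadoc is the leading 1 in the octal mode. A sketch of how a caller might prepare the directory before invoking confirmCanAppend (hypothetical path; assumes the FsPermission short-mode constructor honors the sticky bit in the Hadoop version in use):

   // Sketch: create a /tmp-style directory with the sticky bit set (01777),
   // then confirm a second user can still append to a file inside it.
   Path stickyDir = new Path("/tmp-sticky"); // hypothetical path
   hdfsAsUser1.mkdirs(stickyDir);
   hdfsAsUser1.setPermission(stickyDir, new FsPermission((short) 01777));
   confirmCanAppend(conf, stickyDir);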
Example #7
  /**
   * Test case that stops a writer after finalizing a block but before calling completeFile,
   * recovers the file from another writer, starts writing from that writer, and then has the
   * old lease holder call completeFile.
   */
  @Test(timeout = 60000)
  public void testCompleteOtherLeaseHoldersFile() throws Throwable {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

    try {
      cluster.waitActive();
      NameNode preSpyNN = cluster.getNameNode();
      NameNode spyNN = spy(preSpyNN);

      // Delay completeFile
      DelayAnswer delayer = new DelayAnswer();
      doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), (Block) anyObject());

      DFSClient client = new DFSClient(null, spyNN, conf, null);
      file1 = new Path("/testCompleteOtherLease");
      final OutputStream stm = client.create("/testCompleteOtherLease", true);

      // write 1/2 block
      AppendTestUtil.write(stm, 0, 4096);
      final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
      Thread t =
          new Thread() {
            @Override
            public void run() {
              try {
                stm.close();
              } catch (Throwable t) {
                err.set(t);
              }
            }
          };
      t.start();
      LOG.info("Waiting for close to get to latch...");
      delayer.waitForCall();

      // At this point, the block is finalized on the DNs, but the file
      // has not been completed in the NN.
      // Lose the leases
      LOG.info("Killing lease checker");
      client.leasechecker.interruptAndJoin();

      FileSystem fs1 = cluster.getFileSystem();
      FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());

      LOG.info("Recovering file");
      recoverFile(fs2);

      LOG.info("Opening file for append from new fs");
      FSDataOutputStream appenderStream = fs2.append(file1);

      LOG.info("Writing some data from new appender");
      AppendTestUtil.write(appenderStream, 0, 4096);

      LOG.info("Telling old close to proceed.");
      delayer.proceed();
      LOG.info("Waiting for close to finish.");
      t.join();
      LOG.info("Close finished.");

      // We expect that close will get a "Lease mismatch"
      // error.
      Throwable thrownByClose = err.get();
      assertNotNull(thrownByClose);
      assertTrue(thrownByClose instanceof IOException);
      if (!thrownByClose.getMessage().contains("Lease mismatch")) throw thrownByClose;

      // The appender should be able to close properly
      appenderStream.close();
    } finally {
      cluster.shutdown();
    }
  }
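DelayAnswer comes from Hadoop's test utilities and is not shown above; roughly, it is a Mockito Answer gated by two latches. An illustrative stand-in, not the real implementation (assumes java.util.concurrent.CountDownLatch and Mockito's Answer/InvocationOnMock):

  // Illustrative DelayAnswer stand-in (the real class lives in Hadoop's
  // test utilities): blocks the stubbed call until the test says proceed.
  static class DelayAnswer implements org.mockito.stubbing.Answer<Object> {
    private final CountDownLatch fired = new CountDownLatch(1);
    private final CountDownLatch allowed = new CountDownLatch(1);

    @Override
    public Object answer(org.mockito.invocation.InvocationOnMock invocation) throws Throwable {
      fired.countDown();   // signal that the intercepted call arrived
      allowed.await();     // park the caller until proceed() is invoked
      return invocation.callRealMethod(); // spy: run the real method
    }

    void waitForCall() throws InterruptedException { fired.await(); }

    void proceed() { allowed.countDown(); }
  }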
  /**
   * Creates one file, writes a few bytes to it, and then closes it. Reopens the same file for
   * appending, writes the remaining bytes, and then closes it. Verifies that all data exists
   * in the file.
   *
   * @throws IOException if an I/O error occurs
   */
  public void testSimpleAppend() throws IOException {
    final Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    conf.setInt("dfs.datanode.handler.count", 50);
    conf.setBoolean("dfs.support.append", true);
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    try {
      { // test appending to a file.

        // create a new file.
        Path file1 = new Path("/simpleAppend.dat");
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
        System.out.println("Created file simpleAppend.dat");

        // write to file
        int mid = 186; // io.bytes.per.checksum bytes
        System.out.println("Writing " + mid + " bytes to file " + file1);
        stm.write(fileContents, 0, mid);
        stm.close();
        System.out.println("Wrote and Closed first part of file.");

        // write to file
        int mid2 = 607; // io.bytes.per.checksum bytes
        System.out.println("Writing " + mid + " bytes to file " + file1);
        stm = fs.append(file1);
        stm.write(fileContents, mid, mid2 - mid);
        stm.close();
        System.out.println("Wrote and Closed second part of file.");

        // write the remainder of the file
        stm = fs.append(file1);

        // ensure getPos is set to reflect existing size of the file
        assertTrue(stm.getPos() > 0);

        System.out.println(
            "Writing " + (AppendTestUtil.FILE_SIZE - mid2) + " bytes to file " + file1);
        stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
        System.out.println("Written second part of file");
        stm.close();
        System.out.println("Wrote and Closed second part of file.");

        // verify that entire file is good
        AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
      }

      { // test appending to a non-existent file.
        FSDataOutputStream out = null;
        try {
          out = fs.append(new Path("/non-existing.dat"));
          fail("Expected to have FileNotFoundException");
        } catch (java.io.FileNotFoundException fnfe) {
          System.out.println("Good: got " + fnfe);
          fnfe.printStackTrace(System.out);
        } finally {
          IOUtils.closeStream(out);
        }
      }

      { // test append permission.

        // set root to all writable
        Path root = new Path("/");
        fs.setPermission(root, new FsPermission((short) 0777));
        fs.close();

        // login as a different user
        final UserGroupInformation superuser = UserGroupInformation.getCurrentUser();
        String username = "******";
        String group = "testappendgroup";
        assertFalse(superuser.getShortUserName().equals(username));
        assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
        UserGroupInformation appenduser =
            UserGroupInformation.createUserForTesting(username, new String[] {group});

        fs = DFSTestUtil.getFileSystemAs(appenduser, conf);

        // create a file
        Path dir = new Path(root, getClass().getSimpleName());
        Path foo = new Path(dir, "foo.dat");
        FSDataOutputStream out = null;
        int offset = 0;
        try {
          out = fs.create(foo);
          int len = 10 + AppendTestUtil.nextInt(100);
          out.write(fileContents, offset, len);
          offset += len;
        } finally {
          IOUtils.closeStream(out);
        }

        // change dir and foo to minimal permissions.
        fs.setPermission(dir, new FsPermission((short) 0100));
        fs.setPermission(foo, new FsPermission((short) 0200));

        // try append, should succeed
        out = null;
        try {
          out = fs.append(foo);
          int len = 10 + AppendTestUtil.nextInt(100);
          out.write(fileContents, offset, len);
          offset += len;
        } finally {
          IOUtils.closeStream(out);
        }

        // change dir and foo to all but no write on foo.
        fs.setPermission(foo, new FsPermission((short) 0577));
        fs.setPermission(dir, new FsPermission((short) 0777));

        // try append, should fail
        out = null;
        try {
          out = fs.append(foo);
          fail("Expected to have AccessControlException");
        } catch (AccessControlException ace) {
          System.out.println("Good: got " + ace);
          ace.printStackTrace(System.out);
        } finally {
          IOUtils.closeStream(out);
        }
      }
    } catch (IOException e) {
      System.out.println("Exception :" + e);
      throw e;
    } catch (Throwable e) {
      System.out.println("Throwable :" + e);
      e.printStackTrace();
      throw new IOException("Throwable : " + e);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
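AppendTestUtil.checkFullFile is not shown in this example; an equivalent verification routine might look like the sketch below, using the positioned readFully of FSDataInputStream (illustrative, not the real helper):

  // Sketch of a checkFullFile-style verification: read the whole file via a
  // positioned read and compare byte-for-byte against the expected contents.
  private static void verifyFullFile(FileSystem fs, Path p, int len, byte[] expected)
      throws IOException {
    byte[] actual = new byte[len];
    FSDataInputStream in = fs.open(p);
    try {
      in.readFully(0, actual, 0, len); // positioned read from offset 0
    } finally {
      in.close();
    }
    for (int i = 0; i < len; i++) {
      assertEquals("mismatch at byte " + i, expected[i], actual[i]);
    }
  }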
    // Repeatedly pick a file from the shared pool, append to it, and verify the result.
    public void run() {
      System.out.println("Workload " + id + " starting... ");
      for (int i = 0; i < numAppendsPerThread; i++) {

        // pick a file at random and remove it from pool
        Path testfile;
        synchronized (testFiles) {
          if (testFiles.size() == 0) {
            System.out.println("Completed write to almost all files.");
            return;
          }
          int index = AppendTestUtil.nextInt(testFiles.size());
          testfile = testFiles.remove(index);
        }

        long len = 0;
        int sizeToAppend = 0;
        try {
          FileSystem fs = cluster.getFileSystem();

          // add a random number of bytes to file
          len = fs.getFileStatus(testfile).getLen();

          // if file is already full, then pick another file
          if (len >= AppendTestUtil.FILE_SIZE) {
            System.out.println("File " + testfile + " is full.");
            continue;
          }

          // do small size appends so that we can trigger multiple
          // appends to the same file.
          //
          int left = (int) (AppendTestUtil.FILE_SIZE - len) / 3;
          if (left <= 0) {
            left = 1;
          }
          sizeToAppend = AppendTestUtil.nextInt(left);

          System.out.println(
              "Workload thread "
                  + id
                  + " appending "
                  + sizeToAppend
                  + " bytes "
                  + " to file "
                  + testfile
                  + " of size "
                  + len);
          FSDataOutputStream stm = fs.append(testfile);
          stm.write(fileContents, (int) len, sizeToAppend);
          stm.close();

          // wait for the file size to be reflected in the namenode metadata
          while (fs.getFileStatus(testfile).getLen() != (len + sizeToAppend)) {
            try {
              System.out.println(
                  "Workload thread "
                      + id
                      + " file "
                      + testfile
                      + " size "
                      + fs.getFileStatus(testfile).getLen()
                      + " expected size "
                      + (len + sizeToAppend)
                      + " waiting for namenode metadata update.");
              Thread.sleep(5000);
            } catch (InterruptedException e) {
              // ignore and re-check the file length
            }
          }

          assertEquals(
              "File " + testfile + " has unexpected size",
              len + sizeToAppend,
              fs.getFileStatus(testfile).getLen());

          AppendTestUtil.checkFullFile(
              fs, testfile, (int) (len + sizeToAppend), fileContents, "Read 2");
        } catch (Throwable e) {
          globalStatus = false;
          System.out.println("Workload exception " + id + " testfile " + testfile + " " + e);
          e.printStackTrace();
          fail(
              "Workload exception "
                  + id
                  + " testfile "
                  + testfile
                  + " expected size "
                  + (len + sizeToAppend));
        }

        // Add testfile back to the pool of files.
        synchronized (testFiles) {
          testFiles.add(testfile);
        }
      }
    }
Example #10
  @Test
  public void testTruncatedLog() throws Exception {
    try {
      DirectUpdateHandler2.commitOnClose = false;
      final Semaphore logReplay = new Semaphore(0);
      final Semaphore logReplayFinish = new Semaphore(0);

      UpdateLog.testing_logReplayHook =
          new Runnable() {
            @Override
            public void run() {
              try {
                assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
              } catch (Exception e) {
                throw new RuntimeException(e);
              }
            }
          };

      UpdateLog.testing_logReplayFinishHook =
          new Runnable() {
            @Override
            public void run() {
              logReplayFinish.release();
            }
          };

      String logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();

      clearIndex();
      assertU(commit());

      assertU(adoc("id", "F1"));
      assertU(adoc("id", "F2"));
      assertU(adoc("id", "F3"));

      h.close();

      String[] files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
      Arrays.sort(files);

      FSDataOutputStream dos = fs.append(new Path(logDir, files[files.length - 1]));

      dos.writeLong(0xffffffffffffffffL);
      dos.writeChars(
          "This should be appended to a good log file, representing a bad partially written record.");
      dos.close();

      logReplay.release(1000);
      logReplayFinish.drainPermits();
      // this is what the corrupted log currently produces... subject to change.
      ignoreException("OutOfBoundsException");
      createCore();
      assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
      resetExceptionIgnores();
      assertJQ(req("q", "*:*"), "/response/numFound==3");

      //
      // Now test that the bad log file doesn't mess up retrieving latest versions
      //

      updateJ(
          jsonAdd(sdoc("id", "F4", "_version_", "104")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
      updateJ(
          jsonAdd(sdoc("id", "F5", "_version_", "105")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
      updateJ(
          jsonAdd(sdoc("id", "F6", "_version_", "106")), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));

      // This currently skips the bad log file and also returns the version of
      // the clearIndex (del *:*)
      // assertJQ(req("qt","/get", "getVersions","6"), "/versions==[106,105,104]");
      assertJQ(req("qt", "/get", "getVersions", "3"), "/versions==[106,105,104]");

    } finally {
      DirectUpdateHandler2.commitOnClose = true;
      UpdateLog.testing_logReplayHook = null;
      UpdateLog.testing_logReplayFinishHook = null;
    }
  }
Example #11
  // in rare circumstances, two logs can be left uncapped (lacking a commit at the end signifying
  // that all the content in the log was committed)
  @Test
  public void testRecoveryMultipleLogs() throws Exception {
    try {
      DirectUpdateHandler2.commitOnClose = false;
      final Semaphore logReplay = new Semaphore(0);
      final Semaphore logReplayFinish = new Semaphore(0);

      UpdateLog.testing_logReplayHook =
          new Runnable() {
            @Override
            public void run() {
              try {
                assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
              } catch (Exception e) {
                throw new RuntimeException(e);
              }
            }
          };

      UpdateLog.testing_logReplayFinishHook =
          new Runnable() {
            @Override
            public void run() {
              logReplayFinish.release();
            }
          };

      String logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();

      clearIndex();
      assertU(commit());

      assertU(adoc("id", "AAAAAA"));
      assertU(adoc("id", "BBBBBB"));
      assertU(adoc("id", "CCCCCC"));

      h.close();
      String[] files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
      Arrays.sort(files);
      String fname = files[files.length - 1];

      FSDataOutputStream dos = fs.append(new Path(logDir, fname));
      dos.writeLong(0xffffffffffffffffL);
      dos.writeChars(
          "This should be appended to a good log file, representing a bad partially written record.");
      dos.close();

      FSDataInputStream dis = fs.open(new Path(logDir, fname));
      byte[] content = new byte[dis.available()];

      dis.readFully(content);

      dis.close();

      // Now make a newer log file with just the IDs changed.  NOTE: this may not work if log format
      // changes too much!
      findReplace(
          "AAAAAA".getBytes(StandardCharsets.UTF_8),
          "aaaaaa".getBytes(StandardCharsets.UTF_8),
          content);
      findReplace(
          "BBBBBB".getBytes(StandardCharsets.UTF_8),
          "bbbbbb".getBytes(StandardCharsets.UTF_8),
          content);
      findReplace(
          "CCCCCC".getBytes(StandardCharsets.UTF_8),
          "cccccc".getBytes(StandardCharsets.UTF_8),
          content);

      // WARNING... assumes format of .00000n where n is less than 9
      long logNumber = Long.parseLong(fname.substring(fname.lastIndexOf(".") + 1));
      String fname2 =
          String.format(
              Locale.ROOT, UpdateLog.LOG_FILENAME_PATTERN, UpdateLog.TLOG_NAME, logNumber + 1);

      dos = fs.create(new Path(logDir, fname2), (short) 1);
      dos.write(content);
      dos.close();

      logReplay.release(1000);
      logReplayFinish.drainPermits();
      // this is what the corrupted log currently produces... subject to change.
      ignoreException("OutOfBoundsException");
      createCore();
      assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
      resetExceptionIgnores();
      assertJQ(req("q", "*:*"), "/response/numFound==6");

    } finally {
      DirectUpdateHandler2.commitOnClose = true;
      UpdateLog.testing_logReplayHook = null;
      UpdateLog.testing_logReplayFinishHook = null;
    }
  }
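The findReplace helper is not shown; a minimal sketch consistent with how it is used above (same-length byte patterns replaced in place):

  // Sketch of findReplace: in-place replacement of every occurrence of
  // 'find' with 'replace'. Assumes both arrays have the same length, as in
  // the ID-swapping above.
  private static void findReplace(byte[] find, byte[] replace, byte[] content) {
    for (int i = 0; i <= content.length - find.length; i++) {
      boolean match = true;
      for (int j = 0; j < find.length; j++) {
        if (content[i + j] != find[j]) {
          match = false;
          break;
        }
      }
      if (match) {
        System.arraycopy(replace, 0, content, i, replace.length);
        i += find.length - 1; // skip past the replaced region
      }
    }
  }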
  /**
   * Common routine that performs positioned reads while the file is open for write. After each
   * write iteration, read the file from the given begin position to the end. Returns 0 on
   * success, otherwise the negative of the number of failures.
   */
  private int testWriteAndRead(String fname, int loopN, int chunkSize, long readBeginPosition)
      throws IOException {

    int countOfFailures = 0;
    long byteVisibleToRead = 0;
    FSDataOutputStream out = null;

    byte[] outBuffer = new byte[BUFFER_SIZE];
    byte[] inBuffer = new byte[BUFFER_SIZE];

    for (int i = 0; i < BUFFER_SIZE; i++) {
      outBuffer[i] = (byte) (i & 0x00ff);
    }

    try {
      Path path = getFullyQualifiedPath(fname);
      long fileLengthBeforeOpen = 0;

      if (ifExists(path)) {
        if (truncateOption) {
          out =
              useFCOption
                  ? mfc.create(path, EnumSet.of(CreateFlag.OVERWRITE))
                  : mfs.create(path, truncateOption);
          LOG.info("File already exists. File open with Truncate mode: " + path);
        } else {
          out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.APPEND)) : mfs.append(path);
          fileLengthBeforeOpen = getFileLengthFromNN(path);
          LOG.info(
              "File already exists of size "
                  + fileLengthBeforeOpen
                  + " File open for Append mode: "
                  + path);
        }
      } else {
        out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.CREATE)) : mfs.create(path);
      }

      long totalByteWritten = fileLengthBeforeOpen;
      long totalByteVisible = fileLengthBeforeOpen;
      long totalByteWrittenButNotVisible = 0;

      boolean toFlush;
      for (int i = 0; i < loopN; i++) {
        toFlush = (i % 2) == 0;

        writeData(out, outBuffer, chunkSize);

        totalByteWritten += chunkSize;

        if (toFlush) {
          out.hflush();
          totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
          totalByteWrittenButNotVisible = 0;
        } else {
          totalByteWrittenButNotVisible += chunkSize;
        }

        if (verboseOption) {
          LOG.info(
              "TestReadWrite - Written "
                  + chunkSize
                  + ". Total written = "
                  + totalByteWritten
                  + ". TotalByteVisible = "
                  + totalByteVisible
                  + " to file "
                  + fname);
        }
        byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);

        String readmsg =
            "Written="
                + totalByteWritten
                + " ; Expected Visible="
                + totalByteVisible
                + " ; Got Visible="
                + byteVisibleToRead
                + " of file "
                + fname;

        if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
          readmsg = "pass: reader sees expected number of visible byte. " + readmsg + " [pass]";
        } else {
          countOfFailures++;
          readmsg = "fail: reader see different number of visible byte. " + readmsg + " [fail]";
          if (abortTestOnFailure) {
            throw new IOException(readmsg);
          }
        }
        LOG.info(readmsg);
      }

      // test the automatic flush after close
      writeData(out, outBuffer, chunkSize);
      totalByteWritten += chunkSize;
      totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
      totalByteWrittenButNotVisible = 0; // close() flushes everything

      out.close();

      byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);

      String readmsg2 =
          "Written="
              + totalByteWritten
              + " ; Expected Visible="
              + totalByteVisible
              + " ; Got Visible="
              + byteVisibleToRead
              + " of file "
              + fname;
      String readmsg;

      if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
        readmsg =
            "pass: reader sees expected number of visible byte on close. " + readmsg2 + " [pass]";
      } else {
        countOfFailures++;
        readmsg =
            "fail: reader sees different number of visible byte on close. " + readmsg2 + " [fail]";
        LOG.info(readmsg);
        if (abortTestOnFailure) throw new IOException(readmsg);
      }

      // now check if NN got the same length
      long lenFromFc = getFileLengthFromNN(path);
      if (lenFromFc != byteVisibleToRead) {
        readmsg =
            "fail: reader sees different number of visible byte from NN " + readmsg2 + " [fail]";
        throw new IOException(readmsg);
      }
    } catch (IOException e) {
      throw new IOException(
          "##### Caught Exception in testAppendWriteAndRead. Close file. "
              + "Total Byte Read so far = "
              + byteVisibleToRead,
          e);
    } finally {
      if (out != null) out.close();
    }
    return -countOfFailures;
  }
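The visibility contract this routine exercises is that bytes become readable by new readers only after hflush or close. A minimal standalone sketch (hypothetical path; assumes fs is an HDFS FileSystem):

  // Minimal hflush visibility sketch: after hflush, a fresh reader sees the
  // flushed bytes even though the writer is still open.
  Path p = new Path("/visibility.dat"); // hypothetical path
  FSDataOutputStream out = fs.create(p);
  out.write(new byte[] {1, 2, 3});
  out.hflush(); // makes the three bytes visible to new readers
  FSDataInputStream in = fs.open(p);
  byte[] buf = new byte[3];
  in.readFully(0, buf); // succeeds before out.close()
  in.close();
  out.close();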
 /** Append the given string to the given file. */
 public static void appendFile(FileSystem fs, Path p, String s) throws IOException {
   assert fs.exists(p);
   InputStream is = new ByteArrayInputStream(s.getBytes(StandardCharsets.UTF_8));
   FSDataOutputStream os = fs.append(p);
   IOUtils.copyBytes(is, os, s.length(), true);
 }
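An illustrative call (hypothetical path); the assert means the target must already exist, so create it first if necessary:

 // Hypothetical usage of appendFile: the target file must already exist.
 FileSystem fs = FileSystem.get(new Configuration());
 Path log = new Path("/logs/app.log"); // hypothetical path
 appendFile(fs, log, "one more line\n");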