@Test
  public void testOpenFilesWithRename() throws Exception {
    Path path = new Path("/test");
    doWriteAndAbort(fs, path);

    // check handling of zero-sized blocks
    Path fileWithEmptyBlock = new Path("/test/test/test4");
    fs.create(fileWithEmptyBlock); // leave the file open (under construction) for addBlock() below
    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
    String clientName = fs.getClient().getClientName();
    // create one empty block
    nameNodeRpc.addBlock(
        fileWithEmptyBlock.toString(),
        clientName,
        null,
        null,
        HdfsConstants.GRANDFATHER_INODE_ID,
        null);
    fs.createSnapshot(path, "s2");

    fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
    fs.delete(new Path("/test/test-renamed"), true);
    NameNode nameNode = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
    NameNodeAdapter.leaveSafeMode(nameNode);
    cluster.restartNameNode(true);
  }
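
The doWriteAndAbort helper invoked above is not shown in this excerpt. Below is a minimal sketch, assuming it opens files under a snapshottable /test, hflushes so that blocks are persisted, and returns with the streams still open; the file names and the snapshot name "s1" are illustrative.

  // Hypothetical sketch of doWriteAndAbort: write to files under /test/test,
  // hflush so blocks exist on datanodes, and snapshot the directory while the
  // output streams are still open, leaving the files under construction.
  private void doWriteAndAbort(DistributedFileSystem fs, Path path) throws IOException {
    fs.mkdirs(path);
    fs.allowSnapshot(path);
    byte[] data = new byte[1024];
    FSDataOutputStream out = fs.create(new Path("/test/test/test2"));
    out.write(data);
    out.hflush(); // block persisted, file stays open
    FSDataOutputStream out2 = fs.create(new Path("/test/test/test3"));
    out2.write(data);
    out2.hflush();
    fs.createSnapshot(path, "s1"); // snapshot taken with files still open
  }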
Example #2
  /*
   * Since the NameNode does not persist block locations, an addBlock() retry
   * call issued after a NameNode restart must re-select the locations and
   * return them to the client. See HDFS-5257.
   */
  @Test
  public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
    final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
    // create file
    nameNodeRpc.create(
        src,
        FsPermission.getFileDefault(),
        "clientName",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
        true,
        (short) 3,
        1024,
        null);
    // start first addBlock()
    LOG.info("Starting first addBlock for " + src);
    LocatedBlock lb1 =
        nameNodeRpc.addBlock(src, "clientName", null, null, INodeId.GRANDFATHER_INODE_ID, null);
    assertTrue("Block locations should be present", lb1.getLocations().length > 0);

    cluster.restartNameNode();
    nameNodeRpc = cluster.getNameNodeRpc();
    LocatedBlock lb2 =
        nameNodeRpc.addBlock(src, "clientName", null, null, INodeId.GRANDFATHER_INODE_ID, null);
    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
    assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
  }
  static DatanodeInfo chooseDatanode(
      final NameNode namenode,
      final String path,
      final HttpOpParam.Op op,
      final long openOffset,
      final long blocksize,
      Configuration conf)
      throws IOException {
    final BlockManager bm = namenode.getNamesystem().getBlockManager();

    if (op == PutOpParam.Op.CREATE) {
      // choose a datanode near to client
      final DatanodeDescriptor clientNode =
          bm.getDatanodeManager().getDatanodeByHost(getRemoteAddress());
      if (clientNode != null) {
        final DatanodeDescriptor[] datanodes =
            bm.getBlockPlacementPolicy().chooseTarget(path, 1, clientNode, null, blocksize);
        if (datanodes.length > 0) {
          return datanodes[0];
        }
      }
    } else if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      // choose a datanode containing a replica
      final NamenodeProtocols np = namenode.getRpcServer();
      final HdfsFileStatus status = np.getFileInfo(path);
      if (status == null) {
        throw new FileNotFoundException("File " + path + " not found.");
      }
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN) {
        if (openOffset < 0L || (openOffset >= len && len > 0)) {
          throw new IOException(
              "Offset="
                  + openOffset
                  + " out of the range [0, "
                  + len
                  + "); "
                  + op
                  + ", path="
                  + path);
        }
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
        final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return JspHelper.bestNode(locations.get(0).getLocations(), false, conf);
        }
      }
    }

    return (DatanodeDescriptor)
        bm.getDatanodeManager().getNetworkTopology().chooseRandom(NodeBase.ROOT);
  }
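
For context, here is a hedged sketch of how a WebHDFS redirect might consume chooseDatanode(). The real redirectURI() in NamenodeWebHdfsMethods also appends delegation-token and user parameters; the query string built here is a simplified assumption.

  static URI redirectToChosenDatanode(final NameNode namenode, final String path,
      final HttpOpParam.Op op, final long openOffset, final long blocksize,
      final Configuration conf) throws IOException, URISyntaxException {
    // pick a datanode with the policy above, then point the client at its
    // HTTP info port under the standard /webhdfs/v1 prefix
    final DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset, blocksize, conf);
    final String uripath = "/webhdfs/v1" + path;
    final String query = "op=" + op; // assumed query formatting
    return new URI("http", null, dn.getHostName(), dn.getInfoPort(), uripath, query, null);
  }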
Example #4
  @Test
  public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0");
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();

      long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();

      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn("fake-storage-id").when(mockDnReg).getStorageID();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();

      // Should succeed when software versions are the same and CTimes are the
      // same.
      doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
      rpcServer.registerDatanode(mockDnReg);

      // Should succeed when software versions are the same and CTimes are
      // different.
      doReturn(nnCTime + 1).when(mockStorageInfo).getCTime();
      rpcServer.registerDatanode(mockDnReg);

      // Should fail when software version of DN is different from NN and CTimes
      // are different.
      doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion();
      try {
        rpcServer.registerDatanode(mockDnReg);
        fail(
            "Should not have been able to register DN with different software"
                + " versions and CTimes");
      } catch (IncorrectVersionException ive) {
        GenericTestUtils.assertExceptionContains("does not match CTime of NN", ive);
        LOG.info("Got expected exception", ive);
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
  private static DirectoryListing getDirectoryListing(
      final NamenodeProtocols np, final String p, byte[] startAfter) throws IOException {
    final DirectoryListing listing = np.getListing(p, startAfter, false);
    if (listing == null) { // the directory does not exist
      throw new FileNotFoundException("File " + p + " does not exist.");
    }
    return listing;
  }
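
getDirectoryListing() returns a single, possibly partial page; callers are expected to page through large directories. A sketch of that loop (the listAll name is illustrative; java.util imports assumed), using each page's last entry name as the cursor for the next call:

  private static List<HdfsFileStatus> listAll(final NamenodeProtocols np, final String p)
      throws IOException {
    final List<HdfsFileStatus> all = new ArrayList<HdfsFileStatus>();
    // start before the first entry, then advance the startAfter cursor
    DirectoryListing page = getDirectoryListing(np, p, HdfsFileStatus.EMPTY_NAME);
    while (true) {
      Collections.addAll(all, page.getPartialListing());
      if (!page.hasMore()) {
        break;
      }
      page = getDirectoryListing(np, p, page.getLastName());
    }
    return all;
  }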
  /**
   * Access each block of the file through its second DataNode replica until one
   * access fails, indicating that the corresponding volume has been removed.
   *
   * @param path file whose blocks are probed
   * @param size length of the file in bytes
   * @throws IOException if fetching block locations fails
   */
  private void triggerFailure(String path, long size) throws IOException {
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> locatedBlocks = nn.getBlockLocations(path, 0, size).getLocatedBlocks();

    for (LocatedBlock lb : locatedBlocks) {
      DatanodeInfo dinfo = lb.getLocations()[1];
      ExtendedBlock b = lb.getBlock();
      try {
        accessBlock(dinfo, lb);
      } catch (IOException e) {
        System.out.println(
            "Failure triggered, on block: "
                + b.getBlockId()
                + "; corresponding volume should be removed by now");
        break;
      }
    }
  }
Example #7
  @Test
  public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, "3.0.0");
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();

      long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();

      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn("fake-storage-id").when(mockDnReg).getStorageID();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();

      // Should succeed when software versions are the same.
      doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
      rpcServer.registerDatanode(mockDnReg);

      // Should succeed when software version of DN is above minimum required by NN.
      doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
      rpcServer.registerDatanode(mockDnReg);

      // Should fail when software version of DN is below minimum required by NN.
      doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
      try {
        rpcServer.registerDatanode(mockDnReg);
        fail("Should not have been able to register DN with too-low version.");
      } catch (IncorrectVersionException ive) {
        GenericTestUtils.assertExceptionContains("The reported DataNode version is too low", ive);
        LOG.info("Got expected exception", ive);
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
  /**
   * Count the datanodes holding replicas of the file's blocks and record the
   * per-block location counts in the given map.
   *
   * @param map map from block ID to its {@link BlockLocs} accumulator, updated in place
   * @param path file whose block locations are counted
   * @param size length of the file in bytes
   * @return total number of replica locations reported across all blocks
   * @throws IOException if fetching block locations fails
   */
  private int countNNBlocks(Map<String, BlockLocs> map, String path, long size) throws IOException {
    int total = 0;

    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> locatedBlocks = nn.getBlockLocations(path, 0, size).getLocatedBlocks();

    for (LocatedBlock lb : locatedBlocks) {
      String blockId = String.valueOf(lb.getBlock().getBlockId());
      DatanodeInfo[] dn_locs = lb.getLocations();
      BlockLocs bl = map.get(blockId);
      if (bl == null) {
        bl = new BlockLocs();
      }
      total += dn_locs.length;
      bl.num_locs += dn_locs.length;
      map.put(blockId, bl);
    }
    return total;
  }
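
countNNBlocks() relies on a BlockLocs accumulator type that is not shown. A minimal sketch consistent with its use above (the num_files field is an assumption):

  // Per-block accumulator, keyed by block ID in the map passed to countNNBlocks()
  private static class BlockLocs {
    int num_files = 0; // files referencing the block (assumed)
    int num_locs = 0;  // datanode locations reported for the block
  }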
  private Response get(
      final UserGroupInformation ugi,
      final DelegationParam delegation,
      final UserParam username,
      final DoAsParam doAsUser,
      final String fullpath,
      final GetOpParam op,
      final OffsetParam offset,
      final LengthParam length,
      final RenewerParam renewer,
      final BufferSizeParam bufferSize)
      throws IOException, URISyntaxException {
    final NameNode namenode = (NameNode) context.getAttribute("name.node");
    final NamenodeProtocols np = namenode.getRpcServer();

    switch (op.getValue()) {
      case OPEN:
        {
          final URI uri =
              redirectURI(
                  namenode,
                  ugi,
                  delegation,
                  username,
                  doAsUser,
                  fullpath,
                  op.getValue(),
                  offset.getValue(),
                  -1L,
                  offset,
                  length,
                  bufferSize);
          return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
        }
      case GET_BLOCK_LOCATIONS:
        {
          final long offsetValue = offset.getValue();
          final Long lengthValue = length.getValue();
          final LocatedBlocks locatedblocks =
              np.getBlockLocations(
                  fullpath, offsetValue, lengthValue != null ? lengthValue : Long.MAX_VALUE);
          final String js = JsonUtil.toJsonString(locatedblocks);
          return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
        }
      case GETFILESTATUS:
        {
          final HdfsFileStatus status = np.getFileInfo(fullpath);
          if (status == null) {
            throw new FileNotFoundException("File does not exist: " + fullpath);
          }

          final String js = JsonUtil.toJsonString(status, true);
          return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
        }
      case LISTSTATUS:
        {
          final StreamingOutput streaming = getListingStream(np, fullpath);
          return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
        }
      case GETCONTENTSUMMARY:
        {
          final ContentSummary contentsummary = np.getContentSummary(fullpath);
          final String js = JsonUtil.toJsonString(contentsummary);
          return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
        }
      case GETFILECHECKSUM:
        {
          final URI uri =
              redirectURI(
                  namenode, ugi, delegation, username, doAsUser, fullpath, op.getValue(), -1L, -1L);
          return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
        }
      case GETDELEGATIONTOKEN:
        {
          if (delegation.getValue() != null) {
            throw new IllegalArgumentException(delegation.getName() + " parameter is not null.");
          }
          final Token<? extends TokenIdentifier> token =
              generateDelegationToken(namenode, ugi, renewer.getValue());
          final String js = JsonUtil.toJsonString(token);
          return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
        }
      case GETHOMEDIRECTORY:
        {
          final String js =
              JsonUtil.toJsonString(
                  org.apache.hadoop.fs.Path.class.getSimpleName(),
                  WebHdfsFileSystem.getHomeDirectoryString(ugi));
          return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
        }
      default:
        throw new UnsupportedOperationException(op + " is not supported");
    }
  }
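
To see this GET handler end to end, here is a hedged client-side sketch that issues GETFILESTATUS against the WebHDFS REST layout (/webhdfs/v1/<path>?op=...). The host, port, path, and user.name value are placeholders.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class WebHdfsGetFileStatusExample {
  public static void main(String[] args) throws Exception {
    // hits the GETFILESTATUS case above; the handler replies with JSON
    URL url = new URL(
        "http://namenode.example.com:50070/webhdfs/v1/tmp/f?op=GETFILESTATUS&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader r = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      String line;
      while ((line = r.readLine()) != null) {
        System.out.println(line); // the HdfsFileStatus serialized by JsonUtil
      }
    } finally {
      conn.disconnect();
    }
  }
}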
Example #10
  private Response put(
      final UserGroupInformation ugi,
      final DelegationParam delegation,
      final UserParam username,
      final DoAsParam doAsUser,
      final String fullpath,
      final PutOpParam op,
      final DestinationParam destination,
      final OwnerParam owner,
      final GroupParam group,
      final PermissionParam permission,
      final OverwriteParam overwrite,
      final BufferSizeParam bufferSize,
      final ReplicationParam replication,
      final BlockSizeParam blockSize,
      final ModificationTimeParam modificationTime,
      final AccessTimeParam accessTime,
      final RenameOptionSetParam renameOptions,
      final CreateParentParam createParent,
      final TokenArgumentParam delegationTokenArgument)
      throws IOException, URISyntaxException {

    final Configuration conf = (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
    final NameNode namenode = (NameNode) context.getAttribute("name.node");
    final NamenodeProtocols np = namenode.getRpcServer();

    switch (op.getValue()) {
      case CREATE:
        {
          final URI uri =
              redirectURI(
                  namenode,
                  ugi,
                  delegation,
                  username,
                  doAsUser,
                  fullpath,
                  op.getValue(),
                  -1L,
                  blockSize.getValue(conf),
                  permission,
                  overwrite,
                  bufferSize,
                  replication,
                  blockSize);
          return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
        }
      case MKDIRS:
        {
          final boolean b = np.mkdirs(fullpath, permission.getFsPermission(), true);
          final String js = JsonUtil.toJsonString("boolean", b);
          return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
        }
      case CREATESYMLINK:
        {
          np.createSymlink(
              destination.getValue(),
              fullpath,
              PermissionParam.getDefaultFsPermission(),
              createParent.getValue());
          return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
        }
      case RENAME:
        {
          final EnumSet<Options.Rename> s = renameOptions.getValue();
          if (s.isEmpty()) {
            final boolean b = np.rename(fullpath, destination.getValue());
            final String js = JsonUtil.toJsonString("boolean", b);
            return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
          } else {
            np.rename2(fullpath, destination.getValue(), s.toArray(new Options.Rename[s.size()]));
            return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
          }
        }
      case SETREPLICATION:
        {
          final boolean b = np.setReplication(fullpath, replication.getValue(conf));
          final String js = JsonUtil.toJsonString("boolean", b);
          return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
        }
      case SETOWNER:
        {
          if (owner.getValue() == null && group.getValue() == null) {
            throw new IllegalArgumentException("Both owner and group are empty.");
          }

          np.setOwner(fullpath, owner.getValue(), group.getValue());
          return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
        }
      case SETPERMISSION:
        {
          np.setPermission(fullpath, permission.getFsPermission());
          return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
        }
      case SETTIMES:
        {
          np.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
          return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
        }
      case RENEWDELEGATIONTOKEN:
        {
          final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
          token.decodeFromUrlString(delegationTokenArgument.getValue());
          final long expiryTime = np.renewDelegationToken(token);
          final String js = JsonUtil.toJsonString("long", expiryTime);
          return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
        }
      case CANCELDELEGATIONTOKEN:
        {
          final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
          token.decodeFromUrlString(delegationTokenArgument.getValue());
          np.cancelDelegationToken(token);
          return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
        }
      default:
        throw new UnsupportedOperationException(op + " is not supported");
    }
  }
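
A matching hedged sketch for the PUT side, exercising the MKDIRS case above (which returns {"boolean":true} on success); the endpoint and user.name are again placeholders.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class WebHdfsMkdirsExample {
  public static void main(String[] args) throws Exception {
    // hits the MKDIRS case above, which calls np.mkdirs() on the NameNode
    URL url = new URL(
        "http://namenode.example.com:50070/webhdfs/v1/tmp/newdir?op=MKDIRS&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    try (BufferedReader r = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      System.out.println(r.readLine()); // e.g. {"boolean":true}
    } finally {
      conn.disconnect();
    }
  }
}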
Example #11
  /** Retry addBlock() while another thread is in chooseTarget(). See HDFS-4452. */
  @Test
  public void testRetryAddBlockWhileInChooseTarget() throws Exception {
    final String src = "/testRetryAddBlockWhileInChooseTarget";

    FSNamesystem ns = cluster.getNamesystem();
    BlockManager spyBM = spy(ns.getBlockManager());
    final NamenodeProtocols nn = cluster.getNameNodeRpc();

    // substitute mocked BlockManager into FSNamesystem
    Class<? extends FSNamesystem> nsClass = ns.getClass();
    Field bmField = nsClass.getDeclaredField("blockManager");
    bmField.setAccessible(true);
    bmField.set(ns, spyBM);

    doAnswer(
            new Answer<DatanodeStorageInfo[]>() {
              @Override
              public DatanodeStorageInfo[] answer(InvocationOnMock invocation) throws Throwable {
                LOG.info("chooseTarget for " + src);
                DatanodeStorageInfo[] ret = (DatanodeStorageInfo[]) invocation.callRealMethod();
                count++;
                if (count == 1) { // run second addBlock()
                  LOG.info("Starting second addBlock for " + src);
                  nn.addBlock(src, "clientName", null, null, INodeId.GRANDFATHER_INODE_ID, null);
                  LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
                  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
                  lb2 = lbs.get(0);
                  assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
                }
                return ret;
              }
            })
        .when(spyBM)
        .chooseTarget4NewBlock(
            Mockito.anyString(),
            Mockito.anyInt(),
            Mockito.<DatanodeDescriptor>any(),
            Mockito.<HashSet<Node>>any(),
            Mockito.anyLong(),
            Mockito.<List<String>>any(),
            Mockito.anyByte());

    // create file
    nn.create(
        src,
        FsPermission.getFileDefault(),
        "clientName",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
        true,
        (short) 3,
        1024,
        null);

    // start first addBlock()
    LOG.info("Starting first addBlock for " + src);
    nn.addBlock(src, "clientName", null, null, INodeId.GRANDFATHER_INODE_ID, null);

    // check locations
    LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
    assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
    lb1 = lbs.get(0);
    assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  }
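
The test above mutates count, lb1, and lb2 from inside the Answer callback, so they must be fields of the enclosing test class rather than locals. A sketch of the assumed declarations:

  // Assumed state of the enclosing test class (names taken from the test body)
  private static final short REPLICATION = 3; // matches the (short) 3 passed to create()
  private int count = 0;                      // chooseTarget invocations observed
  private LocatedBlock lb1;                   // block returned by the first addBlock()
  private LocatedBlock lb2;                   // block returned by the retried addBlock()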
  protected void doTestRead(Configuration conf, MiniDFSCluster cluster, boolean isStriped)
      throws Exception {
    final int numDataNodes = cluster.getDataNodes().size();
    final NameNode nn = cluster.getNameNode();
    final NamenodeProtocols nnProto = nn.getRpcServer();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

    // set a short token lifetime (1 second) initially
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);

    Path fileToRead = new Path(FILE_TO_READ);
    FileSystem fs = cluster.getFileSystem();
    byte[] expected = generateBytes(FILE_SIZE);
    createFile(fs, fileToRead, expected);

    /*
     * setup for testing expiration handling of cached tokens
     */

    // read using blockSeekTo(). Acquired tokens are cached in in1
    FSDataInputStream in1 = fs.open(fileToRead);
    assertTrue(checkFile1(in1, expected));
    // read using blockSeekTo(). Acquired tokens are cached in in2
    FSDataInputStream in2 = fs.open(fileToRead);
    assertTrue(checkFile1(in2, expected));
    // read using fetchBlockByteRange(). Acquired tokens are cached in in3
    FSDataInputStream in3 = fs.open(fileToRead);
    assertTrue(checkFile2(in3, expected));

    /*
     * testing READ interface on DN using a BlockReader
     */
    DFSClient client = null;
    try {
      client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    } finally {
      if (client != null) client.close();
    }
    List<LocatedBlock> locatedBlocks =
        nnProto.getBlockLocations(FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
    LocatedBlock lblock = locatedBlocks.get(0); // first block
    // verify token is not expired
    assertFalse(isBlockTokenExpired(lblock));
    // read with valid token, should succeed
    tryRead(conf, lblock, true);

    /*
     * wait till the block token and all tokens cached in in1, in2 and in3 expire
     */

    while (!isBlockTokenExpired(lblock)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }

    /*
     * continue testing READ interface on DN using a BlockReader
     */

    // verify token is expired
    assertTrue(isBlockTokenExpired(lblock));
    // read should fail
    tryRead(conf, lblock, false);
    // use a valid new token
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
    // read should succeed
    tryRead(conf, lblock, true);
    // use a token with wrong blockID
    long rightId = lblock.getBlock().getBlockId();
    long wrongId = rightId + 1;
    lblock.getBlock().setBlockId(wrongId);
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
    lblock.getBlock().setBlockId(rightId);
    // read should fail
    tryRead(conf, lblock, false);
    // use a token with wrong access modes
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.WRITE);
    // read should fail
    tryRead(conf, lblock, false);

    // set a long token lifetime for future tokens
    SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);

    /*
     * testing that when cached tokens are expired, DFSClient will re-fetch
     * tokens transparently for READ.
     */

    // confirm all tokens cached in in1 are expired by now
    List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
      assertTrue(isBlockTokenExpired(blk));
    }
    // verify blockSeekTo() is able to re-fetch token transparently
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));

    // confirm all tokens cached in in2 are expired by now
    List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
      assertTrue(isBlockTokenExpired(blk));
    }
    // verify blockSeekTo() is able to re-fetch token transparently (testing
    // via another interface method)
    if (isStriped) {
      // striped block doesn't support seekToNewSource
      in2.seek(0);
    } else {
      assertTrue(in2.seekToNewSource(0));
    }
    assertTrue(checkFile1(in2, expected));

    // confirm all tokens cached in in3 are expired by now
    List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
      assertTrue(isBlockTokenExpired(blk));
    }
    // verify fetchBlockByteRange() is able to re-fetch token transparently
    assertTrue(checkFile2(in3, expected));

    /*
     * testing that after datanodes are restarted on the same ports, cached
     * tokens should still work and there is no need to fetch new tokens from
     * namenode. This test should run while namenode is down (to make sure no
     * new tokens can be fetched from namenode).
     */

    // restart datanodes on the same ports that they currently use
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);

    // confirm tokens cached in in1 are still valid
    lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
      assertFalse(isBlockTokenExpired(blk));
    }
    // verify blockSeekTo() still works (forced to use cached tokens)
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));

    // confirm tokens cached in in2 are still valid
    lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
      assertFalse(isBlockTokenExpired(blk));
    }

    // verify blockSeekTo() still works (forced to use cached tokens)
    if (isStriped) {
      in2.seek(0);
    } else {
      in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));

    // confirm tokens cached in in3 are still valid
    lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
      assertFalse(isBlockTokenExpired(blk));
    }
    // verify fetchBlockByteRange() still works (forced to use cached tokens)
    assertTrue(checkFile2(in3, expected));

    /*
     * testing that when namenode is restarted, cached tokens should still
     * work and there is no need to fetch new tokens from namenode. Like the
     * previous test, this test should also run while namenode is down. The
     * setup for this test depends on the previous test.
     */

    // restart the namenode and then shut it down for test
    cluster.restartNameNode(0);
    cluster.shutdownNameNode(0);

    // verify blockSeekTo() still works (forced to use cached tokens)
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    // verify again blockSeekTo() still works (forced to use cached tokens)
    if (isStriped) {
      in2.seek(0);
    } else {
      in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));

    // verify fetchBlockByteRange() still works (forced to use cached tokens)
    assertTrue(checkFile2(in3, expected));

    /*
     * testing that after both namenode and datanodes got restarted (namenode
     * first, followed by datanodes), DFSClient can't access DN without
     * re-fetching tokens and is able to re-fetch tokens transparently. The
     * setup of this test depends on the previous test.
     */

    // restore the cluster and restart the datanodes for test
    cluster.restartNameNode(0);
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());

    // shutdown namenode so that DFSClient can't get new tokens from namenode
    cluster.shutdownNameNode(0);

    // verify blockSeekTo() fails (cached tokens become invalid)
    in1.seek(0);
    assertFalse(checkFile1(in1, expected));
    // verify fetchBlockByteRange() fails (cached tokens become invalid)
    assertFalse(checkFile2(in3, expected));

    // restart the namenode to allow DFSClient to re-fetch tokens
    cluster.restartNameNode(0);
    // verify blockSeekTo() works again (by transparently re-fetching
    // tokens from namenode)
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    if (isStriped) {
      in2.seek(0);
    } else {
      in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    // verify fetchBlockByteRange() works again (by transparently
    // re-fetching tokens from namenode)
    assertTrue(checkFile2(in3, expected));

    /*
     * testing that when datanodes are restarted on different ports, DFSClient
     * is able to re-fetch tokens transparently to connect to them
     */

    // restart datanodes on newly assigned ports
    assertTrue(cluster.restartDataNodes(false));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    // verify blockSeekTo() is able to re-fetch token transparently
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    // verify blockSeekTo() is able to re-fetch token transparently
    if (isStriped) {
      in2.seek(0);
    } else {
      in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    // verify fetchBlockByteRange() is able to re-fetch token transparently
    assertTrue(checkFile2(in3, expected));
  }
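
doTestRead() leans on an isBlockTokenExpired helper that is not shown. A plausible sketch, assuming the same SecurityTestUtil used for setBlockTokenLifetime() above also exposes the expiry check:

  protected boolean isBlockTokenExpired(LocatedBlock lb) throws Exception {
    // delegate to the test utility that controls block-token lifetimes
    return SecurityTestUtil.isBlockTokenExpired(lb.getBlockToken());
  }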