Example No. 1
 @Override
 public List<HdfsFileStatusWithId> listLocatedHdfsStatus(FileSystem fs, Path p, PathFilter filter)
     throws IOException {
   DistributedFileSystem dfs = ensureDfs(fs);
   DFSClient dfsc = dfs.getClient();
   final String src = p.toUri().getPath();
   DirectoryListing current =
       dfsc.listPaths(src, org.apache.hadoop.hdfs.protocol.HdfsFileStatus.EMPTY_NAME, true);
   if (current == null) { // the directory does not exist
     throw new FileNotFoundException("File " + p + " does not exist.");
   }
   final URI fsUri = fs.getUri();
   List<HdfsFileStatusWithId> result =
       new ArrayList<HdfsFileStatusWithId>(current.getPartialListing().length);
   while (current != null) {
     org.apache.hadoop.hdfs.protocol.HdfsFileStatus[] hfss = current.getPartialListing();
     for (int i = 0; i < hfss.length; ++i) {
       HdfsLocatedFileStatus next = (HdfsLocatedFileStatus) (hfss[i]);
       if (filter != null) {
         Path filterPath = next.getFullPath(p).makeQualified(fsUri, null);
         if (!filter.accept(filterPath)) continue;
       }
       LocatedFileStatus lfs = next.makeQualifiedLocated(fsUri, p);
       result.add(new HdfsFileStatusWithIdImpl(lfs, next.getFileId()));
     }
     current = current.hasMore() ? dfsc.listPaths(src, current.getLastName(), true) : null;
   }
   return result;
 }
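A minimal usage sketch for the paginated listing helper above. This is illustrative only: the shims instance, the directory path, and the hidden-file filter are assumptions, not part of the original snippet.

 // Hypothetical caller; "shims" stands for whatever object implements
 // listLocatedHdfsStatus() above, and conf is an existing Configuration.
 FileSystem fs = FileSystem.get(conf);
 PathFilter noHidden = p -> !p.getName().startsWith(".") && !p.getName().startsWith("_");
 List<HdfsFileStatusWithId> files =
     shims.listLocatedHdfsStatus(fs, new Path("/warehouse/table1"), noHidden);
 for (HdfsFileStatusWithId f : files) {
   System.out.println(f.getFileStatus().getPath() + " fileId=" + f.getFileId());
 }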
  private void lostFoundInit(DFSClient dfs) {
    lfInited = true;
    try {
      String lfName = "/lost+found";

      final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
      if (lfStatus == null) { // not exists
        lfInitedOk = dfs.mkdirs(lfName, null, true);
        lostFound = lfName;
      } else if (!lfStatus.isDir()) { // exists but not a directory
        LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
        lfInitedOk = false;
      } else { // exists and is a directory
        lostFound = lfName;
        lfInitedOk = true;
      }
    } catch (Exception e) {
      e.printStackTrace();
      lfInitedOk = false;
    }
    if (lostFound == null) {
      LOG.warn("Cannot initialize /lost+found .");
      lfInitedOk = false;
      internalError = true;
    }
  }
Example No. 3
  private void onCreate(ChannelHandlerContext ctx) throws IOException, URISyntaxException {
    writeContinueHeader(ctx);

    final String nnId = params.namenodeId();
    final int bufferSize = params.bufferSize();
    final short replication = params.replication();
    final long blockSize = params.blockSize();
    final FsPermission permission = params.permission();

    EnumSet<CreateFlag> flags =
        params.overwrite()
            ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
            : EnumSet.of(CreateFlag.CREATE);

    final DFSClient dfsClient = newDfsClient(nnId, confForCreate);
    OutputStream out =
        dfsClient.createWrappedOutputStream(
            dfsClient.create(
                path, permission, flags, replication, blockSize, null, bufferSize, null),
            null);
    DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, CREATED);

    final URI uri = new URI(HDFS_URI_SCHEME, nnId, path, null, null);
    resp.headers().set(LOCATION, uri.toString());
    resp.headers().set(CONTENT_LENGTH, 0);
    ctx.pipeline()
        .replace(this, HdfsWriter.class.getSimpleName(), new HdfsWriter(dfsClient, out, resp));
  }
Example No. 4
  public void check(String addr) throws IOException {
    Configuration conf = new Configuration();
    File fhdfs = new File("hdfs-site.xml");
    InputStream inhdfs = new FileInputStream(fhdfs);
    File fcore = new File("core-site.xml");
    InputStream incore = new FileInputStream(fcore);
    conf.addResource(inhdfs);
    conf.addResource(incore);

    InetSocketAddress inetSocketAddress = new InetSocketAddress(addr, 9000);
    DFSClient client = new DFSClient(inetSocketAddress, conf);

    if (client.getMissingBlocksCount() > 0) {
      System.out.println("[ERROR] missing block count > 0");
      this.alert("[" + addr + " - HDFS check]", "missing block count is greater than 0");
    } else {
      System.out.println("[INFO] there are no missing blocks in the cluster.");
    }

    DatanodeInfo[] datanodeInfos = client.datanodeReport(HdfsConstants.DatanodeReportType.DEAD);
    if (datanodeInfos.length > 0) {
      String ips = "";
      for (DatanodeInfo info : datanodeInfos) {
        ips += info.getIpAddr() + ";";
      }
      System.out.println("[ERROR] dead node IPs: " + ips);
      this.alert("[" + addr + " - HDFS check]", "DEAD node count is greater than 0");
    } else {
      System.out.println("[INFO] dead node count is 0.");
    }

    org.apache.hadoop.fs.Path path = new Path(this.hdfsPath);
    FileSystem fs = null;
    FSDataOutputStream output = null;
    try {
      fs = path.getFileSystem(conf);
      if (fs.exists(path)) {
        System.out.println("[INFO] " + hdfsPath + " already exists.");
        fs.delete(path, false);
      }

      output = fs.create(path);
      for (String line : contents) {
        output.write(line.getBytes("UTF-8"));
        output.flush();
      }
    } catch (IOException e) {
      e.printStackTrace();
      System.out.println("[ERROR] write content failed.");
      this.alert("[" + addr + " - HDFS check]", "HDFS availability problem: failed to delete or write the test file");
    } finally {
      try {
        if (output != null) {
          output.close();
          System.out.println("[INFO] delete and write file success.");
        }
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
    client.close();
  }
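A minimal driver sketch for the check above. HdfsHealthCheck is an assumed name for the enclosing class (which also supplies the hdfsPath and contents fields used in check()), and it presumes hdfs-site.xml and core-site.xml sit in the working directory, as the method expects.

 // Hypothetical entry point; the class name and argument handling are assumptions.
 public static void main(String[] args) throws IOException {
   if (args.length != 1) {
     System.err.println("usage: HdfsHealthCheck <namenode-host>");
     System.exit(1);
   }
   new HdfsHealthCheck().check(args[0]); // the NameNode RPC port 9000 is hardcoded in check()
 }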
Example No. 5
 protected final LocatedBlocks ensureFileReplicasOnStorageType(Path path, StorageType storageType)
     throws IOException {
   // Ensure that the returned block locations are correct!
   LOG.info("Ensure path: " + path + " is on StorageType: " + storageType);
   assertThat(fs.exists(path), is(true));
   long fileLength = client.getFileInfo(path.toString()).getLen();
   LocatedBlocks locatedBlocks = client.getLocatedBlocks(path.toString(), 0, fileLength);
   for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
     assertThat(locatedBlock.getStorageTypes()[0], is(storageType));
   }
   return locatedBlocks;
 }
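A hedged sketch of how a helper like this is typically driven in a storage-type test; the file path, size, and the DFSTestUtil call are illustrative assumptions, and fs/client are the same test fields the helper already relies on.

 // Hypothetical test usage against a running (mini) cluster.
 Path file = new Path("/test/file1");
 DFSTestUtil.createFile(fs, file, 1024L, (short) 1, 0xBEEFL); // small single-block file
 // With default storage policies a fresh replica normally lands on DISK.
 LocatedBlocks blocks = ensureFileReplicasOnStorageType(file, StorageType.DISK);
 assertEquals(1, blocks.getLocatedBlocks().size());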
Example No. 6
  private void onAppend(ChannelHandlerContext ctx) throws IOException {
    writeContinueHeader(ctx);
    final String nnId = params.namenodeId();
    final int bufferSize = params.bufferSize();

    DFSClient dfsClient = newDfsClient(nnId, conf);
    OutputStream out =
        dfsClient.append(path, bufferSize, EnumSet.of(CreateFlag.APPEND), null, null);
    DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, OK);
    resp.headers().set(CONTENT_LENGTH, 0);
    ctx.pipeline()
        .replace(this, HdfsWriter.class.getSimpleName(), new HdfsWriter(dfsClient, out, resp));
  }
Example No. 7
  @Test
  public void testDfsClient() throws IOException {
    Configuration conf = MyConf.getConfiguration();
    DFSClient client = new DFSClient(conf);
    if (logger.isInfoEnabled()) {
      logger.info("testDfsClient() - DFSClient client=" + client.toString()); // $NON-NLS-1$
    }

    ClientProtocol protocol = DFSClient.createNamenode(conf);
    System.out.println(protocol.getClass().toString());
    ContentSummary summary = protocol.getContentSummary("/penglin");
    System.out.println(summary.getFileCount());
    System.out.println(summary.toString());
    System.out.println(summary.getSpaceConsumed());
  }
Example No. 8
  /* Fill up a cluster with <code>numNodes</code> datanodes
   * whose used space is <code>size</code>.
   */
  private Block[] generateBlocks(long size, short numNodes) throws IOException {
    cluster = new MiniDFSCluster(CONF, numNodes, true, null);
    try {
      cluster.waitActive();
      client = DFSClient.createNamenode(CONF);

      short replicationFactor = (short) (numNodes - 1);
      long fileLen = size / replicationFactor;
      createFile(fileLen, replicationFactor);

      List<LocatedBlock> locatedBlocks =
          client.getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();

      int numOfBlocks = locatedBlocks.size();
      Block[] blocks = new Block[numOfBlocks];
      for (int i = 0; i < numOfBlocks; i++) {
        Block b = locatedBlocks.get(i).getBlock();
        blocks[i] = new Block(b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
      }

      return blocks;
    } finally {
      cluster.shutdown();
    }
  }
Example No. 9
  /* This test starts a one-node cluster and fills the node to 30% full;
   * it then adds an empty node and starts balancing.
   * @param newCapacity new node's capacity
   * @param newRack new node's rack
   */
  private void test(long[] capacities, String[] racks, long newCapacity, String newRack)
      throws Exception {
    int numOfDatanodes = capacities.length;
    assertEquals(numOfDatanodes, racks.length);
    cluster = new MiniDFSCluster(0, CONF, capacities.length, true, true, null, racks, capacities);
    try {
      cluster.waitActive();
      client = DFSClient.createNamenode(CONF);

      long totalCapacity = 0L;
      for (long capacity : capacities) {
        totalCapacity += capacity;
      }
      // fill up the cluster to be 30% full
      long totalUsedSpace = totalCapacity * 3 / 10;
      createFile(totalUsedSpace / numOfDatanodes, (short) numOfDatanodes);
      // start up an empty node with the same capacity and on the same rack
      cluster.startDataNodes(CONF, 1, true, null, new String[] {newRack}, new long[] {newCapacity});

      totalCapacity += newCapacity;

      // run balancer and validate results
      runBalancer(totalUsedSpace, totalCapacity);
    } finally {
      cluster.shutdown();
    }
  }
Example No. 10
  /* We first start a cluster and fill it up to a certain size,
   * then redistribute blocks according to the required distribution.
   * Afterwards, a balancer is run to balance the cluster.
   */
  private void testUnevenDistribution(long distribution[], long capacities[], String[] racks)
      throws Exception {
    int numDatanodes = distribution.length;
    if (capacities.length != numDatanodes || racks.length != numDatanodes) {
      throw new IllegalArgumentException("Array length is not the same");
    }

    // calculate total space that need to be filled
    long totalUsedSpace = 0L;
    for (int i = 0; i < distribution.length; i++) {
      totalUsedSpace += distribution[i];
    }

    // fill the cluster
    Block[] blocks = generateBlocks(totalUsedSpace, (short) numDatanodes);

    // redistribute blocks
    Block[][] blocksDN = distributeBlocks(blocks, (short) (numDatanodes - 1), distribution);

    // restart the cluster: do NOT format the cluster
    CONF.set("dfs.safemode.threshold.pct", "0.0f");
    cluster = new MiniDFSCluster(0, CONF, numDatanodes, false, true, null, racks, capacities);
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    cluster.injectBlocks(blocksDN);

    long totalCapacity = 0L;
    for (long capacity : capacities) {
      totalCapacity += capacity;
    }
    runBalancer(totalUsedSpace, totalCapacity);
  }
Example No. 11
  private void onOpen(ChannelHandlerContext ctx) throws IOException {
    final String nnId = params.namenodeId();
    final int bufferSize = params.bufferSize();
    final long offset = params.offset();
    final long length = params.length();

    DefaultHttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
    HttpHeaders headers = response.headers();
    // Allow the UI to access the file
    headers.set(ACCESS_CONTROL_ALLOW_METHODS, GET);
    headers.set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
    headers.set(CONTENT_TYPE, APPLICATION_OCTET_STREAM);
    headers.set(CONNECTION, CLOSE);

    final DFSClient dfsclient = newDfsClient(nnId, conf);
    HdfsDataInputStream in =
        dfsclient.createWrappedInputStream(dfsclient.open(path, bufferSize, true));
    in.seek(offset);

    long contentLength = in.getVisibleLength() - offset;
    if (length >= 0) {
      contentLength = Math.min(contentLength, length);
    }
    final InputStream data;
    if (contentLength >= 0) {
      headers.set(CONTENT_LENGTH, contentLength);
      data = new LimitInputStream(in, contentLength);
    } else {
      data = in;
    }

    ctx.write(response);
    ctx.writeAndFlush(
            new ChunkedStream(data) {
              @Override
              public void close() throws Exception {
                super.close();
                dfsclient.close();
              }
            })
        .addListener(ChannelFutureListener.CLOSE);
  }
Example No. 12
  private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
    MD5MD5CRC32FileChecksum checksum = null;
    final String nnId = params.namenodeId();
    DFSClient dfsclient = newDfsClient(nnId, conf);
    try {
      checksum = dfsclient.getFileChecksum(path, Long.MAX_VALUE);
      dfsclient.close();
      dfsclient = null;
    } finally {
      IOUtils.cleanup(LOG, dfsclient);
    }
    final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
    DefaultFullHttpResponse resp =
        new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));

    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
    resp.headers().set(CONTENT_LENGTH, js.length);
    resp.headers().set(CONNECTION, CLOSE);
    ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
  }
Example No. 13
 /** Create a {@link NameNode} proxy from the current {@link ServletContext}. */
 protected ClientProtocol createNameNodeProxy() throws IOException {
   ServletContext context = getServletContext();
   // if we are running in the Name Node, use it directly rather than via rpc
   NameNode nn = (NameNode) context.getAttribute("name.node");
   if (nn != null) {
     return nn;
   }
   InetSocketAddress nnAddr = (InetSocketAddress) context.getAttribute("name.node.address");
   Configuration conf =
       new Configuration((Configuration) context.getAttribute(JspHelper.CURRENT_CONF));
   return DFSClient.createNamenode(nnAddr, conf);
 }
Example No. 14
 private void lostFoundInit(DFSClient dfs) {
   lfInited = true;
   try {
     String lfName = "/lost+found";
     // check that /lost+found exists
     if (!dfs.exists(lfName)) {
       lfInitedOk = dfs.mkdirs(lfName);
       lostFound = lfName;
     } else if (!dfs.isDirectory(lfName)) {
       LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
       lfInitedOk = false;
     } else { // exists and isDirectory
       lostFound = lfName;
       lfInitedOk = true;
     }
   } catch (Exception e) {
     e.printStackTrace();
     lfInitedOk = false;
   }
   if (lostFound == null) {
     LOG.warn("Cannot initialize /lost+found .");
     lfInitedOk = false;
   }
 }
Example No. 15
  private static void setKeyProvider(DFSClient dfsClient, KeyProviderCryptoExtension provider)
      throws Exception {
    Method setKeyProviderHadoop27Method = null;
    try {
      setKeyProviderHadoop27Method = DFSClient.class.getMethod("setKeyProvider", KeyProvider.class);
    } catch (NoSuchMethodException err) {
      // We can just use setKeyProvider() as it is
    }

    if (setKeyProviderHadoop27Method != null) {
      // Method signature changed in Hadoop 2.7. Cast provider to KeyProvider
      setKeyProviderHadoop27Method.invoke(dfsClient, (KeyProvider) provider);
    } else {
      dfsClient.setKeyProvider(provider);
    }
  }
  /**
   * Verify that corrupt <tt>READ_ONLY_SHARED</tt> replicas aren't counted towards the corrupt
   * replicas total.
   */
  @Test
  public void testReadOnlyReplicaCorrupt() throws Exception {
    // "Corrupt" a READ_ONLY_SHARED replica by reporting it as a bad replica
    client.reportBadBlocks(
        new LocatedBlock[] {
          new LocatedBlock(extendedBlock, new DatanodeInfo[] {readOnlyDataNode})
        });

    // There should now be only 1 *location* for the block as the READ_ONLY_SHARED is corrupt
    waitForLocations(1);

    // However, the corrupt READ_ONLY_SHARED replica should *not* affect the overall corrupt
    // replicas count
    NumberReplicas numberReplicas = blockManager.countNodes(block);
    assertThat(numberReplicas.corruptReplicas(), is(0));
  }
  static void generateDirectoryStructure(
      JspWriter out, HttpServletRequest req, HttpServletResponse resp, Configuration conf)
      throws IOException, InterruptedException {
    final String dir =
        JspHelper.validatePath(StringEscapeUtils.unescapeHtml(req.getParameter("dir")));
    if (dir == null) {
      out.print("Invalid input");
      return;
    }
    String tokenString = req.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
    UserGroupInformation ugi = JspHelper.getUGI(req, conf);
    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
    int namenodeInfoPort = -1;
    if (namenodeInfoPortStr != null) namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
    final String nnAddr = req.getParameter(JspHelper.NAMENODE_ADDRESS);
    if (nnAddr == null) {
      out.print(JspHelper.NAMENODE_ADDRESS + " url param is null");
      return;
    }

    DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
    String target = dir;
    final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
    if (targetStatus == null) { // not exists
      out.print("<h3>File or directory : " + target + " does not exist</h3>");
      JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, target, nnAddr);
    } else {
      if (!targetStatus.isDir()) { // a file
        List<LocatedBlock> blocks =
            dfs.getNamenode().getBlockLocations(dir, 0, 1).getLocatedBlocks();

        LocatedBlock firstBlock = null;
        DatanodeInfo[] locations = null;
        if (blocks.size() > 0) {
          firstBlock = blocks.get(0);
          locations = firstBlock.getLocations();
        }
        if (locations == null || locations.length == 0) {
          out.print("Empty file");
        } else {
          DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
          String fqdn = canonicalize(chosenNode.getIpAddr());
          int datanodePort = chosenNode.getXferPort();
          String redirectLocation =
              "http://"
                  + fqdn
                  + ":"
                  + chosenNode.getInfoPort()
                  + "/browseBlock.jsp?blockId="
                  + firstBlock.getBlock().getBlockId()
                  + "&blockSize="
                  + firstBlock.getBlock().getNumBytes()
                  + "&genstamp="
                  + firstBlock.getBlock().getGenerationStamp()
                  + "&filename="
                  + URLEncoder.encode(dir, "UTF-8")
                  + "&datanodePort="
                  + datanodePort
                  + "&namenodeInfoPort="
                  + namenodeInfoPort
                  + JspHelper.getDelegationTokenUrlParam(tokenString)
                  + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
          resp.sendRedirect(redirectLocation);
        }
        return;
      }
      // directory
      // generate a table and dump the info
      String[] headings = {
        "Name",
        "Type",
        "Size",
        "Replication",
        "Block Size",
        "Modification Time",
        "Permission",
        "Owner",
        "Group"
      };
      out.print("<h3>Contents of directory ");
      JspHelper.printPathWithLinks(dir, out, namenodeInfoPort, tokenString, nnAddr);
      out.print("</h3><hr>");
      JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, dir, nnAddr);
      out.print("<hr>");

      File f = new File(dir);
      String parent;
      if ((parent = f.getParent()) != null)
        out.print(
            "<a href=\""
                + req.getRequestURL()
                + "?dir="
                + parent
                + "&namenodeInfoPort="
                + namenodeInfoPort
                + JspHelper.getDelegationTokenUrlParam(tokenString)
                + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr)
                + "\">Go to parent directory</a><br>");

      DirectoryListing thisListing = dfs.listPaths(target, HdfsFileStatus.EMPTY_NAME);
      if (thisListing == null || thisListing.getPartialListing().length == 0) {
        out.print("Empty directory");
      } else {
        JspHelper.addTableHeader(out);
        int row = 0;
        JspHelper.addTableRow(out, headings, row++);
        String cols[] = new String[headings.length];
        do {
          HdfsFileStatus[] files = thisListing.getPartialListing();
          for (int i = 0; i < files.length; i++) {
            String localFileName = files[i].getLocalName();
            // Get the location of the first block of the file
            if (!files[i].isDir()) {
              cols[1] = "file";
              cols[2] = StringUtils.byteDesc(files[i].getLen());
              cols[3] = Short.toString(files[i].getReplication());
              cols[4] = StringUtils.byteDesc(files[i].getBlockSize());
            } else {
              cols[1] = "dir";
              cols[2] = "";
              cols[3] = "";
              cols[4] = "";
            }
            String datanodeUrl =
                req.getRequestURL()
                    + "?dir="
                    + URLEncoder.encode(files[i].getFullName(target), "UTF-8")
                    + "&namenodeInfoPort="
                    + namenodeInfoPort
                    + JspHelper.getDelegationTokenUrlParam(tokenString)
                    + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
            cols[0] = "<a href=\"" + datanodeUrl + "\">" + localFileName + "</a>";
            cols[5] = lsDateFormat.format(new Date((files[i].getModificationTime())));
            cols[6] = files[i].getPermission().toString();
            cols[7] = files[i].getOwner();
            cols[8] = files[i].getGroup();
            JspHelper.addTableRow(out, cols, row++);
          }
          if (!thisListing.hasMore()) {
            break;
          }
          thisListing = dfs.listPaths(target, thisListing.getLastName());
        } while (thisListing != null);
        JspHelper.addTableFooter(out);
      }
    }
    out.print(
        "<br><a href=\"http://"
            + canonicalize(nnAddr)
            + ":"
            + namenodeInfoPort
            + "/dfshealth.jsp\">Go back to DFS home</a>");
    dfs.close();
  }
Example No. 18
 /**
  * Obtains an input stream from the DFS client.
  *
  * @param client DFS client
  * @param filename file path
  * @return the input stream
  * @throws java.io.IOException if the HDFS I/O cannot be performed
  */
 public static InputStream getInputStream(DFSClient client, String filename) throws IOException {
   return client.open(filename);
 }
 private LocatedBlock getLocatedBlock() throws IOException {
   LocatedBlocks locatedBlocks = client.getLocatedBlocks(PATH.toString(), 0, BLOCK_SIZE);
   assertThat(locatedBlocks.getLocatedBlocks().size(), is(1));
   return Iterables.getOnlyElement(locatedBlocks.getLocatedBlocks());
 }
Example No. 20
 /**
  * Deletes the given path.
  *
  * @param client DFS client
  * @param path path to delete
  * @param recursive whether to delete recursively
  * @return <tt>true</tt> on success
  * @throws java.io.IOException if the path cannot be deleted
  */
 public static boolean remove(DFSClient client, String path, boolean recursive)
     throws IOException {
   return client.exists(path) && client.delete(path, recursive);
 }
Example No. 21
 /**
  * Obtains an output stream from the DFS client.
  *
  * @param client DFS client
  * @param filename file name
  * @param overwrite whether to overwrite an existing file
  * @return the output stream
  * @throws java.io.IOException if the HDFS I/O cannot be performed
  */
 public static OutputStream getOutputStream(DFSClient client, String filename, boolean overwrite)
     throws IOException {
   return client.create(filename, overwrite);
 }
  static void generateFileChunks(JspWriter out, HttpServletRequest req, Configuration conf)
      throws IOException, InterruptedException {
    long startOffset = 0;
    int datanodePort = 0;

    final String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
    final String nnAddr = req.getParameter(JspHelper.NAMENODE_ADDRESS);
    if (nnAddr == null) {
      out.print(JspHelper.NAMENODE_ADDRESS + " url param is null");
      return;
    }
    final String tokenString = req.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
    UserGroupInformation ugi = JspHelper.getUGI(req, conf);
    int namenodeInfoPort = -1;
    if (namenodeInfoPortStr != null) namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);

    final String filename =
        JspHelper.validatePath(StringEscapeUtils.unescapeHtml(req.getParameter("filename")));
    if (filename == null) {
      out.print("Invalid input (filename absent)");
      return;
    }

    final Long blockId = JspHelper.validateLong(req.getParameter("blockId"));
    if (blockId == null) {
      out.print("Invalid input (blockId absent)");
      return;
    }

    final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);

    String bpid = null;
    Token<BlockTokenIdentifier> blockToken = BlockTokenSecretManager.DUMMY_TOKEN;
    List<LocatedBlock> blks =
        dfs.getNamenode().getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
    if (blks == null || blks.size() == 0) {
      out.print("Can't locate file blocks");
      dfs.close();
      return;
    }

    boolean needBlockToken =
        conf.getBoolean(
            DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
            DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);

    for (int i = 0; i < blks.size(); i++) {
      if (blks.get(i).getBlock().getBlockId() == blockId) {
        bpid = blks.get(i).getBlock().getBlockPoolId();
        if (needBlockToken) {
          blockToken = blks.get(i).getBlockToken();
        }
        break;
      }
    }

    final Long genStamp = JspHelper.validateLong(req.getParameter("genstamp"));
    if (genStamp == null) {
      out.print("Invalid input (genstamp absent)");
      return;
    }

    long blockSize = 0;
    final String blockSizeStr = req.getParameter("blockSize");
    if (blockSizeStr == null) {
      out.print("Invalid input (blockSize absent)");
      return;
    }
    blockSize = Long.parseLong(blockSizeStr);

    final int chunkSizeToView =
        JspHelper.string2ChunkSizeToView(
            req.getParameter("chunkSizeToView"), getDefaultChunkSize(conf));

    String startOffsetStr = req.getParameter("startOffset");
    if (startOffsetStr == null || Long.parseLong(startOffsetStr) < 0) startOffset = 0;
    else startOffset = Long.parseLong(startOffsetStr);

    String datanodePortStr = req.getParameter("datanodePort");
    if (datanodePortStr == null) {
      out.print("Invalid input (datanodePort absent)");
      return;
    }
    datanodePort = Integer.parseInt(datanodePortStr);
    out.print("<h3>File: ");
    JspHelper.printPathWithLinks(filename, out, namenodeInfoPort, tokenString, nnAddr);
    out.print("</h3><hr>");
    String parent = new File(filename).getParent();
    JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, parent, nnAddr);
    out.print("<hr>");
    out.print(
        "<a href=\"http://"
            + req.getServerName()
            + ":"
            + req.getServerPort()
            + "/browseDirectory.jsp?dir="
            + URLEncoder.encode(parent, "UTF-8")
            + "&namenodeInfoPort="
            + namenodeInfoPort
            + JspHelper.getDelegationTokenUrlParam(tokenString)
            + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr)
            + "\"><i>Go back to dir listing</i></a><br>");
    out.print("<a href=\"#viewOptions\">Advanced view/download options</a><br>");
    out.print("<hr>");

    // Determine the prev & next blocks
    long nextStartOffset = 0;
    long nextBlockSize = 0;
    String nextBlockIdStr = null;
    String nextGenStamp = null;
    String nextHost = req.getServerName();
    int nextPort = req.getServerPort();
    int nextDatanodePort = datanodePort;
    // determine data for the next link
    if (startOffset + chunkSizeToView >= blockSize) {
      // we have to go to the next block from this point onwards
      List<LocatedBlock> blocks =
          dfs.getNamenode().getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
      for (int i = 0; i < blocks.size(); i++) {
        if (blocks.get(i).getBlock().getBlockId() == blockId) {
          if (i != blocks.size() - 1) {
            LocatedBlock nextBlock = blocks.get(i + 1);
            nextBlockIdStr = Long.toString(nextBlock.getBlock().getBlockId());
            nextGenStamp = Long.toString(nextBlock.getBlock().getGenerationStamp());
            nextStartOffset = 0;
            nextBlockSize = nextBlock.getBlock().getNumBytes();
            DatanodeInfo d = JspHelper.bestNode(nextBlock, conf);
            nextDatanodePort = d.getXferPort();
            nextHost = d.getIpAddr();
            nextPort = d.getInfoPort();
          }
        }
      }
    } else {
      // we are in the same block
      nextBlockIdStr = blockId.toString();
      nextStartOffset = startOffset + chunkSizeToView;
      nextBlockSize = blockSize;
      nextGenStamp = genStamp.toString();
    }
    String nextUrl = null;
    if (nextBlockIdStr != null) {
      nextUrl =
          "http://"
              + canonicalize(nextHost)
              + ":"
              + nextPort
              + "/browseBlock.jsp?blockId="
              + nextBlockIdStr
              + "&blockSize="
              + nextBlockSize
              + "&startOffset="
              + nextStartOffset
              + "&genstamp="
              + nextGenStamp
              + "&filename="
              + URLEncoder.encode(filename, "UTF-8")
              + "&chunkSizeToView="
              + chunkSizeToView
              + "&datanodePort="
              + nextDatanodePort
              + "&namenodeInfoPort="
              + namenodeInfoPort
              + JspHelper.getDelegationTokenUrlParam(tokenString)
              + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
      out.print("<a href=\"" + nextUrl + "\">View Next chunk</a>&nbsp;&nbsp;");
    }
    // determine data for the prev link
    String prevBlockIdStr = null;
    String prevGenStamp = null;
    long prevStartOffset = 0;
    long prevBlockSize = 0;
    String prevHost = req.getServerName();
    int prevPort = req.getServerPort();
    int prevDatanodePort = datanodePort;
    if (startOffset == 0) {
      List<LocatedBlock> blocks =
          dfs.getNamenode().getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
      for (int i = 0; i < blocks.size(); i++) {
        if (blocks.get(i).getBlock().getBlockId() == blockId) {
          if (i != 0) {
            LocatedBlock prevBlock = blocks.get(i - 1);
            prevBlockIdStr = Long.toString(prevBlock.getBlock().getBlockId());
            prevGenStamp = Long.toString(prevBlock.getBlock().getGenerationStamp());
            prevStartOffset = prevBlock.getBlock().getNumBytes() - chunkSizeToView;
            if (prevStartOffset < 0) prevStartOffset = 0;
            prevBlockSize = prevBlock.getBlock().getNumBytes();
            DatanodeInfo d = JspHelper.bestNode(prevBlock, conf);
            prevDatanodePort = d.getXferPort();
            prevHost = d.getIpAddr();
            prevPort = d.getInfoPort();
          }
        }
      }
    } else {
      // we are in the same block
      prevBlockIdStr = blockId.toString();
      prevStartOffset = startOffset - chunkSizeToView;
      if (prevStartOffset < 0) prevStartOffset = 0;
      prevBlockSize = blockSize;
      prevGenStamp = genStamp.toString();
    }

    String prevUrl = null;
    if (prevBlockIdStr != null) {
      prevUrl =
          "http://"
              + canonicalize(prevHost)
              + ":"
              + prevPort
              + "/browseBlock.jsp?blockId="
              + prevBlockIdStr
              + "&blockSize="
              + prevBlockSize
              + "&startOffset="
              + prevStartOffset
              + "&filename="
              + URLEncoder.encode(filename, "UTF-8")
              + "&chunkSizeToView="
              + chunkSizeToView
              + "&genstamp="
              + prevGenStamp
              + "&datanodePort="
              + prevDatanodePort
              + "&namenodeInfoPort="
              + namenodeInfoPort
              + JspHelper.getDelegationTokenUrlParam(tokenString)
              + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
      out.print("<a href=\"" + prevUrl + "\">View Prev chunk</a>&nbsp;&nbsp;");
    }
    out.print("<hr>");
    out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>");
    try {
      JspHelper.streamBlockInAscii(
          new InetSocketAddress(req.getServerName(), datanodePort),
          bpid,
          blockId,
          blockToken,
          genStamp,
          blockSize,
          startOffset,
          chunkSizeToView,
          out,
          conf);
    } catch (Exception e) {
      out.print(e);
    }
    out.print("</textarea>");
    dfs.close();
  }
Example No. 23
 /**
  * Checks whether the given path exists.
  *
  * @param client DFS client
  * @param path path to check for existence
  * @return <tt>true</tt> if the path exists
  * @throws java.io.IOException if the HDFS I/O cannot be performed
  */
 public static boolean exists(DFSClient client, String path) throws IOException {
   return client.exists(path);
 }
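A short sketch combining the wrapper methods from the examples above (exists, remove, getOutputStream, getInputStream). HdfsUtils is an assumed name for the class holding those static helpers, and the path is an example value.

 // Hypothetical glue code around the static helpers shown earlier.
 Configuration conf = new Configuration();
 DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
 try {
   String path = "/tmp/example.txt";
   if (HdfsUtils.exists(client, path)) {
     HdfsUtils.remove(client, path, false); // drop any previous copy
   }
   OutputStream out = HdfsUtils.getOutputStream(client, path, true);
   try {
     out.write("hello".getBytes("UTF-8"));
   } finally {
     out.close();
   }
   InputStream in = HdfsUtils.getInputStream(client, path);
   try {
     System.out.println("first byte: " + in.read());
   } finally {
     in.close();
   }
 } finally {
   client.close();
 }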
  /*
   * This test creates a directory with 3 files and its fake parity file.
   * We decommission all nodes in rack2 to make sure all data are stored
   * on rack1 machines.
   * Then we bring the rack2 machines back to the normal state and create a
   * non-raided file, which is too small to be raided, in the directory with
   * 4 replicas (1 in rack1 and 3 in rack2).
   * Then we reduce the replication to 3 to trigger chooseReplicaToDelete.
   * We verify that the remaining replicas have 1 in rack1 and 2 in rack2.
   */
  @Test
  public void testChooseReplicasToDeleteForSmallFile() throws Exception {
    try {
      setupCluster(false, 512L, racks2, hosts2);
      // create test files
      int numFiles = 4;
      long blockSize = 1024L;
      String parentDir = "/dir/";
      DFSClient client = getDfsClient(cluster.getNameNode(), conf);
      DatanodeInfo[] infos = client.datanodeReport(DatanodeReportType.LIVE);
      ArrayList<String> rack2nodes = new ArrayList<String>();
      ArrayList<DatanodeInfo> rack2di = new ArrayList<DatanodeInfo>();
      for (DatanodeInfo di : infos) {
        if (di.getHostName().contains("rack2")) {
          rack2nodes.add(di.getName());
          rack2di.add(cluster.getNameNode().namesystem.getDatanode(di));
        }
      }
      LOG.info("Decommission rack2 nodes");
      writeConfigFile(excludeFile, rack2nodes);
      cluster.getNameNode().namesystem.refreshNodes(conf);
      waitState(rack2di, AdminStates.DECOMMISSIONED);
      for (int i = 0; i < numFiles; i++) {
        if (i == 2) {
          continue;
        }
        String file = parentDir + "file" + i;
        Path filePath = new Path(file);
        TestRaidDfs.createTestFile(fs, filePath, 1, 1, blockSize);
        printLocatedBlocks(filePath);
      }
      LOG.info("Created " + (numFiles - 1) + " files");
      // create fake parity file
      Codec code = Codec.getCodec("xor");
      long numStripes = RaidNode.numStripes(numFiles, code.stripeLength);
      Path parityPath = new Path(code.parityDirectory, "dir");
      TestRaidDfs.createTestFile(
          fs, parityPath, 1, (int) numStripes * code.parityLength, blockSize);
      LOG.info("Create parity file: " + parityPath);
      printLocatedBlocks(parityPath);

      LOG.info("Bring back rack2 nodes out of decommission");
      writeConfigFile(excludeFile, null);
      cluster.getNameNode().namesystem.refreshNodes(conf);
      waitState(rack2di, AdminStates.NORMAL);

      Path smallFilePath = new Path(parentDir + "file2");
      TestRaidDfs.createTestFile(fs, smallFilePath, 4, 1, 256L);
      assertEquals(
          "all datanodes should have replicas", hosts2.length, printLocatedBlocks(smallFilePath));
      LOG.info("Created small file: " + smallFilePath);

      LOG.info("Reduce replication to 3");
      dfs.setReplication(smallFilePath, (short) 3);
      long startTime = System.currentTimeMillis();
      while (System.currentTimeMillis() - startTime < 120000
          && printLocatedBlocks(smallFilePath) == 4) {
        Thread.sleep(1000);
      }
      LocatedBlocks lbs = dfs.getLocatedBlocks(smallFilePath, 0L, Integer.MAX_VALUE);
      boolean hasRack1 = false;
      for (DatanodeInfo di : lbs.getLocatedBlocks().get(0).getLocations()) {
        if (di.getNetworkLocation().contains("rack1")) {
          hasRack1 = true;
          break;
        }
      }
      assertTrue("We should keep the nodes in rack1", hasRack1);
    } finally {
      closeCluster();
    }
  }
Example No. 25
  @SuppressWarnings("unchecked")
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    final String path = ServletUtil.getDecodedPath(request, "/streamFile");
    final String rawPath = ServletUtil.getRawPath(request, "/streamFile");
    final String filename = JspHelper.validatePath(path);
    final String rawFilename = JspHelper.validatePath(rawPath);
    if (filename == null) {
      response.setContentType("text/plain");
      PrintWriter out = response.getWriter();
      out.print("Invalid input");
      return;
    }

    Enumeration<String> reqRanges = request.getHeaders("Range");
    if (reqRanges != null && !reqRanges.hasMoreElements()) {
      reqRanges = null;
    }

    DFSClient dfs;
    try {
      dfs = getDFSClient(request);
    } catch (InterruptedException e) {
      response.sendError(400, e.getMessage());
      return;
    }

    DFSInputStream in = null;
    OutputStream out = null;

    try {
      in = dfs.open(filename);
      out = response.getOutputStream();
      final long fileLen = in.getFileLength();
      if (reqRanges != null) {
        List<InclusiveByteRange> ranges = InclusiveByteRange.satisfiableRanges(reqRanges, fileLen);
        StreamFile.sendPartialData(in, out, response, fileLen, ranges);
      } else {
        // No ranges, so send entire file
        response.setHeader("Content-Disposition", "attachment; filename=\"" + rawFilename + "\"");
        response.setContentType("application/octet-stream");
        response.setHeader(CONTENT_LENGTH, "" + fileLen);
        StreamFile.copyFromOffset(in, out, 0L, fileLen);
      }
      in.close();
      in = null;
      out.close();
      out = null;
      dfs.close();
      dfs = null;
    } catch (IOException ioe) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("response.isCommitted()=" + response.isCommitted(), ioe);
      }
      throw ioe;
    } finally {
      IOUtils.cleanup(LOG, in);
      IOUtils.cleanup(LOG, out);
      IOUtils.cleanup(LOG, dfs);
    }
  }
  protected void doTestRead(Configuration conf, MiniDFSCluster cluster, boolean isStriped)
      throws Exception {
    final int numDataNodes = cluster.getDataNodes().size();
    final NameNode nn = cluster.getNameNode();
    final NamenodeProtocols nnProto = nn.getRpcServer();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

    // set a short token lifetime (1 second) initially
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);

    Path fileToRead = new Path(FILE_TO_READ);
    FileSystem fs = cluster.getFileSystem();
    byte[] expected = generateBytes(FILE_SIZE);
    createFile(fs, fileToRead, expected);

    /*
     * setup for testing expiration handling of cached tokens
     */

    // read using blockSeekTo(). Acquired tokens are cached in in1
    FSDataInputStream in1 = fs.open(fileToRead);
    assertTrue(checkFile1(in1, expected));
    // read using blockSeekTo(). Acquired tokens are cached in in2
    FSDataInputStream in2 = fs.open(fileToRead);
    assertTrue(checkFile1(in2, expected));
    // read using fetchBlockByteRange(). Acquired tokens are cached in in3
    FSDataInputStream in3 = fs.open(fileToRead);
    assertTrue(checkFile2(in3, expected));

    /*
     * testing READ interface on DN using a BlockReader
     */
    DFSClient client = null;
    try {
      client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    } finally {
      if (client != null) client.close();
    }
    List<LocatedBlock> locatedBlocks =
        nnProto.getBlockLocations(FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
    LocatedBlock lblock = locatedBlocks.get(0); // first block
    // verify token is not expired
    assertFalse(isBlockTokenExpired(lblock));
    // read with valid token, should succeed
    tryRead(conf, lblock, true);

    /*
     * wait till myToken and all cached tokens in in1, in2 and in3 expire
     */

    while (!isBlockTokenExpired(lblock)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }

    /*
     * continue testing READ interface on DN using a BlockReader
     */

    // verify token is expired
    assertTrue(isBlockTokenExpired(lblock));
    // read should fail
    tryRead(conf, lblock, false);
    // use a valid new token
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
    // read should succeed
    tryRead(conf, lblock, true);
    // use a token with wrong blockID
    long rightId = lblock.getBlock().getBlockId();
    long wrongId = rightId + 1;
    lblock.getBlock().setBlockId(wrongId);
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
    lblock.getBlock().setBlockId(rightId);
    // read should fail
    tryRead(conf, lblock, false);
    // use a token with wrong access modes
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.WRITE);
    // read should fail
    tryRead(conf, lblock, false);

    // set a long token lifetime for future tokens
    SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);

    /*
     * testing that when cached tokens are expired, DFSClient will re-fetch
     * tokens transparently for READ.
     */

    // confirm all tokens cached in in1 are expired by now
    List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
      assertTrue(isBlockTokenExpired(blk));
    }
    // verify blockSeekTo() is able to re-fetch token transparently
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));

    // confirm all tokens cached in in2 are expired by now
    List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
      assertTrue(isBlockTokenExpired(blk));
    }
    // verify blockSeekTo() is able to re-fetch token transparently (testing
    // via another interface method)
    if (isStriped) {
      // striped block doesn't support seekToNewSource
      in2.seek(0);
    } else {
      assertTrue(in2.seekToNewSource(0));
    }
    assertTrue(checkFile1(in2, expected));

    // confirm all tokens cached in in3 are expired by now
    List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
      assertTrue(isBlockTokenExpired(blk));
    }
    // verify fetchBlockByteRange() is able to re-fetch token transparently
    assertTrue(checkFile2(in3, expected));

    /*
     * testing that after datanodes are restarted on the same ports, cached
     * tokens should still work and there is no need to fetch new tokens from
     * namenode. This test should run while namenode is down (to make sure no
     * new tokens can be fetched from namenode).
     */

    // restart datanodes on the same ports that they currently use
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);

    // confirm tokens cached in in1 are still valid
    lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
      assertFalse(isBlockTokenExpired(blk));
    }
    // verify blockSeekTo() still works (forced to use cached tokens)
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));

    // confirm tokens cached in in2 are still valid
    lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
      assertFalse(isBlockTokenExpired(blk));
    }

    // verify blockSeekTo() still works (forced to use cached tokens)
    if (isStriped) {
      in2.seek(0);
    } else {
      in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));

    // confirm tokens cached in in3 are still valid
    lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
      assertFalse(isBlockTokenExpired(blk));
    }
    // verify fetchBlockByteRange() still works (forced to use cached tokens)
    assertTrue(checkFile2(in3, expected));

    /*
     * testing that when namenode is restarted, cached tokens should still
     * work and there is no need to fetch new tokens from namenode. Like the
     * previous test, this test should also run while namenode is down. The
     * setup for this test depends on the previous test.
     */

    // restart the namenode and then shut it down for test
    cluster.restartNameNode(0);
    cluster.shutdownNameNode(0);

    // verify blockSeekTo() still works (forced to use cached tokens)
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    // verify again blockSeekTo() still works (forced to use cached tokens)
    if (isStriped) {
      in2.seek(0);
    } else {
      in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));

    // verify fetchBlockByteRange() still works (forced to use cached tokens)
    assertTrue(checkFile2(in3, expected));

    /*
     * testing that after both namenode and datanodes got restarted (namenode
     * first, followed by datanodes), DFSClient can't access DN without
     * re-fetching tokens and is able to re-fetch tokens transparently. The
     * setup of this test depends on the previous test.
     */

    // restore the cluster and restart the datanodes for test
    cluster.restartNameNode(0);
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());

    // shutdown namenode so that DFSClient can't get new tokens from namenode
    cluster.shutdownNameNode(0);

    // verify blockSeekTo() fails (cached tokens become invalid)
    in1.seek(0);
    assertFalse(checkFile1(in1, expected));
    // verify fetchBlockByteRange() fails (cached tokens become invalid)
    assertFalse(checkFile2(in3, expected));

    // restart the namenode to allow DFSClient to re-fetch tokens
    cluster.restartNameNode(0);
    // verify blockSeekTo() works again (by transparently re-fetching
    // tokens from namenode)
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    if (isStriped) {
      in2.seek(0);
    } else {
      in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    // verify fetchBlockByteRange() works again (by transparently
    // re-fetching tokens from namenode)
    assertTrue(checkFile2(in3, expected));

    /*
     * testing that when datanodes are restarted on different ports, DFSClient
     * is able to re-fetch tokens transparently to connect to them
     */

    // restart datanodes on newly assigned ports
    assertTrue(cluster.restartDataNodes(false));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    // verify blockSeekTo() is able to re-fetch token transparently
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    // verify blockSeekTo() is able to re-fetch token transparently
    if (isStriped) {
      in2.seek(0);
    } else {
      in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    // verify fetchBlockByteRange() is able to re-fetch token transparently
    assertTrue(checkFile2(in3, expected));
  }
Example No. 27
  void handleWrite(
      DFSClient dfsClient,
      WRITE3Request request,
      Channel channel,
      int xid,
      Nfs3FileAttributes preOpAttr)
      throws IOException {
    int count = request.getCount();
    byte[] data = request.getData().array();
    if (data.length < count) {
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
      Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
      return;
    }

    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
      LOG.debug("handleWrite " + request);
    }

    // Check if there is a stream to write
    FileHandle fileHandle = request.getHandle();
    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
    if (openFileCtx == null) {
      LOG.info("No opened stream for fileId:" + fileHandle.getFileId());

      String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
      HdfsDataOutputStream fos = null;
      Nfs3FileAttributes latestAttr = null;
      try {
        int bufferSize =
            config.getInt(
                CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
                CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);

        fos = dfsClient.append(fileIdPath, bufferSize, null, null);

        latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
      } catch (RemoteException e) {
        IOException io = e.unwrapRemoteException();
        if (io instanceof AlreadyBeingCreatedException) {
          LOG.warn(
              "Can't append file:"
                  + fileIdPath
                  + ". Possibly the file is being closed. Drop the request:"
                  + request
                  + ", wait for the client to retry...");
          return;
        }
        throw e;
      } catch (IOException e) {
        LOG.error("Can't append to file:" + fileIdPath, e);
        if (fos != null) {
          fos.close();
        }
        WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr);
        WRITE3Response response =
            new WRITE3Response(
                Nfs3Status.NFS3ERR_IO,
                fileWcc,
                count,
                request.getStableHow(),
                Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils.writeChannel(
            channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
        return;
      }

      // Add open stream
      String writeDumpDir =
          config.get(
              NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY, NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
      openFileCtx =
          new OpenFileCtx(
              fos,
              latestAttr,
              writeDumpDir + "/" + fileHandle.getFileId(),
              dfsClient,
              iug,
              aixCompatMode,
              config);

      if (!addOpenFileStream(fileHandle, openFileCtx)) {
        LOG.info("Can't add new stream. Close it. Tell client to retry.");
        try {
          fos.close();
        } catch (IOException e) {
          LOG.error("Can't close stream for fileId:" + handle.getFileId(), e);
        }
        // Notify client to retry
        WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
        WRITE3Response response =
            new WRITE3Response(
                Nfs3Status.NFS3ERR_JUKEBOX,
                fileWcc,
                0,
                request.getStableHow(),
                Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils.writeChannel(
            channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
        return;
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug("Opened stream for appending file:" + fileHandle.getFileId());
      }
    }

    // Add write into the async job queue
    openFileCtx.receivedNewWrite(dfsClient, request, channel, xid, asyncDataService, iug);
    return;
  }
  static void generateFileChunksForTail(JspWriter out, HttpServletRequest req, Configuration conf)
      throws IOException, InterruptedException {
    String referrer = null;
    boolean noLink = false;
    try {
      referrer = new URL(req.getParameter("referrer")).toString();
    } catch (IOException e) {
      referrer = null;
      noLink = true;
    }

    final String filename =
        JspHelper.validatePath(StringEscapeUtils.unescapeHtml(req.getParameter("filename")));
    if (filename == null) {
      out.print("Invalid input (file name absent)");
      return;
    }
    String tokenString = req.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
    UserGroupInformation ugi = JspHelper.getUGI(req, conf);

    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
    String nnAddr = req.getParameter(JspHelper.NAMENODE_ADDRESS);
    int namenodeInfoPort = -1;
    if (namenodeInfoPortStr != null) namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);

    final int chunkSizeToView =
        JspHelper.string2ChunkSizeToView(
            req.getParameter("chunkSizeToView"), getDefaultChunkSize(conf));

    if (!noLink) {
      out.print("<h3>Tail of File: ");
      JspHelper.printPathWithLinks(filename, out, namenodeInfoPort, tokenString, nnAddr);
      out.print("</h3><hr>");
      out.print("<a href=\"" + referrer + "\">Go Back to File View</a><hr>");
    } else {
      out.print("<h3>" + filename + "</h3>");
    }
    out.print("<b>Chunk size to view (in bytes, up to file's DFS block size): </b>");
    out.print(
        "<input type=\"text\" name=\"chunkSizeToView\" value="
            + chunkSizeToView
            + " size=10 maxlength=10>");
    out.print("&nbsp;&nbsp;<input type=\"submit\" name=\"submit\" value=\"Refresh\"><hr>");
    out.print("<input type=\"hidden\" name=\"filename\" value=\"" + filename + "\">");
    out.print(
        "<input type=\"hidden\" name=\"namenodeInfoPort\" value=\"" + namenodeInfoPort + "\">");
    out.print(
        "<input type=\"hidden\" name=\""
            + JspHelper.NAMENODE_ADDRESS
            + "\" value=\""
            + nnAddr
            + "\">");
    if (!noLink) out.print("<input type=\"hidden\" name=\"referrer\" value=\"" + referrer + "\">");

    // fetch the block from the datanode that has the last block for this file
    final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
    List<LocatedBlock> blocks =
        dfs.getNamenode().getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
    if (blocks == null || blocks.size() == 0) {
      out.print("No datanodes contain blocks of file " + filename);
      dfs.close();
      return;
    }
    LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
    String poolId = lastBlk.getBlock().getBlockPoolId();
    long blockSize = lastBlk.getBlock().getNumBytes();
    long blockId = lastBlk.getBlock().getBlockId();
    Token<BlockTokenIdentifier> accessToken = lastBlk.getBlockToken();
    long genStamp = lastBlk.getBlock().getGenerationStamp();
    DatanodeInfo chosenNode;
    try {
      chosenNode = JspHelper.bestNode(lastBlk, conf);
    } catch (IOException e) {
      out.print(e.toString());
      dfs.close();
      return;
    }
    InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
    // view the last chunkSizeToView bytes while Tailing
    final long startOffset = blockSize >= chunkSizeToView ? blockSize - chunkSizeToView : 0;

    out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>");
    JspHelper.streamBlockInAscii(
        addr,
        poolId,
        blockId,
        accessToken,
        genStamp,
        blockSize,
        startOffset,
        chunkSizeToView,
        out,
        conf);
    out.print("</textarea>");
    dfs.close();
  }
  /*
   * XXX (ab) Bulk of this method is copied verbatim from {@link DFSClient}, which is
   * bad. Both places should be refactored to provide a method to copy blocks
   * around.
   */
  private void copyBlock(DFSClient dfs, LocatedBlock lblock, OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();

    while (blockReader == null) {
      DatanodeInfo chosenNode;

      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
      } catch (IOException ie) {
        if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
          throw new IOException("Could not obtain block " + lblock, ie);
        }
        LOG.info("Could not obtain block from any node:  " + ie);
        try {
          Thread.sleep(10000);
        } catch (InterruptedException iex) {
        }
        deadNodes.clear();
        failures++;
        continue;
      }
      try {
        String file =
            BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(), block.getBlockId());
        blockReader =
            new BlockReaderFactory(dfs.getConf())
                .setFileName(file)
                .setBlock(block)
                .setBlockToken(lblock.getBlockToken())
                .setStartOffset(0)
                .setLength(-1)
                .setVerifyChecksum(true)
                .setClientName("fsck")
                .setDatanodeInfo(chosenNode)
                .setInetSocketAddress(targetAddr)
                .setCachingStrategy(CachingStrategy.newDropBehind())
                .setClientCacheContext(dfs.getClientContext())
                .setConfiguration(namenode.conf)
                .setRemotePeerFactory(
                    new RemotePeerFactory() {
                      @Override
                      public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {
                        Peer peer = null;
                        Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
                        try {
                          s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
                          s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
                          peer =
                              TcpPeerServer.peerFromSocketAndKey(
                                  s, namenode.getRpcServer().getDataEncryptionKey());
                        } finally {
                          if (peer == null) {
                            IOUtils.closeQuietly(s);
                          }
                        }
                        return peer;
                      }
                    })
                .build();
      } catch (IOException ex) {
        // Put chosen node into dead list, continue
        LOG.info("Failed to connect to " + targetAddr + ":" + ex);
        deadNodes.add(chosenNode);
      }
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
      while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
        fos.write(buf, 0, cnt);
        bytesRead += cnt;
      }
      if (bytesRead != block.getNumBytes()) {
        throw new IOException(
            "Recorded block size is "
                + block.getNumBytes()
                + ", but datanode returned "
                + bytesRead
                + " bytes");
      }
    } catch (Exception e) {
      LOG.error("Error reading block", e);
      success = false;
    } finally {
      blockReader.close();
    }
    if (!success) {
      throw new Exception("Could not copy block data for " + lblock.getBlock());
    }
  }
  private void copyBlocksToLostFound(String parent, HdfsFileStatus file, LocatedBlocks blocks)
      throws IOException {
    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
    final String fullName = file.getFullName(parent);
    OutputStream fos = null;
    try {
      if (!lfInited) {
        lostFoundInit(dfs);
      }
      if (!lfInitedOk) {
        throw new IOException("failed to initialize lost+found");
      }
      String target = lostFound + fullName;
      if (hdfsPathExists(target)) {
        LOG.warn(
            "Fsck: can't copy the remains of "
                + fullName
                + " to "
                + "lost+found, because "
                + target
                + " already exists.");
        return;
      }
      if (!namenode.getRpcServer().mkdirs(target, file.getPermission(), true)) {
        throw new IOException("failed to create directory " + target);
      }
      // create chains
      int chain = 0;
      boolean copyError = false;
      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        LocatedBlock lblock = lBlk;
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(target + "/" + chain, true);
          if (fos == null) {
            throw new IOException(
                "Failed to copy " + fullName + " to /lost+found: could not store chain " + chain);
          }
          chain++;
        }

        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          LOG.error("Fsck: could not copy block " + lblock.getBlock() + " to " + target, e);
          fos.flush();
          fos.close();
          fos = null;
          internalError = true;
          copyError = true;
        }
      }
      if (copyError) {
        LOG.warn(
            "Fsck: there were errors copying the remains of the "
                + "corrupted file "
                + fullName
                + " to /lost+found");
      } else {
        LOG.info("Fsck: copied the remains of the corrupted file " + fullName + " to /lost+found");
      }
    } catch (Exception e) {
      LOG.error("copyBlocksToLostFound: error processing " + fullName, e);
      internalError = true;
    } finally {
      if (fos != null) fos.close();
      dfs.close();
    }
  }