Code Example #1
File: WebHdfsHandler.java  Project: RiseOfApes/hadoop
  private void onCreate(ChannelHandlerContext ctx) throws IOException, URISyntaxException {
    // Acknowledge the client's "Expect: 100-continue" before the file body arrives.
    writeContinueHeader(ctx);

    final String nnId = params.namenodeId();
    final int bufferSize = params.bufferSize();
    final short replication = params.replication();
    final long blockSize = params.blockSize();
    final FsPermission permission = params.permission();

    EnumSet<CreateFlag> flags =
        params.overwrite()
            ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
            : EnumSet.of(CreateFlag.CREATE);

    final DFSClient dfsClient = newDfsClient(nnId, confForCreate);
    OutputStream out =
        dfsClient.createWrappedOutputStream(
            dfsClient.create(
                path, permission, flags, replication, blockSize, null, bufferSize, null),
            null);
    DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, CREATED);

    final URI uri = new URI(HDFS_URI_SCHEME, nnId, path, null, null);
    resp.headers().set(LOCATION, uri.toString());
    resp.headers().set(CONTENT_LENGTH, 0);
    // Hand the rest of the request body to HdfsWriter, which streams it into
    // the HDFS output stream and replies with the 201 Created response built above.
    ctx.pipeline()
        .replace(this, HdfsWriter.class.getSimpleName(), new HdfsWriter(dfsClient, out, resp));
  }
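For reference, the same create-then-wrap pattern can be exercised outside the Netty handler. The sketch below is a minimal standalone example under stated assumptions: the default Configuration, the path "/tmp/webhdfs-demo.txt", and the replication, block-size, and buffer-size values are illustrative and not taken from WebHdfsHandler.

import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class DfsCreateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DFSClient dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
    try {
      EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
      // create() opens the raw DFSOutputStream; createWrappedOutputStream()
      // wraps it before handing it to callers, as in the handler above.
      OutputStream out =
          dfsClient.createWrappedOutputStream(
              dfsClient.create(
                  "/tmp/webhdfs-demo.txt",       // hypothetical path
                  FsPermission.getFileDefault(),
                  flags,
                  (short) 1,                     // replication
                  128L * 1024 * 1024,            // block size in bytes
                  null,                          // no progress callback
                  4096,                          // buffer size
                  null),                         // default checksum options
              null);                             // no FileSystem.Statistics
      out.write("hello".getBytes(StandardCharsets.UTF_8));
      out.close();
    } finally {
      dfsClient.close();
    }
  }
}

Going through createWrappedOutputStream matters when the target path sits in an HDFS encryption zone; otherwise the wrapped stream is a plain HdfsDataOutputStream around the raw DFSOutputStream.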
Code Example #2
File: HdfsUtils.java  Project: YzPaul3/ankus
 /**
  * Obtains an output stream from the DFS client.
  *
  * @param client DFS client
  * @param filename file name
  * @param overwrite whether to overwrite an existing file
  * @return the output stream
  * @throws java.io.IOException if the HDFS I/O cannot be handled
  */
 public static OutputStream getOutputStream(DFSClient client, String filename, boolean overwrite)
     throws IOException {
   return client.create(filename, overwrite);
 }
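A hedged usage sketch for the helper above, assuming HdfsUtils from this example is on the classpath (its package import is omitted because the package is not shown). The DFSClient is constructed with the NameNode.getAddress(conf) pattern that also appears in examples #3 and #4 below; the path "/tmp/ankus-demo.txt" is hypothetical.

import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class HdfsUtilsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
    try {
      // overwrite = true replaces any existing file at the target path.
      OutputStream out = HdfsUtils.getOutputStream(client, "/tmp/ankus-demo.txt", true);
      try {
        out.write("hello ankus".getBytes(StandardCharsets.UTF_8));
      } finally {
        out.close();
      }
    } finally {
      client.close();
    }
  }
}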
Code Example #3
  private void copyBlocksToLostFound(String parent, HdfsFileStatus file, LocatedBlocks blocks)
      throws IOException {
    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
    final String fullName = file.getFullName(parent);
    OutputStream fos = null;
    try {
      if (!lfInited) {
        lostFoundInit(dfs);
      }
      if (!lfInitedOk) {
        throw new IOException("failed to initialize lost+found");
      }
      String target = lostFound + fullName;
      if (hdfsPathExists(target)) {
        LOG.warn(
            "Fsck: can't copy the remains of "
                + fullName
                + " to "
                + "lost+found, because "
                + target
                + " already exists.");
        return;
      }
      if (!namenode.getRpcServer().mkdirs(target, file.getPermission(), true)) {
        throw new IOException("failed to create directory " + target);
      }
      // create chains
      int chain = 0;
      boolean copyError = false;
      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        LocatedBlock lblock = lBlk;
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(target + "/" + chain, true);
          if (fos == null) {
            throw new IOException(
                "Failed to copy " + fullName + " to /lost+found: could not store chain " + chain);
          }
          chain++;
        }

        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          LOG.error("Fsck: could not copy block " + lblock.getBlock() + " to " + target, e);
          fos.flush();
          fos.close();
          fos = null;
          internalError = true;
          copyError = true;
        }
      }
      if (copyError) {
        LOG.warn(
            "Fsck: there were errors copying the remains of the "
                + "corrupted file "
                + fullName
                + " to /lost+found");
      } else {
        LOG.info("Fsck: copied the remains of the corrupted file " + fullName + " to /lost+found");
      }
    } catch (Exception e) {
      LOG.error("copyBlocksToLostFound: error processing " + fullName, e);
      internalError = true;
    } finally {
      if (fos != null) fos.close();
      dfs.close();
    }
  }
Code Example #4
  private void lostFoundMove(FileStatus file, LocatedBlocks blocks) throws IOException {
    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
    try {
      if (!lfInited) {
        lostFoundInit(dfs);
      }
      if (!lfInitedOk) {
        return;
      }
      String target = lostFound + file.getPath();
      String errmsg = "Failed to move " + file.getPath() + " to /lost+found";
      try {
        PermissionStatus ps =
            new PermissionStatus(file.getOwner(), file.getGroup(), file.getPermission());
        if (!nn.namesystem.dir.mkdirs(target, ps, false, FSNamesystem.now())) {
          LOG.warn(errmsg);
          return;
        }
        // create chains
        int chain = 0;
        OutputStream fos = null;
        for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
          LocatedBlock lblock = lBlk;
          DatanodeInfo[] locs = lblock.getLocations();
          if (locs == null || locs.length == 0) {
            if (fos != null) {
              fos.flush();
              fos.close();
              fos = null;
            }
            continue;
          }
          if (fos == null) {
            fos = dfs.create(target + "/" + chain, true);
            if (fos != null) chain++;
            else {
              LOG.warn(errmsg + ": could not store chain " + chain);
              // perhaps we should bail out here...
              // return;
              continue;
            }
          }

          // copy the block. It's a pity it's not abstracted from DFSInputStream ...
          try {
            copyBlock(dfs, lblock, fos);
          } catch (Exception e) {
            e.printStackTrace();
            // something went wrong copying this block...
            LOG.warn(" - could not copy block " + lblock.getBlock() + " to " + target);
            fos.flush();
            fos.close();
            fos = null;
          }
        }
        if (fos != null) fos.close();
        LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found");
        dfs.delete(file.getPath().toString(), true);
      } catch (Exception e) {
        e.printStackTrace();
        LOG.warn(errmsg + ": " + e.getMessage());
      }
    } finally {
      dfs.close();
    }
  }