Example #1
  /**
   * Generate a dummy namenode proxy instance that utilizes our hacked {@link
   * LossyRetryInvocationHandler}. A proxy instance generated by this method will proactively drop
   * RPC responses. Currently this method only supports HA setups; null will be returned if the
   * given configuration is not for HA.
   *
   * @param config the configuration containing the required IPC properties, client failover
   *     configurations, etc.
   * @param nameNodeUri the URI pointing either to a specific NameNode or to a logical nameservice.
   * @param xface the IPC interface which should be created
   * @param numResponseToDrop the number of responses to drop for each RPC call
   * @param fallbackToSimpleAuth set to true or false during calls to indicate if a secure client
   *     falls back to simple auth
   * @return an object containing both the proxy and the associated delegation token service it
   *     corresponds to. Returns null if the given configuration does not support HA.
   * @throws IOException if there is an error creating the proxy
   */
  @SuppressWarnings("unchecked")
  public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
      Configuration config,
      URI nameNodeUri,
      Class<T> xface,
      int numResponseToDrop,
      AtomicBoolean fallbackToSimpleAuth)
      throws IOException {
    Preconditions.checkArgument(numResponseToDrop > 0);
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
        createFailoverProxyProvider(config, nameNodeUri, xface, true, fallbackToSimpleAuth);

    if (failoverProxyProvider != null) { // HA case
      int delay =
          config.getInt(
              HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
              HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
      int maxCap =
          config.getInt(
              HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
              HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
      int maxFailoverAttempts =
          config.getInt(
              HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
              HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
      int maxRetryAttempts =
          config.getInt(
              HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
              HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
      InvocationHandler dummyHandler =
          new LossyRetryInvocationHandler<T>(
              numResponseToDrop,
              failoverProxyProvider,
              RetryPolicies.failoverOnNetworkException(
                  RetryPolicies.TRY_ONCE_THEN_FAIL,
                  maxFailoverAttempts,
                  Math.max(numResponseToDrop + 1, maxRetryAttempts),
                  delay,
                  maxCap));

      T proxy =
          (T)
              Proxy.newProxyInstance(
                  failoverProxyProvider.getInterface().getClassLoader(),
                  new Class[] {xface},
                  dummyHandler);
      Text dtService;
      if (failoverProxyProvider.useLogicalURI()) {
        dtService =
            HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri, HdfsConstants.HDFS_URI_SCHEME);
      } else {
        dtService = SecurityUtil.buildTokenService(NameNode.getAddress(nameNodeUri));
      }
      return new ProxyAndInfo<T>(proxy, dtService, NameNode.getAddress(nameNodeUri));
    } else {
      LOG.warn("Currently creating proxy using LossyRetryInvocationHandler requires NN HA setup");
      return null;
    }
  }
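The sketch below (not part of the original source) shows how a caller might exercise this handler; the logical nameservice "mycluster", the enclosing NameNodeProxies class name, and the drop count are illustrative assumptions.

  // Hedged usage sketch for createProxyWithLossyRetryHandler.
  static void lossyProxySketch(Configuration conf) throws IOException {
    URI nnUri = URI.create("hdfs://mycluster"); // must be an HA logical URI
    ProxyAndInfo<ClientProtocol> proxyInfo =
        NameNodeProxies.createProxyWithLossyRetryHandler(
            conf, nnUri, ClientProtocol.class, 2 /* numResponseToDrop */, null);
    if (proxyInfo == null) {
      return; // non-HA configuration; the method returns null as documented
    }
    ClientProtocol namenode = proxyInfo.getProxy();
    // the first two responses of this call are dropped, then retried up to
    // max(numResponseToDrop + 1, maxRetryAttempts) times by the policy built above
    namenode.getServerDefaults();
  }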
Example #2
  /** Initialize SecondaryNameNode. */
  private void initialize(Configuration conf) throws IOException {
    // initialize Java VM metrics
    JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));

    // Create connection to the namenode.
    shouldRun = true;
    nameNodeAddr = NameNode.getAddress(conf);

    this.conf = conf;
    this.namenode =
        (NamenodeProtocol)
            RPC.waitForProxy(
                NamenodeProtocol.class, NamenodeProtocol.versionID, nameNodeAddr, conf);

    // initialize checkpoint directories
    fsName = getInfoServer();
    checkpointDirs = FSImage.getCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointImage = new CheckpointStorage(conf);
    checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

    // Initialize other scheduling parameters from the configuration
    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600); // default: 1 hour
    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304); // edits size trigger, default: 4 MB

    // initialize the webserver for uploading files.
    String infoAddr =
        NetUtils.getServerAddress(
            conf,
            "dfs.secondary.info.bindAddress",
            "dfs.secondary.info.port",
            "dfs.secondary.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("name.system.image", checkpointImage);
    this.infoServer.setAttribute("name.conf", conf);
    infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
    infoServer.start();

    // The web-server port can be ephemeral... ensure we have the correct info
    infoPort = infoServer.getPort();
    conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
    LOG.warn(
        "Checkpoint Period   :"
            + checkpointPeriod
            + " secs "
            + "("
            + checkpointPeriod / 60
            + " min)");
    LOG.warn(
        "Log Size Trigger    :"
            + checkpointSize
            + " bytes "
            + "("
            + checkpointSize / 1024
            + " KB)");
  }
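Everything initialize(Configuration) reads comes from the configuration; a minimal sketch of the relevant keys with illustrative values follows. The directory, host, the fs.checkpoint.dir key, and the SecondaryNameNode(Configuration) entry point mirror stock Hadoop of this vintage and are assumptions here.

  Configuration conf = new Configuration();
  conf.setLong("fs.checkpoint.period", 1800);           // checkpoint every 30 minutes
  conf.setLong("fs.checkpoint.size", 8L * 1024 * 1024); // ...or once edits reach 8 MB
  conf.set("fs.checkpoint.dir", "/data/dfs/namesecondary");
  conf.set("dfs.secondary.http.address", "snn.example.com:50090");
  SecondaryNameNode secondary = new SecondaryNameNode(conf); // ends up calling initialize(conf)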
Example #3
  /**
   * Creates the namenode proxy with the passed protocol. This will handle creation of either HA-
   * or non-HA-enabled proxy objects, depending on whether the provided URI is a configured
   * logical URI.
   *
   * @param conf the configuration containing the required IPC properties, client failover
   *     configurations, etc.
   * @param nameNodeUri the URI pointing either to a specific NameNode or to a logical nameservice.
   * @param xface the IPC interface which should be created
   * @param fallbackToSimpleAuth set to true or false during calls to indicate if a secure client
   *     falls back to simple auth
   * @return an object containing both the proxy and the associated delegation token service it
   *     corresponds to
   * @throws IOException if there is an error creating the proxy
   */
  @SuppressWarnings("unchecked")
  public static <T> ProxyAndInfo<T> createProxy(
      Configuration conf, URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
      throws IOException {
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
        createFailoverProxyProvider(conf, nameNodeUri, xface, true, fallbackToSimpleAuth);

    if (failoverProxyProvider == null) {
      // Non-HA case
      return createNonHAProxy(
          conf,
          NameNode.getAddress(nameNodeUri),
          xface,
          UserGroupInformation.getCurrentUser(),
          true,
          fallbackToSimpleAuth);
    } else {
      // HA case
      DfsClientConf config = new DfsClientConf(conf);
      T proxy =
          (T)
              RetryProxy.create(
                  xface,
                  failoverProxyProvider,
                  RetryPolicies.failoverOnNetworkException(
                      RetryPolicies.TRY_ONCE_THEN_FAIL,
                      config.getMaxFailoverAttempts(),
                      config.getMaxRetryAttempts(),
                      config.getFailoverSleepBaseMillis(),
                      config.getFailoverSleepMaxMillis()));

      Text dtService;
      if (failoverProxyProvider.useLogicalURI()) {
        dtService =
            HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri, HdfsConstants.HDFS_URI_SCHEME);
      } else {
        dtService = SecurityUtil.buildTokenService(NameNode.getAddress(nameNodeUri));
      }
      return new ProxyAndInfo<T>(proxy, dtService, NameNode.getAddress(nameNodeUri));
    }
  }
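For comparison with Example #1, a hedged sketch of the ordinary call path; FileSystem.getDefaultUri and the NameNodeProxies class name are the only assumptions, and the method itself decides whether the resulting proxy is HA-enabled.

  URI nnUri = FileSystem.getDefaultUri(conf); // hdfs://<host:port> or hdfs://<nameservice>
  ProxyAndInfo<ClientProtocol> proxyInfo =
      NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class, null);
  ClientProtocol namenode = proxyInfo.getProxy();
  Text dtService = proxyInfo.getDelegationTokenService(); // used when selecting delegation tokens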
Example #4
  /**
   * Refresh the authorization policy on the {@link NameNodeFBT}.
   *
   * @return exitcode 0 on success, non-zero on failure
   * @throws IOException if the refresh request to the NameNode fails
   */
  public int refreshServiceAcl() throws IOException {
    // Get the current configuration
    Configuration conf = getConf();

    // Create the client
    RefreshAuthorizationPolicyProtocol refreshProtocol =
        (RefreshAuthorizationPolicyProtocol)
            RPC.getProxy(
                RefreshAuthorizationPolicyProtocol.class,
                RefreshAuthorizationPolicyProtocol.versionID,
                NameNode.getAddress(conf),
                getUGI(conf),
                conf,
                NetUtils.getSocketFactory(conf, RefreshAuthorizationPolicyProtocol.class));

    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();

    return 0;
  }
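In stock Hadoop this method lives on the DFSAdmin tool, so the refresh can be triggered programmatically as sketched below (the DFSAdmin constructor is an assumption if this fork diverges) or from the shell via hadoop dfsadmin -refreshServiceAcl.

  DFSAdmin admin = new DFSAdmin(conf); // conf must point at the target NameNode
  int exitCode = admin.refreshServiceAcl(); // 0 on success, non-zero on failure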
Example #5
  /** Verify that abandonBlock() by a client that does not hold the lease fails. */
  public void testAbandonBlock() throws IOException {
    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
    FileSystem fs = cluster.getFileSystem();

    String src = FILE_NAME_PREFIX + "foo";
    FSDataOutputStream fout = null;
    try {
      // start writing a file but do not close it
      fout = fs.create(new Path(src), true, 4096, (short) 1, 512L);
      for (int i = 0; i < 1024; i++) {
        fout.write(123);
      }
      fout.sync();

      // fetch the file's first block as a second client
      final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
      LocatedBlocks blocks = dfsclient.namenode.getBlockLocations(src, 0, 1);
      LocatedBlock b = blocks.get(0);
      try {
        dfsclient.namenode.abandonBlock(b.getBlock(), src, "someone");
        // the previous line should throw an exception
        fail("abandonBlock() by a client that does not hold the lease should fail");
      } catch (IOException ioe) {
        LOG.info("GREAT! " + StringUtils.stringifyException(ioe));
      }
    } finally { // best-effort cleanup; ignore secondary failures
      try {
        fout.close();
      } catch (Exception e) {
      }
      try {
        fs.close();
      } catch (Exception e) {
      }
      try {
        cluster.shutdown();
      } catch (Exception e) {
      }
    }
  }
Example #6
  /**
   * Get {@link UserGroupInformation} and possibly the delegation token out of the request.
   *
   * @param context the Servlet context
   * @param request the http request
   * @param conf configuration
   * @param secureAuthMethod the AuthenticationMethod used in secure mode.
   * @param tryUgiParameter whether the ugi query parameter should be consulted
   * @return a new user from the request
   * @throws AccessControlException if the request has no token
   */
  public static UserGroupInformation getUGI(
      ServletContext context,
      HttpServletRequest request,
      Configuration conf,
      final AuthenticationMethod secureAuthMethod,
      final boolean tryUgiParameter)
      throws IOException {
    final UserGroupInformation ugi;
    final String usernameFromQuery = getUsernameFromQuery(request, tryUgiParameter);
    final String doAsUserFromQuery = request.getParameter(DoAsParam.NAME);

    if (UserGroupInformation.isSecurityEnabled()) {
      final String remoteUser = request.getRemoteUser();
      String tokenString = request.getParameter(DELEGATION_PARAMETER_NAME);
      if (tokenString != null) {
        Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
        token.decodeFromUrlString(tokenString);
        SecurityUtil.setTokenService(token, NameNode.getAddress(conf));
        token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);

        ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
        DataInputStream in = new DataInputStream(buf);
        DelegationTokenIdentifier id = new DelegationTokenIdentifier();
        id.readFields(in);
        if (context != null) {
          NameNode nn = (NameNode) context.getAttribute("name.node");
          if (nn != null) {
            // Verify the token.
            nn.getNamesystem()
                .getDelegationTokenSecretManager()
                .verifyToken(id, token.getPassword());
          }
        }
        ugi = id.getUser();
        if (ugi.getRealUser() == null) {
          // non-proxy case
          checkUsername(ugi.getShortUserName(), usernameFromQuery);
          checkUsername(null, doAsUserFromQuery);
        } else {
          // proxy case
          checkUsername(ugi.getRealUser().getShortUserName(), usernameFromQuery);
          checkUsername(ugi.getShortUserName(), doAsUserFromQuery);
          ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
        }
        ugi.addToken(token);
        ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
      } else {
        if (remoteUser == null) {
          throw new IOException("Security enabled but user not " + "authenticated by filter");
        }
        final UserGroupInformation realUgi = UserGroupInformation.createRemoteUser(remoteUser);
        checkUsername(realUgi.getShortUserName(), usernameFromQuery);
        // This is not necessarily accurate; the request may have been
        // authenticated by a user-facing filter instead.
        realUgi.setAuthenticationMethod(secureAuthMethod);
        ugi = initUGI(realUgi, doAsUserFromQuery, request, true, conf);
      }
    } else { // Security's not on, pull from url
      final UserGroupInformation realUgi =
          usernameFromQuery == null
              ? getDefaultWebUser(conf) // not specified in request
              : UserGroupInformation.createRemoteUser(usernameFromQuery);
      realUgi.setAuthenticationMethod(AuthenticationMethod.SIMPLE);
      ugi = initUGI(realUgi, doAsUserFromQuery, request, false, conf);
    }

    if (LOG.isDebugEnabled()) LOG.debug("getUGI is returning: " + ugi.getShortUserName());
    return ugi;
  }
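A hedged sketch of a typical call site: a servlet pulls the configuration stored under the "name.conf" attribute (set in Example #2) and resolves the caller before doing any privileged work. The servlet itself, the KERBEROS_SSL choice, and the JspHelper class name (as in stock Hadoop) are illustrative assumptions.

  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    ServletContext context = getServletContext();
    Configuration conf = (Configuration) context.getAttribute("name.conf");
    UserGroupInformation ugi =
        JspHelper.getUGI(context, request, conf, AuthenticationMethod.KERBEROS_SSL, true);
    // ... perform the privileged work via ugi.doAs(...)
  }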
Example #7
  /** Copy the recoverable blocks of a corrupted file into /lost+found. */
  private void copyBlocksToLostFound(String parent, HdfsFileStatus file, LocatedBlocks blocks)
      throws IOException {
    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
    final String fullName = file.getFullName(parent);
    OutputStream fos = null;
    try {
      if (!lfInited) {
        lostFoundInit(dfs);
      }
      if (!lfInitedOk) {
        throw new IOException("failed to initialize lost+found");
      }
      String target = lostFound + fullName;
      if (hdfsPathExists(target)) {
        LOG.warn(
            "Fsck: can't copy the remains of "
                + fullName
                + " to lost+found, because "
                + target
                + " already exists.");
        return;
      }
      if (!namenode.getRpcServer().mkdirs(target, file.getPermission(), true)) {
        throw new IOException("failed to create directory " + target);
      }
      // create chains: each contiguous run of recoverable blocks is written to its
      // own file (lost+found/<path>/0, /1, ...); a missing block ends the current chain
      int chain = 0;
      boolean copyError = false;
      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        LocatedBlock lblock = lBlk;
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(target + "/" + chain, true);
          if (fos == null) {
            throw new IOException(
                "Failed to copy " + fullName + " to /lost+found: could not store chain " + chain);
          }
          chain++;
        }

        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          LOG.error("Fsck: could not copy block " + lblock.getBlock() + " to " + target, e);
          fos.flush();
          fos.close();
          fos = null;
          internalError = true;
          copyError = true;
        }
      }
      if (copyError) {
        LOG.warn(
            "Fsck: there were errors copying the remains of the corrupted file "
                + fullName
                + " to /lost+found");
      } else {
        LOG.info("Fsck: copied the remains of the corrupted file " + fullName + " to /lost+found");
      }
    } catch (Exception e) {
      LOG.error("copyBlocksToLostFound: error processing " + fullName, e);
      internalError = true;
    } finally {
      if (fos != null) fos.close();
      dfs.close();
    }
  }
Example #8
  /** Move a corrupted file into /lost+found by copying its blocks, then delete the original. */
  private void lostFoundMove(FileStatus file, LocatedBlocks blocks) throws IOException {
    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
    try {
      if (!lfInited) {
        lostFoundInit(dfs);
      }
      if (!lfInitedOk) {
        return;
      }
      String target = lostFound + file.getPath();
      String errmsg = "Failed to move " + file.getPath() + " to /lost+found";
      try {
        PermissionStatus ps =
            new PermissionStatus(file.getOwner(), file.getGroup(), file.getPermission());
        if (!nn.namesystem.dir.mkdirs(target, ps, false, FSNamesystem.now())) {
          LOG.warn(errmsg);
          return;
        }
        // create chains: each contiguous run of recoverable blocks is written to its
        // own file (lost+found/<path>/0, /1, ...); a missing block ends the current chain
        int chain = 0;
        OutputStream fos = null;
        for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
          LocatedBlock lblock = lBlk;
          DatanodeInfo[] locs = lblock.getLocations();
          if (locs == null || locs.length == 0) {
            if (fos != null) {
              fos.flush();
              fos.close();
              fos = null;
            }
            continue;
          }
          if (fos == null) {
            fos = dfs.create(target + "/" + chain, true);
            if (fos != null) chain++;
            else {
              LOG.warn(errmsg + ": could not store chain " + chain);
              // perhaps we should bail out here...
              // return;
              continue;
            }
          }

          // copy the block. It's a pity it's not abstracted from DFSInputStream ...
          try {
            copyBlock(dfs, lblock, fos);
          } catch (Exception e) {
            // something went wrong copying this block...
            LOG.warn(" - could not copy block " + lblock.getBlock() + " to " + target, e);
            fos.flush();
            fos.close();
            fos = null;
          }
        }
        if (fos != null) fos.close();
        LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found");
        dfs.delete(file.getPath().toString(), true);
      } catch (Exception e) {
        LOG.warn(errmsg + ": " + e.getMessage(), e);
      }
    } finally {
      dfs.close();
    }
  }