Code example #1
  /**
   * The function will verify the token with NameNode if available and will create a
   * UserGroupInformation.
   *
   * <p>Code in this function is copied from JspHelper.getTokenUGI
   *
   * @param identifier Delegation token identifier
   * @param password Delegation token password
   * @param kind the kind of token
   * @param service the service for this token
   * @param servletContext Jetty servlet context which contains the NN address
   * @throws SecurityException Thrown when authentication fails
   */
  private static void verifyToken(
      byte[] identifier, byte[] password, Text kind, Text service, ServletContext servletContext) {
    try {
      Token<DelegationTokenIdentifier> token =
          new Token<DelegationTokenIdentifier>(identifier, password, kind, service);

      ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
      DataInputStream in = new DataInputStream(buf);
      DelegationTokenIdentifier id = new DelegationTokenIdentifier();
      id.readFields(in);

      final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(servletContext);
      if (nn != null) {
        nn.getNamesystem().verifyToken(id, token.getPassword());
      }

      UserGroupInformation userGroupInformation = id.getUser();
      userGroupInformation.addToken(token);
      LOG.debug(
          "user "
              + userGroupInformation.getUserName()
              + " ("
              + userGroupInformation.getShortUserName()
              + ") authenticated");

      // re-login if necessary
      userGroupInformation.checkTGTAndReloginFromKeytab();
    } catch (IOException e) {
      throw new SecurityException("Failed to verify delegation token " + e, e);
    }
  }
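The decode-then-attach steps in the middle of this method are useful on their own; below is a minimal sketch of the same pattern outside the servlet context (a hypothetical helper, assuming a delegation token obtained elsewhere and the usual Hadoop security imports):

  // Sketch only: decode a delegation token's identifier and attach the token to its owner's UGI.
  private static UserGroupInformation ugiFromToken(Token<DelegationTokenIdentifier> token)
      throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(token.getIdentifier()));
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(in);

    UserGroupInformation ugi = id.getUser(); // owner encoded in the identifier
    ugi.addToken(token);                     // token becomes visible to RPC inside ugi.doAs(...)
    return ugi;
  }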
Code example #2
File: DragonAppMaster.java Project: Brassrat/dragon
 /**
  * Obtain the tokens needed by the job and put them in the UGI
  *
  * @param conf
  */
 protected void downloadTokensAndSetupUGI(Configuration conf) {
   try {
     this.currentUser = UserGroupInformation.getCurrentUser();
     if (UserGroupInformation.isSecurityEnabled()) {
       // Read the file-system tokens from the localized tokens-file.
       Path jobSubmitDir =
           FileContext.getLocalFSFileContext()
               .makeQualified(
                   new Path(new File(DragonJobConfig.JOB_SUBMIT_DIR).getAbsolutePath()));
       Path jobTokenFile = new Path(jobSubmitDir, DragonJobConfig.APPLICATION_TOKENS_FILE);
       fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
       LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile=" + jobTokenFile);
       for (Token<? extends TokenIdentifier> tk : fsTokens.getAllTokens()) {
         if (LOG.isDebugEnabled()) {
           LOG.debug(
               "Token of kind "
                   + tk.getKind()
                   + "in current ugi in the AppMaster for service "
                   + tk.getService());
         }
         currentUser.addToken(tk); // For use by AppMaster itself.
       }
     }
   } catch (IOException e) {
     throw new YarnException(e);
   }
 }
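Outside an AppMaster, the same "read the token file and attach everything to the current user" step can be written with the bulk Credentials API; a minimal sketch with a placeholder path (UserGroupInformation#addCredentials assumed available, as on Hadoop 2.x):

  // Sketch: load a serialized Credentials file and attach all of its tokens to the current UGI.
  void attachJobTokens(Configuration conf) throws IOException {
    Path tokenFile = new Path("appTokens.dat"); // placeholder path
    Credentials credentials = Credentials.readTokenStorageFile(tokenFile, conf);
    UserGroupInformation.getCurrentUser().addCredentials(credentials);
  }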
Code example #3
  @Test
  public void testLocalUser() throws Exception {
    try {
      // nonsecure default
      Configuration conf = new YarnConfiguration();
      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
      UserGroupInformation.setConfiguration(conf);
      LinuxContainerExecutor lce = new LinuxContainerExecutor();
      lce.setConf(conf);
      Assert.assertEquals(
          YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER, lce.getRunAsUser("foo"));

      // nonsecure custom setting
      conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, "bar");
      lce = new LinuxContainerExecutor();
      lce.setConf(conf);
      Assert.assertEquals("bar", lce.getRunAsUser("foo"));

      // secure
      conf = new YarnConfiguration();
      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
      UserGroupInformation.setConfiguration(conf);
      lce = new LinuxContainerExecutor();
      lce.setConf(conf);
      Assert.assertEquals("foo", lce.getRunAsUser("foo"));
    } finally {
      Configuration conf = new YarnConfiguration();
      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
      UserGroupInformation.setConfiguration(conf);
    }
  }
Code example #4
  /**
   * write text to a remote file system
   *
   * @param hdfsPath !null remote path
   * @param content !null text content
   */
  @Override
  public void writeToFileSystem(final String hdfsPath, final String content) {
    if (isRunningAsUser()) {
      super.writeToFileSystem(hdfsPath, content);
      return;
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      // if (true) throw new UnsupportedOperationException("Uncomment when using version 1.0.*");
      uig.doAs(
          new PrivilegedExceptionAction<Void>() {

            public Void run() throws Exception {
              FileSystem fs = getDFS();
              Path src = new Path(hdfsPath);
              Path parent = src.getParent();
              guaranteeDirectory(parent);
              OutputStream os = FileSystem.create(fs, src, FULL_FILE_ACCESS);
              FileUtilities.writeFile(os, content);
              return null;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to writeToFileSystem because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
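Several examples on this page (#4, #6, #17, #19, #21, #23) come from the same accessor class, which wraps every HDFS call in UserGroupInformation.doAs. A rough usage sketch follows; the caller is hypothetical, and since the HDFWithNameAccessor constructor shown in example #15 is protected, an instance is assumed to come from elsewhere:

  // Rough usage sketch only - "accessor" stands in for a concrete accessor instance.
  void roundTrip(HDFWithNameAccessor accessor) {
    String hdfsPath = "/tmp/example.txt";          // placeholder path
    accessor.writeToFileSystem(hdfsPath, "hello"); // runs as the configured remote user
    if (accessor.exists(hdfsPath)) {
      System.out.println(accessor.readFromFileSystem(hdfsPath));
    }
  }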
Code example #5
  @SuppressWarnings("unchecked") // from Mockito mocks
  @Test
  public <T extends TokenIdentifier> void testUGITokens() throws Exception {
    UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting("TheDoctor", new String[] {"TheTARDIS"});
    Token<T> t1 = mock(Token.class);
    Token<T> t2 = mock(Token.class);

    ugi.addToken(t1);
    ugi.addToken(t2);

    Collection<Token<? extends TokenIdentifier>> z = ugi.getTokens();
    assertTrue(z.contains(t1));
    assertTrue(z.contains(t2));
    assertEquals(2, z.size());

    try {
      z.remove(t1);
      fail("Shouldn't be able to modify token collection from UGI");
    } catch (UnsupportedOperationException uoe) {
      // Can't modify tokens
    }

    // ensure that the tokens are passed through doAs
    Collection<Token<? extends TokenIdentifier>> otherSet =
        ugi.doAs(
            new PrivilegedExceptionAction<Collection<Token<?>>>() {
              public Collection<Token<?>> run() throws IOException {
                return UserGroupInformation.getCurrentUser().getTokens();
              }
            });
    assertTrue(otherSet.contains(t1));
    assertTrue(otherSet.contains(t2));
  }
Code example #6
  /**
   * true if the file exists
   *
   * @param hdfsPath !null remote path
   * @return true if the file exists
   */
  @Override
  public boolean exists(final String hdfsPath) {
    if (isRunningAsUser()) {
      return super.exists(hdfsPath);
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      // if (true) throw new UnsupportedOperationException("Uncomment when using version 1.0.*");
      return uig.doAs(
          new PrivilegedExceptionAction<Boolean>() {

            public Boolean run() throws Exception {
              FileSystem fileSystem = getDFS();

              Path dst = new Path(hdfsPath);

              return fileSystem.exists(dst);
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to copyFromFileSystem because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
    //       return false;
  }
Code example #7
  private void guaranteeDirectory(final Path src) {
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      // if (true) throw new UnsupportedOperationException("Uncomment when using version 1.0.*");
      uig.doAs(
          new PrivilegedExceptionAction<Void>() {

            public Void run() throws Exception {
              FileSystem fs = getDFS();

              if (fs.exists(src)) {
                if (!fs.isFile(src)) {
                  return null; // already a directory - nothing to do
                }
                fs.delete(src, false); // drop a file, we want a directory
              }
              fs.mkdirs(src, FULL_ACCESS);
              fs.setPermission(src, FULL_ACCESS);
              return null;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to guaranteeDirectory because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
Code example #8
File: AdminService.java Project: imace/hops
  @Override
  public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
      throws YarnException, StandbyException {
    String argName = "refreshNodes";
    UserGroupInformation user = checkAcls("refreshNodes");

    if (!isRMActive()) {
      RMAuditLogger.logFailure(
          user.getShortUserName(),
          argName,
          adminAcl.toString(),
          "AdminService",
          "ResourceManager is not active. Can not refresh nodes.");
      throwStandbyException();
    }

    try {
      Configuration conf =
          getConfiguration(
              new Configuration(false), YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
      rmContext.getNodesListManager().refreshNodes(conf);
      RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");
      return recordFactory.newRecordInstance(RefreshNodesResponse.class);
    } catch (IOException ioe) {
      LOG.info("Exception refreshing nodes ", ioe);
      RMAuditLogger.logFailure(
          user.getShortUserName(),
          argName,
          adminAcl.toString(),
          "AdminService",
          "Exception refreshing nodes");
      throw RPCUtil.getRemoteException(ioe);
    }
  }
Code example #9
File: AdminService.java Project: imace/hops
  @Override
  public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
      RefreshUserToGroupsMappingsRequest request) throws YarnException, IOException {
    String argName = "refreshUserToGroupsMappings";
    UserGroupInformation user = checkAcls(argName);

    if (!isRMActive()) {
      RMAuditLogger.logFailure(
          user.getShortUserName(),
          argName,
          adminAcl.toString(),
          "AdminService",
          "ResourceManager is not active. Can not refresh user-groups.");
      throwStandbyException();
    }

    Groups.getUserToGroupsMappingService(
            getConfiguration(
                new Configuration(false), YarnConfiguration.CORE_SITE_CONFIGURATION_FILE))
        .refresh();

    RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");

    return recordFactory.newRecordInstance(RefreshUserToGroupsMappingsResponse.class);
  }
Code example #10
File: AdminService.java Project: imace/hops
 @Override
 public synchronized void transitionToStandby(HAServiceProtocol.StateChangeRequestInfo reqInfo)
     throws IOException {
   // call refreshAdminAcls before HA state transition
   // for the case that adminAcls have been updated in previous active RM
   try {
     refreshAdminAcls(false);
   } catch (YarnException ex) {
     throw new ServiceFailedException("Can not execute refreshAdminAcls", ex);
   }
   UserGroupInformation user = checkAccess("transitionToStandby");
   checkHaStateChange(reqInfo);
   try {
     LOG.info("Transitioning to standby admin" + masterServiceAddress.toString());
     // TODO transition leader election to standby?
     rm.transitionToStandby(true);
     RMAuditLogger.logSuccess(
         user.getShortUserName(), "transitionToStandby", "RMHAProtocolService");
   } catch (Exception e) {
     RMAuditLogger.logFailure(
         user.getShortUserName(),
         "transitionToStandby",
         adminAcl.toString(),
         "RMHAProtocolService",
         "Exception transitioning to standby");
     throw new ServiceFailedException("Error when transitioning to Standby mode", e);
   }
 }
Code example #11
File: AdminService.java Project: imace/hops
  @Override
  public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
      throws YarnException, StandbyException {
    String argName = "refreshQueues";
    UserGroupInformation user = checkAcls(argName);

    if (!isRMActive()) {
      RMAuditLogger.logFailure(
          user.getShortUserName(),
          argName,
          adminAcl.toString(),
          "AdminService",
          "ResourceManager is not active. Can not refresh queues.");
      throwStandbyException();
    }

    RefreshQueuesResponse response = recordFactory.newRecordInstance(RefreshQueuesResponse.class);
    try {
      rmContext.getScheduler().reinitialize(getConfig(), this.rmContext);
      RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");
      return response;
    } catch (IOException ioe) {
      LOG.info("Exception refreshing queues ", ioe);
      RMAuditLogger.logFailure(
          user.getShortUserName(),
          argName,
          adminAcl.toString(),
          "AdminService",
          "Exception refreshing queues");
      throw RPCUtil.getRemoteException(ioe);
    }
  }
Code example #12
File: AdminService.java Project: imace/hops
  @Override
  public synchronized void transitionToActive(HAServiceProtocol.StateChangeRequestInfo reqInfo)
      throws IOException {
    // call refreshAdminAcls before HA state transition
    // for the case that adminAcls have been updated in previous active RM
    try {
      refreshAdminAcls(false);
    } catch (YarnException ex) {
      throw new ServiceFailedException("Can not execute refreshAdminAcls", ex);
    }

    UserGroupInformation user = checkAccess("transitionToActive");
    checkHaStateChange(reqInfo);
    try {
      if (!autoFailoverEnabled) {
        LOG.info("admin transition to active " + masterServiceAddress.toString());
        rm.transitionToActive();
        // call all refresh*s for active RM to get the updated configurations.
        refreshAll();
        RMAuditLogger.logSuccess(
            user.getShortUserName(), "transitionToActive", "RMHAProtocolService");
      }
    } catch (Exception e) {
      RMAuditLogger.logFailure(
          user.getShortUserName(),
          "transitionToActive",
          adminAcl.toString(),
          "RMHAProtocolService",
          "Exception transitioning to active");
      throw new ServiceFailedException("Error when transitioning to Active mode", e);
    }
  }
Code example #13
  private void registerWithRM() throws YarnRemoteException {
    this.resourceTracker = getRMClient();
    LOG.info("Connected to ResourceManager at " + this.rmAddress);

    RegisterNodeManagerRequest request =
        recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
    request.setHttpPort(this.httpPort);
    request.setResource(this.totalResource);
    request.setNodeId(this.nodeId);
    RegistrationResponse regResponse =
        this.resourceTracker.registerNodeManager(request).getRegistrationResponse();
    // if the ResourceManager instructs the NM to shut down.
    if (NodeAction.SHUTDOWN.equals(regResponse.getNodeAction())) {
      throw new YarnException(
          "Received SHUTDOWN signal from ResourceManager, registration of NodeManager failed");
    }

    if (UserGroupInformation.isSecurityEnabled()) {
      this.secretKeyBytes = regResponse.getSecretKey().array();
    }

    // do this now so that its set before we start heartbeating to RM
    if (UserGroupInformation.isSecurityEnabled()) {
      LOG.info("Security enabled - updating secret keys now");
      // It is expected that status updater is started by this point and
      // RM gives the shared secret in registration during StatusUpdater#start().
      this.containerTokenSecretManager.setSecretKey(
          this.nodeId.toString(), this.getRMNMSharedSecret());
    }
    LOG.info(
        "Registered with ResourceManager as "
            + this.nodeId
            + " with total resource of "
            + this.totalResource);
  }
Code example #14
    @Override
    protected Job createJob(Configuration conf, JobStateInternal forcedState, String diagnostic) {
      UserGroupInformation currentUser = null;
      try {
        currentUser = UserGroupInformation.getCurrentUser();
      } catch (IOException e) {
        throw new YarnRuntimeException(e);
      }
      Job newJob =
          new TestJob(
              getJobId(),
              getAttemptID(),
              conf,
              getDispatcher().getEventHandler(),
              getTaskAttemptListener(),
              getContext().getClock(),
              getCommitter(),
              isNewApiCommitter(),
              currentUser.getUserName(),
              getContext(),
              forcedState,
              diagnostic);
      ((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob);

      getDispatcher().register(JobFinishEvent.Type.class, createJobFinishEventHandler());

      return newJob;
    }
Code example #15
  protected HDFWithNameAccessor(final String host, final int port, final String user) {
    super(host, port, user);
    if (port <= 0) throw new IllegalArgumentException("bad port " + port);
    String connectString = "hdfs://" + host + ":" + port + "/";
    final String userDir = "/user/" + user;

    UserGroupInformation uig = getCurrentUserGroup();
    try {
      // if (true) throw new UnsupportedOperationException("Uncomment when using version 1.0.*");

      uig.doAs(
          new PrivilegedExceptionAction<Void>() {

            public Void run() throws Exception {
              getDFS();
              return null;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to connect on "
              + connectString
              + " because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
Code example #16
File: TestCopyFiles.java Project: Jude7/bc-hadoop2.0
 static Path createHomeDirectory(FileSystem fs, UserGroupInformation ugi) throws IOException {
   final Path home = new Path("/user/" + ugi.getUserName());
   fs.mkdirs(home);
   fs.setOwner(home, ugi.getUserName(), ugi.getGroupNames()[0]);
   fs.setPermission(home, new FsPermission((short) 0700));
   return home;
 }
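A hedged usage sketch for the helper above, assuming a FileSystem from an already running test cluster; the user and group names are placeholders:

  // Sketch: exercise createHomeDirectory against a (mini) cluster file system.
  static void setupTestHome(FileSystem fs) throws IOException {
    UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting("alice", new String[] {"users"});
    Path home = createHomeDirectory(fs, ugi);
    Assert.assertEquals("alice", fs.getFileStatus(home).getOwner());
  }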
Code example #17
  @Override
  public void writeToFileSystem(final String hdfsPath, final File localPath) {
    if (isRunningAsUser()) {
      super.writeToFileSystem(hdfsPath, localPath);
      return;
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      // if (true) throw new UnsupportedOperationException("Uncomment when using version 1.0.*");
      uig.doAs(
          new PrivilegedExceptionAction<Void>() {

            public Void run() throws Exception {
              FileSystem fileSystem = getDFS();

              Path dst = new Path(hdfsPath);

              Path src = new Path(localPath.getAbsolutePath());

              fileSystem.copyFromLocalFile(src, dst);
              fileSystem.setPermission(dst, FULL_FILE_ACCESS);
              return null;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to writeToFileSystem because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
Code example #18
  /**
   * Log a user in from a keytab file. Loads a user identity from a keytab file and logs them in.
   * They become the currently logged-in user.
   *
   * @param user the principal name to load from the keytab
   * @param path the path to the keytab file
   * @throws IOException if the keytab file can't be read
   */
  public static synchronized void loginUserFromKeytab(String user, String path) throws IOException {
    if (!isSecurityEnabled()) {
      return;
    }

    keytabFile = path;
    keytabPrincipal = user;
    Subject subject = new Subject();
    LoginContext login;
    long start = 0;
    try {
      login = new LoginContext(HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, subject);
      start = System.currentTimeMillis();
      login.login();
      metrics.loginSuccess.inc(System.currentTimeMillis() - start);
      loginUser = new UserGroupInformation(subject);
      loginUser.setLogin(login);
      loginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
    } catch (LoginException le) {
      if (start > 0) {
        metrics.loginFailure.inc(System.currentTimeMillis() - start);
      }
      throw new IOException("Login failure for " + user + " from keytab " + path, le);
    }
    LOG.info("Login successful for user " + keytabPrincipal + " using keytab file " + keytabFile);
  }
Code example #19
  /**
   * length of the file at the given path
   *
   * @param hdfsPath !null path - probably of an existing file
   * @return length in bytes, or 0 if the file does not exist
   */
  @Override
  public long fileLength(final String hdfsPath) {
    if (isRunningAsUser()) {
      return super.fileLength(hdfsPath);
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      // if (true) throw new UnsupportedOperationException("Uncomment when using version 1.0.*");
      return uig.doAs(
          new PrivilegedExceptionAction<Long>() {

            public Long run() throws Exception {
              FileSystem fs = getDFS();

              if (!exists(hdfsPath)) return 0L; // avoid a null unboxing NPE for the long return type
              Path src = new Path(hdfsPath);
              ContentSummary contentSummary = fs.getContentSummary(src);
              if (contentSummary == null) return 0L;
              return contentSummary.getLength();
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to get fileLength because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
Code example #20
  /**
   * Log a user in from a keytab file. Loads a user identity from a keytab file and logs them in.
   * This new user does not affect the currently logged-in user.
   *
   * @param user the principal name to load from the keytab
   * @param path the path to the keytab file
   * @throws IOException if the keytab file can't be read
   */
  public static synchronized UserGroupInformation loginUserFromKeytabAndReturnUGI(
      String user, String path) throws IOException {
    if (!isSecurityEnabled()) return UserGroupInformation.getCurrentUser();
    String oldKeytabFile = null;
    String oldKeytabPrincipal = null;

    long start = 0;
    try {
      oldKeytabFile = keytabFile;
      oldKeytabPrincipal = keytabPrincipal;
      keytabFile = path;
      keytabPrincipal = user;
      Subject subject = new Subject();

      LoginContext login =
          new LoginContext(HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, subject);

      start = System.currentTimeMillis();
      login.login();
      metrics.loginSuccess.inc(System.currentTimeMillis() - start);
      UserGroupInformation newLoginUser = new UserGroupInformation(subject);
      newLoginUser.setLogin(login);
      newLoginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);

      return newLoginUser;
    } catch (LoginException le) {
      if (start > 0) {
        metrics.loginFailure.inc(System.currentTimeMillis() - start);
      }
      throw new IOException("Login failure for " + user + " from keytab " + path, le);
    } finally {
      if (oldKeytabFile != null) keytabFile = oldKeytabFile;
      if (oldKeytabPrincipal != null) keytabPrincipal = oldKeytabPrincipal;
    }
  }
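Since the returned UGI does not replace the process login user, callers typically hold onto it and wrap individual operations in doAs; a sketch with placeholder principal and keytab values:

  // Sketch: keep a second identity around without disturbing the current login user.
  static FileSystem openAsBackupUser(final Configuration conf) throws Exception {
    UserGroupInformation backupUgi =
        UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            "backup@EXAMPLE.COM", "/etc/security/keytabs/backup.keytab");
    return backupUgi.doAs(
        new PrivilegedExceptionAction<FileSystem>() {
          public FileSystem run() throws IOException {
            return FileSystem.get(conf); // opened with the backup user's credentials
          }
        });
  }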
Code example #21
 public FsPermission getPermissions(final Path src) {
   UserGroupInformation uig = getCurrentUserGroup();
   try {
     // if (true) throw new UnsupportedOperationException("Uncomment when using version 1.0.*");
     return uig.doAs(
         new PrivilegedExceptionAction<FsPermission>() {
           public FsPermission run() throws Exception {
             FileSystem fs = getDFS();
             if (fs.exists(src)) {
               FileStatus fileStatus = fs.getFileStatus(src);
               return fileStatus.getPermission();
             }
             return null;
           }
         });
   } catch (Exception e) {
     throw new RuntimeException(
         "Failed to getPermissions because "
             + e.getMessage()
             + " exception of class "
             + e.getClass(),
         e);
   }
   //      return FsPermission.getDefault();
 }
Code example #22
File: DistCp.java Project: civvy/spring-hadoop
  /**
   * Initiate a copy operation using a command-line style (arguments are specified as {@link
   * String}s).
   *
   * @param arguments the copy arguments
   */
  public void copy(String... arguments) {
    Assert.notEmpty(arguments, "invalid number of arguments");
    // sanitize the arguments
    final List<String> parsedArguments = new ArrayList<String>();
    for (String arg : arguments) {
      parsedArguments.addAll(Arrays.asList(StringUtils.tokenizeToStringArray(arg, " ")));
    }

    try {
      if (StringUtils.hasText(user)) {
        UserGroupInformation ugi =
            UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
        ugi.doAs(
            new PrivilegedExceptionAction<Void>() {
              @Override
              public Void run() throws Exception {
                invokeCopy(
                    configuration, parsedArguments.toArray(new String[parsedArguments.size()]));
                return null;
              }
            });
      } else {
        invokeCopy(configuration, parsedArguments.toArray(new String[parsedArguments.size()]));
      }
    } catch (Exception ex) {
      throw new IllegalStateException("Cannot run distCp impersonated as '" + user + "'", ex);
    }
  }
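A rough invocation of the wrapper above; the DistCp instance is assumed to be already configured (configuration and optional proxy user set), and the URIs are placeholders:

  // Sketch: command-line style invocation of the Spring for Apache Hadoop DistCp wrapper.
  void mirror(DistCp distCp) {
    distCp.copy("-overwrite", "hdfs://src-cluster/data", "hdfs://dst-cluster/data");
  }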
Code example #23
  /**
   * read a remote file as text
   *
   * @param hdfsPath !null remote path to an existing file
   * @return content as text
   */
  @Override
  public String readFromFileSystem(final String hdfsPath) {
    if (isRunningAsUser()) {
      return super.readFromFileSystem(hdfsPath);
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      // if (true) throw new UnsupportedOperationException("Uncomment when using version 1.0.*");
      return uig.doAs(
          new PrivilegedExceptionAction<String>() {

            public String run() throws Exception {
              FileSystem fs = getDFS();
              Path src = new Path(hdfsPath);
              InputStream is = fs.open(src);
              String ret = FileUtilities.readInFile(is);
              return ret;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to readFromFileSystem because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
    //    return null;
  }
Code example #24
  @BeforeClass
  public static void clusterSetupAtBegining()
      throws IOException, LoginException, URISyntaxException {
    SupportsBlocks = true;
    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

    cluster =
        new MiniDFSCluster.Builder(CONF)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
            .numDataNodes(2)
            .build();
    cluster.waitClusterUp();

    fHdfs = cluster.getFileSystem(0);
    fHdfs2 = cluster.getFileSystem(1);
    fHdfs
        .getConf()
        .set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());
    fHdfs2
        .getConf()
        .set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());

    defaultWorkingDirectory =
        fHdfs.makeQualified(
            new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
    defaultWorkingDirectory2 =
        fHdfs2.makeQualified(
            new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));

    fHdfs.mkdirs(defaultWorkingDirectory);
    fHdfs2.mkdirs(defaultWorkingDirectory2);
  }
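A setup like this is usually paired with a teardown so the mini cluster is released after the test class; a minimal sketch assuming the same static cluster field:

  // Sketch: matching teardown for clusterSetupAtBegining().
  @AfterClass
  public static void clusterShutdownAtEnd() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }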
Code example #25
  @Test
  public void testTokenIdentifiers() throws Exception {
    UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting("TheDoctor", new String[] {"TheTARDIS"});
    TokenIdentifier t1 = mock(TokenIdentifier.class);
    TokenIdentifier t2 = mock(TokenIdentifier.class);

    ugi.addTokenIdentifier(t1);
    ugi.addTokenIdentifier(t2);

    Collection<TokenIdentifier> z = ugi.getTokenIdentifiers();
    assertTrue(z.contains(t1));
    assertTrue(z.contains(t2));
    assertEquals(2, z.size());

    // ensure that the token identifiers are passed through doAs
    Collection<TokenIdentifier> otherSet =
        ugi.doAs(
            new PrivilegedExceptionAction<Collection<TokenIdentifier>>() {
              public Collection<TokenIdentifier> run() throws IOException {
                return UserGroupInformation.getCurrentUser().getTokenIdentifiers();
              }
            });
    assertTrue(otherSet.contains(t1));
    assertTrue(otherSet.contains(t2));
    assertEquals(2, otherSet.size());
  }
Code example #26
 // @Test
 public void testFileStatus() throws Exception {
   UserGroupInformation ugi =
       UserGroupInformation.createRemoteUser(cluster.getJTClient().getProxy().getDaemonUser());
   ugi.doAs(
       new PrivilegedExceptionAction<Void>() {
         @Override
         public Void run() throws Exception {
           MRCluster myCluster = null;
           try {
             myCluster = MRCluster.createCluster(cluster.getConf());
             myCluster.connect();
             JTClient jt = myCluster.getJTClient();
             String dir = ".";
             checkFileStatus(jt.getFileStatus(dir, true));
             checkFileStatus(jt.listStatus(dir, false, true), dir);
             for (TTClient tt : myCluster.getTTClients()) {
               String[] localDirs = tt.getMapredLocalDirs();
               for (String localDir : localDirs) {
                 checkFileStatus(tt.listStatus(localDir, true, false), localDir);
                 checkFileStatus(tt.listStatus(localDir, true, true), localDir);
               }
             }
             String systemDir = jt.getClient().getSystemDir().toString();
             checkFileStatus(jt.listStatus(systemDir, false, true), systemDir);
             checkFileStatus(jt.listStatus(jt.getLogDir(), true, true), jt.getLogDir());
           } finally {
             if (myCluster != null) {
               myCluster.disconnect();
             }
           }
           return null;
         }
       });
 }
Code example #27
  /* generates a white list of ACL path regular expressions */
  public AclPathFilter(Configuration conf) {
    this();
    UserGroupInformation ugi = null;
    try {
      ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {

    }
    String stagingRootDir =
        new Path(
                conf.get(
                    "yarn.app.mapreduce.am.staging-dir",
                    conf.get(
                        "mapreduce.jobtracker.staging.root.dir", "/tmp/hadoop/mapred/staging")))
            .toString();
    String user;
    String randid = "\\d*";
    if (ugi != null) {
      user = ugi.getShortUserName();
    } else {
      user = "******";
    }
    paths.add("^" + new Path(stagingRootDir, user + randid + "/.staging").toString() + ".*");
    paths.add(
        "^" + (new Path(stagingRootDir, user + randid + "/.staging")).toUri().getPath() + ".*");
    stagingRootDir =
        new Path(conf.get("mapreduce.jobtracker.staging.root.dir", "/tmp/hadoop/mapred/staging"))
            .toString();
    paths.add("^" + new Path(stagingRootDir, user + "/.staging").toString() + ".*");
    paths.add("^" + (new Path(stagingRootDir, user + "/.staging")).toUri().getPath() + ".*");
  }
Code example #28
 protected void login() {
   ClassLoader prevCl = Thread.currentThread().getContextClassLoader();
   try {
     Thread.currentThread().setContextClassLoader(configHolder.getClassLoader());
     String userName = configHolder.getUserName();
     if (userName == null) {
       throw new HadoopException(
           "Unable to find login username for hadoop environment, [" + dataSource + "]");
     }
     String keyTabFile = configHolder.getKeyTabFile();
     if (keyTabFile != null) {
       if (UserGroupInformation.isSecurityEnabled()) {
         loginSubject = SecureClientLogin.loginUserFromKeytab(userName, keyTabFile);
       } else {
         loginSubject = SecureClientLogin.login(userName);
       }
     } else {
       String password = configHolder.getPassword();
       if (UserGroupInformation.isSecurityEnabled()) {
         loginSubject = SecureClientLogin.loginUserWithPassword(userName, password);
       } else {
         loginSubject = SecureClientLogin.login(userName);
       }
     }
   } catch (IOException ioe) {
     throw new HadoopException("Unable to login to Hadoop environment [" + dataSource + "]", ioe);
   } finally {
     Thread.currentThread().setContextClassLoader(prevCl);
   }
 }
Code example #29
  static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
      DatanodeID datanodeid,
      Configuration conf,
      int socketTimeout,
      boolean connectToDnViaHostname,
      LocatedBlock locatedBlock)
      throws IOException {
    final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
    InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
    }

    // Since we're creating a new UserGroupInformation here, we know that no
    // future RPC proxies will be able to re-use the same connection. And
    // usages of this proxy tend to be one-off calls.
    //
    // This is a temporary fix: callers should really achieve this by using
    // RPC.stopProxy() on the resulting object, but this is currently not
    // working in trunk. See the discussion on HDFS-1965.
    Configuration confWithNoIpcIdle = new Configuration(conf);
    confWithNoIpcIdle.setInt(
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

    UserGroupInformation ticket =
        UserGroupInformation.createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
    ticket.addToken(locatedBlock.getBlockToken());
    return createClientDatanodeProtocolProxy(
        addr, ticket, confWithNoIpcIdle, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
  }
Code example #30
  /**
   * Service a GET request as described below. Request: {@code GET http://<nn>:<port>/data[/<path>]
   * HTTP/1.1 }
   */
  @Override
  public void doGet(final HttpServletRequest request, final HttpServletResponse response)
      throws IOException {
    final Configuration conf = NameNodeHttpServer.getConfFromContext(getServletContext());
    final UserGroupInformation ugi = getUGI(request, conf);

    try {
      ugi.doAs(
          new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
              ClientProtocol nn = createNameNodeProxy();
              final String path = ServletUtil.getDecodedPath(request, "/data");
              final String encodedPath = ServletUtil.getRawPath(request, "/data");
              String delegationToken = request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);

              HdfsFileStatus info = nn.getFileInfo(path);
              if (info != null && !info.isDir()) {
                response.sendRedirect(
                    createRedirectURL(path, encodedPath, info, ugi, nn, request, delegationToken)
                        .toString());
              } else if (info == null) {
                response.sendError(400, "File not found " + path);
              } else {
                response.sendError(400, path + ": is a directory");
              }
              return null;
            }
          });
    } catch (IOException e) {
      response.sendError(400, e.getMessage());
    } catch (InterruptedException e) {
      response.sendError(400, e.getMessage());
    }
  }