Example #1
 protected void login() {
   ClassLoader prevCl = Thread.currentThread().getContextClassLoader();
   try {
     Thread.currentThread().setContextClassLoader(configHolder.getClassLoader());
     String userName = configHolder.getUserName();
     if (userName == null) {
       throw new HadoopException(
           "Unable to find login username for hadoop environment, [" + dataSource + "]");
     }
     String keyTabFile = configHolder.getKeyTabFile();
     if (keyTabFile != null) {
       if (UserGroupInformation.isSecurityEnabled()) {
         loginSubject = SecureClientLogin.loginUserFromKeytab(userName, keyTabFile);
       } else {
         loginSubject = SecureClientLogin.login(userName);
       }
     } else {
       String password = configHolder.getPassword();
       if (UserGroupInformation.isSecurityEnabled()) {
         loginSubject = SecureClientLogin.loginUserWithPassword(userName, password);
       } else {
         loginSubject = SecureClientLogin.login(userName);
       }
     }
   } catch (IOException ioe) {
     throw new HadoopException("Unable to login to Hadoop environment [" + dataSource + "]", ioe);
   } finally {
     Thread.currentThread().setContextClassLoader(prevCl);
   }
 }
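
Most of the examples on this page share the same gate: check UserGroupInformation.isSecurityEnabled() and perform a Kerberos (keytab) login only when it returns true. A minimal self-contained sketch of that pattern follows; the principal and keytab path are placeholders, not values taken from any example here.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class KerberosLoginSketch {
  /** Logs in from a keytab only when Hadoop security (Kerberos) is enabled. */
  public static void loginIfSecure(Configuration conf) throws IOException {
    UserGroupInformation.setConfiguration(conf);
    if (UserGroupInformation.isSecurityEnabled()) {
      // Placeholder principal and keytab path; replace with real values.
      UserGroupInformation.loginUserFromKeytab(
          "service/_HOST@EXAMPLE.COM", "/etc/security/keytabs/service.keytab");
    }
  }
}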
Example #2
   private void registerWithRM() throws YarnRemoteException {
    this.resourceTracker = getRMClient();
    LOG.info("Connected to ResourceManager at " + this.rmAddress);

    RegisterNodeManagerRequest request =
        recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
    request.setHttpPort(this.httpPort);
    request.setResource(this.totalResource);
    request.setNodeId(this.nodeId);
    RegistrationResponse regResponse =
        this.resourceTracker.registerNodeManager(request).getRegistrationResponse();
     // If the ResourceManager instructs the NM to shut down.
     if (NodeAction.SHUTDOWN.equals(regResponse.getNodeAction())) {
       throw new YarnException(
           "Received SHUTDOWN signal from ResourceManager, registration of NodeManager failed");
    }

    if (UserGroupInformation.isSecurityEnabled()) {
      this.secretKeyBytes = regResponse.getSecretKey().array();
    }

     // do this now so that it's set before we start heartbeating to the RM
    if (UserGroupInformation.isSecurityEnabled()) {
      LOG.info("Security enabled - updating secret keys now");
      // It is expected that status updater is started by this point and
      // RM gives the shared secret in registration during StatusUpdater#start().
      this.containerTokenSecretManager.setSecretKey(
          this.nodeId.toString(), this.getRMNMSharedSecret());
    }
    LOG.info(
        "Registered with ResourceManager as "
            + this.nodeId
            + " with total resource of "
            + this.totalResource);
  }
Example #3
 private void injectToken() throws IOException {
   if (UserGroupInformation.isSecurityEnabled()) {
     Token<DelegationTokenIdentifier> token = params.delegationToken();
     token.setKind(HDFS_DELEGATION_KIND);
     ugi.addToken(token);
   }
 }
Example #4
 /**
  * Obtain the tokens needed by the job and put them in the UGI
  *
  * @param conf
  */
 protected void downloadTokensAndSetupUGI(Configuration conf) {
   try {
     this.currentUser = UserGroupInformation.getCurrentUser();
     if (UserGroupInformation.isSecurityEnabled()) {
       // Read the file-system tokens from the localized tokens-file.
       Path jobSubmitDir =
           FileContext.getLocalFSFileContext()
               .makeQualified(
                   new Path(new File(DragonJobConfig.JOB_SUBMIT_DIR).getAbsolutePath()));
       Path jobTokenFile = new Path(jobSubmitDir, DragonJobConfig.APPLICATION_TOKENS_FILE);
       fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
       LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile=" + jobTokenFile);
       for (Token<? extends TokenIdentifier> tk : fsTokens.getAllTokens()) {
         if (LOG.isDebugEnabled()) {
           LOG.debug(
               "Token of kind "
                   + tk.getKind()
                   + "in current ugi in the AppMaster for service "
                   + tk.getService());
         }
         currentUser.addToken(tk); // For use by AppMaster itself.
       }
     }
   } catch (IOException e) {
     throw new YarnException(e);
   }
 }
Example #5
  /**
    * Verify that configured directories exist, then interactively confirm that formatting is desired
   * for each existing directory and format them.
   *
   * @param conf
   * @param force
   * @return true if formatting was aborted, false otherwise
   * @throws IOException
   */
  private static boolean format(Configuration conf, boolean force, boolean isInteractive)
      throws IOException {
    initializeGenericKeys(conf);
    checkAllowFormat(conf);

    if (UserGroupInformation.isSecurityEnabled()) {
      InetSocketAddress socAddr = getAddress(conf);
      SecurityUtil.login(
          conf, DFS_NAMENODE_KEYTAB_FILE_KEY, DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
    }

    // if clusterID is not provided - see if you can find the current one
    String clusterId = StartupOption.FORMAT.getClusterId();
    if (clusterId == null || clusterId.equals("")) {
      // Generate a new cluster id
      clusterId = StorageInfo.newClusterID();
    }
    System.out.println("Formatting using clusterid: " + clusterId);

    try {
      HdfsStorageFactory.setConfiguration(conf);
      if (force) {
        HdfsStorageFactory.formatStorageNonTransactional();
      } else {
        HdfsStorageFactory.formatStorage();
      }
      StorageInfo.storeStorageInfoToDB(clusterId); // this adds new row to the db
    } catch (StorageException e) {
      throw new RuntimeException(e.getMessage());
    }

    return false;
  }
Example #6
 /**
  * Open the given File for read access, verifying the expected user/group constraints if security
  * is enabled.
  *
  * <p>Note that this function provides no additional checks if Hadoop security is disabled, since
  * doing the checks would be too expensive when native libraries are not available.
  *
  * @param f the file that we are trying to open
  * @param expectedOwner the expected user owner for the file
  * @param expectedGroup the expected group owner for the file
  * @throws IOException if an IO Error occurred, or security is enabled and the user/group does not
  *     match
  */
 public static FileInputStream openForRead(File f, String expectedOwner, String expectedGroup)
     throws IOException {
   if (!UserGroupInformation.isSecurityEnabled()) {
     return new FileInputStream(f);
   }
   return forceSecureOpenForRead(f, expectedOwner, expectedGroup);
 }
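
A hedged usage sketch of the method above (assuming it lives in Hadoop's SecureIOUtils; the file path and the expected owner/group are made up): with security disabled the call degenerates to a plain FileInputStream, while with security enabled the ownership check runs before the stream is returned.

import java.io.File;
import java.io.FileInputStream;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;

public class SecureReadSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical file and expected ownership; adjust to your environment.
    File tokenFile = new File("/var/run/app/container.tokens");
    try (FileInputStream in = SecureIOUtils.openForRead(tokenFile, "yarn", "hadoop")) {
      IOUtils.copyBytes(in, System.out, 4096, false);
    }
  }
}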
Example #7
  private ContainerLaunchContext newContainerLaunchContext(
      Container container, String helixInstanceName) throws IOException {
    Path appWorkDir =
        GobblinClusterUtils.getAppWorkDirPath(this.fs, this.applicationName, this.applicationId);
    Path containerWorkDir =
        new Path(appWorkDir, GobblinYarnConfigurationKeys.CONTAINER_WORK_DIR_NAME);

    Map<String, LocalResource> resourceMap = Maps.newHashMap();

    addContainerLocalResources(
        new Path(appWorkDir, GobblinYarnConfigurationKeys.LIB_JARS_DIR_NAME), resourceMap);
    addContainerLocalResources(
        new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_JARS_DIR_NAME), resourceMap);
    addContainerLocalResources(
        new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_FILES_DIR_NAME), resourceMap);

    if (this.config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_FILES_REMOTE_KEY)) {
      addRemoteAppFiles(
          this.config.getString(GobblinYarnConfigurationKeys.CONTAINER_FILES_REMOTE_KEY),
          resourceMap);
    }

    ContainerLaunchContext containerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
    containerLaunchContext.setLocalResources(resourceMap);
    containerLaunchContext.setEnvironment(
        YarnHelixUtils.getEnvironmentVariables(this.yarnConfiguration));
    containerLaunchContext.setCommands(
        Lists.newArrayList(buildContainerCommand(container, helixInstanceName)));

    if (UserGroupInformation.isSecurityEnabled()) {
      containerLaunchContext.setTokens(this.tokens.duplicate());
    }

    return containerLaunchContext;
  }
Example #8
 private <T> T doWithState(RunningQueryState state, PrivilegedExceptionAction<T> action)
     throws BeeswaxException {
   try {
     UserGroupInformation ugi;
      if (UserGroupInformation.isSecurityEnabled()) {
        ugi =
            UserGroupInformation.createProxyUser(
                state.query.hadoop_user, UserGroupInformation.getLoginUser());
      } else {
        ugi = UserGroupInformation.createRemoteUser(state.query.hadoop_user);
      }
     return ugi.doAs(action);
   } catch (UndeclaredThrowableException e) {
     if (e.getUndeclaredThrowable() instanceof PrivilegedActionException) {
       Throwable bwe = e.getUndeclaredThrowable().getCause();
       if (bwe instanceof BeeswaxException) {
         LOG.error("Caught BeeswaxException", (BeeswaxException) bwe);
         throw (BeeswaxException) bwe;
       }
     }
     LOG.error("Caught unexpected exception.", e);
     throw new BeeswaxException(e.getMessage(), state.handle.log_context, state.handle);
   } catch (IOException e) {
     LOG.error("Caught IOException", e);
     throw new BeeswaxException(e.getMessage(), state.handle.log_context, state.handle);
   } catch (InterruptedException e) {
     LOG.error("Caught InterruptedException", e);
     throw new BeeswaxException(e.getMessage(), state.handle.log_context, state.handle);
   }
 }
Example #9
 protected void register() {
   // Register
   InetSocketAddress serviceAddr = null;
   if (clientService != null) {
     serviceAddr = clientService.getBindAddress();
   }
   try {
     RegisterApplicationMasterRequest request =
         recordFactory.newRecordInstance(RegisterApplicationMasterRequest.class);
     if (serviceAddr != null) {
       request.setHost(serviceAddr.getHostName());
       request.setRpcPort(serviceAddr.getPort());
       request.setTrackingUrl(serviceAddr.getHostName() + ":" + clientService.getHttpPort());
     }
     RegisterApplicationMasterResponse response = scheduler.registerApplicationMaster(request);
     maxContainerCapability = response.getMaximumResourceCapability();
     this.context.getClusterInfo().setMaxContainerCapability(maxContainerCapability);
     if (UserGroupInformation.isSecurityEnabled()) {
       setClientToAMToken(response.getClientToAMTokenMasterKey());
     }
     this.applicationACLs = response.getApplicationACLs();
     LOG.info("maxContainerCapability: " + maxContainerCapability.getMemory());
   } catch (Exception are) {
     LOG.error("Exception while registering", are);
     throw new YarnRuntimeException(are);
   }
 }
Example #10
 @Override
 public void afterPropertiesSet() throws Exception {
   Assert.notNull(configuration, "Yarn configuration must be set");
   Assert.notNull(protocolClazz, "Rpc protocol class must be set");
   if (UserGroupInformation.isSecurityEnabled()) {
     UserGroupInformation.setConfiguration(configuration);
   }
   address = getRpcAddress(configuration);
   proxy = createProxy();
 }
Example #11
 /**
  * Returns the url parameter for the given token string.
  *
  * @param tokenString
  * @return url parameter
  */
 public static String getDelegationTokenUrlParam(String tokenString) {
   if (tokenString == null) {
     return "";
   }
   if (UserGroupInformation.isSecurityEnabled()) {
     return SET_DELEGATION + tokenString;
   } else {
     return "";
   }
 }
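
A short hedged sketch of how the returned fragment is typically consumed; if a helper were added next to the method above (host, path, and token are placeholders), it might look like:

  /** Builds a WebHDFS-style read URL, appending the delegation parameter only when security is on. */
  public static String buildOpenUrl(Token<?> token) throws IOException {
    // Hypothetical host and path; 'token' is assumed to come from FileSystem#getDelegationToken(...).
    return "http://namenode:50070/webhdfs/v1/user/alice/data.txt?op=OPEN"
        + getDelegationTokenUrlParam(token.encodeToUrlString());
  }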
Example #12
  @Test
  public void invalidTokenThrows() {
    when(UserGroupInformation.isSecurityEnabled()).thenReturn(true);
    when(mockProtocolData.getToken()).thenReturn("This is odd");

    try {
      SecuredHDFS.verifyToken(mockProtocolData, mockContext);
      fail("invalid X-GP-TOKEN should throw");
    } catch (SecurityException e) {
      assertEquals("Failed to verify delegation token java.io.EOFException", e.getMessage());
    }
  }
Example #13
  @Test
  public void testGetKerberosPrincipalWithSubstitutedHostNonSecure() throws Exception {
    String principal =
        StartupProperties.get().getProperty(FalconAuthenticationFilter.KERBEROS_PRINCIPAL);
    Configuration conf = new Configuration(false);
    conf.set("hadoop.security.authentication", "simple");
    UserGroupInformation.setConfiguration(conf);
    Assert.assertFalse(UserGroupInformation.isSecurityEnabled());

    FalconAuthenticationFilter filter = new FalconAuthenticationFilter();
    Properties properties = filter.getConfiguration(FalconAuthenticationFilter.FALCON_PREFIX, null);
    Assert.assertEquals(properties.get(KerberosAuthenticationHandler.PRINCIPAL), principal);
  }
Example #14
   protected synchronized void finishApplication(ApplicationId applicationId) {
    if (applicationId == null) {
      LOG.error("RMAppManager received completed appId of null, skipping");
    } else {
      // Inform the DelegationTokenRenewer
      if (UserGroupInformation.isSecurityEnabled()) {
        rmContext.getDelegationTokenRenewer().applicationFinished(applicationId);
      }

      completedApps.add(applicationId);
      completedAppsInStateStore++;
      writeAuditLog(applicationId);
    }
  }
Example #15
 /**
  * Returns the filter configuration properties, including the ones prefixed with {@link
  * #CONF_PREFIX}. The prefix is removed from the returned property names.
  *
  * @param prefix parameter not used.
  * @param config parameter contains the initialization values.
  * @return Hadoop-Auth configuration properties.
  * @throws ServletException
  */
 @Override
 protected Properties getConfiguration(String prefix, FilterConfig config)
     throws ServletException {
   final Properties p = super.getConfiguration(CONF_PREFIX, config);
   // set authentication type
   p.setProperty(
       AUTH_TYPE,
       UserGroupInformation.isSecurityEnabled()
           ? KerberosAuthenticationHandler.TYPE
           : PseudoAuthenticationHandler.TYPE);
   // For Pseudo Authentication, allow anonymous.
   p.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
   // set cookie path
   p.setProperty(COOKIE_PATH, "/");
   return p;
 }
Example #16
   private static void loadSecureStore() throws IOException {
    if (!UserGroupInformation.isSecurityEnabled()) {
      return;
    }

    File file = new File(Constants.Files.CREDENTIALS);
    if (file.exists()) {
      Credentials credentials = new Credentials();
      try (DataInputStream input = new DataInputStream(new FileInputStream(file))) {
        credentials.readTokenStorageStream(input);
      }

      UserGroupInformation.getCurrentUser().addCredentials(credentials);
      LOG.info("Secure store updated from {}", file);
    }
  }
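
The producer side of that credentials file is not shown here; assuming it is written with the standard Hadoop Credentials API, a minimal sketch could look like this (the file name only mirrors Constants.Files.CREDENTIALS by convention and is a placeholder).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class CredentialsWriterSketch {
  /** Dumps the current user's tokens into a file that loadSecureStore() above could later read. */
  public static void writeSecureStore(Configuration conf) throws Exception {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    // Placeholder path; the consumer reads it back with Credentials#readTokenStorageStream.
    credentials.writeTokenStorageFile(new Path("credentials.store"), conf);
  }
}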
Example #17
  /** Obtain Kerberos security token for HBase. */
  private static void obtainTokenForHBase(Credentials credentials, Configuration conf)
      throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
      LOG.info("Attempting to obtain Kerberos security token for HBase");
      try {
        // ----
        // Intended call: HBaseConfiguration.addHbaseResources(conf);
        Class.forName("org.apache.hadoop.hbase.HBaseConfiguration")
            .getMethod("addHbaseResources", Configuration.class)
            .invoke(null, conf);
        // ----

        LOG.info("HBase security setting: {}", conf.get("hbase.security.authentication"));

        if (!"kerberos".equals(conf.get("hbase.security.authentication"))) {
          LOG.info("HBase has not been configured to use Kerberos.");
          return;
        }

        LOG.info("Obtaining Kerberos security token for HBase");
        // ----
        // Intended call: Token<AuthenticationTokenIdentifier> token = TokenUtil.obtainToken(conf);
        Token<?> token =
            (Token<?>)
                Class.forName("org.apache.hadoop.hbase.security.token.TokenUtil")
                    .getMethod("obtainToken", Configuration.class)
                    .invoke(null, conf);
        // ----

        if (token == null) {
          LOG.error("No Kerberos security token for HBase available");
          return;
        }

        credentials.addToken(token.getService(), token);
        LOG.info("Added HBase Kerberos security token to credentials.");
      } catch (ClassNotFoundException
          | NoSuchMethodException
          | IllegalAccessException
          | InvocationTargetException e) {
        LOG.info(
            "HBase is not available (not packaged with this application): {} : \"{}\".",
            e.getClass().getSimpleName(),
            e.getMessage());
      }
    }
  }
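
For reference, a hedged non-reflective equivalent of the calls named in the "Intended call" comments above, assuming HBase is on the classpath (newer HBase releases replace TokenUtil.obtainToken(Configuration) with connection-based variants):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
import org.apache.hadoop.hbase.security.token.TokenUtil;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class HBaseTokenSketch {
  /** Direct (non-reflective) form of the token acquisition sketched above. */
  static void obtainTokenDirectly(Credentials credentials, Configuration conf) throws Exception {
    HBaseConfiguration.addHbaseResources(conf);
    if ("kerberos".equals(conf.get("hbase.security.authentication"))) {
      Token<AuthenticationTokenIdentifier> token = TokenUtil.obtainToken(conf);
      credentials.addToken(token.getService(), token);
    }
  }
}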
Example #18
 private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi, Configuration conf)
     throws IOException {
   if (UserGroupInformation.isSecurityEnabled()) {
     DelegationTokenIdentifier dtId =
         new DelegationTokenIdentifier(new Text(ugi.getUserName()), null, null);
     FSNamesystem namesystem = mock(FSNamesystem.class);
     DelegationTokenSecretManager dtSecretManager =
         new DelegationTokenSecretManager(86400000, 86400000, 86400000, 86400000, namesystem);
     dtSecretManager.startThreads();
     Token<DelegationTokenIdentifier> token =
         new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
     SecurityUtil.setTokenService(token, NetUtils.createSocketAddr(uri.getAuthority()));
     token.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
     ugi.addToken(token);
   }
   return (WebHdfsFileSystem) FileSystem.get(uri, conf);
 }
Example #19
  @Override
  public synchronized void init(HiveConf hiveConf) {
    this.hiveConf = hiveConf;
    sessionManager = new SessionManager(hiveServer2);
    defaultFetchRows = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_RESULTSET_DEFAULT_FETCH_SIZE);
    addService(sessionManager);
    //  If the hadoop cluster is secure, do a kerberos login for the service from the keytab
    if (UserGroupInformation.isSecurityEnabled()) {
      try {
        HiveAuthFactory.loginFromKeytab(hiveConf);
        this.serviceUGI = Utils.getUGI();
      } catch (IOException e) {
        throw new ServiceException("Unable to login to kerberos with given principal/keytab", e);
      } catch (LoginException e) {
        throw new ServiceException("Unable to login to kerberos with given principal/keytab", e);
      }

      // Also try creating a UGI object for the SPNego principal
      String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL);
      String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB);
      if (principal.isEmpty() || keyTabFile.isEmpty()) {
        LOG.info(
            "SPNego httpUGI not created, spNegoPrincipal: "
                + principal
                + ", ketabFile: "
                + keyTabFile);
      } else {
        try {
          this.httpUGI = HiveAuthFactory.loginFromSpnegoKeytabAndReturnUGI(hiveConf);
          LOG.info("SPNego httpUGI successfully created.");
        } catch (IOException e) {
          LOG.warn("SPNego httpUGI creation failed: ", e);
        }
      }
    }
    // creates connection to HMS and thus *must* occur after kerberos login above
    try {
      applyAuthorizationConfigPolicy(hiveConf);
    } catch (Exception e) {
      throw new RuntimeException(
          "Error applying authorization policy on hive configuration: " + e.getMessage(), e);
    }
    setupBlockedUdfs();
    super.init(hiveConf);
  }
Example #20
  /**
   * The function will get the token information from parameters and call SecuredHDFS to verify the
   * token.
   *
   * <p>All token properties will be deserialized from string to a Token object
   *
   * @param protData input parameters
   * @param context servlet context which contains the NN address
   * @throws SecurityException Thrown when authentication fails
   */
  public static void verifyToken(ProtocolData protData, ServletContext context) {
    try {
      if (UserGroupInformation.isSecurityEnabled()) {
        Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
        String tokenString = protData.getToken();
        token.decodeFromUrlString(tokenString);

        verifyToken(
            token.getIdentifier(),
            token.getPassword(),
            token.getKind(),
            token.getService(),
            context);
      }
    } catch (IOException e) {
      throw new SecurityException("Failed to verify delegation token " + e, e);
    }
  }
Example #21
 public static void initProcessSecurity(Configuration conf)
     throws IOException, BadConfigException {
   log.info("Secure mode with kerberos realm {}", HoyaUtils.getKerberosRealm());
   // this gets UGI to reset its previous world view (i.e simple auth)
   // security
   SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
   UserGroupInformation.setConfiguration(conf);
   UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
   log.debug("Authenticating as " + ugi.toString());
   log.debug("Login user is {}", UserGroupInformation.getLoginUser());
   if (!UserGroupInformation.isSecurityEnabled()) {
     throw new BadConfigException(
         "Although secure mode is enabled,"
             + "the application has already set up its user as an insecure entity %s",
         ugi);
   }
   HoyaUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL);
   HoyaUtils.verifyPrincipalSet(conf, DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);
 }
Example #22
   private URI redirectURI(
      final NameNode namenode,
      final UserGroupInformation ugi,
      final DelegationParam delegation,
      final UserParam username,
      final DoAsParam doAsUser,
      final String path,
      final HttpOpParam.Op op,
      final long openOffset,
      final long blocksize,
      final Param<?, ?>... parameters)
      throws URISyntaxException, IOException {
    final Configuration conf = (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
    final DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset, blocksize, conf);

    final String delegationQuery;
    if (!UserGroupInformation.isSecurityEnabled()) {
      // security disabled
      delegationQuery = Param.toSortedString("&", doAsUser, username);
    } else if (delegation.getValue() != null) {
      // client has provided a token
      delegationQuery = "&" + delegation;
    } else {
      // generate a token
      final Token<? extends TokenIdentifier> t =
          generateDelegationToken(namenode, ugi, request.getUserPrincipal().getName());
      delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
    }
    final String query =
        op.toQueryString()
            + delegationQuery
            + "&"
            + new NamenodeRpcAddressParam(namenode)
            + Param.toSortedString("&", parameters);
    final String uripath = WebHdfsFileSystem.PATH_PREFIX + path;

    final URI uri = new URI("http", null, dn.getHostName(), dn.getInfoPort(), uripath, query, null);
    if (LOG.isTraceEnabled()) {
      LOG.trace("redirectURI=" + uri);
    }
    return uri;
  }
Example #23
  /**
   * Ensure that we are set up to run with the appropriate native support code. If security is
   * disabled, and the support code is unavailable, this class still tries its best to be secure,
   * but is vulnerable to some race condition attacks.
   *
   * <p>If security is enabled but the support code is unavailable, throws a RuntimeException since
   * we don't want to run insecurely.
   */
  static {
    boolean shouldBeSecure = UserGroupInformation.isSecurityEnabled();
    boolean canBeSecure = NativeIO.isAvailable();

    if (!canBeSecure && shouldBeSecure) {
      throw new RuntimeException("Secure IO is not possible without native code extensions.");
    }

    // Pre-cache an instance of the raw FileSystem since we sometimes
    // do secure IO in a shutdown hook, where this call could fail.
    try {
      rawFilesystem = FileSystem.getLocal(new Configuration()).getRaw();
    } catch (IOException ie) {
      throw new RuntimeException("Couldn't obtain an instance of RawLocalFileSystem.");
    }

    // SecureIO just skips security checks in the case that security is
    // disabled
    skipSecurity = !canBeSecure;
  }
Example #24
  @SuppressWarnings("unchecked")
  protected void submitApplication(
      ApplicationSubmissionContext submissionContext, long submitTime, String user)
      throws YarnException {
    ApplicationId applicationId = submissionContext.getApplicationId();

    RMAppImpl application = createAndPopulateNewRMApp(submissionContext, submitTime, user, false);
    ApplicationId appId = submissionContext.getApplicationId();
    Credentials credentials = null;
    try {
      credentials = parseCredentials(submissionContext);
      if (UserGroupInformation.isSecurityEnabled()) {
        this.rmContext
            .getDelegationTokenRenewer()
            .addApplicationAsync(
                appId,
                credentials,
                submissionContext.getCancelTokensWhenComplete(),
                application.getUser());
      } else {
        // Dispatcher is not yet started at this time, so these START events
        // enqueued should be guaranteed to be first processed when dispatcher
        // gets started.
        this.rmContext
            .getDispatcher()
            .getEventHandler()
            .handle(new RMAppEvent(applicationId, RMAppEventType.START));
      }
    } catch (Exception e) {
      LOG.warn("Unable to parse credentials.", e);
      // Sending APP_REJECTED is fine, since we assume that the
      // RMApp is in NEW state and thus we haven't yet informed the
      // scheduler about the existence of the application
      assert application.getState() == RMAppState.NEW;
      this.rmContext
          .getDispatcher()
          .getEventHandler()
          .handle(new RMAppRejectedEvent(applicationId, e.getMessage()));
      throw RPCUtil.getRemoteException(e);
    }
  }
Example #25
 public static void printGotoForm(
     JspWriter out, int namenodeInfoPort, String tokenString, String file) throws IOException {
   out.print("<form action=\"browseDirectory.jsp\" method=\"get\" name=\"goto\">");
   out.print("Goto : ");
   out.print("<input name=\"dir\" type=\"text\" width=\"50\" id\"dir\" value=\"" + file + "\">");
   out.print("<input name=\"go\" type=\"submit\" value=\"go\">");
   out.print(
       "<input name=\"namenodeInfoPort\" type=\"hidden\" "
           + "value=\""
           + namenodeInfoPort
           + "\">");
   if (UserGroupInformation.isSecurityEnabled()) {
     out.print(
         "<input name=\""
             + DELEGATION_PARAMETER_NAME
             + "\" type=\"hidden\" value=\""
             + tokenString
             + "\">");
   }
   out.print("</form>");
 }
Example #26
   @SuppressWarnings("unchecked")
  protected void recoverApplication(ApplicationState appState, RMState rmState) throws Exception {
    ApplicationSubmissionContext appContext = appState.getApplicationSubmissionContext();
    ApplicationId appId = appState.getAppId();

    // create and recover app.
    RMAppImpl application =
        createAndPopulateNewRMApp(appContext, appState.getSubmitTime(), appState.getUser());
    application.recover(rmState);
    if (isApplicationInFinalState(appState.getState())) {
      // We are synchronously moving the application into final state so that
      // momentarily client will not see this application in NEW state. Also
      // for finished applications we will avoid renewing tokens.
      application.handle(new RMAppEvent(appId, RMAppEventType.RECOVER));
      return;
    }

    if (UserGroupInformation.isSecurityEnabled()) {
      Credentials credentials = null;
      try {
        credentials = parseCredentials(appContext);
        // synchronously renew delegation token on recovery.
        rmContext
            .getDelegationTokenRenewer()
            .addApplicationSync(appId, credentials, appContext.getCancelTokensWhenComplete());
        application.handle(new RMAppEvent(appId, RMAppEventType.RECOVER));
      } catch (Exception e) {
        LOG.warn("Unable to parse and renew delegation tokens.", e);
        this.rmContext
            .getDispatcher()
            .getEventHandler()
            .handle(new RMAppRejectedEvent(appId, e.getMessage()));
        throw e;
      }
    } else {
      application.handle(new RMAppEvent(appId, RMAppEventType.RECOVER));
    }
  }
Example #27
 /**
  * Get a protocol proxy that contains a proxy connection to a remote server and a set of methods
  * that are supported by the server
  *
  * @param protocol protocol
  * @param clientVersion client's version
  * @param addr server address
  * @param ticket security ticket
  * @param conf configuration
  * @param factory socket factory
  * @param rpcTimeout max time for each rpc; 0 means no timeout
  * @return the proxy
  * @throws IOException if any error occurs
  */
 public static <T> ProtocolProxy<T> getProtocolProxy(
     Class<T> protocol,
     long clientVersion,
     InetSocketAddress addr,
     UserGroupInformation ticket,
     Configuration conf,
     SocketFactory factory,
     int rpcTimeout,
     RetryPolicy connectionRetryPolicy)
     throws IOException {
   if (UserGroupInformation.isSecurityEnabled()) {
     SaslRpcServer.init(conf);
   }
   return getProtocolEngine(protocol, conf)
       .getProxy(
           protocol,
           clientVersion,
           addr,
           ticket,
           conf,
           factory,
           rpcTimeout,
           connectionRetryPolicy);
 }
Example #28
   @Test
  public void testGetKerberosPrincipalWithSubstitutedHostSecure() throws Exception {
    String principal =
        StartupProperties.get().getProperty(FalconAuthenticationFilter.KERBEROS_PRINCIPAL);

    String expectedPrincipal =
        "falcon/" + SecurityUtil.getLocalHostName().toLowerCase() + "@Example.com";
    try {
      Configuration conf = new Configuration(false);
      conf.set("hadoop.security.authentication", "kerberos");
      UserGroupInformation.setConfiguration(conf);
      Assert.assertTrue(UserGroupInformation.isSecurityEnabled());

      StartupProperties.get()
          .setProperty(FalconAuthenticationFilter.KERBEROS_PRINCIPAL, "falcon/_HOST@Example.com");
      FalconAuthenticationFilter filter = new FalconAuthenticationFilter();
      Properties properties =
          filter.getConfiguration(FalconAuthenticationFilter.FALCON_PREFIX, null);
      Assert.assertEquals(
          properties.get(KerberosAuthenticationHandler.PRINCIPAL), expectedPrincipal);
    } finally {
      StartupProperties.get().setProperty(FalconAuthenticationFilter.KERBEROS_PRINCIPAL, principal);
    }
  }
Example #29
  public int executeInChildVM(DriverContext driverContext) {
    // execute in child jvm
    try {
      // generate the cmd line to run in the child jvm
      Context ctx = driverContext.getCtx();
      String hiveJar = conf.getJar();

      String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOPBIN);
      conf.setVar(
          ConfVars.HIVEADDEDJARS, Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR));
      // write out the plan to a local file
      Path planPath = new Path(ctx.getLocalTmpPath(), "plan.xml");
      MapredLocalWork plan = getWork();
      LOG.info("Generating plan file " + planPath.toString());

      OutputStream out = null;
      try {
        out = FileSystem.getLocal(conf).create(planPath);
        SerializationUtilities.serializePlan(plan, out);
        out.close();
        out = null;
      } finally {
        IOUtils.closeQuietly(out);
      }

      String isSilent = "true".equalsIgnoreCase(System.getProperty("test.silent")) ? "-nolog" : "";

      String jarCmd;

      jarCmd = hiveJar + " " + ExecDriver.class.getName();
      String hiveConfArgs = ExecDriver.generateCmdLine(conf, ctx);
      String cmdLine =
          hadoopExec
              + " jar "
              + jarCmd
              + " -localtask -plan "
              + planPath.toString()
              + " "
              + isSilent
              + " "
              + hiveConfArgs;

      String workDir = (new File(".")).getCanonicalPath();
      String files = Utilities.getResourceFiles(conf, SessionState.ResourceType.FILE);

      if (!files.isEmpty()) {
        cmdLine = cmdLine + " -files " + files;

        workDir = ctx.getLocalTmpPath().toUri().getPath();

        if (!(new File(workDir)).mkdir()) {
          throw new IOException("Cannot create tmp working dir: " + workDir);
        }

        for (String f : StringUtils.split(files, ',')) {
          Path p = new Path(f);
          String target = p.toUri().getPath();
          String link = workDir + Path.SEPARATOR + p.getName();
          if (FileUtil.symLink(target, link) != 0) {
            throw new IOException("Cannot link to added file: " + target + " from: " + link);
          }
        }
      }

      // Inherit Java system variables
      String hadoopOpts;
      StringBuilder sb = new StringBuilder();
      Properties p = System.getProperties();
      for (String element : HIVE_SYS_PROP) {
        if (p.containsKey(element)) {
          sb.append(" -D" + element + "=" + p.getProperty(element));
        }
      }
      hadoopOpts = sb.toString();
      // Inherit the environment variables
      String[] env;
      Map<String, String> variables = new HashMap<String, String>(System.getenv());
      // The user can specify the hadoop memory

      // if ("local".equals(conf.getVar(HiveConf.ConfVars.HADOOPJT))) {
      // if we are running in local mode - then the amount of memory used
      // by the child jvm can no longer default to the memory used by the
      // parent jvm
      // int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM);
      int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM);
      if (hadoopMem == 0) {
        // remove env var that would default child jvm to use parent's memory
        // as default. child jvm would use default memory for a hadoop client
        variables.remove(HADOOP_MEM_KEY);
      } else {
        // user specified the memory for local mode hadoop run
        console.printInfo(" set heap size\t" + hadoopMem + "MB");
        variables.put(HADOOP_MEM_KEY, String.valueOf(hadoopMem));
      }
      // } else {
      // nothing to do - we are not running in local mode - only submitting
      // the job via a child process. in this case it's appropriate that the
      // child jvm use the same memory as the parent jvm

      // }

      // Set HADOOP_USER_NAME env variable for child process, so that
      // it also runs with hadoop permissions for the user the job is running as
      // This will be used by Hadoop only in unsecure (non-Kerberos) mode.
      String endUserName = Utils.getUGI().getShortUserName();
      LOG.debug("setting HADOOP_USER_NAME\t" + endUserName);
      variables.put("HADOOP_USER_NAME", endUserName);

      if (variables.containsKey(HADOOP_OPTS_KEY)) {
        variables.put(HADOOP_OPTS_KEY, variables.get(HADOOP_OPTS_KEY) + hadoopOpts);
      } else {
        variables.put(HADOOP_OPTS_KEY, hadoopOpts);
      }

      // For Windows OS, we need to pass HIVE_HADOOP_CLASSPATH Java parameter while starting
      // Hiveserver2 using "-hiveconf hive.hadoop.classpath=%HIVE_LIB%". This is to combine path(s).
      if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_HADOOP_CLASSPATH) != null) {
        if (variables.containsKey("HADOOP_CLASSPATH")) {
          variables.put(
              "HADOOP_CLASSPATH",
              variables.get("HADOOP_CLASSPATH")
                  + ";"
                  + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_HADOOP_CLASSPATH));
        } else {
          variables.put(
              "HADOOP_CLASSPATH", HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_HADOOP_CLASSPATH));
        }
      }

      if (variables.containsKey(MapRedTask.HIVE_DEBUG_RECURSIVE)) {
        MapRedTask.configureDebugVariablesForChildJVM(variables);
      }

      if (UserGroupInformation.isSecurityEnabled() && UserGroupInformation.isLoginKeytabBased()) {
        // If kerberos security is enabled, and HS2 doAs is enabled,
        // then additional params need to be set so that the command is run as
        // intended user
        secureDoAs = new SecureCmdDoAs(conf);
        secureDoAs.addEnv(variables);
      }

      // If HIVE_LOCAL_TASK_CHILD_OPTS is set, child VM environment setting
      // HADOOP_CLIENT_OPTS will be replaced with HIVE_LOCAL_TASK_CHILD_OPTS.
      // HADOOP_OPTS is updated too since HADOOP_CLIENT_OPTS is appended
      // to HADOOP_OPTS in most cases. This way, the local task JVM can
      // have different settings from those of HiveServer2.
      if (variables.containsKey(HIVE_LOCAL_TASK_CHILD_OPTS_KEY)) {
        String childOpts = variables.get(HIVE_LOCAL_TASK_CHILD_OPTS_KEY);
        if (childOpts == null) {
          childOpts = "";
        }
        String clientOpts = variables.put(HADOOP_CLIENT_OPTS, childOpts);
        String tmp = variables.get(HADOOP_OPTS_KEY);
        if (tmp != null && !StringUtils.isBlank(clientOpts)) {
          tmp = tmp.replace(clientOpts, childOpts);
          variables.put(HADOOP_OPTS_KEY, tmp);
        }
      }

      env = new String[variables.size()];
      int pos = 0;
      for (Map.Entry<String, String> entry : variables.entrySet()) {
        String name = entry.getKey();
        String value = entry.getValue();
        env[pos++] = name + "=" + value;
        LOG.debug("Setting env: " + env[pos - 1]);
      }

      LOG.info("Executing: " + cmdLine);

      // Run ExecDriver in another JVM
      executor = Runtime.getRuntime().exec(cmdLine, env, new File(workDir));

      CachingPrintStream errPrintStream = new CachingPrintStream(System.err);

      StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
      StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, errPrintStream);

      outPrinter.start();
      errPrinter.start();

      int exitVal = jobExecHelper.progressLocal(executor, getId());

      // wait for stream threads to finish
      outPrinter.join();
      errPrinter.join();

      if (exitVal != 0) {
        LOG.error("Execution failed with exit status: " + exitVal);
        if (SessionState.get() != null) {
          SessionState.get().addLocalMapRedErrors(getId(), errPrintStream.getOutput());
        }
      } else {
        LOG.info("Execution completed successfully");
      }

      return exitVal;
    } catch (Exception e) {
      LOG.error("Exception: " + e, e);
      return (1);
    } finally {
      if (secureDoAs != null) {
        secureDoAs.close();
      }
    }
  }
Example #30
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws IOException, HiveException {

    String planFileName = null;
    String jobConfFileName = null;
    boolean noLog = false;
    String files = null;
    boolean localtask = false;
    try {
      for (int i = 0; i < args.length; i++) {
        if (args[i].equals("-plan")) {
          planFileName = args[++i];
        } else if (args[i].equals("-jobconffile")) {
          jobConfFileName = args[++i];
        } else if (args[i].equals("-nolog")) {
          noLog = true;
        } else if (args[i].equals("-files")) {
          files = args[++i];
        } else if (args[i].equals("-localtask")) {
          localtask = true;
        }
      }
    } catch (IndexOutOfBoundsException e) {
      System.err.println("Missing argument to option");
      printUsage();
    }

    JobConf conf;
    if (localtask) {
      conf = new JobConf(MapredLocalTask.class);
    } else {
      conf = new JobConf(ExecDriver.class);
    }

    if (jobConfFileName != null) {
      conf.addResource(new Path(jobConfFileName));
    }

    if (files != null) {
      conf.set("tmpfiles", files);
    }

    if (UserGroupInformation.isSecurityEnabled()) {
      String hadoopAuthToken = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
      if (hadoopAuthToken != null) {
        conf.set("mapreduce.job.credentials.binary", hadoopAuthToken);
      }
    }

    boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT);

    String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID, "").trim();
    if (queryId.isEmpty()) {
      queryId = "unknown-" + System.currentTimeMillis();
    }
    System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId);

    if (noLog) {
      // If started from main(), and noLog is on, we should not output
      // any logs. To turn the log on, please set -Dtest.silent=false
      org.apache.logging.log4j.Logger logger = org.apache.logging.log4j.LogManager.getRootLogger();
      NullAppender appender = NullAppender.createNullAppender();
      appender.addToLogger(logger.getName(), Level.ERROR);
      appender.start();
    } else {
      setupChildLog4j(conf);
    }

    Logger LOG = LoggerFactory.getLogger(ExecDriver.class.getName());
    LogHelper console = new LogHelper(LOG, isSilent);

    if (planFileName == null) {
      console.printError("Must specify Plan File Name");
      printUsage();
    }

    // print out the location of the log file for the user so
    // that it's easy to find the reason for local-mode execution failures
    for (Appender appender :
        ((org.apache.logging.log4j.core.Logger) LogManager.getRootLogger())
            .getAppenders()
            .values()) {
      if (appender instanceof FileAppender) {
        console.printInfo("Execution log at: " + ((FileAppender) appender).getFileName());
      } else if (appender instanceof RollingFileAppender) {
        console.printInfo("Execution log at: " + ((RollingFileAppender) appender).getFileName());
      }
    }

    // the plan file should always be in local directory
    Path p = new Path(planFileName);
    FileSystem fs = FileSystem.getLocal(conf);
    InputStream pathData = fs.open(p);

    // This is a workaround for hadoop-17: libjars are not added to the classpath of the
    // child process, so we add them here explicitly.

    String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS);
    String addedJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEADDEDJARS);
    try {
      // see also - code in CliDriver.java
      ClassLoader loader = conf.getClassLoader();
      if (StringUtils.isNotBlank(auxJars)) {
        loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ","));
      }
      if (StringUtils.isNotBlank(addedJars)) {
        loader = Utilities.addToClassPath(loader, StringUtils.split(addedJars, ","));
      }
      conf.setClassLoader(loader);
      // Also set this to the Thread ContextClassLoader, so new threads will
      // inherit
      // this class loader, and propagate into newly created Configurations by
      // those
      // new threads.
      Thread.currentThread().setContextClassLoader(loader);
    } catch (Exception e) {
      throw new HiveException(e.getMessage(), e);
    }
    int ret;
    if (localtask) {
      memoryMXBean = ManagementFactory.getMemoryMXBean();
      MapredLocalWork plan = Utilities.deserializePlan(pathData, MapredLocalWork.class, conf);
      MapredLocalTask ed = new MapredLocalTask(plan, conf, isSilent);
      ret = ed.executeInProcess(new DriverContext());

    } else {
      MapredWork plan = Utilities.deserializePlan(pathData, MapredWork.class, conf);
      ExecDriver ed = new ExecDriver(plan, conf, isSilent);
      ret = ed.execute(new DriverContext());
    }

    if (ret != 0) {
      System.exit(ret);
    }
  }