public static void setTokensFor(
    ContainerLaunchContext amContainer, List<Path> paths, Configuration conf) throws IOException {
  Credentials credentials = new Credentials();
  // for HDFS
  TokenCache.obtainTokensForNamenodes(credentials, paths.toArray(new Path[0]), conf);
  // for HBase
  obtainTokenForHBase(credentials, conf);
  // for user
  UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();

  Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
  for (Token<? extends TokenIdentifier> token : usrTok) {
    final Text id = new Text(token.getIdentifier());
    LOG.info("Adding user token " + id + " with " + token);
    credentials.addToken(id, token);
  }

  try (DataOutputBuffer dob = new DataOutputBuffer()) {
    credentials.writeTokenStorageToStream(dob);

    if (LOG.isDebugEnabled()) {
      LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());
    }

    ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    amContainer.setTokens(securityTokens);
  }
}
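// A minimal usage sketch (an assumption, not part of the original code): a client
// preparing an application master container might call setTokensFor right before
// submitting the application. The staging path and configuration are hypothetical.
ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
List<Path> tokenPaths = Collections.singletonList(new Path("hdfs:///user/someuser/staging"));
Configuration conf = new Configuration();
setTokensFor(amContainer, tokenPaths, conf); // serialized credentials end up in the CLC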
/**
 * Verify the token with the NameNode if available and create a UserGroupInformation.
 *
 * <p>Code in this function is copied from JspHelper.getTokenUGI
 *
 * @param identifier Delegation token identifier
 * @param password Delegation token password
 * @param kind the kind of token
 * @param service the service for this token
 * @param servletContext Jetty servlet context which contains the NN address
 * @throws SecurityException Thrown when authentication fails
 */
private static void verifyToken(
    byte[] identifier, byte[] password, Text kind, Text service, ServletContext servletContext) {
  try {
    Token<DelegationTokenIdentifier> token =
        new Token<DelegationTokenIdentifier>(identifier, password, kind, service);

    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
    DataInputStream in = new DataInputStream(buf);
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(in);

    final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(servletContext);
    if (nn != null) {
      nn.getNamesystem().verifyToken(id, token.getPassword());
    }

    UserGroupInformation userGroupInformation = id.getUser();
    userGroupInformation.addToken(token);
    LOG.debug(
        "user "
            + userGroupInformation.getUserName()
            + " ("
            + userGroupInformation.getShortUserName()
            + ") authenticated");

    // re-login if necessary
    userGroupInformation.checkTGTAndReloginFromKeytab();
  } catch (IOException e) {
    throw new SecurityException("Failed to verify delegation token " + e, e);
  }
}
private void injectToken() throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    Token<DelegationTokenIdentifier> token = params.delegationToken();
    token.setKind(HDFS_DELEGATION_KIND);
    ugi.addToken(token);
  }
}
private static long renewDelegationToken(DelegationTokenToRenew dttr) throws Exception {
  long newExpirationDate = System.currentTimeMillis() + 3600 * 1000;
  Token<DelegationTokenIdentifier> token = dttr.token;
  Configuration conf = dttr.conf;

  if (token.getKind().equals(kindHdfs)) {
    try {
      DistributedFileSystem dfs = getDFSForToken(token, conf);
      newExpirationDate = dfs.renewDelegationToken(token);
    } catch (InvalidToken ite) {
      LOG.warn("invalid token - not scheduling for renew");
      removeFailedDelegationToken(dttr);
      throw new IOException("failed to renew token", ite);
    } catch (AccessControlException ioe) {
      LOG.warn("failed to renew token:" + token, ioe);
      removeFailedDelegationToken(dttr);
      throw new IOException("failed to renew token", ioe);
    } catch (Exception e) {
      LOG.warn("failed to renew token:" + token, e);
      // returns default expiration date
    }
  } else {
    throw new Exception("unknown token type to renew: " + token.getKind());
  }
  return newExpirationDate;
}
/**
 * Obtain the tokens needed by the job and put them in the UGI.
 *
 * @param conf the job configuration
 */
protected void downloadTokensAndSetupUGI(Configuration conf) {
  try {
    this.currentUser = UserGroupInformation.getCurrentUser();
    if (UserGroupInformation.isSecurityEnabled()) {
      // Read the file-system tokens from the localized tokens-file.
      Path jobSubmitDir =
          FileContext.getLocalFSFileContext()
              .makeQualified(
                  new Path(new File(DragonJobConfig.JOB_SUBMIT_DIR).getAbsolutePath()));
      Path jobTokenFile = new Path(jobSubmitDir, DragonJobConfig.APPLICATION_TOKENS_FILE);
      fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
      LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile=" + jobTokenFile);

      for (Token<? extends TokenIdentifier> tk : fsTokens.getAllTokens()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              "Token of kind "
                  + tk.getKind()
                  + " in current ugi in the AppMaster for service "
                  + tk.getService());
        }
        currentUser.addToken(tk); // For use by AppMaster itself.
      }
    }
  } catch (IOException e) {
    throw new YarnException(e);
  }
}
private void requestNewHdfsDelegationToken(
    ApplicationId applicationId, String user, boolean shouldCancelAtEnd)
    throws IOException, InterruptedException {
  // Get new hdfs tokens for this user
  Credentials credentials = new Credentials();
  Token<?>[] newTokens = obtainSystemTokensForUser(user, credentials);

  // Add new tokens to the toRenew list.
  LOG.info(
      "Received new tokens for " + applicationId + ". Received " + newTokens.length + " tokens.");
  if (newTokens.length > 0) {
    for (Token<?> token : newTokens) {
      if (token.isManaged()) {
        DelegationTokenToRenew tokenToRenew =
            new DelegationTokenToRenew(
                applicationId, token, getConfig(), Time.now(), shouldCancelAtEnd, user);
        // renew the token to get the next expiration date.
        renewToken(tokenToRenew);
        setTimerForTokenRenewal(tokenToRenew);
        appTokens.get(applicationId).add(tokenToRenew);
        LOG.info("Received new token " + token);
      }
    }
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  ByteBuffer byteBuffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  rmContext.getSystemCredentialsForApps().put(applicationId, byteBuffer);
}
private void addRMDelegationToken(final String renewer, final Credentials credentials)
    throws IOException, YarnException {
  // Get the ResourceManager delegation rmToken
  final org.apache.hadoop.yarn.api.records.Token rmDelegationToken =
      yarnClient.getRMDelegationToken(new Text(renewer));

  Token<RMDelegationTokenIdentifier> token;
  // TODO: Use the utility method getRMDelegationTokenService in ClientRMProxy to remove the
  // separate handling of HA and non-HA cases when hadoop dependency is changed to
  // hadoop 2.4 or above
  if (conf.getBoolean(RM_HA_ENABLED, DEFAULT_RM_HA_ENABLED)) {
    LOG.info("Yarn Resource Manager HA is enabled");
    token = getRMHAToken(rmDelegationToken);
  } else {
    LOG.info("Yarn Resource Manager HA is not enabled");
    InetSocketAddress rmAddress =
        conf.getSocketAddr(
            YarnConfiguration.RM_ADDRESS,
            YarnConfiguration.DEFAULT_RM_ADDRESS,
            YarnConfiguration.DEFAULT_RM_PORT);
    token = ConverterUtils.convertFromYarn(rmDelegationToken, rmAddress);
  }

  LOG.info("RM dt {}", token);
  credentials.addToken(token.getService(), token);
}
@Test
public void testGetTokensForViewFS() throws IOException, URISyntaxException {
  Configuration conf = new Configuration(jConf);
  FileSystem dfs = dfsCluster.getFileSystem();
  String serviceName = dfs.getCanonicalServiceName();

  Path p1 = new Path("/mount1");
  Path p2 = new Path("/mount2");
  p1 = dfs.makeQualified(p1);
  p2 = dfs.makeQualified(p2);

  conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
  conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
  Credentials credentials = new Credentials();
  Path lp1 = new Path("viewfs:///dir1");
  Path lp2 = new Path("viewfs:///dir2");
  Path[] paths = new Path[] {lp1, lp2};
  TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);
  Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
  assertEquals("number of tokens is not 1", 1, tns.size());

  boolean found = false;
  for (Token<? extends TokenIdentifier> tt : tns) {
    System.out.println("token=" + tt);
    if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
        && tt.getService().equals(new Text(serviceName))) {
      found = true;
    }
  }
  assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
}
@Test
public void testGetTokensForNamenodes() throws IOException {
  Credentials credentials = new Credentials();
  TokenCache.obtainTokensForNamenodesInternal(credentials, new Path[] {p1, p2}, jConf);

  // this token is keyed by hostname:port key.
  String fs_addr = SecurityUtil.buildDTServiceName(p1.toUri(), NameNode.DEFAULT_PORT);
  Token<DelegationTokenIdentifier> nnt = TokenCache.getDelegationToken(credentials, fs_addr);
  System.out.println("dt for " + p1 + "(" + fs_addr + ")" + " = " + nnt);
  assertNotNull("Token for nn is null", nnt);

  // verify the size
  Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
  assertEquals("number of tokens is not 1", 1, tns.size());

  boolean found = false;
  for (Token<? extends TokenIdentifier> t : tns) {
    if (t.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
        && t.getService().equals(new Text(fs_addr))) {
      found = true;
    }
  }
  assertTrue("didn't find token for " + p1, found);
}
private static DistributedFileSystem getDFSForToken(
    Token<DelegationTokenIdentifier> token, final Configuration conf) throws Exception {
  DistributedFileSystem dfs = null;
  try {
    // TODO: The service is usually an IP address:port. We convert
    // it to a dns name and then obtain the filesystem just so that
    // we reuse the existing filesystem handle (that the jobtracker
    // might have for this namenode; the namenode is usually
    // specified as the dns name in the jobtracker).
    // THIS IS A WORKAROUND FOR NOW. NEED TO SOLVE THIS PROBLEM
    // IN A BETTER WAY.
    String[] ipaddr = token.getService().toString().split(":");
    InetAddress iaddr = InetAddress.getByName(ipaddr[0]);
    String dnsName = iaddr.getCanonicalHostName();
    final URI uri = new URI(SCHEME + "://" + dnsName + ":" + ipaddr[1]);
    dfs =
        (DistributedFileSystem)
            UserGroupInformation.getLoginUser()
                .doAs(
                    new PrivilegedExceptionAction<DistributedFileSystem>() {
                      public DistributedFileSystem run() throws IOException {
                        return (DistributedFileSystem) FileSystem.get(uri, conf);
                      }
                    });
  } catch (Exception e) {
    LOG.warn("Failed to create a dfs to renew for:" + token.getService(), e);
    throw e;
  }
  return dfs;
}
/**
 * Sets the connector information needed to communicate with Accumulo in this job.
 *
 * <p><b>WARNING:</b> Some tokens, when serialized, divulge sensitive information in the
 * configuration as a means to pass the token to MapReduce tasks. This information is BASE64
 * encoded to provide a charset safe conversion to a string, but this conversion is not intended
 * to be secure. {@link PasswordToken} is one example that is insecure in this way; however,
 * {@link DelegationToken}s, acquired using
 * {@link SecurityOperations#getDelegationToken(DelegationTokenConfig)}, are not subject to this
 * concern.
 *
 * @param job the Hadoop job instance to be configured
 * @param principal a valid Accumulo user name (user must have Table.CREATE permission)
 * @param token the user's password
 * @since 1.5.0
 */
public static void setConnectorInfo(JobConf job, String principal, AuthenticationToken token)
    throws AccumuloSecurityException {
  if (token instanceof KerberosToken) {
    log.info("Received KerberosToken, attempting to fetch DelegationToken");
    try {
      Instance instance = getInstance(job);
      Connector conn = instance.getConnector(principal, token);
      token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
    } catch (Exception e) {
      log.warn(
          "Failed to automatically obtain DelegationToken, "
              + "Mappers/Reducers will likely fail to communicate with Accumulo",
          e);
    }
  }
  // DelegationTokens can be passed securely from user to task without serializing insecurely in
  // the configuration
  if (token instanceof DelegationTokenImpl) {
    DelegationTokenImpl delegationToken = (DelegationTokenImpl) token;

    // Convert it into a Hadoop Token
    AuthenticationTokenIdentifier identifier = delegationToken.getIdentifier();
    Token<AuthenticationTokenIdentifier> hadoopToken =
        new Token<>(
            identifier.getBytes(),
            delegationToken.getPassword(),
            identifier.getKind(),
            delegationToken.getServiceName());

    // Add the Hadoop Token to the Job so it gets serialized and passed along.
    job.getCredentials().addToken(hadoopToken.getService(), hadoopToken);
  }

  InputConfigurator.setConnectorInfo(CLASS, job, principal, token);
}
@SuppressWarnings("unchecked") public static synchronized void registerDelegationTokensForRenewal( JobID jobId, Credentials ts, Configuration conf) { if (ts == null) return; // nothing to add Collection<Token<? extends TokenIdentifier>> tokens = ts.getAllTokens(); long now = System.currentTimeMillis(); for (Token<? extends TokenIdentifier> t : tokens) { // currently we only check for HDFS delegation tokens // later we can add more different types. if (!t.getKind().equals(kindHdfs)) { continue; } Token<DelegationTokenIdentifier> dt = (Token<DelegationTokenIdentifier>) t; // first renew happens immediately DelegationTokenToRenew dtr = new DelegationTokenToRenew(jobId, dt, conf, now); addTokenToList(dtr); setTimerForTokenRenewal(dtr, true); LOG.info( "registering token for renewal for service =" + dt.getService() + " and jobID = " + jobId); } }
private void addTimelineDelegationToken(ContainerLaunchContext clc)
    throws YarnException, IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = clc.getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  // If the timeline delegation token is already in the CLC, no need to add
  // one more
  for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token :
      credentials.getAllTokens()) {
    if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
      return;
    }
  }
  org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
      timelineDelegationToken = getTimelineDelegationToken();
  if (timelineDelegationToken == null) {
    return;
  }
  credentials.addToken(timelineService, timelineDelegationToken);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Add timeline delegation token into credentials: " + timelineDelegationToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(tokens);
}
public static HdfsProtos.BlockTokenIdentifierProto toProto(Token<?> blockToken) {
  return HdfsProtos.BlockTokenIdentifierProto.newBuilder()
      .setIdentifier(ByteString.copyFrom(blockToken.getIdentifier()))
      .setPassword(ByteString.copyFrom(blockToken.getPassword()))
      .setKind(blockToken.getKind().toString())
      .setService(blockToken.getService().toString())
      .build();
}
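// A sketch of the inverse conversion, assuming only the four proto fields set by
// toProto above; the helper name fromProto is hypothetical and not part of the
// original API.
public static Token<BlockTokenIdentifier> fromProto(HdfsProtos.BlockTokenIdentifierProto proto) {
  return new Token<BlockTokenIdentifier>(
      proto.getIdentifier().toByteArray(),
      proto.getPassword().toByteArray(),
      new Text(proto.getKind()),
      new Text(proto.getService()));
}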
private void handleAppSubmitEvent(DelegationTokenRenewerAppSubmitEvent evt)
    throws IOException, InterruptedException {
  ApplicationId applicationId = evt.getApplicationId();
  Credentials ts = evt.getCredentials();
  boolean shouldCancelAtEnd = evt.shouldCancelAtEnd();
  if (ts == null) {
    return; // nothing to add
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("Registering tokens for renewal for: appId = " + applicationId);
  }

  Collection<Token<?>> tokens = ts.getAllTokens();
  long now = System.currentTimeMillis();

  // find tokens for renewal, but don't add timers until we know
  // all renewable tokens are valid
  // At RM restart it is safe to assume that all the previously added tokens
  // are valid
  appTokens.put(
      applicationId, Collections.synchronizedSet(new HashSet<DelegationTokenToRenew>()));
  Set<DelegationTokenToRenew> tokenList = new HashSet<DelegationTokenToRenew>();
  boolean hasHdfsToken = false;
  for (Token<?> token : tokens) {
    if (token.isManaged()) {
      tokenList.add(
          new DelegationTokenToRenew(
              applicationId, token, getConfig(), now, shouldCancelAtEnd, evt.getUser()));
      if (token.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
        LOG.info(applicationId + " found existing hdfs token " + token);
        hasHdfsToken = true;
      }
    }
  }

  if (!tokenList.isEmpty()) {
    // Renewing token and adding it to timer calls are separated purposefully
    // If user provides incorrect token then it should not be added for
    // renewal.
    for (DelegationTokenToRenew dtr : tokenList) {
      try {
        renewToken(dtr);
      } catch (IOException ioe) {
        throw new IOException("Failed to renew token: " + dtr.token, ioe);
      }
    }
    for (DelegationTokenToRenew dtr : tokenList) {
      appTokens.get(applicationId).add(dtr);
      setTimerForTokenRenewal(dtr);
    }
  }

  if (!hasHdfsToken) {
    requestNewHdfsDelegationToken(applicationId, evt.getUser(), shouldCancelAtEnd);
  }
}
public static Token<? extends AbstractDelegationTokenIdentifier> extractThriftToken(
    String tokenStrForm, String tokenSignature) throws MetaException, TException, IOException {
  // LOG.info("extractThriftToken(" + tokenStrForm + "," + tokenSignature + ")");
  Token<? extends AbstractDelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
  t.decodeFromUrlString(tokenStrForm);
  t.setService(new Text(tokenSignature));
  // LOG.info("returning " + t);
  return t;
}
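// A minimal usage sketch, reusing the HiveMetaStore.getDelegationToken call that
// appears in the test snippet further below; the user name and signature string
// are hypothetical.
String tokenStrForm = HiveMetaStore.getDelegationToken("someuser", "metastore-sig");
Token<? extends AbstractDelegationTokenIdentifier> t =
    extractThriftToken(tokenStrForm, "metastore-sig");
UserGroupInformation.getCurrentUser().addToken(t);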
public Token<AuthenticationTokenIdentifier> generateToken(String username) {
  AuthenticationTokenIdentifier ident = new AuthenticationTokenIdentifier(username);
  Token<AuthenticationTokenIdentifier> token =
      new Token<AuthenticationTokenIdentifier>(ident, this);
  if (clusterId.hasId()) {
    token.setService(new Text(clusterId.getId()));
  }
  return token;
}
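// A minimal usage sketch (assumed, not from the original source): the generated
// authentication token is handed to the current user so later RPCs can
// authenticate with it. secretManager, an instance of the class that declares
// generateToken above, is hypothetical.
Token<AuthenticationTokenIdentifier> token = secretManager.generateToken("someuser");
UserGroupInformation.getCurrentUser().addToken(token);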
/** Convert a Json map to a Token. */
public static Token<? extends TokenIdentifier> toToken(final Map<?, ?> m) throws IOException {
  if (m == null) {
    return null;
  }
  final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
  token.decodeFromUrlString((String) m.get("urlString"));
  return token;
}
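// A sketch of the matching encoder, assuming the single "urlString" key that
// toToken reads; the helper name toJsonMap is hypothetical.
public static Map<String, Object> toJsonMap(final Token<? extends TokenIdentifier> token)
    throws IOException {
  if (token == null) {
    return null;
  }
  final Map<String, Object> m = new TreeMap<String, Object>();
  m.put("urlString", token.encodeToUrlString());
  return m;
}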
@Test(timeout = 15000)
public void testRMDTMasterKeyStateOnRollingMasterKey() throws Exception {
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  RMState rmState = memStore.getState();

  Map<RMDelegationTokenIdentifier, Long> rmDTState =
      rmState.getRMDTSecretManagerState().getTokenState();
  Set<DelegationKey> rmDTMasterKeyState =
      rmState.getRMDTSecretManagerState().getMasterKeyState();

  MockRM rm1 = new MyMockRM(conf, memStore);
  rm1.start();

  // on rm start, two master keys are created.
  // One is created at RMDTSecretMgr.startThreads.updateCurrentKey();
  // the other is created on the first run of
  // tokenRemoverThread.rollMasterKey()
  RMDelegationTokenSecretManager dtSecretManager = rm1.getRMDTSecretManager();
  // assert all master keys are saved
  Assert.assertEquals(dtSecretManager.getAllMasterKeys(), rmDTMasterKeyState);
  Set<DelegationKey> expiringKeys = new HashSet<DelegationKey>();
  expiringKeys.addAll(dtSecretManager.getAllMasterKeys());

  // record the current key
  DelegationKey oldCurrentKey =
      ((TestRMDelegationTokenSecretManager) dtSecretManager).getCurrentKey();

  // request to generate a RMDelegationToken
  GetDelegationTokenRequest request = mock(GetDelegationTokenRequest.class);
  when(request.getRenewer()).thenReturn("renewer1");
  GetDelegationTokenResponse response = rm1.getClientRMService().getDelegationToken(request);
  org.apache.hadoop.yarn.api.records.Token delegationToken = response.getRMDelegationToken();
  Token<RMDelegationTokenIdentifier> token1 =
      ConverterUtils.convertFromYarn(delegationToken, null);
  RMDelegationTokenIdentifier dtId1 = token1.decodeIdentifier();

  // wait for the first rollMasterKey
  while (((TestRMDelegationTokenSecretManager) dtSecretManager).numUpdatedKeys.get() < 1) {
    Thread.sleep(200);
  }

  // assert old-current-key and new-current-key exist
  Assert.assertTrue(rmDTMasterKeyState.contains(oldCurrentKey));
  DelegationKey newCurrentKey =
      ((TestRMDelegationTokenSecretManager) dtSecretManager).getCurrentKey();
  Assert.assertTrue(rmDTMasterKeyState.contains(newCurrentKey));

  // wait for token to expire
  // rollMasterKey is called every 1 second.
  while (((TestRMDelegationTokenSecretManager) dtSecretManager).numUpdatedKeys.get() < 6) {
    Thread.sleep(200);
  }
  Assert.assertFalse(rmDTState.containsKey(dtId1));
  rm1.stop();
}
public static ContainerTokenIdentifier newContainerTokenIdentifier(Token containerToken)
    throws IOException {
  org.apache.hadoop.security.token.Token<ContainerTokenIdentifier> token =
      new org.apache.hadoop.security.token.Token<ContainerTokenIdentifier>(
          containerToken.getIdentifier().array(),
          containerToken.getPassword().array(),
          new Text(containerToken.getKind()),
          new Text(containerToken.getService()));
  return token.decodeIdentifier();
}
private Token<? extends TokenIdentifier> generateDelegationToken(
    final NameNode namenode, final UserGroupInformation ugi, final String renewer)
    throws IOException {
  final Credentials c =
      DelegationTokenSecretManager.createCredentials(
          namenode, ugi, renewer != null ? renewer : ugi.getShortUserName());
  final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
  t.setKind(WebHdfsFileSystem.TOKEN_KIND);
  SecurityUtil.setTokenService(t, namenode.getHttpAddress());
  return t;
}
/**
 * Get a delegation token for the user from the JobTracker.
 *
 * @param renewer the user who can renew the token
 * @return the new token
 * @throws IOException
 */
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
    throws IOException, InterruptedException {
  Token<DelegationTokenIdentifier> result = client.getDelegationToken(renewer);
  InetSocketAddress addr = Master.getMasterAddress(conf);
  StringBuilder service = new StringBuilder();
  service.append(NetUtils.normalizeHostName(addr.getAddress().getHostAddress()));
  service.append(':');
  service.append(addr.getPort());
  result.setService(new Text(service.toString()));
  return result;
}
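// A minimal usage sketch (assumed): fetch a JobTracker delegation token for a
// designated renewer and stash it in the job's credentials so tasks can use it.
// jobClient and job are hypothetical.
Token<DelegationTokenIdentifier> dt = jobClient.getDelegationToken(new Text("yarn"));
job.getCredentials().addToken(dt.getService(), dt);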
/** @return a string representation of the token */
public static String stringifyToken(final Token<?> token) throws IOException {
  DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
  ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
  DataInputStream in = new DataInputStream(buf);
  ident.readFields(in);

  if (token.getService().getLength() > 0) {
    return ident + " on " + token.getService();
  } else {
    return ident.toString();
  }
}
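// A minimal usage sketch: pretty-print the tokens in a credentials file. The
// file path is hypothetical, and note that stringifyToken assumes each token's
// identifier bytes decode as a DelegationTokenIdentifier.
Credentials creds =
    Credentials.readTokenStorageFile(new Path("file:///tmp/tokens.bin"), new Configuration());
for (Token<?> t : creds.getAllTokens()) {
  System.out.println(stringifyToken(t));
}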
private void obtainTokenAndAddIntoUGI(UserGroupInformation clientUgi, String tokenSig)
    throws Exception {
  // obtain a token by directly invoking the metastore operation (without going
  // through the thrift interface). Obtaining a token makes the secret manager
  // aware of the user and that it gave the token to the user
  String tokenStrForm;
  if (tokenSig == null) {
    tokenStrForm = HiveMetaStore.getDelegationToken(clientUgi.getShortUserName());
  } else {
    tokenStrForm = HiveMetaStore.getDelegationToken(clientUgi.getShortUserName(), tokenSig);
    conf.set("hive.metastore.token.signature", tokenSig);
  }

  Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
  t.decodeFromUrlString(tokenStrForm);
  // add the token to the clientUgi for securely talking to the metastore
  clientUgi.addToken(t);

  // Create the metastore client as the clientUgi. Doing so this
  // way will give the client access to the token that was added earlier
  // in the clientUgi
  HiveMetaStoreClient hiveClient =
      clientUgi.doAs(
          new PrivilegedExceptionAction<HiveMetaStoreClient>() {
            public HiveMetaStoreClient run() throws Exception {
              return new HiveMetaStoreClient(conf);
            }
          });

  assertTrue("Couldn't connect to metastore", hiveClient != null);

  // try out some metastore operations
  createDBAndVerifyExistence(hiveClient);
  hiveClient.close();

  // Now cancel the delegation token
  HiveMetaStore.cancelDelegationToken(tokenStrForm);

  // now metastore connection should fail
  hiveClient =
      clientUgi.doAs(
          new PrivilegedExceptionAction<HiveMetaStoreClient>() {
            public HiveMetaStoreClient run() {
              try {
                return new HiveMetaStoreClient(conf);
              } catch (MetaException e) {
                return null;
              }
            }
          });
  assertTrue("Expected metastore operations to fail", hiveClient == null);
}
private void printTokens(JobID jobId, Credentials credentials) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Printing tokens for job: " + jobId);
    for (Token<?> token : credentials.getAllTokens()) {
      if (token.getKind().toString().equals("HDFS_DELEGATION_TOKEN")) {
        LOG.debug(
            "Submitting with "
                + org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
                    .stringifyToken(token));
      }
    }
  }
}
@Override
@SuppressWarnings("unchecked")
public Token<StramDelegationTokenIdentifier> selectToken(
    Text text, Collection<Token<? extends TokenIdentifier>> clctn) {
  Token<StramDelegationTokenIdentifier> token = null;
  if (text != null) {
    for (Token<? extends TokenIdentifier> ctoken : clctn) {
      if (StramDelegationTokenIdentifier.IDENTIFIER_KIND.equals(ctoken.getKind())
          && text.equals(ctoken.getService())) {
        token = (Token<StramDelegationTokenIdentifier>) ctoken;
      }
    }
  }
  return token;
}
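// A minimal usage sketch, assuming this selectToken override lives on a
// TokenSelector implementation (the class name StramDelegationTokenSelector and
// the service text are assumptions).
StramDelegationTokenSelector selector = new StramDelegationTokenSelector();
Token<StramDelegationTokenIdentifier> selected =
    selector.selectToken(
        new Text("localhost:9090"), UserGroupInformation.getCurrentUser().getTokens());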
private void updateAMRMToken(Token token) throws IOException {
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
      new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(
          token.getIdentifier().array(),
          token.getPassword().array(),
          new Text(token.getKind()),
          new Text(token.getService()));
  // Preserve the token service sent by the RM when adding the token
  // to ensure we replace the previous token setup by the RM.
  // Afterwards we can update the service address for the RPC layer.
  UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser();
  currentUGI.addToken(amrmToken);
  amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
}
// cancel a token
private static void cancelToken(DelegationTokenToRenew t) {
  Token<DelegationTokenIdentifier> token = t.token;
  Configuration conf = t.conf;

  if (token.getKind().equals(kindHdfs)) {
    try {
      DistributedFileSystem dfs = getDFSForToken(token, conf);
      if (LOG.isDebugEnabled()) {
        LOG.debug("canceling token " + token.getService() + " for dfs=" + dfs);
      }
      dfs.cancelDelegationToken(token);
    } catch (Exception e) {
      LOG.warn("Failed to cancel " + token, e);
    }
  }
}
public static ByteBuffer serializeServiceData(Token<JobTokenIdentifier> jobToken)
    throws IOException {
  // TODO these bytes should be versioned
  DataOutputBuffer jobToken_dob = new DataOutputBuffer();
  jobToken.write(jobToken_dob);
  return ByteBuffer.wrap(jobToken_dob.getData(), 0, jobToken_dob.getLength());
}
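// A sketch of reading the job token back out of the service-data buffer,
// assuming the un-versioned layout written above; the helper name
// parseServiceData is hypothetical.
public static Token<JobTokenIdentifier> parseServiceData(ByteBuffer serviceData)
    throws IOException {
  DataInputByteBuffer in = new DataInputByteBuffer();
  in.reset(serviceData.duplicate());
  Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>();
  jobToken.readFields(in);
  return jobToken;
}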
/**
 * Cancel a Delegation Token.
 *
 * @param nnAddr the NameNode's address
 * @param tok the token to cancel
 * @throws IOException
 */
public static void cancelDelegationToken(String nnAddr, Token<DelegationTokenIdentifier> tok)
    throws IOException {
  StringBuilder buf = new StringBuilder();
  buf.append(nnAddr);
  buf.append(CancelDelegationTokenServlet.PATH_SPEC);
  buf.append("?");
  buf.append(CancelDelegationTokenServlet.TOKEN);
  buf.append("=");
  buf.append(tok.encodeToUrlString());

  BufferedReader in = null;
  HttpURLConnection connection = null;
  try {
    URL url = new URL(buf.toString());
    SecurityUtil.fetchServiceTicket(url);
    connection = (HttpURLConnection) url.openConnection();
    if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
      throw new IOException("Error cancelling token: " + connection.getResponseMessage());
    }
  } catch (IOException ie) {
    LOG.info("error in cancel over HTTP", ie);
    IOException e = getExceptionFromResponse(connection);

    IOUtils.cleanup(LOG, in);
    if (e != null) {
      LOG.info("rethrowing exception from HTTP request: " + e.getLocalizedMessage());
      throw e;
    }
    throw ie;
  }
}