@Test
public void testGetTokensForNamenodes() throws IOException {
  Credentials credentials = new Credentials();
  TokenCache.obtainTokensForNamenodesInternal(credentials, new Path[] { p1, p2 }, jConf);

  // This token is keyed by the hostname:port service name.
  String fs_addr = SecurityUtil.buildDTServiceName(p1.toUri(), NameNode.DEFAULT_PORT);
  Token<DelegationTokenIdentifier> nnt = TokenCache.getDelegationToken(credentials, fs_addr);
  System.out.println("dt for " + p1 + "(" + fs_addr + ")" + " = " + nnt);
  assertNotNull("Token for nn is null", nnt);

  // Verify the number of tokens obtained.
  Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
  assertEquals("number of tokens is not 1", 1, tns.size());

  boolean found = false;
  for (Token<? extends TokenIdentifier> t : tns) {
    if (t.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
        && t.getService().equals(new Text(fs_addr))) {
      found = true;
    }
    assertTrue("didn't find token for " + p1, found);
  }
}
@Test
public void testGetTokensForViewFS() throws IOException, URISyntaxException {
  Configuration conf = new Configuration(jConf);
  FileSystem dfs = dfsCluster.getFileSystem();
  String serviceName = dfs.getCanonicalServiceName();

  Path p1 = new Path("/mount1");
  Path p2 = new Path("/mount2");
  p1 = dfs.makeQualified(p1);
  p2 = dfs.makeQualified(p2);

  conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
  conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
  Credentials credentials = new Credentials();
  Path lp1 = new Path("viewfs:///dir1");
  Path lp2 = new Path("viewfs:///dir2");
  Path[] paths = new Path[2];
  paths[0] = lp1;
  paths[1] = lp2;
  TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);

  Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
  assertEquals("number of tokens is not 1", 1, tns.size());

  boolean found = false;
  for (Token<? extends TokenIdentifier> tt : tns) {
    System.out.println("token=" + tt);
    if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
        && tt.getService().equals(new Text(serviceName))) {
      found = true;
    }
    assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
  }
}
/**
 * Obtain the tokens needed by the job and put them in the UGI.
 *
 * @param conf
 */
protected void downloadTokensAndSetupUGI(Configuration conf) {
  try {
    this.currentUser = UserGroupInformation.getCurrentUser();
    if (UserGroupInformation.isSecurityEnabled()) {
      // Read the file-system tokens from the localized tokens-file.
      Path jobSubmitDir =
          FileContext.getLocalFSFileContext().makeQualified(
              new Path(new File(DragonJobConfig.JOB_SUBMIT_DIR).getAbsolutePath()));
      Path jobTokenFile =
          new Path(jobSubmitDir, DragonJobConfig.APPLICATION_TOKENS_FILE);
      fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
      LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile=" + jobTokenFile);

      for (Token<? extends TokenIdentifier> tk : fsTokens.getAllTokens()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Token of kind " + tk.getKind()
              + " in current ugi in the AppMaster for service " + tk.getService());
        }
        currentUser.addToken(tk); // For use by the AppMaster itself.
      }
    }
  } catch (IOException e) {
    throw new YarnException(e);
  }
}
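For context, a minimal sketch of how such a tokens file could be produced on the submission side. This is an assumption, not the framework's actual submission code; the helper class, method, and file name are hypothetical, and only the standard Credentials API calls are taken as given.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical submission-side helper: collects the current user's tokens and
// persists them under the job submission directory for the AppMaster to read back.
public class TokenFileWriterSketch {
  public static void writeJobTokensFile(Path jobSubmitDir, Configuration conf)
      throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    Path tokensFile = new Path(jobSubmitDir, "appTokens"); // illustrative file name
    // Write all collected tokens and secret keys in the standard token-storage format.
    credentials.writeTokenStorageFile(tokensFile, conf);
  }
}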
private void addTimelineDelegationToken(ContainerLaunchContext clc)
    throws YarnException, IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = clc.getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  // If the timeline delegation token is already in the CLC, no need to add
  // one more.
  for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token
      : credentials.getAllTokens()) {
    if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
      return;
    }
  }
  org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
      timelineDelegationToken = getTimelineDelegationToken();
  if (timelineDelegationToken == null) {
    return;
  }
  credentials.addToken(timelineService, timelineDelegationToken);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Add timeline delegation token into credentials: "
        + timelineDelegationToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(tokens);
}
/**
 * Load the Hadoop job token into the secret manager.
 *
 * @param conf Configuration
 * @throws IOException
 */
private void setupSecretManager(Configuration conf) throws IOException {
  secretManager = new JobTokenSecretManager();
  String localJobTokenFile =
      System.getenv().get(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
  if (localJobTokenFile == null) {
    throw new IOException("Could not find job credentials: environment variable: "
        + UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION + " was not defined.");
  }
  JobConf jobConf = new JobConf(conf);

  // Find the JobTokenIdentifiers among all the tokens available in the
  // jobTokenFile and store them in the secretManager.
  Credentials credentials = TokenCache.loadTokens(localJobTokenFile, jobConf);
  Collection<Token<? extends TokenIdentifier>> collection = credentials.getAllTokens();
  for (Token<? extends TokenIdentifier> token : collection) {
    TokenIdentifier tokenIdentifier = decodeIdentifier(token, JobTokenIdentifier.class);
    if (tokenIdentifier instanceof JobTokenIdentifier) {
      Token<JobTokenIdentifier> theToken = (Token<JobTokenIdentifier>) token;
      JobTokenIdentifier jobTokenIdentifier = (JobTokenIdentifier) tokenIdentifier;
      secretManager.addTokenForJob(jobTokenIdentifier.getJobId().toString(), theToken);
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("loaded JobToken credentials: " + credentials
        + " from localJobTokenFile: " + localJobTokenFile);
  }
}
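The decodeIdentifier helper used above is not shown in this snippet. A minimal sketch of one plausible implementation follows, assuming the identifier class has a no-argument constructor; the class and method names here are illustrative, not the original code.

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.ReflectionUtils;

// Hypothetical sketch: deserialize a token's raw identifier bytes into the
// requested TokenIdentifier subclass.
final class TokenIdentifierDecoderSketch {
  static <T extends TokenIdentifier> T decodeIdentifier(
      Token<? extends TokenIdentifier> token, Class<T> cls) throws IOException {
    T identifier = ReflectionUtils.newInstance(cls, null);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(token.getIdentifier(), token.getIdentifier().length);
    identifier.readFields(in); // TokenIdentifier is a Writable
    return identifier;
  }
}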
@SuppressWarnings("unchecked") public static synchronized void registerDelegationTokensForRenewal( JobID jobId, Credentials ts, Configuration conf) { if (ts == null) return; // nothing to add Collection<Token<? extends TokenIdentifier>> tokens = ts.getAllTokens(); long now = System.currentTimeMillis(); for (Token<? extends TokenIdentifier> t : tokens) { // currently we only check for HDFS delegation tokens // later we can add more different types. if (!t.getKind().equals(kindHdfs)) { continue; } Token<DelegationTokenIdentifier> dt = (Token<DelegationTokenIdentifier>) t; // first renew happens immediately DelegationTokenToRenew dtr = new DelegationTokenToRenew(jobId, dt, conf, now); addTokenToList(dtr); setTimerForTokenRenewal(dtr, true); LOG.info( "registering token for renewal for service =" + dt.getService() + " and jobID = " + jobId); } }
private void handleAppSubmitEvent(DelegationTokenRenewerAppSubmitEvent evt)
    throws IOException, InterruptedException {
  ApplicationId applicationId = evt.getApplicationId();
  Credentials ts = evt.getCredentials();
  boolean shouldCancelAtEnd = evt.shouldCancelAtEnd();
  if (ts == null) {
    return; // nothing to add
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("Registering tokens for renewal for:" + " appId = " + applicationId);
  }

  Collection<Token<?>> tokens = ts.getAllTokens();
  long now = System.currentTimeMillis();

  // Find tokens for renewal, but don't add timers until we know
  // all renewable tokens are valid.
  // At RM restart it is safe to assume that all the previously added tokens
  // are valid.
  appTokens.put(applicationId,
      Collections.synchronizedSet(new HashSet<DelegationTokenToRenew>()));
  Set<DelegationTokenToRenew> tokenList = new HashSet<DelegationTokenToRenew>();
  boolean hasHdfsToken = false;
  for (Token<?> token : tokens) {
    if (token.isManaged()) {
      tokenList.add(new DelegationTokenToRenew(applicationId, token, getConfig(),
          now, shouldCancelAtEnd, evt.getUser()));
      if (token.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
        LOG.info(applicationId + " found existing hdfs token " + token);
        hasHdfsToken = true;
      }
    }
  }

  if (!tokenList.isEmpty()) {
    // Renewing the token and adding it to the timer are separated purposefully:
    // if the user provides an incorrect token, it should not be added for renewal.
    for (DelegationTokenToRenew dtr : tokenList) {
      try {
        renewToken(dtr);
      } catch (IOException ioe) {
        throw new IOException("Failed to renew token: " + dtr.token, ioe);
      }
    }
    for (DelegationTokenToRenew dtr : tokenList) {
      appTokens.get(applicationId).add(dtr);
      setTimerForTokenRenewal(dtr);
    }
  }

  if (!hasHdfsToken) {
    requestNewHdfsDelegationToken(applicationId, evt.getUser(), shouldCancelAtEnd);
  }
}
private Token<? extends TokenIdentifier> generateDelegationToken(
    final NameNode namenode, final UserGroupInformation ugi, final String renewer)
    throws IOException {
  final Credentials c = DelegationTokenSecretManager.createCredentials(
      namenode, ugi, renewer != null ? renewer : ugi.getShortUserName());
  final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
  t.setKind(WebHdfsFileSystem.TOKEN_KIND);
  SecurityUtil.setTokenService(t, namenode.getHttpAddress());
  return t;
}
private void printTokens(JobID jobId, Credentials credentials) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Printing tokens for job: " + jobId);
    for (Token<?> token : credentials.getAllTokens()) {
      if (token.getKind().toString().equals("HDFS_DELEGATION_TOKEN")) {
        LOG.debug("Submitting with "
            + org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
                .stringifyToken(token));
      }
    }
  }
}
private void populateTokens(Job job) {
  // Credentials in the job will not have delegation tokens
  // because security is disabled. Fetch delegation tokens
  // and populate the credential in the job.
  try {
    Credentials ts = job.getCredentials();
    Path p1 = new Path("file1");
    p1 = p1.getFileSystem(job.getConfiguration()).makeQualified(p1);
    Credentials cred = new Credentials();
    TokenCache.obtainTokensForNamenodesInternal(cred, new Path[] { p1 },
        job.getConfiguration());
    for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
      ts.addToken(new Text("Hdfs"), t);
    }
  } catch (IOException e) {
    Assert.fail("Exception " + e);
  }
}
/** Attempts to access the tokenCache as from a client. */
@Override
public void map(IntWritable key, IntWritable value, Context context)
    throws IOException, InterruptedException {
  // Get the token storage and a key.
  Credentials ts = context.getCredentials();
  byte[] key1 = ts.getSecretKey(new Text("alias1"));
  Collection<Token<? extends TokenIdentifier>> dts = ts.getAllTokens();
  int dts_size = 0;
  if (dts != null) {
    dts_size = dts.size();
  }

  if (dts_size != 2) { // one job token and one delegation token
    throw new RuntimeException("tokens are not available"); // fail the test
  }

  if (key1 == null || ts == null || ts.numberOfSecretKeys() != NUM_OF_KEYS) {
    throw new RuntimeException("secret keys are not available"); // fail the test
  }
  super.map(key, value, context);
}
public static Credentials getDTfromRemote(String nnAddr, String renewer)
    throws IOException {
  DataInputStream dis = null;

  try {
    StringBuffer url = new StringBuffer();
    if (renewer != null) {
      url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC).append("?")
          .append(GetDelegationTokenServlet.RENEWER).append("=").append(renewer);
    } else {
      url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Retrieving token from: " + url);
    }
    URL remoteURL = new URL(url.toString());
    SecurityUtil.fetchServiceTicket(remoteURL);
    URLConnection connection = remoteURL.openConnection();

    InputStream in = connection.getInputStream();
    Credentials ts = new Credentials();
    dis = new DataInputStream(in);
    ts.readFields(dis);
    for (Token<?> token : ts.getAllTokens()) {
      token.setKind(HftpFileSystem.TOKEN_KIND);
      token.setService(new Text(SecurityUtil.buildDTServiceName(
          remoteURL.toURI(), DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT)));
    }
    return ts;
  } catch (Exception e) {
    throw new IOException("Unable to obtain remote token", e);
  } finally {
    if (dis != null) {
      dis.close();
    }
  }
}
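A brief usage sketch, assuming the caller wants the fetched tokens available to the current user; the NameNode address and renewer shown are illustrative, not from the original code.

// Illustrative call: fetch delegation tokens over HTTP and add them to the
// current UGI so subsequent FileSystem calls can authenticate with them.
Credentials fetched = getDTfromRemote("http://nn.example.com:50070", "yarn"); // hypothetical address/renewer
for (Token<?> token : fetched.getAllTokens()) {
  UserGroupInformation.getCurrentUser().addToken(token);
}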
private ByteBuffer getSecurityTokens() throws IOException {
  Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
  Closer closer = Closer.create();
  try {
    DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());

    // Remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> tokenIterator = credentials.getAllTokens().iterator();
    while (tokenIterator.hasNext()) {
      Token<?> token = tokenIterator.next();
      if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
        tokenIterator.remove();
      }
    }

    // Serialize only after filtering, so the AM->RM token is not in the returned buffer.
    credentials.writeTokenStorageToStream(dataOutputBuffer);
    return ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
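A short usage sketch, assuming the buffer is destined for a container launch context; localResources, environment, and commands are assumed to be defined elsewhere and are not part of the original snippet.

// Illustrative use: attach the filtered tokens to a container launch context.
ByteBuffer containerTokens = getSecurityTokens();
ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(
    localResources, environment, commands, null, containerTokens, null); // surrounding args assumed defined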
private static Collection<Token<?>> readTokens(Path file, Configuration conf)
    throws IOException {
  Credentials creds = Credentials.readTokenStorageFile(file, conf);
  return creds.getAllTokens();
}
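A brief usage sketch, assuming it is called from within the same class and that the token file was produced earlier with Credentials.writeTokenStorageFile; the file path is hypothetical.

// Illustrative call: read a previously persisted token file and print each
// token's kind and service for inspection.
Collection<Token<?>> tokens = readTokens(new Path("/tmp/app.tokens"), new Configuration());
for (Token<?> token : tokens) {
  System.out.println("kind=" + token.getKind() + ", service=" + token.getService());
}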
@Test
public void testAutomaticTimelineDelegationTokenLoading() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  TimelineDelegationTokenIdentifier timelineDT = new TimelineDelegationTokenIdentifier();
  final Token<TimelineDelegationTokenIdentifier> dToken =
      new Token<TimelineDelegationTokenIdentifier>(
          timelineDT.getBytes(), new byte[0], timelineDT.getKind(), new Text());
  // Create a mock client.
  YarnClientImpl client = spy(new YarnClientImpl() {

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
      if (getConfig().getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
        timelineServiceEnabled = true;
        timelineClient = mock(TimelineClient.class);
        when(timelineClient.getDelegationToken(any(String.class))).thenReturn(dToken);
        timelineClient.init(getConfig());
        timelineService = TimelineUtils.buildTimelineTokenService(getConfig());
      }
      this.setConfig(conf);
    }

    @Override
    protected void serviceStart() throws Exception {
      rmClient = mock(ApplicationClientProtocol.class);
    }

    @Override
    protected void serviceStop() throws Exception {
    }

    @Override
    public ApplicationReport getApplicationReport(ApplicationId appId) {
      ApplicationReport report = mock(ApplicationReport.class);
      when(report.getYarnApplicationState()).thenReturn(YarnApplicationState.SUBMITTED);
      return report;
    }

    @Override
    public boolean isSecurityEnabled() {
      return true;
    }
  });
  client.init(conf);
  client.start();
  try {
    // When i == 0, the timeline DT already exists, so there is no need to get one more;
    // when i == 1, the timeline DT doesn't exist, so one more needs to be fetched.
    for (int i = 0; i < 2; ++i) {
      ApplicationSubmissionContext context = mock(ApplicationSubmissionContext.class);
      ApplicationId applicationId = ApplicationId.newInstance(0, i + 1);
      when(context.getApplicationId()).thenReturn(applicationId);
      DataOutputBuffer dob = new DataOutputBuffer();
      Credentials credentials = new Credentials();
      if (i == 0) {
        credentials.addToken(client.timelineService, dToken);
      }
      credentials.writeTokenStorageToStream(dob);
      ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
      ContainerLaunchContext clc =
          ContainerLaunchContext.newInstance(null, null, null, null, tokens, null);
      when(context.getAMContainerSpec()).thenReturn(clc);
      client.submitApplication(context);
      if (i == 0) {
        // getTimelineDelegationToken() shouldn't be called.
        verify(client, never()).getTimelineDelegationToken();
      }
      // Either way, the token should be there.
      credentials = new Credentials();
      DataInputByteBuffer dibb = new DataInputByteBuffer();
      tokens = clc.getTokens();
      if (tokens != null) {
        dibb.reset(tokens);
        credentials.readTokenStorageStream(dibb);
        tokens.rewind();
      }
      Collection<Token<? extends TokenIdentifier>> dTokens = credentials.getAllTokens();
      Assert.assertEquals(1, dTokens.size());
      Assert.assertEquals(dToken, dTokens.iterator().next());
    }
  } finally {
    client.stop();
  }
}
/**
 * Main run function for the application master.
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException {
  LOG.info("Starting ApplicationMaster");
  try {
    publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(),
        DSEvent.DS_APP_ATTEMPT_START);
  } catch (Exception e) {
    LOG.error("App Attempt start event could not be published for "
        + appAttemptID.toString(), e);
  }

  Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
  // Remove the AM->RM token so that containers cannot access it.
  Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
  LOG.info("Executing with tokens:");
  while (iter.hasNext()) {
    Token<?> token = iter.next();
    LOG.info(token);
    if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
      iter.remove();
    }
  }
  // Serialize the tokens only after the AM->RM token has been removed.
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

  // Create appSubmitterUgi and add the original tokens to it.
  String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
  appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
  appSubmitterUgi.addCredentials(credentials);

  AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
  amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
  amRMClient.init(conf);
  amRMClient.start();

  containerListener = createNMCallbackHandler();
  nmClientAsync = new NMClientAsyncImpl(containerListener);
  nmClientAsync.init(conf);
  nmClientAsync.start();

  // Setup a local RPC server to accept status requests directly from clients.
  // TODO need to setup a protocol for client to be able to communicate to
  // the RPC server
  // TODO use the rpc port info to register with the RM for the client to
  // send requests to this app master

  // Register self with the ResourceManager. This will start heartbeating to the RM.
  appMasterHostname = NetUtils.getHostname();
  RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(
      appMasterHostname, appMasterRpcPort, appMasterTrackingUrl);

  // Dump out information about cluster capability as seen by the resource manager.
  int maxMem = response.getMaximumResourceCapability().getMemory();
  LOG.info("Max mem capability of resources in this cluster " + maxMem);

  int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
  LOG.info("Max vcores capability of resources in this cluster " + maxVCores);

  // A resource ask cannot exceed the max.
  if (containerMemory > maxMem) {
    LOG.info("Container memory specified above max threshold of cluster."
        + " Using max value." + ", specified=" + containerMemory + ", max=" + maxMem);
    containerMemory = maxMem;
  }

  if (containerVirtualCores > maxVCores) {
    LOG.info("Container virtual cores specified above max threshold of cluster."
        + " Using max value." + ", specified=" + containerVirtualCores
        + ", max=" + maxVCores);
    containerVirtualCores = maxVCores;
  }

  List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
  LOG.info("Received " + previousAMRunningContainers.size()
      + " previous AM's running containers on AM registration.");
  numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

  int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
  // Setup asks for containers from the RM.
  // Until we get our fully allocated quota, we keep on polling the RM for containers.
  // Keep looping until all the containers are launched and the shell script is
  // executed on them (regardless of success/failure).
  for (int i = 0; i < numTotalContainersToRequest; ++i) {
    ContainerRequest containerAsk = setupContainerAskForRM();
    amRMClient.addContainerRequest(containerAsk);
  }
  numRequestedContainers.set(numTotalContainersToRequest);

  try {
    publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(),
        DSEvent.DS_APP_ATTEMPT_END);
  } catch (Exception e) {
    LOG.error("App Attempt end event could not be published for "
        + appAttemptID.toString(), e);
  }
}
@Test
public void testGetTokensForHftpFS() throws IOException, URISyntaxException {
  HftpFileSystem hfs = mock(HftpFileSystem.class);

  DelegationTokenSecretManager dtSecretManager =
      NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem());
  String renewer = "renewer";
  jConf.set(JTConfig.JT_USER_NAME, renewer);
  DelegationTokenIdentifier dtId =
      new DelegationTokenIdentifier(new Text("user"), new Text(renewer), null);
  final Token<DelegationTokenIdentifier> t =
      new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);

  final URI uri = new URI("hftp://host:2222/file1");
  final String fs_addr = SecurityUtil.buildDTServiceName(uri, NameNode.DEFAULT_PORT);
  t.setService(new Text(fs_addr));

  // when(hfs.getUri()).thenReturn(uri);
  Mockito.doAnswer(new Answer<URI>() {
    @Override
    public URI answer(InvocationOnMock invocation) throws Throwable {
      return uri;
    }
  }).when(hfs).getUri();

  // when(hfs.getDelegationToken()).thenReturn((Token<? extends TokenIdentifier>) t);
  Mockito.doAnswer(new Answer<Token<DelegationTokenIdentifier>>() {
    @Override
    public Token<DelegationTokenIdentifier> answer(InvocationOnMock invocation)
        throws Throwable {
      return t;
    }
  }).when(hfs).getDelegationToken(renewer);

  // when(hfs.getDelegationTokens()).thenReturn((Token<? extends TokenIdentifier>) t);
  Mockito.doAnswer(new Answer<List<Token<DelegationTokenIdentifier>>>() {
    @Override
    public List<Token<DelegationTokenIdentifier>> answer(InvocationOnMock invocation)
        throws Throwable {
      return Collections.singletonList(t);
    }
  }).when(hfs).getDelegationTokens(renewer);

  // when(hfs.getCanonicalServiceName).thenReturn(fs_addr);
  Mockito.doAnswer(new Answer<String>() {
    @Override
    public String answer(InvocationOnMock invocation) throws Throwable {
      return fs_addr;
    }
  }).when(hfs).getCanonicalServiceName();

  Credentials credentials = new Credentials();
  Path p = new Path(uri.toString());
  System.out.println("Path for hftp=" + p + "; fs_addr=" + fs_addr + "; rn=" + renewer);
  TokenCache.obtainTokensForNamenodesInternal(hfs, credentials, jConf);

  Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
  assertEquals("number of tokens is not 1", 1, tns.size());

  boolean found = false;
  for (Token<? extends TokenIdentifier> tt : tns) {
    System.out.println("token=" + tt);
    if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
        && tt.getService().equals(new Text(fs_addr))) {
      found = true;
      assertEquals("different token", tt, t);
    }
    assertTrue("didn't find token for " + p, found);
  }
}