@Test
  public void testGetTokensForViewFS() throws IOException, URISyntaxException {
    Configuration conf = new Configuration(jConf);
    FileSystem dfs = dfsCluster.getFileSystem();
    String serviceName = dfs.getCanonicalServiceName();

    Path p1 = new Path("/mount1");
    Path p2 = new Path("/mount2");
    p1 = dfs.makeQualified(p1);
    p2 = dfs.makeQualified(p2);

    conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
    conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
    Credentials credentials = new Credentials();
    Path lp1 = new Path("viewfs:///dir1");
    Path lp2 = new Path("viewfs:///dir2");
    Path[] paths = new Path[2];
    paths[0] = lp1;
    paths[1] = lp2;
    TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);

    Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
    assertEquals("number of tokens is not 1", 1, tns.size());

    boolean found = false;
    for (Token<? extends TokenIdentifier> tt : tns) {
      System.out.println("token=" + tt);
      if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
          && tt.getService().equals(new Text(serviceName))) {
        found = true;
      }
      assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
    }
  }
  /**
   * Basic idea of the test:
   * 1. register a token for renewal every 2 seconds, with cancel-at-end disabled
   * 2. mark the application finished immediately
   * 3. sleep and check that no further 2-second renewals happened (renewal counter unchanged)
   * 4. check that the token was not cancelled: renewing it still succeeds
   *
   * @throws Exception
   */
  @Test(timeout = 60000)
  public void testDTRenewalWithNoCancel() throws Exception {
    MyFS dfs = (MyFS) FileSystem.get(conf);
    LOG.info("dfs=" + (Object) dfs.hashCode() + ";conf=" + conf.hashCode());

    Credentials ts = new Credentials();
    MyToken token1 = dfs.getDelegationToken(new Text("user1"));

    // cause this token to be scheduled for renewal in 2 seconds
    Renewer.tokenToRenewIn2Sec = token1;
    LOG.info("token=" + token1 + " should be renewed for 2 secs");

    String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0";
    ts.addToken(new Text(nn1), token1);

    ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1);
    delegationTokenRenewer.addApplication(applicationId_1, ts, false, false);
    waitForEventsToGetProcessed(delegationTokenRenewer);
    delegationTokenRenewer.applicationFinished(applicationId_1);
    waitForEventsToGetProcessed(delegationTokenRenewer);
    int numberOfExpectedRenewals = Renewer.counter; // number of renewals so far
    try {
      Thread.sleep(6 * 1000); // sleep 6 seconds, so it has time to renew
    } catch (InterruptedException e) {
    }
    LOG.info("Counter = " + Renewer.counter + ";t=" + Renewer.lastRenewed);

    // counter and the token should still be the old ones
    assertEquals(
        "renew wasn't called as many times as expected", numberOfExpectedRenewals, Renewer.counter);

    // renewing the token should also not fail, because cancel-at-end was
    // disabled and the token has not been cancelled
    token1.renew(conf);
  }
 private void addTimelineDelegationToken(ContainerLaunchContext clc)
     throws YarnException, IOException {
   Credentials credentials = new Credentials();
   DataInputByteBuffer dibb = new DataInputByteBuffer();
   ByteBuffer tokens = clc.getTokens();
   if (tokens != null) {
     dibb.reset(tokens);
     credentials.readTokenStorageStream(dibb);
     tokens.rewind();
   }
   // If the timeline delegation token is already in the CLC, no need to add
   // one more
   for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token :
       credentials.getAllTokens()) {
     if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
       return;
     }
   }
   org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
       timelineDelegationToken = getTimelineDelegationToken();
   if (timelineDelegationToken == null) {
     return;
   }
   credentials.addToken(timelineService, timelineDelegationToken);
   if (LOG.isDebugEnabled()) {
     LOG.debug("Add timline delegation token into credentials: " + timelineDelegationToken);
   }
   DataOutputBuffer dob = new DataOutputBuffer();
   credentials.writeTokenStorageToStream(dob);
   tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
   clc.setTokens(tokens);
 }
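Several snippets in this collection rely on the same Credentials serialization round trip that addTimelineDelegationToken uses above. Below is a minimal, self-contained sketch of that pattern; the CredentialsBuffers class and its method names are illustrative, not part of any Hadoop API.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;

final class CredentialsBuffers {
  /** Serialize credentials into the ByteBuffer format expected by ContainerLaunchContext#setTokens. */
  static ByteBuffer toBuffer(Credentials credentials) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  }

  /** Deserialize a (possibly null) tokens buffer back into a Credentials object. */
  static Credentials fromBuffer(ByteBuffer tokens) throws IOException {
    Credentials credentials = new Credentials();
    if (tokens != null) {
      DataInputByteBuffer dibb = new DataInputByteBuffer();
      dibb.reset(tokens);
      credentials.readTokenStorageStream(dibb);
      tokens.rewind(); // leave the buffer readable for later callers
    }
    return credentials;
  }
}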
  @Test
  public void testGetTokensForNamenodes() throws IOException {

    Credentials credentials = new Credentials();
    TokenCache.obtainTokensForNamenodesInternal(credentials, new Path[] {p1, p2}, jConf);

    // the token is keyed by the hostname:port service name
    String fs_addr = SecurityUtil.buildDTServiceName(p1.toUri(), NameNode.DEFAULT_PORT);
    Token<DelegationTokenIdentifier> nnt = TokenCache.getDelegationToken(credentials, fs_addr);
    System.out.println("dt for " + p1 + "(" + fs_addr + ")" + " = " + nnt);
    assertNotNull("Token for nn is null", nnt);

    // verify the size
    Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
    assertEquals("number of tokens is not 1", 1, tns.size());

    boolean found = false;
    for (Token<? extends TokenIdentifier> t : tns) {
      if (t.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
          && t.getService().equals(new Text(fs_addr))) {
        found = true;
      }
      assertTrue("didn't find token for " + p1, found);
    }
  }
  /**
   * Load Hadoop Job Token into secret manager.
   *
   * @param conf Configuration
   * @throws IOException
   */
  private void setupSecretManager(Configuration conf) throws IOException {
    secretManager = new JobTokenSecretManager();
    String localJobTokenFile = System.getenv().get(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
    if (localJobTokenFile == null) {
      throw new IOException(
          "Could not find job credentials: environment "
              + "variable: "
              + UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION
              + " was not defined.");
    }
    JobConf jobConf = new JobConf(conf);

    // Find the JobTokenIdentifiers among all the tokens available in the
    // jobTokenFile and store them in the secretManager.
    Credentials credentials = TokenCache.loadTokens(localJobTokenFile, jobConf);
    Collection<Token<? extends TokenIdentifier>> collection = credentials.getAllTokens();
    for (Token<? extends TokenIdentifier> token : collection) {
      TokenIdentifier tokenIdentifier = decodeIdentifier(token, JobTokenIdentifier.class);
      if (tokenIdentifier instanceof JobTokenIdentifier) {
        Token<JobTokenIdentifier> theToken = (Token<JobTokenIdentifier>) token;
        JobTokenIdentifier jobTokenIdentifier = (JobTokenIdentifier) tokenIdentifier;
        secretManager.addTokenForJob(jobTokenIdentifier.getJobId().toString(), theToken);
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "loaded JobToken credentials: "
              + credentials
              + " from "
              + "localJobTokenFile: "
              + localJobTokenFile);
    }
  }
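The decodeIdentifier(token, JobTokenIdentifier.class) helper called above is not shown in this snippet. The following is a hypothetical sketch of what such a helper might look like, assuming it deserializes the Token#getIdentifier() bytes with Writable#readFields and returns null when the token kind does not match; the class and method names are illustrative only.

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

final class TokenIdentifiers {
  /**
   * Decode a token's raw identifier bytes into an instance of the given identifier
   * class, or return null if the token kind does not match.
   */
  static <T extends TokenIdentifier> TokenIdentifier decodeIdentifier(
      Token<? extends TokenIdentifier> token, Class<T> cls) throws IOException {
    try {
      T identifier = cls.getDeclaredConstructor().newInstance();
      if (!token.getKind().equals(identifier.getKind())) {
        return null; // not the kind we are looking for; the caller's instanceof check fails
      }
      DataInputBuffer in = new DataInputBuffer();
      byte[] bytes = token.getIdentifier();
      in.reset(bytes, bytes.length);
      identifier.readFields(in);
      return identifier;
    } catch (ReflectiveOperationException e) {
      throw new IOException("Cannot instantiate identifier class " + cls.getName(), e);
    }
  }
}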
Example #6
 /**
  * Obtain the tokens needed by the job and put them in the UGI.
  *
  * @param conf the job configuration
  */
 protected void downloadTokensAndSetupUGI(Configuration conf) {
   try {
     this.currentUser = UserGroupInformation.getCurrentUser();
     if (UserGroupInformation.isSecurityEnabled()) {
       // Read the file-system tokens from the localized tokens-file.
       Path jobSubmitDir =
           FileContext.getLocalFSFileContext()
               .makeQualified(
                   new Path(new File(DragonJobConfig.JOB_SUBMIT_DIR).getAbsolutePath()));
       Path jobTokenFile = new Path(jobSubmitDir, DragonJobConfig.APPLICATION_TOKENS_FILE);
       fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
       LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile=" + jobTokenFile);
       for (Token<? extends TokenIdentifier> tk : fsTokens.getAllTokens()) {
         if (LOG.isDebugEnabled()) {
           LOG.debug(
               "Token of kind "
                   + tk.getKind()
                   + "in current ugi in the AppMaster for service "
                   + tk.getService());
         }
         currentUser.addToken(tk); // For use by AppMaster itself.
       }
     }
   } catch (IOException e) {
     throw new YarnException(e);
   }
 }
Example #7
 @Override
 public synchronized KeyVersion createKey(String name, byte[] material, Options options)
     throws IOException {
   Text nameT = new Text(name);
   if (credentials.getSecretKey(nameT) != null) {
     throw new IOException("Key " + name + " already exists in " + this);
   }
   if (options.getBitLength() != 8 * material.length) {
     throw new IOException(
         "Wrong key length. Required "
             + options.getBitLength()
             + ", but got "
             + (8 * material.length));
   }
   Metadata meta =
       new Metadata(
           options.getCipher(),
           options.getBitLength(),
           options.getDescription(),
           options.getAttributes(),
           new Date(),
           1);
   cache.put(name, meta);
   String versionName = buildVersionName(name, 0);
   credentials.addSecretKey(nameT, meta.serialize());
   credentials.addSecretKey(new Text(versionName), material);
   return new KeyVersion(name, versionName, material);
 }
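For context, these KeyProvider-style methods store two secret entries per key: the serialized Metadata under the bare key name, and the raw key material under a versioned name produced by buildVersionName. In Hadoop's KeyProvider that versioned name has the form "name@version", which is also why the getKeys() override further below filters out entries containing "@". A sketch of the assumed convention (the KeyNames wrapper class is illustrative):

final class KeyNames {
  /** e.g. "mykey" with version 0 -> "mykey@0" */
  static String buildVersionName(String name, int version) {
    return name + "@" + version;
  }
}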
  private void requestNewHdfsDelegationToken(
      ApplicationId applicationId, String user, boolean shouldCancelAtEnd)
      throws IOException, InterruptedException {
    // Get new hdfs tokens for this user
    Credentials credentials = new Credentials();
    Token<?>[] newTokens = obtainSystemTokensForUser(user, credentials);

    // Add new tokens to the toRenew list.
    LOG.info(
        "Received new tokens for " + applicationId + ". Received " + newTokens.length + " tokens.");
    if (newTokens.length > 0) {
      for (Token<?> token : newTokens) {
        if (token.isManaged()) {
          DelegationTokenToRenew tokenToRenew =
              new DelegationTokenToRenew(
                  applicationId, token, getConfig(), Time.now(), shouldCancelAtEnd, user);
          // renew the token to get the next expiration date.
          renewToken(tokenToRenew);
          setTimerForTokenRenewal(tokenToRenew);
          appTokens.get(applicationId).add(tokenToRenew);
          LOG.info("Received new token " + token);
        }
      }
    }
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    ByteBuffer byteBuffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    rmContext.getSystemCredentialsForApps().put(applicationId, byteBuffer);
  }
Example #9
  public static void setTokensFor(
      ContainerLaunchContext amContainer, List<Path> paths, Configuration conf) throws IOException {
    Credentials credentials = new Credentials();
    // for HDFS
    TokenCache.obtainTokensForNamenodes(credentials, paths.toArray(new Path[0]), conf);
    // for HBase
    obtainTokenForHBase(credentials, conf);
    // for user
    UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();

    Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
      final Text id = new Text(token.getIdentifier());
      LOG.info("Adding user token " + id + " with " + token);
      credentials.addToken(id, token);
    }
    try (DataOutputBuffer dob = new DataOutputBuffer()) {
      credentials.writeTokenStorageToStream(dob);

      if (LOG.isDebugEnabled()) {
        LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());
      }

      ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
      amContainer.setTokens(securityTokens);
    }
  }
  @SuppressWarnings("unchecked")
  private void readTokensFromFiles(Configuration conf, Credentials credentials) throws IOException {
    // add tokens and secrets coming from a token storage file
    String binaryTokenFilename = conf.get("mapreduce.job.credentials.binary");
    if (binaryTokenFilename != null) {
      Credentials binary =
          Credentials.readTokenStorageFile(new Path("file:///" + binaryTokenFilename), conf);
      credentials.addAll(binary);
    }
    // add secret keys coming from a json file
    String tokensFileName = conf.get("mapreduce.job.credentials.json");
    if (tokensFileName != null) {
      LOG.info("loading user's secret keys from " + tokensFileName);
      String localFileName = new Path(tokensFileName).toUri().getPath();

      boolean json_error = false;
      try {
        // read JSON
        ObjectMapper mapper = new ObjectMapper();
        Map<String, String> nm = mapper.readValue(new File(localFileName), Map.class);

        for (Map.Entry<String, String> ent : nm.entrySet()) {
          credentials.addSecretKey(new Text(ent.getKey()), ent.getValue().getBytes());
        }
      } catch (JsonMappingException e) {
        json_error = true;
      } catch (JsonParseException e) {
        json_error = true;
      }
      if (json_error) LOG.warn("couldn't parse Token Cache JSON file with user secret keys");
    }
  }
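The JSON file referenced by mapreduce.job.credentials.json above is read as a flat string-to-string map of secret names to secret values. A hedged sketch of producing such a file follows; the SecretJsonWriter class, file, and secret values are made up, and the org.codehaus.jackson import assumes the older Jackson 1 used by classic MapReduce (newer Hadoop versions use com.fasterxml.jackson instead).

import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.codehaus.jackson.map.ObjectMapper;

final class SecretJsonWriter {
  /** Write a flat name->secret map in the shape readTokensFromFiles() expects. */
  static void writeSecrets(File out) throws IOException {
    Map<String, String> secrets = new HashMap<String, String>();
    secrets.put("alias1", "s3cr3t-value");
    secrets.put("alias2", "another-secret");
    new ObjectMapper().writeValue(out, secrets);
  }
}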
  @Test(timeout = 60000)
  public void testAppRejectionWithCancelledDelegationToken() throws Exception {
    MyFS dfs = (MyFS) FileSystem.get(conf);
    LOG.info("dfs=" + (Object) dfs.hashCode() + ";conf=" + conf.hashCode());

    MyToken token = dfs.getDelegationToken(new Text("user1"));
    token.cancelToken();

    Credentials ts = new Credentials();
    ts.addToken(token.getKind(), token);

    // register the tokens for renewal
    ApplicationId appId = BuilderUtils.newApplicationId(0, 0);
    delegationTokenRenewer.addApplication(appId, ts, true, false);
    int waitCnt = 20;
    while (waitCnt-- > 0) {
      if (!eventQueue.isEmpty()) {
        Event evt = eventQueue.take();
        if (evt.getType() == RMAppEventType.APP_REJECTED) {
          Assert.assertTrue(((RMAppEvent) evt).getApplicationId().equals(appId));
          return;
        }
      } else {
        Thread.sleep(500);
      }
    }
    fail("App submission with a cancelled token should have failed");
  }
  /**
   * Basic idea of the test:
   * 0. set up the token KEEP_ALIVE interval
   * 1. create tokens
   * 2. register them for renewal, to be cancelled on app completion
   * 3. complete the app
   * 4. verify the token is still alive within the KEEP_ALIVE time
   * 5. verify the token has been cancelled after the KEEP_ALIVE time
   *
   * @throws Exception
   */
  @Test(timeout = 60000)
  public void testDTKeepAlive1() throws Exception {
    Configuration lconf = new Configuration(conf);
    lconf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
    // Keep tokens alive for 6 seconds.
    lconf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 6000L);
    // Try removing tokens every second.
    lconf.setLong(YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS, 1000L);
    DelegationTokenRenewer localDtr = createNewDelegationTokenRenewer(lconf, counter);
    localDtr.init(lconf);
    RMContext mockContext = mock(RMContext.class);
    ClientRMService mockClientRMService = mock(ClientRMService.class);
    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
    when(mockContext.getDelegationTokenRenewer()).thenReturn(localDtr);
    when(mockContext.getDispatcher()).thenReturn(dispatcher);
    InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234);
    when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
    localDtr.setRMContext(mockContext);
    localDtr.start();

    MyFS dfs = (MyFS) FileSystem.get(lconf);
    LOG.info("dfs=" + (Object) dfs.hashCode() + ";conf=" + lconf.hashCode());

    Credentials ts = new Credentials();
    // get the delegation tokens
    MyToken token1 = dfs.getDelegationToken(new Text("user1"));

    String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0";
    ts.addToken(new Text(nn1), token1);

    // register the tokens for renewal
    ApplicationId applicationId_0 = BuilderUtils.newApplicationId(0, 0);
    localDtr.addApplication(applicationId_0, ts, true, false);
    waitForEventsToGetProcessed(localDtr);
    if (!eventQueue.isEmpty()) {
      Event evt = eventQueue.take();
      if (evt instanceof RMAppEvent) {
        Assert.assertEquals(RMAppEventType.START, ((RMAppEvent) evt).getType());
      } else {
        fail("RMAppEventType.START event was expected");
      }
    }

    localDtr.applicationFinished(applicationId_0);
    waitForEventsToGetProcessed(localDtr);

    // Token should still be around. Renewal should not fail.
    token1.renew(lconf);

    // Allow the keepalive time to run out
    Thread.sleep(10000L);

    // The token should have been cancelled at this point. Renewal will fail.
    try {
      token1.renew(lconf);
      fail("Renewal of cancelled token should have failed");
    } catch (InvalidToken ite) {
      // expected: the token was cancelled once the keep-alive window expired
    }
  }
  @Test(timeout = 20000)
  public void testConcurrentAddApplication()
      throws IOException, InterruptedException, BrokenBarrierException {
    final CyclicBarrier startBarrier = new CyclicBarrier(2);
    final CyclicBarrier endBarrier = new CyclicBarrier(2);

    // this token uses barriers to block during renew
    final Credentials creds1 = new Credentials();
    final Token<?> token1 = mock(Token.class);
    creds1.addToken(new Text("token"), token1);
    doReturn(true).when(token1).isManaged();
    doAnswer(
            new Answer<Long>() {
              public Long answer(InvocationOnMock invocation)
                  throws InterruptedException, BrokenBarrierException {
                startBarrier.await();
                endBarrier.await();
                return Long.MAX_VALUE;
              }
            })
        .when(token1)
        .renew(any(Configuration.class));

    // this dummy token fakes renewing
    final Credentials creds2 = new Credentials();
    final Token<?> token2 = mock(Token.class);
    creds2.addToken(new Text("token"), token2);
    doReturn(true).when(token2).isManaged();
    doReturn(Long.MAX_VALUE).when(token2).renew(any(Configuration.class));

    // fire up the renewer
    final DelegationTokenRenewer dtr = createNewDelegationTokenRenewer(conf, counter);
    dtr.init(conf);
    RMContext mockContext = mock(RMContext.class);
    ClientRMService mockClientRMService = mock(ClientRMService.class);
    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
    InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234);
    when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
    dtr.setRMContext(mockContext);
    when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr);
    dtr.start();
    // submit a job that blocks during renewal
    Thread submitThread =
        new Thread() {
          @Override
          public void run() {
            dtr.addApplication(mock(ApplicationId.class), creds1, false, false);
          }
        };
    submitThread.start();

    // wait till 1st submit blocks, then submit another
    startBarrier.await();
    dtr.addApplication(mock(ApplicationId.class), creds2, false, false);
    // signal 1st to complete
    endBarrier.await();
    submitThread.join();
  }
  private void handleAppSubmitEvent(DelegationTokenRenewerAppSubmitEvent evt)
      throws IOException, InterruptedException {
    ApplicationId applicationId = evt.getApplicationId();
    Credentials ts = evt.getCredentials();
    boolean shouldCancelAtEnd = evt.shouldCancelAtEnd();
    if (ts == null) {
      return; // nothing to add
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("Registering tokens for renewal for:" + " appId = " + applicationId);
    }

    Collection<Token<?>> tokens = ts.getAllTokens();
    long now = System.currentTimeMillis();

    // find tokens for renewal, but don't add timers until we know
    // all renewable tokens are valid
    // At RM restart it is safe to assume that all the previously added tokens
    // are valid
    appTokens.put(
        applicationId, Collections.synchronizedSet(new HashSet<DelegationTokenToRenew>()));
    Set<DelegationTokenToRenew> tokenList = new HashSet<DelegationTokenToRenew>();
    boolean hasHdfsToken = false;
    for (Token<?> token : tokens) {
      if (token.isManaged()) {
        tokenList.add(
            new DelegationTokenToRenew(
                applicationId, token, getConfig(), now, shouldCancelAtEnd, evt.getUser()));
        if (token.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
          LOG.info(applicationId + " found existing hdfs token " + token);
          hasHdfsToken = true;
        }
      }
    }

    if (!tokenList.isEmpty()) {
      // Renewing token and adding it to timer calls are separated purposefully
      // If user provides incorrect token then it should not be added for
      // renewal.
      for (DelegationTokenToRenew dtr : tokenList) {
        try {
          renewToken(dtr);
        } catch (IOException ioe) {
          throw new IOException("Failed to renew token: " + dtr.token, ioe);
        }
      }
      for (DelegationTokenToRenew dtr : tokenList) {
        appTokens.get(applicationId).add(dtr);
        setTimerForTokenRenewal(dtr);
      }
    }

    if (!hasHdfsToken) {
      requestNewHdfsDelegationToken(applicationId, evt.getUser(), shouldCancelAtEnd);
    }
  }
 private Token<? extends TokenIdentifier> generateDelegationToken(
     final NameNode namenode, final UserGroupInformation ugi, final String renewer)
     throws IOException {
   final Credentials c =
       DelegationTokenSecretManager.createCredentials(
           namenode, ugi, renewer != null ? renewer : ugi.getShortUserName());
   final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
   t.setKind(WebHdfsFileSystem.TOKEN_KIND);
   SecurityUtil.setTokenService(t, namenode.getHttpAddress());
   return t;
 }
 private Credentials parseCredentials(ApplicationSubmissionContext application)
     throws IOException {
   Credentials credentials = new Credentials();
   DataInputByteBuffer dibb = new DataInputByteBuffer();
   ByteBuffer tokens = application.getAMContainerSpec().getTokens();
   if (tokens != null) {
     dibb.reset(tokens);
     credentials.readTokenStorageStream(dibb);
     tokens.rewind();
   }
   return credentials;
 }
Example #17
 @Override
 public synchronized void deleteKey(String name) throws IOException {
   Metadata meta = getMetadata(name);
   if (meta == null) {
     throw new IOException("Key " + name + " does not exist in " + this);
   }
   for (int v = 0; v < meta.getVersions(); ++v) {
     credentials.removeSecretKey(new Text(buildVersionName(name, v)));
   }
   credentials.removeSecretKey(new Text(name));
   cache.remove(name);
 }
  @SuppressWarnings("unchecked")
  public static synchronized void registerDelegationTokensForRenewal(
      JobID jobId, Credentials ts, Configuration conf) {
    if (ts == null) return; // nothing to add

    Collection<Token<? extends TokenIdentifier>> tokens = ts.getAllTokens();
    long now = System.currentTimeMillis();

    for (Token<? extends TokenIdentifier> t : tokens) {
      // currently we only check for HDFS delegation tokens
      // later we can add more different types.
      if (!t.getKind().equals(kindHdfs)) {
        continue;
      }
      Token<DelegationTokenIdentifier> dt = (Token<DelegationTokenIdentifier>) t;

      // first renew happens immediately
      DelegationTokenToRenew dtr = new DelegationTokenToRenew(jobId, dt, conf, now);

      addTokenToList(dtr);

      setTimerForTokenRenewal(dtr, true);
      LOG.info(
          "registering token for renewal for service ="
              + dt.getService()
              + " and jobID = "
              + jobId);
    }
  }
  /**
   * Creates a ByteBuffer with serialized {@link Credentials}.
   *
   * @param creds The credentials.
   * @return The ByteBuffer with the credentials.
   * @throws IOException
   */
  public static ByteBuffer createTokenBuffer(Credentials creds) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();

    creds.writeTokenStorageToStream(dob);

    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  }
  private static void loadSecureStore() throws IOException {
    if (!UserGroupInformation.isSecurityEnabled()) {
      return;
    }

    File file = new File(Constants.Files.CREDENTIALS);
    if (file.exists()) {
      Credentials credentials = new Credentials();
      try (DataInputStream input = new DataInputStream(new FileInputStream(file))) {
        credentials.readTokenStorageStream(input);
      }

      UserGroupInformation.getCurrentUser().addCredentials(credentials);
      LOG.info("Secure store updated from {}", file);
    }
  }
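loadSecureStore() above expects some earlier step to have written a serialized Credentials blob to the well-known credentials file. A hedged sketch of that writer side follows; the SecureStoreWriter class name is made up, and the file would be the same Constants.Files.CREDENTIALS path read above.

import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.security.Credentials;

final class SecureStoreWriter {
  /** Serialize the given credentials into the file that loadSecureStore() reads. */
  static void saveSecureStore(Credentials credentials, File file) throws IOException {
    try (DataOutputStream out = new DataOutputStream(new FileOutputStream(file))) {
      credentials.writeTokenStorageToStream(out);
    }
  }
}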
Example #21
  private void addRMDelegationToken(final String renewer, final Credentials credentials)
      throws IOException, YarnException {
    // Get the ResourceManager delegation token
    final org.apache.hadoop.yarn.api.records.Token rmDelegationToken =
        yarnClient.getRMDelegationToken(new Text(renewer));

    Token<RMDelegationTokenIdentifier> token;
    // TODO: Use the utility method getRMDelegationTokenService in ClientRMProxy to remove the
    // separate handling of HA and non-HA cases once the hadoop dependency is 2.4 or above.
    if (conf.getBoolean(RM_HA_ENABLED, DEFAULT_RM_HA_ENABLED)) {
      LOG.info("Yarn Resource Manager HA is enabled");
      token = getRMHAToken(rmDelegationToken);
    } else {
      LOG.info("Yarn Resource Manager HA is not enabled");
      InetSocketAddress rmAddress =
          conf.getSocketAddr(
              YarnConfiguration.RM_ADDRESS,
              YarnConfiguration.DEFAULT_RM_ADDRESS,
              YarnConfiguration.DEFAULT_RM_PORT);

      token = ConverterUtils.convertFromYarn(rmDelegationToken, rmAddress);
    }

    LOG.info("RM dt {}", token);

    credentials.addToken(token.getService(), token);
  }
Example #22
 @Override
 public synchronized KeyVersion getKeyVersion(String versionName) throws IOException {
   byte[] bytes = credentials.getSecretKey(new Text(versionName));
   if (bytes == null) {
     return null;
   }
   return new KeyVersion(getBaseName(versionName), versionName, bytes);
 }
 private void populateTokens(Job job) {
   // Credentials in the job will not have delegation tokens
   // because security is disabled. Fetch delegation tokens
   // and populate the credential in the job.
   try {
     Credentials ts = job.getCredentials();
     Path p1 = new Path("file1");
     p1 = p1.getFileSystem(job.getConfiguration()).makeQualified(p1);
     Credentials cred = new Credentials();
     TokenCache.obtainTokensForNamenodesInternal(cred, new Path[] {p1}, job.getConfiguration());
     for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
       ts.addToken(new Text("Hdfs"), t);
     }
   } catch (IOException e) {
     Assert.fail("Exception " + e);
   }
 }
Example #24
 @Override
 public synchronized KeyVersion rollNewVersion(String name, byte[] material) throws IOException {
   Metadata meta = getMetadata(name);
   if (meta == null) {
     throw new IOException("Key " + name + " not found");
   }
   if (meta.getBitLength() != 8 * material.length) {
     throw new IOException(
         "Wrong key length. Required "
             + meta.getBitLength()
             + ", but got "
             + (8 * material.length));
   }
   int nextVersion = meta.addVersion();
   credentials.addSecretKey(new Text(name), meta.serialize());
   String versionName = buildVersionName(name, nextVersion);
   credentials.addSecretKey(new Text(versionName), material);
   return new KeyVersion(name, versionName, material);
 }
    /** Attempts to access the token cache from within the task, as a client would. */
    @Override
    public void map(IntWritable key, IntWritable value, Context context)
        throws IOException, InterruptedException {
      // get token storage and a key
      Credentials ts = context.getCredentials();
      byte[] key1 = ts.getSecretKey(new Text("alias1"));
      Collection<Token<? extends TokenIdentifier>> dts = ts.getAllTokens();
      int dts_size = 0;
      if (dts != null) dts_size = dts.size();

      if (dts_size != 2) { // one job token and one delegation token
        throw new RuntimeException("tokens are not available"); // fail the test
      }

      if (key1 == null || ts == null || ts.numberOfSecretKeys() != NUM_OF_KEYS) {
        throw new RuntimeException("secret keys are not available"); // fail the test
      }
      super.map(key, value, context);
    }
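For the mapper above to find "alias1", the submitting client must have placed secret keys into the job's Credentials before submission. A hedged sketch of that client-side step is shown below; the SecretKeySetup class, the assumption that NUM_OF_KEYS is 4, and the secret values are all illustrative.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

final class SecretKeySetup {
  /** Add illustrative secret keys to the job credentials before submission. */
  static void addSecretKeys(Job job) {
    for (int i = 1; i <= 4; i++) { // NUM_OF_KEYS is assumed to be 4 here
      job.getCredentials().addSecretKey(
          new Text("alias" + i),
          ("secret value for alias" + i).getBytes(StandardCharsets.UTF_8));
    }
  }
}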
  public static Credentials getDTfromRemote(String nnAddr, String renewer) throws IOException {
    DataInputStream dis = null;

    try {
      StringBuffer url = new StringBuffer();
      if (renewer != null) {
        url.append(nnAddr)
            .append(GetDelegationTokenServlet.PATH_SPEC)
            .append("?")
            .append(GetDelegationTokenServlet.RENEWER)
            .append("=")
            .append(renewer);
      } else {
        url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug("Retrieving token from: " + url);
      }

      URL remoteURL = new URL(url.toString());
      SecurityUtil.fetchServiceTicket(remoteURL);
      URLConnection connection = remoteURL.openConnection();

      InputStream in = connection.getInputStream();
      Credentials ts = new Credentials();
      dis = new DataInputStream(in);
      ts.readFields(dis);
      for (Token<?> token : ts.getAllTokens()) {
        token.setKind(HftpFileSystem.TOKEN_KIND);
        token.setService(
            new Text(
                SecurityUtil.buildDTServiceName(
                    remoteURL.toURI(), DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT)));
      }
      return ts;
    } catch (Exception e) {
      throw new IOException("Unable to obtain remote token", e);
    } finally {
      if (dis != null) dis.close();
    }
  }
Example #27
 @Override
 public synchronized List<String> getKeys() throws IOException {
   List<String> list = new ArrayList<String>();
   List<Text> keys = credentials.getAllSecretKeys();
   for (Text key : keys) {
     if (key.find("@") == -1) {
       list.add(key.toString());
     }
   }
   return list;
 }
 private void printTokens(JobID jobId, Credentials credentials) throws IOException {
   if (LOG.isDebugEnabled()) {
     LOG.debug("Printing tokens for job: " + jobId);
     for (Token<?> token : credentials.getAllTokens()) {
       if (token.getKind().toString().equals("HDFS_DELEGATION_TOKEN")) {
         LOG.debug(
             "Submitting with "
                 + org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
                     .stringifyToken(token));
       }
     }
   }
 }
Example #29
 @Override
 public synchronized Metadata getMetadata(String name) throws IOException {
   if (cache.containsKey(name)) {
     return cache.get(name);
   }
   byte[] serialized = credentials.getSecretKey(new Text(name));
   if (serialized == null) {
     return null;
   }
   Metadata result = new Metadata(serialized);
   cache.put(name, result);
   return result;
 }
Example #30
  private ByteBuffer getSecurityTokens() throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    Closer closer = Closer.create();
    try {
      // Remove the AM->RM token so that containers cannot access it. This has to
      // happen before the credentials are serialized, otherwise the token would
      // still be present in the returned buffer.
      Iterator<Token<?>> tokenIterator = credentials.getAllTokens().iterator();
      while (tokenIterator.hasNext()) {
        Token<?> token = tokenIterator.next();
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
          tokenIterator.remove();
        }
      }

      DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
      credentials.writeTokenStorageToStream(dataOutputBuffer);

      return ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
    } catch (Throwable t) {
      throw closer.rethrow(t);
    } finally {
      closer.close();
    }
  }