private static DistributedFileSystem getDFSForToken(
      Token<DelegationTokenIdentifier> token, final Configuration conf) throws Exception {
    DistributedFileSystem dfs = null;
    try {
      // TODO: The service is usually an IPaddress:port. We convert
      // it to dns name and then obtain the filesystem just so that
      // we reuse the existing filesystem handle (that the jobtracker
      // might have for this namenode; the namenode is usually
      // specified as the dns name in the jobtracker).
      // THIS IS A WORKAROUND FOR NOW. NEED TO SOLVE THIS PROBLEM
      // IN A BETTER WAY.
      String[] ipaddr = token.getService().toString().split(":");
      InetAddress iaddr = InetAddress.getByName(ipaddr[0]);
      String dnsName = iaddr.getCanonicalHostName();
      final URI uri = new URI(SCHEME + "://" + dnsName + ":" + ipaddr[1]);
      dfs =
          (DistributedFileSystem)
              UserGroupInformation.getLoginUser()
                  .doAs(
                      new PrivilegedExceptionAction<DistributedFileSystem>() {
                        public DistributedFileSystem run() throws IOException {
                          return (DistributedFileSystem) FileSystem.get(uri, conf);
                        }
                      });

    } catch (Exception e) {
      LOG.warn("Failed to create a dfs to renew for:" + token.getService(), e);
      throw e;
    }
    return dfs;
  }
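
For illustration, a minimal, self-contained sketch of the IP-to-DNS conversion the TODO above describes; the address, port, and resulting hostname are hypothetical stand-ins, and "hdfs" stands in for SCHEME (in the real method the service string comes from token.getService()):

public class ServiceToDnsExample {
  public static void main(String[] args) throws Exception {
    String service = "10.0.0.1:8020"; // hypothetical IPaddress:port service
    String[] parts = service.split(":");
    // Reverse lookup: resolves e.g. to nn1.example.com so the cached
    // FileSystem handle keyed by DNS name can be reused.
    String dnsName = java.net.InetAddress.getByName(parts[0]).getCanonicalHostName();
    System.out.println(new java.net.URI("hdfs://" + dnsName + ":" + parts[1]));
  }
}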
Example #2
  /** @return a string representation of the token */
  public static String stringifyToken(final Token<?> token) throws IOException {
    DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
    DataInputStream in = new DataInputStream(buf);
    ident.readFields(in);

    if (token.getService().getLength() > 0) {
      return ident + " on " + token.getService();
    } else {
      return ident.toString();
    }
  }
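
A hedged usage sketch: stringify every HDFS delegation token held by the current user. Note that stringifyToken decodes the identifier bytes as a DelegationTokenIdentifier, so the kind check below keeps it safe for mixed credentials.

// Sketch only; assumes it runs where stringifyToken is visible.
Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
for (Token<?> t : creds.getAllTokens()) {
  if (t.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)) {
    System.out.println(stringifyToken(t)); // e.g. "<ident> on <host:port>"
  }
}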
  /**
   * Sets the connector information needed to communicate with Accumulo in this job.
   *
   * <p><b>WARNING:</b> Some tokens, when serialized, divulge sensitive information in the
   * configuration as a means to pass the token to MapReduce tasks. This information is BASE64
   * encoded to provide a charset safe conversion to a string, but this conversion is not intended
   * to be secure. {@link PasswordToken} is one example that is insecure in this way; however,
   * {@link DelegationToken}s, acquired using {@link
   * SecurityOperations#getDelegationToken(DelegationTokenConfig)}, are not subject to this concern.
   *
   * @param job the Hadoop job instance to be configured
   * @param principal a valid Accumulo user name (user must have Table.CREATE permission)
   * @param token the user's authentication token (for example a {@link PasswordToken} or {@link
   *     KerberosToken})
   * @since 1.5.0
   */
  public static void setConnectorInfo(JobConf job, String principal, AuthenticationToken token)
      throws AccumuloSecurityException {
    if (token instanceof KerberosToken) {
      log.info("Received KerberosToken, attempting to fetch DelegationToken");
      try {
        Instance instance = getInstance(job);
        Connector conn = instance.getConnector(principal, token);
        token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
      } catch (Exception e) {
        log.warn(
            "Failed to automatically obtain DelegationToken, Mappers/Reducers will likely fail to communicate with Accumulo",
            e);
      }
    }
    // DelegationTokens can be passed securely from user to task without serializing insecurely in
    // the configuration
    if (token instanceof DelegationTokenImpl) {
      DelegationTokenImpl delegationToken = (DelegationTokenImpl) token;

      // Convert it into a Hadoop Token
      AuthenticationTokenIdentifier identifier = delegationToken.getIdentifier();
      Token<AuthenticationTokenIdentifier> hadoopToken =
          new Token<>(
              identifier.getBytes(),
              delegationToken.getPassword(),
              identifier.getKind(),
              delegationToken.getServiceName());

      // Add the Hadoop Token to the Job so it gets serialized and passed along.
      job.getCredentials().addToken(hadoopToken.getService(), hadoopToken);
    }

    InputConfigurator.setConnectorInfo(CLASS, job, principal, token);
  }
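
A hedged usage sketch, assuming the enclosing class is the mapred AccumuloInputFormat; the principal and password are placeholders. As the javadoc warns, a PasswordToken passed here ends up BASE64-encoded (not encrypted) in the job configuration.

JobConf job = new JobConf();
try {
  // Placeholder credentials; a KerberosToken would instead trigger the
  // automatic DelegationToken fetch shown above.
  AccumuloInputFormat.setConnectorInfo(job, "mapreduce_user", new PasswordToken("hunter2"));
} catch (AccumuloSecurityException e) {
  throw new RuntimeException("could not set connector info", e);
}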
Example #4
  private void addRMDelegationToken(final String renewer, final Credentials credentials)
      throws IOException, YarnException {
    // Get the ResourceManager delegation rmToken
    final org.apache.hadoop.yarn.api.records.Token rmDelegationToken =
        yarnClient.getRMDelegationToken(new Text(renewer));

    Token<RMDelegationTokenIdentifier> token;
    // TODO: Use the utility method getRMDelegationTokenService in ClientRMProxy to remove the
    // separate handling of HA and non-HA cases once the hadoop dependency is changed to
    // hadoop 2.4 or above.
    if (conf.getBoolean(RM_HA_ENABLED, DEFAULT_RM_HA_ENABLED)) {
      LOG.info("Yarn Resource Manager HA is enabled");
      token = getRMHAToken(rmDelegationToken);
    } else {
      LOG.info("Yarn Resource Manager HA is not enabled");
      InetSocketAddress rmAddress =
          conf.getSocketAddr(
              YarnConfiguration.RM_ADDRESS,
              YarnConfiguration.DEFAULT_RM_ADDRESS,
              YarnConfiguration.DEFAULT_RM_PORT);

      token = ConverterUtils.convertFromYarn(rmDelegationToken, rmAddress);
    }

    LOG.info("RM dt {}", token);

    credentials.addToken(token.getService(), token);
  }
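
A sketch of the unified path the TODO above points at, assuming a Hadoop 2.4+ dependency where ClientRMProxy.getRMDelegationTokenService is available: it computes the correct service name for both HA and non-HA ResourceManagers, so the branch disappears. It reuses the method's yarnClient, renewer, conf, and credentials.

// Hedged sketch: requires hadoop-yarn-client 2.4 or above.
org.apache.hadoop.yarn.api.records.Token rmDelegationToken =
    yarnClient.getRMDelegationToken(new Text(renewer));
Token<RMDelegationTokenIdentifier> token =
    ConverterUtils.convertFromYarn(
        rmDelegationToken, ClientRMProxy.getRMDelegationTokenService(conf));
credentials.addToken(token.getService(), token);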
  @SuppressWarnings("unchecked")
  public static synchronized void registerDelegationTokensForRenewal(
      JobID jobId, Credentials ts, Configuration conf) {
    if (ts == null) return; // nothing to add

    Collection<Token<? extends TokenIdentifier>> tokens = ts.getAllTokens();
    long now = System.currentTimeMillis();

    for (Token<? extends TokenIdentifier> t : tokens) {
      // Currently we only check for HDFS delegation tokens;
      // more token kinds can be added later.
      if (!t.getKind().equals(kindHdfs)) {
        continue;
      }
      Token<DelegationTokenIdentifier> dt = (Token<DelegationTokenIdentifier>) t;

      // first renew happens immediately
      DelegationTokenToRenew dtr = new DelegationTokenToRenew(jobId, dt, conf, now);

      addTokenToList(dtr);

      setTimerForTokenRenewal(dtr, true);
      LOG.info(
          "registering token for renewal for service ="
              + dt.getService()
              + " and jobID = "
              + jobId);
    }
  }
  @Test
  public void testGetTokensForNamenodes() throws IOException {

    Credentials credentials = new Credentials();
    TokenCache.obtainTokensForNamenodesInternal(credentials, new Path[] {p1, p2}, jConf);

    // this token is keyed by hostname:port key.
    String fs_addr = SecurityUtil.buildDTServiceName(p1.toUri(), NameNode.DEFAULT_PORT);
    Token<DelegationTokenIdentifier> nnt = TokenCache.getDelegationToken(credentials, fs_addr);
    System.out.println("dt for " + p1 + "(" + fs_addr + ")" + " = " + nnt);
    assertNotNull("Token for nn is null", nnt);

    // verify the size
    Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
    assertEquals("number of tokens is not 1", 1, tns.size());

    boolean found = false;
    for (Token<? extends TokenIdentifier> t : tns) {
      if (t.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
          && t.getService().equals(new Text(fs_addr))) {
        found = true;
      }
    }
    assertTrue("didn't find token for " + p1, found);
  }
  @Test
  public void testGetTokensForViewFS() throws IOException, URISyntaxException {
    Configuration conf = new Configuration(jConf);
    FileSystem dfs = dfsCluster.getFileSystem();
    String serviceName = dfs.getCanonicalServiceName();

    Path p1 = new Path("/mount1");
    Path p2 = new Path("/mount2");
    p1 = dfs.makeQualified(p1);
    p2 = dfs.makeQualified(p2);

    conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
    conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
    Credentials credentials = new Credentials();
    Path lp1 = new Path("viewfs:///dir1");
    Path lp2 = new Path("viewfs:///dir2");
    Path[] paths = new Path[2];
    paths[0] = lp1;
    paths[1] = lp2;
    TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);

    Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
    assertEquals("number of tokens is not 1", 1, tns.size());

    boolean found = false;
    for (Token<? extends TokenIdentifier> tt : tns) {
      System.out.println("token=" + tt);
      if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
          && tt.getService().equals(new Text(serviceName))) {
        found = true;
      }
    }
    assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
  }
Example #8
 /**
  * Obtain the tokens needed by the job and put them in the UGI
  *
  * @param conf configuration used to read the localized tokens file
  */
 protected void downloadTokensAndSetupUGI(Configuration conf) {
   try {
     this.currentUser = UserGroupInformation.getCurrentUser();
     if (UserGroupInformation.isSecurityEnabled()) {
       // Read the file-system tokens from the localized tokens-file.
       Path jobSubmitDir =
           FileContext.getLocalFSFileContext()
               .makeQualified(
                   new Path(new File(DragonJobConfig.JOB_SUBMIT_DIR).getAbsolutePath()));
       Path jobTokenFile = new Path(jobSubmitDir, DragonJobConfig.APPLICATION_TOKENS_FILE);
       fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
       LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile=" + jobTokenFile);
       for (Token<? extends TokenIdentifier> tk : fsTokens.getAllTokens()) {
         if (LOG.isDebugEnabled()) {
           LOG.debug(
               "Token of kind "
                   + tk.getKind()
                   + "in current ugi in the AppMaster for service "
                   + tk.getService());
         }
         currentUser.addToken(tk); // For use by AppMaster itself.
       }
     }
   } catch (IOException e) {
     throw new YarnException(e);
   }
 }
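
For context, a hedged sketch of the producer side of this exchange: at submission time the client writes the same tokens file that this method reads back in the AppMaster. The variables mirror the constants and names used above.

Credentials credentials = new Credentials();
// ... credentials.addToken(service, token) for each token the job needs ...
Path tokensFile = new Path(jobSubmitDir, DragonJobConfig.APPLICATION_TOKENS_FILE);
// Read back above via Credentials.readTokenStorageFile(jobTokenFile, conf).
credentials.writeTokenStorageFile(tokensFile, conf);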
Example #9
 public static HdfsProtos.BlockTokenIdentifierProto toProto(Token<?> blockToken) {
   return HdfsProtos.BlockTokenIdentifierProto.newBuilder()
       .setIdentifier(ByteString.copyFrom(blockToken.getIdentifier()))
       .setPassword(ByteString.copyFrom(blockToken.getPassword()))
       .setKind(blockToken.getKind().toString())
       .setService(blockToken.getService().toString())
       .build();
 }
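
For symmetry, a hedged sketch of the inverse conversion (not shown in the original source); the proto getters mirror the setters used in toProto above.

public static Token<BlockTokenIdentifier> fromProto(HdfsProtos.BlockTokenIdentifierProto proto) {
  // Rebuild the Hadoop token from the four serialized fields.
  return new Token<BlockTokenIdentifier>(
      proto.getIdentifier().toByteArray(),
      proto.getPassword().toByteArray(),
      new Text(proto.getKind()),
      new Text(proto.getService()));
}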
 @SuppressWarnings("unchecked")
 @Override
 public Token<StramDelegationTokenIdentifier> selectToken(
     Text text, Collection<Token<? extends TokenIdentifier>> clctn) {
   Token<StramDelegationTokenIdentifier> token = null;
   if (text != null) {
     for (Token<? extends TokenIdentifier> ctoken : clctn) {
       if (StramDelegationTokenIdentifier.IDENTIFIER_KIND.equals(ctoken.getKind())
           && text.equals(ctoken.getService())) {
         token = (Token<StramDelegationTokenIdentifier>) ctoken;
       }
     }
   }
   return token;
 }
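
A hypothetical usage of the selector: pick the STRAM delegation token for a given service out of the current user's credentials. The service string and the selector variable are placeholders.

Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
// "selector" is an instance of the enclosing TokenSelector implementation.
Token<StramDelegationTokenIdentifier> stramToken =
    selector.selectToken(new Text("stram-host:9090"), creds.getAllTokens()); // null if absent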
Example #11
  // cancel a token
  private static void cancelToken(DelegationTokenToRenew t) {
    Token<DelegationTokenIdentifier> token = t.token;
    Configuration conf = t.conf;

    if (token.getKind().equals(kindHdfs)) {
      try {
        DistributedFileSystem dfs = getDFSForToken(token, conf);
        if (LOG.isDebugEnabled())
          LOG.debug("canceling token " + token.getService() + " for dfs=" + dfs);
        dfs.cancelDelegationToken(token);
      } catch (Exception e) {
        LOG.warn("Failed to cancel " + token, e);
      }
    }
  }
Example #12
    @Override
    public void run() {
      Token<DelegationTokenIdentifier> token = dttr.token;
      long newExpirationDate = 0;
      try {
        newExpirationDate = renewDelegationToken(dttr);
      } catch (Exception e) {
        return; // message logged in renewDT method
      }
      if (LOG.isDebugEnabled())
        LOG.debug("renewing for:" + token.getService() + ";newED=" + newExpirationDate);

      // new expiration date
      dttr.expirationDate = newExpirationDate;
      setTimerForTokenRenewal(dttr, false); // set the next one
    }
Example #13
  /** Obtain Kerberos security token for HBase. */
  private static void obtainTokenForHBase(Credentials credentials, Configuration conf)
      throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
      LOG.info("Attempting to obtain Kerberos security token for HBase");
      try {
        // ----
        // Intended call: HBaseConfiguration.addHbaseResources(conf);
        Class.forName("org.apache.hadoop.hbase.HBaseConfiguration")
            .getMethod("addHbaseResources", Configuration.class)
            .invoke(null, conf);
        // ----

        LOG.info("HBase security setting: {}", conf.get("hbase.security.authentication"));

        if (!"kerberos".equals(conf.get("hbase.security.authentication"))) {
          LOG.info("HBase has not been configured to use Kerberos.");
          return;
        }

        LOG.info("Obtaining Kerberos security token for HBase");
        // ----
        // Intended call: Token<AuthenticationTokenIdentifier> token = TokenUtil.obtainToken(conf);
        Token<?> token =
            (Token<?>)
                Class.forName("org.apache.hadoop.hbase.security.token.TokenUtil")
                    .getMethod("obtainToken", Configuration.class)
                    .invoke(null, conf);
        // ----

        if (token == null) {
          LOG.error("No Kerberos security token for HBase available");
          return;
        }

        credentials.addToken(token.getService(), token);
        LOG.info("Added HBase Kerberos security token to credentials.");
      } catch (ClassNotFoundException
          | NoSuchMethodException
          | IllegalAccessException
          | InvocationTargetException e) {
        LOG.info(
            "HBase is not available (not packaged with this application): {} : \"{}\".",
            e.getClass().getSimpleName(),
            e.getMessage());
      }
    }
  }
Example #14
  /**
   * Gets the token information from the request parameters and calls SecuredHDFS to verify the
   * token.
   *
   * <p>All token properties will be deserialized from string to a Token object
   *
   * @param protData input parameters
   * @param context servlet context which contains the NN address
   * @throws SecurityException Thrown when authentication fails
   */
  public static void verifyToken(ProtocolData protData, ServletContext context) {
    try {
      if (UserGroupInformation.isSecurityEnabled()) {
        Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
        String tokenString = protData.getToken();
        token.decodeFromUrlString(tokenString);

        verifyToken(
            token.getIdentifier(),
            token.getPassword(),
            token.getKind(),
            token.getService(),
            context);
      }
    } catch (IOException e) {
      throw new SecurityException("Failed to verify delegation token", e);
    }
  }
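
A round-trip sketch of the codec this method relies on: encodeToUrlString() produces the string carried in protData, and decodeFromUrlString() restores it. Here "original" stands for any existing delegation token.

String tokenString = original.encodeToUrlString(); // what protData.getToken() carries
Token<DelegationTokenIdentifier> copy = new Token<DelegationTokenIdentifier>();
copy.decodeFromUrlString(tokenString);
assert copy.getService().equals(original.getService());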
Example #15
 /**
  * @return true if this token corresponds to a logical address rather than a specific jobtracker.
  */
 public static boolean isTokenForLogicalAddress(Token<?> token) {
   return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
 }
Example #16
  @Override
  public void secureBulkLoadHFiles(
      RpcController controller,
      SecureBulkLoadHFilesRequest request,
      RpcCallback<SecureBulkLoadHFilesResponse> done) {
    final List<Pair<byte[], String>> familyPaths = new ArrayList<Pair<byte[], String>>();
    for (ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) {
      familyPaths.add(new Pair<byte[], String>(el.getFamily().toByteArray(), el.getPath()));
    }

    Token<?> userToken = null;
    if (userProvider.isHadoopSecurityEnabled()) {
      userToken =
          new Token<>(
              request.getFsToken().getIdentifier().toByteArray(),
              request.getFsToken().getPassword().toByteArray(),
              new Text(request.getFsToken().getKind()),
              new Text(request.getFsToken().getService()));
    }
    final String bulkToken = request.getBulkToken();
    User user = getActiveUser();
    final UserGroupInformation ugi = user.getUGI();
    if (userToken != null) {
      ugi.addToken(userToken);
    } else if (userProvider.isHadoopSecurityEnabled()) {
      // we allow this to pass through in "simple" security mode
      // for mini cluster testing
      ResponseConverter.setControllerException(
          controller, new DoNotRetryIOException("User token cannot be null"));
      done.run(SecureBulkLoadHFilesResponse.newBuilder().setLoaded(false).build());
      return;
    }

    HRegion region = env.getRegion();
    boolean bypass = false;
    if (region.getCoprocessorHost() != null) {
      try {
        bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
      } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
        done.run(SecureBulkLoadHFilesResponse.newBuilder().setLoaded(false).build());
        return;
      }
    }
    boolean loaded = false;
    if (!bypass) {
      // Get the target fs (HBase region server fs) delegation token
      // Since we have checked the permission via 'preBulkLoadHFile', now let's give
      // the 'request user' necessary token to operate on the target fs.
      // After this point the 'doAs' user will hold two tokens, one for the source fs
      // ('request user'), another for the target fs (HBase region server principal).
      if (userProvider.isHadoopSecurityEnabled()) {
        FsDelegationToken targetfsDelegationToken = new FsDelegationToken(userProvider, "renewer");
        try {
          targetfsDelegationToken.acquireDelegationToken(fs);
        } catch (IOException e) {
          ResponseConverter.setControllerException(controller, e);
          done.run(SecureBulkLoadHFilesResponse.newBuilder().setLoaded(false).build());
          return;
        }
        Token<?> targetFsToken = targetfsDelegationToken.getUserToken();
        if (targetFsToken != null
            && (userToken == null || !targetFsToken.getService().equals(userToken.getService()))) {
          ugi.addToken(targetFsToken);
        }
      }

      loaded =
          ugi.doAs(
              new PrivilegedAction<Boolean>() {
                @Override
                public Boolean run() {
                  FileSystem fs = null;
                  try {
                    Configuration conf = env.getConfiguration();
                    fs = FileSystem.get(conf);
                    for (Pair<byte[], String> el : familyPaths) {
                      Path p = new Path(el.getSecond());
                      Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
                      if (!fs.exists(stageFamily)) {
                        fs.mkdirs(stageFamily);
                        fs.setPermission(stageFamily, PERM_ALL_ACCESS);
                      }
                    }
                    // We call bulkLoadHFiles as requesting user
                    // To enable access prior to staging
                    return env.getRegion()
                        .bulkLoadHFiles(
                            familyPaths, true, new SecureBulkLoadListener(fs, bulkToken, conf));
                  } catch (Exception e) {
                    LOG.error("Failed to complete bulk load", e);
                  }
                  return false;
                }
              });
    }
    if (region.getCoprocessorHost() != null) {
      try {
        loaded = region.getCoprocessorHost().postBulkLoadHFile(familyPaths, loaded);
      } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
        done.run(SecureBulkLoadHFilesResponse.newBuilder().setLoaded(false).build());
        return;
      }
    }
    done.run(SecureBulkLoadHFilesResponse.newBuilder().setLoaded(loaded).build());
  }
Example #17
  @Test
  public void testGetTokensForHftpFS() throws IOException, URISyntaxException {
    HftpFileSystem hfs = mock(HftpFileSystem.class);

    DelegationTokenSecretManager dtSecretManager =
        NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem());
    String renewer = "renewer";
    jConf.set(JTConfig.JT_USER_NAME, renewer);
    DelegationTokenIdentifier dtId =
        new DelegationTokenIdentifier(new Text("user"), new Text(renewer), null);
    final Token<DelegationTokenIdentifier> t =
        new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);

    final URI uri = new URI("hftp://host:2222/file1");
    final String fs_addr = SecurityUtil.buildDTServiceName(uri, NameNode.DEFAULT_PORT);
    t.setService(new Text(fs_addr));

    // when(hfs.getUri()).thenReturn(uri);
    Mockito.doAnswer(
            new Answer<URI>() {
              @Override
              public URI answer(InvocationOnMock invocation) throws Throwable {
                return uri;
              }
            })
        .when(hfs)
        .getUri();

    // when(hfs.getDelegationToken()).thenReturn((Token<? extends TokenIdentifier>) t);
    Mockito.doAnswer(
            new Answer<Token<DelegationTokenIdentifier>>() {
              @Override
              public Token<DelegationTokenIdentifier> answer(InvocationOnMock invocation)
                  throws Throwable {
                return t;
              }
            })
        .when(hfs)
        .getDelegationToken(renewer);

    // when(hfs.getDelegationTokens()).thenReturn((Token<? extends TokenIdentifier>) t);
    Mockito.doAnswer(
            new Answer<List<Token<DelegationTokenIdentifier>>>() {
              @Override
              public List<Token<DelegationTokenIdentifier>> answer(InvocationOnMock invocation)
                  throws Throwable {
                return Collections.singletonList(t);
              }
            })
        .when(hfs)
        .getDelegationTokens(renewer);

    // when(hfs.getCanonicalServiceName()).thenReturn(fs_addr);
    Mockito.doAnswer(
            new Answer<String>() {
              @Override
              public String answer(InvocationOnMock invocation) throws Throwable {
                return fs_addr;
              }
            })
        .when(hfs)
        .getCanonicalServiceName();

    Credentials credentials = new Credentials();
    Path p = new Path(uri.toString());
    System.out.println("Path for hftp=" + p + "; fs_addr=" + fs_addr + "; rn=" + renewer);
    TokenCache.obtainTokensForNamenodesInternal(hfs, credentials, jConf);

    Collection<Token<? extends TokenIdentifier>> tns = credentials.getAllTokens();
    assertEquals("number of tokens is not 1", 1, tns.size());

    boolean found = false;
    for (Token<? extends TokenIdentifier> tt : tns) {
      System.out.println("token=" + tt);
      if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
          && tt.getService().equals(new Text(fs_addr))) {
        found = true;
        assertEquals("different token", tt, t);
      }
    }
    assertTrue("didn't find token for " + p, found);
  }
Example #18
 private void verifyServiceInToken(
     ServletContext context, HttpServletRequest request, String expected) throws IOException {
   UserGroupInformation ugi = JspHelper.getUGI(context, request, conf);
   Token<? extends TokenIdentifier> tokenInUgi = ugi.getTokens().iterator().next();
   Assert.assertEquals(expected, tokenInUgi.getService().toString());
 }
Example #19
 public static String getServiceAddressFromToken(Token<?> token) {
   String service = token.getService().toString();
   return isTokenForLogicalAddress(token)
       ? service.substring(HA_DT_SERVICE_PREFIX.length())
       : service;
 }
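
A round-trip sketch grounded in the two helpers above, with "logical-jt" as a placeholder logical name: a service written with the HA prefix is recognized as logical, and the prefix is stripped back off.

Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
t.setService(new Text(HA_DT_SERVICE_PREFIX + "logical-jt"));
assert isTokenForLogicalAddress(t);                        // true: service starts with the prefix
assert "logical-jt".equals(getServiceAddressFromToken(t)); // prefix stripped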