Example #1
  @Before
  public void setup() {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    ExitUtil.disableSystemExit();
    conf = new YarnConfiguration();
    UserGroupInformation.setConfiguration(conf);
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
  }
  @Test
  public void testAdminAclsWithFileSystemBasedConfigurationProvider()
      throws IOException, YarnException {
    configuration.set(
        YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");

    // upload default configurations
    uploadDefaultConfiguration();

    try {
      rm = new MockRM(configuration);
      rm.init(configuration);
      rm.start();
    } catch (Exception ex) {
      fail("Should not get any exceptions");
    }

    String aclStringBefore = rm.adminService.getAccessControlList().getAclString().trim();

    YarnConfiguration yarnConf = new YarnConfiguration();
    yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL, "world:anyone:rwcda");
    uploadConfiguration(yarnConf, "yarn-site.xml");

    rm.adminService.refreshAdminAcls(RefreshAdminAclsRequest.newInstance());

    String aclStringAfter = rm.adminService.getAccessControlList().getAclString().trim();

    Assert.assertNotEquals(aclStringBefore, aclStringAfter);
    Assert.assertEquals("world:anyone:rwcda", aclStringAfter);
  }
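The test above depends on uploadDefaultConfiguration() (shown in Example #3 below) and on a lower-level uploadConfiguration(...) helper that does not appear anywhere in these snippets. A minimal sketch of what that helper might look like, assuming the fixture holds a FileSystem fs and a Path workingPath that the FileSystemBasedConfigurationProvider is pointed at (both names are assumptions):

  // Sketch only: fs and workingPath are assumed fixture fields, not shown above.
  private void uploadConfiguration(Configuration conf, String confFileName) throws IOException {
    // Serialize the configuration as XML into the provider's directory so the
    // next refresh*() call on the RM admin service picks it up.
    DataOutputStream output = fs.create(new Path(workingPath, confFileName));
    conf.writeXml(output);
    output.close();
  }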
Example #3
  @Before
  public void setUp() throws Exception {
    resourceMgrDelegate = mock(ResourceMgrDelegate.class);
    conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
    clientCache = new ClientCache(conf, resourceMgrDelegate);
    clientCache = spy(clientCache);
    yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache);
    yarnRunner = spy(yarnRunner);
    submissionContext = mock(ApplicationSubmissionContext.class);
    doAnswer(
            new Answer<ApplicationSubmissionContext>() {
              @Override
              public ApplicationSubmissionContext answer(InvocationOnMock invocation)
                  throws Throwable {
                return submissionContext;
              }
            })
        .when(yarnRunner)
        .createApplicationSubmissionContext(
            any(Configuration.class), any(String.class), any(Credentials.class));

    appId = recordFactory.newRecordInstance(ApplicationId.class);
    appId.setClusterTimestamp(System.currentTimeMillis());
    appId.setId(1);
    jobId = TypeConverter.fromYarn(appId);
    if (testWorkDir.exists()) {
      FileContext.getLocalFSFileContext().delete(new Path(testWorkDir.toString()), true);
    }
    testWorkDir.mkdirs();
  }
  private void uploadDefaultConfiguration() throws IOException {
    Configuration conf = new Configuration();
    uploadConfiguration(conf, "core-site.xml");

    YarnConfiguration yarnConf = new YarnConfiguration();
    yarnConf.set(
        YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
    uploadConfiguration(yarnConf, "yarn-site.xml");

    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    uploadConfiguration(csConf, "capacity-scheduler.xml");

    Configuration hadoopPolicyConf = new Configuration(false);
    hadoopPolicyConf.addResource(YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);
    uploadConfiguration(hadoopPolicyConf, "hadoop-policy.xml");
  }
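The setUp() and uploadDefaultConfiguration() methods above reference several fixture fields (recordFactory, testWorkDir, conf, and the mocked collaborators) whose declarations are not part of the snippet. A plausible sketch of those declarations, matching how they are used above; the initializers are assumptions:

  private static final RecordFactory recordFactory =
      RecordFactoryProvider.getRecordFactory(null);
  // Assumed scratch directory for the test; the actual location may differ.
  private static final File testWorkDir = new File("target", "test-work-dir");

  private YarnConfiguration conf;
  private ResourceMgrDelegate resourceMgrDelegate;
  private ClientCache clientCache;
  private YARNRunner yarnRunner;
  private ApplicationSubmissionContext submissionContext;
  private ApplicationId appId;
  private JobID jobId;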
Example #5
  @Test(timeout = 20000)
  public void testHistoryServerToken() throws Exception {
    // Set the master principal in the config
    conf.set(YarnConfiguration.RM_PRINCIPAL, "foo@LOCAL");

    final String masterPrincipal = Master.getMasterPrincipal(conf);

    final MRClientProtocol hsProxy = mock(MRClientProtocol.class);
    when(hsProxy.getDelegationToken(any(GetDelegationTokenRequest.class)))
        .thenAnswer(
            new Answer<GetDelegationTokenResponse>() {
              public GetDelegationTokenResponse answer(InvocationOnMock invocation) {
                GetDelegationTokenRequest request =
                    (GetDelegationTokenRequest) invocation.getArguments()[0];
                // check that the renewer matches the cluster's RM principal
                assertEquals(masterPrincipal, request.getRenewer());

                DelegationToken token = recordFactory.newRecordInstance(DelegationToken.class);
                // none of these fields matter for the sake of the test
                token.setKind("");
                token.setService("");
                token.setIdentifier(ByteBuffer.allocate(0));
                token.setPassword(ByteBuffer.allocate(0));
                GetDelegationTokenResponse tokenResponse =
                    recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
                tokenResponse.setDelegationToken(token);
                return tokenResponse;
              }
            });

    UserGroupInformation.createRemoteUser("someone")
        .doAs(
            new PrivilegedExceptionAction<Void>() {
              @Override
              public Void run() throws Exception {
                yarnRunner = new YARNRunner(conf, null, null);
                yarnRunner.getDelegationTokenFromHS(hsProxy);
                verify(hsProxy).getDelegationToken(any(GetDelegationTokenRequest.class));
                return null;
              }
            });
  }
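For context, the production-side call that the mock above verifies looks roughly like the sketch below. This is an assumption about what YARNRunner.getDelegationTokenFromHS does, not its actual source; the point the test asserts is that the renewer is set to the cluster's RM/master principal.

  // Sketch only; the surrounding class and its fields are assumed.
  private DelegationToken fetchHistoryServerToken(MRClientProtocol hsProxy) throws Exception {
    GetDelegationTokenRequest request =
        recordFactory.newRecordInstance(GetDelegationTokenRequest.class);
    // The mocked hsProxy above asserts exactly this value for the renewer.
    request.setRenewer(Master.getMasterPrincipal(conf));
    return hsProxy.getDelegationToken(request).getDelegationToken();
  }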
Example #6
  /**
   * Entry point for the DragonAppMaster. Reads and validates the required parameters from the
   * system environment, e.g. container ID, NM host, NM port, NM HTTP port, and application
   * submit time.
   *
   * @param args command-line arguments (not used; all input comes from the environment)
   */
  public static void main(String[] args) {
    try {
      String containerIdStr = System.getenv(ApplicationConstants.AM_CONTAINER_ID_ENV);
      String nodeHostString = System.getenv(ApplicationConstants.NM_HOST_ENV);
      String nodePortString = System.getenv(ApplicationConstants.NM_PORT_ENV);
      String nodeHttpPortString = System.getenv(ApplicationConstants.NM_HTTP_PORT_ENV);
      String appSubmitTimeStr = System.getenv(ApplicationConstants.APP_SUBMIT_TIME_ENV);

      validateInputParam(containerIdStr, ApplicationConstants.AM_CONTAINER_ID_ENV);
      validateInputParam(nodeHostString, ApplicationConstants.NM_HOST_ENV);
      validateInputParam(nodePortString, ApplicationConstants.NM_PORT_ENV);
      validateInputParam(nodeHttpPortString, ApplicationConstants.NM_HTTP_PORT_ENV);
      validateInputParam(appSubmitTimeStr, ApplicationConstants.APP_SUBMIT_TIME_ENV);
      ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
      ApplicationAttemptId applicationAttemptId = containerId.getApplicationAttemptId();
      long appSubmitTime = Long.parseLong(appSubmitTimeStr);

      DragonAppMaster appMaster =
          new DragonAppMaster(
              applicationAttemptId,
              containerId,
              nodeHostString,
              Integer.parseInt(nodePortString),
              Integer.parseInt(nodeHttpPortString),
              appSubmitTime);
      Runtime.getRuntime().addShutdownHook(new CompositeServiceShutdownHook(appMaster));
      YarnConfiguration conf = new YarnConfiguration(new DragonConfiguration());
      conf.addResource(new Path(DragonJobConfig.JOB_CONF_FILE));
      String jobUserName = System.getenv(ApplicationConstants.Environment.USER.name());
      conf.set(DragonJobConfig.USER_NAME, jobUserName);

      // Do not automatically close FileSystem objects so that in case of
      // SIGTERM I have a chance to write out the job history. I'll be closing
      // the objects myself.
      conf.setBoolean("fs.automatic.close", false);
      initAndStartAppMaster(appMaster, conf, jobUserName);
    } catch (Throwable t) {
      LOG.fatal("Error starting MRAppMaster", t);
      System.exit(1);
    }
  }
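validateInputParam(...) is used above but not shown. A minimal sketch, modeled on the equivalent helper in MRAppMaster and treated here as an assumption for DragonAppMaster:

  private static void validateInputParam(String value, String param) throws IOException {
    if (value == null) {
      String msg = param + " is null";
      LOG.error(msg);
      throw new IOException(msg);
    }
  }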
Example #7
  @Test
  public void testSuccessfulContainerLaunch()
      throws InterruptedException, IOException, YarnException {

    FileContext localFS = FileContext.getLocalFSFileContext();

    localFS.delete(new Path(localDir.getAbsolutePath()), true);
    localFS.delete(new Path(localLogDir.getAbsolutePath()), true);
    localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true);
    localDir.mkdir();
    localLogDir.mkdir();
    remoteLogDir.mkdir();

    YarnConfiguration conf = new YarnConfiguration();

    Context context =
        new NMContext(
            new NMContainerTokenSecretManager(conf),
            new NMTokenSecretManagerInNM(),
            null,
            null,
            new NMNullStateStoreService()) {
          @Override
          public int getHttpPort() {
            return 1234;
          }
        };

    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogDir.getAbsolutePath());

    ContainerExecutor exec = new DefaultContainerExecutor();
    exec.setConf(conf);

    DeletionService del = new DeletionService(exec);
    Dispatcher dispatcher = new AsyncDispatcher();
    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
    NodeHealthCheckerService healthChecker =
        new NodeHealthCheckerService(NodeManager.getNodeHealthScriptRunner(conf), dirsHandler);
    healthChecker.init(conf);
    NodeManagerMetrics metrics = NodeManagerMetrics.create();
    NodeStatusUpdater nodeStatusUpdater =
        new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics) {
          @Override
          protected ResourceTracker getRMClient() {
            return new LocalRMInterface();
          }

          @Override
          protected void stopRMProxy() {
            return;
          }

          @Override
          protected void startStatusUpdater() {
            return; // Don't start any updating thread.
          }

          @Override
          public long getRMIdentifier() {
            return SIMULATED_RM_IDENTIFIER;
          }
        };

    DummyContainerManager containerManager =
        new DummyContainerManager(
            context,
            exec,
            del,
            nodeStatusUpdater,
            metrics,
            new ApplicationACLsManager(conf),
            dirsHandler);
    nodeStatusUpdater.init(conf);
    ((NMContext) context).setContainerManager(containerManager);
    nodeStatusUpdater.start();
    containerManager.init(conf);
    containerManager.start();

    ContainerLaunchContext launchContext =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);
    ApplicationId applicationId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 0);
    ContainerId cID = ContainerId.newContainerId(applicationAttemptId, 0);

    String user = "******";
    StartContainerRequest scRequest =
        StartContainerRequest.newInstance(
            launchContext,
            TestContainerManager.createContainerToken(
                cID,
                SIMULATED_RM_IDENTIFIER,
                context.getNodeId(),
                user,
                context.getContainerTokenSecretManager()));
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);

    BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.RUNNING);

    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cID);
    StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
    containerManager.stopContainers(stopRequest);
    BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.COMPLETE);

    containerManager.stop();
  }
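The NodeStatusUpdater above swaps in a LocalRMInterface so the NodeManager never contacts a real ResourceManager. Its definition is not part of the snippet; below is a minimal sketch of such a stub, assuming the two-method ResourceTracker protocol of this Hadoop line (a fuller stub would also hand out container/NM token master keys in the registration response).

  // Sketch only: a ResourceTracker stub that answers registration and
  // heartbeats locally instead of going over RPC to an RM.
  private static class LocalRMInterface implements ResourceTracker {
    private static final RecordFactory recordFactory =
        RecordFactoryProvider.getRecordFactory(null);

    @Override
    public RegisterNodeManagerResponse registerNodeManager(RegisterNodeManagerRequest request)
        throws YarnException, IOException {
      RegisterNodeManagerResponse response =
          recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
      response.setNodeAction(NodeAction.NORMAL);
      return response;
    }

    @Override
    public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
        throws YarnException, IOException {
      return recordFactory.newRecordInstance(NodeHeartbeatResponse.class);
    }
  }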
  @Test
  public void testRMInitialsWithFileSystemBasedConfigurationProvider() throws Exception {
    configuration.set(
        YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");

    // upload configurations
    final File excludeHostsFile = new File(tmpDir.toString(), "excludeHosts");
    if (excludeHostsFile.exists()) {
      excludeHostsFile.delete();
    }
    if (!excludeHostsFile.createNewFile()) {
      Assert.fail("Can not create " + "excludeHosts");
    }
    PrintWriter fileWriter = new PrintWriter(excludeHostsFile);
    fileWriter.write("0.0.0.0:123");
    fileWriter.close();
    uploadToRemoteFileSystem(new Path(excludeHostsFile.getAbsolutePath()));

    YarnConfiguration yarnConf = new YarnConfiguration();
    yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL, "world:anyone:rwcda");
    yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, this.workingPath + "/excludeHosts");
    uploadConfiguration(yarnConf, "yarn-site.xml");

    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
    uploadConfiguration(csConf, "capacity-scheduler.xml");

    String aclsString = "alice,bob users,wheel";
    Configuration newConf = new Configuration();
    newConf.set("security.applicationclient.protocol.acl", aclsString);
    uploadConfiguration(newConf, "hadoop-policy.xml");

    Configuration conf = new Configuration();
    conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
    conf.set("hadoop.proxyuser.test.groups", "test_groups");
    conf.set("hadoop.proxyuser.test.hosts", "test_hosts");
    conf.setClass(
        CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
        MockUnixGroupsMapping.class,
        GroupMappingServiceProvider.class);
    uploadConfiguration(conf, "core-site.xml");

    // update the groups
    MockUnixGroupsMapping.updateGroups();

    ResourceManager resourceManager = null;
    try {
      try {
        resourceManager = new ResourceManager();
        resourceManager.init(configuration);
        resourceManager.start();
      } catch (Exception ex) {
        fail("Should not get any exceptions");
      }

      // validate values for excludeHosts
      Set<String> excludeHosts =
          resourceManager.getRMContext().getNodesListManager().getHostsReader().getExcludedHosts();
      Assert.assertTrue(excludeHosts.size() == 1);
      Assert.assertTrue(excludeHosts.contains("0.0.0.0:123"));

      // validate values for admin-acls
      String aclStringAfter =
          resourceManager.adminService.getAccessControlList().getAclString().trim();
      Assert.assertEquals("world:anyone:rwcda", aclStringAfter);

      // validate values for queue configuration
      CapacityScheduler cs = (CapacityScheduler) resourceManager.getRMContext().getScheduler();
      int maxAppsAfter = cs.getConfiguration().getMaximumSystemApplications();
      Assert.assertEquals(5000, maxAppsAfter);

      // verify service Acls for AdminService
      ServiceAuthorizationManager adminServiceServiceManager =
          resourceManager.adminService.getServer().getServiceAuthorizationManager();
      verifyServiceACLsRefresh(
          adminServiceServiceManager,
          org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
          aclsString);

      // verify service ACLs for ClientRMService
      ServiceAuthorizationManager clientRMServiceServiceManager =
          resourceManager
              .getRMContext()
              .getClientRMService()
              .getServer()
              .getServiceAuthorizationManager();
      verifyServiceACLsRefresh(
          clientRMServiceServiceManager,
          org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
          aclsString);

      // verify service ACLs for ApplicationMasterService
      ServiceAuthorizationManager appMasterService =
          resourceManager
              .getRMContext()
              .getApplicationMasterService()
              .getServer()
              .getServiceAuthorizationManager();
      verifyServiceACLsRefresh(
          appMasterService,
          org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
          aclsString);

      // verify service ACLs for ResourceTrackerService
      ServiceAuthorizationManager RTService =
          resourceManager
              .getRMContext()
              .getResourceTrackerService()
              .getServer()
              .getServiceAuthorizationManager();
      verifyServiceACLsRefresh(
          RTService, org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class, aclsString);

      // verify ProxyUsers and ProxyHosts
      Assert.assertTrue(
          ProxyUsers.getProxyGroups().get("hadoop.proxyuser.test.groups").size() == 1);
      Assert.assertTrue(
          ProxyUsers.getProxyGroups().get("hadoop.proxyuser.test.groups").contains("test_groups"));

      Assert.assertTrue(ProxyUsers.getProxyHosts().get("hadoop.proxyuser.test.hosts").size() == 1);
      Assert.assertTrue(
          ProxyUsers.getProxyHosts().get("hadoop.proxyuser.test.hosts").contains("test_hosts"));

      // verify UserToGroupsMappings
      List<String> groupAfter =
          Groups.getUserToGroupsMappingService(configuration)
              .getGroups(UserGroupInformation.getCurrentUser().getUserName());
      Assert.assertTrue(
          groupAfter.contains("test_group_D")
              && groupAfter.contains("test_group_E")
              && groupAfter.contains("test_group_F")
              && groupAfter.size() == 3);
    } finally {
      if (resourceManager != null) {
        resourceManager.stop();
      }
    }
  }
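MockUnixGroupsMapping, which the test above installs as the group mapping class and refreshes via updateGroups(), is not shown in the snippet. A minimal sketch consistent with the assertions on test_group_D/E/F; the initial group names are assumptions:

  public static class MockUnixGroupsMapping implements GroupMappingServiceProvider {
    // Groups returned before updateGroups() is called; the initial values are assumed.
    private static List<String> groups =
        Arrays.asList("test_group_A", "test_group_B", "test_group_C");

    @Override
    public List<String> getGroups(String user) throws IOException {
      return groups;
    }

    @Override
    public void cacheGroupsRefresh() throws IOException {
      // nothing cached in this stub
    }

    @Override
    public void cacheGroupsAdd(List<String> newGroups) throws IOException {
      // nothing cached in this stub
    }

    // Swap in the group set that the test expects to observe after the refresh.
    public static void updateGroups() {
      groups = Arrays.asList("test_group_D", "test_group_E", "test_group_F");
    }
  }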
  @Test
  public void testNonLeaderKeyReception()
      throws InterruptedException, StorageInitializtionException, Exception {
    // create a groupMembershipService that will be leader
    RMContextImpl rmContext = new RMContextImpl();
    rmContext.setDistributedEnabled(true);
    rmContext.setHAEnabled(true);
    GroupMembershipService groupMembershipService = new GroupMembershipService(null, rmContext);
    groupMembershipService.init(conf);
    NMTokenSecretManagerInRM nmTokenSecretManager = new NMTokenSecretManagerInRM(conf, rmContext);
    // create a resource tracker
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    conf.setBoolean(YarnConfiguration.DISTRIBUTED_RM, true);
    MockRM mockRM = new MockRM(conf);
    mockRM.init(conf);

    try {
      groupMembershipService.start();
      rmContext.setRMGroupMembershipService(groupMembershipService);
      rmContext.setStateStore(new NDBRMStateStore());

      while (!rmContext.isLeader()) {
        Thread.sleep(1000);
      }

      mockRM.start();

      if (mockRM.getRMContext().isDistributedEnabled() && !mockRM.getRMContext().isLeader()) {
        conf.set(
            YarnConfiguration.EVENT_RT_CONFIG_PATH, "target/test-classes/RT_EventAPIConfig.ini");
        NdbRtStreamingProcessor rtStreamingProcessor =
            new NdbRtStreamingProcessor(mockRM.getRMContext());
        RMStorageFactory.kickTheNdbEventStreamingAPI(false, conf);
        new Thread(rtStreamingProcessor).start();
      }

      // this should be a resource tracker not a scheduler
      assertFalse(mockRM.getRMContext().isLeader());

      // simulate creation of a token on the scheduler
      nmTokenSecretManager.start();

      assertNotNull("did not roll master key", nmTokenSecretManager.getCurrentKey());
      Thread.sleep(1000);
      dummyUpdate();
      Thread.sleep(1000);
      RMStateStore.RMState state = rmContext.getStateStore().loadState(rmContext);
      assertEquals(
          "key not persisted to the database",
          state.getSecretTokenMamagerKey(RMStateStore.KeyType.CURRENTNMTOKENMASTERKEY),
          nmTokenSecretManager.getCurrentKey());

      assertEquals(
          nmTokenSecretManager.getCurrentKey(),
          mockRM.getRMContext().getNMTokenSecretManager().getCurrentKey());
      assertEquals(
          nmTokenSecretManager.getNextKey(),
          mockRM.getRMContext().getNMTokenSecretManager().getNextKey());
    } finally {
      groupMembershipService.stop();
      mockRM.stop();
      nmTokenSecretManager.stop();
      DefaultMetricsSystem.shutdown();
    }
  }
Example #10
  @Test
  public void testSuccessfulContainerLaunch() throws InterruptedException, IOException {

    FileContext localFS = FileContext.getLocalFSFileContext();

    localFS.delete(new Path(localDir.getAbsolutePath()), true);
    localFS.delete(new Path(localLogDir.getAbsolutePath()), true);
    localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true);
    localDir.mkdir();
    localLogDir.mkdir();
    remoteLogDir.mkdir();

    Context context = new NMContext();

    YarnConfiguration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogDir.getAbsolutePath());

    ContainerExecutor exec = new DefaultContainerExecutor();
    exec.setConf(conf);

    DeletionService del = new DeletionService(exec);
    Dispatcher dispatcher = new AsyncDispatcher();
    NodeHealthCheckerService healthChecker = new NodeHealthCheckerService();
    healthChecker.init(conf);
    LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
    NodeManagerMetrics metrics = NodeManagerMetrics.create();
    ContainerTokenSecretManager containerTokenSecretManager = new ContainerTokenSecretManager();
    NodeStatusUpdater nodeStatusUpdater =
        new NodeStatusUpdaterImpl(
            context, dispatcher, healthChecker, metrics, containerTokenSecretManager) {
          @Override
          protected ResourceTracker getRMClient() {
            return new LocalRMInterface();
          }

          @Override
          protected void startStatusUpdater() {
            return; // Don't start any updating thread.
          }
        };

    DummyContainerManager containerManager =
        new DummyContainerManager(
            context,
            exec,
            del,
            nodeStatusUpdater,
            metrics,
            containerTokenSecretManager,
            new ApplicationACLsManager(conf),
            dirsHandler);
    containerManager.init(conf);
    containerManager.start();

    ContainerLaunchContext launchContext =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);
    ContainerId cID = recordFactory.newRecordInstance(ContainerId.class);
    ApplicationId applicationId = recordFactory.newRecordInstance(ApplicationId.class);
    applicationId.setClusterTimestamp(0);
    applicationId.setId(0);
    ApplicationAttemptId applicationAttemptId =
        recordFactory.newRecordInstance(ApplicationAttemptId.class);
    applicationAttemptId.setApplicationId(applicationId);
    applicationAttemptId.setAttemptId(0);
    cID.setApplicationAttemptId(applicationAttemptId);
    launchContext.setContainerId(cID);
    launchContext.setUser("testing");
    launchContext.setResource(recordFactory.newRecordInstance(Resource.class));
    StartContainerRequest request = recordFactory.newRecordInstance(StartContainerRequest.class);
    request.setContainerLaunchContext(launchContext);
    containerManager.startContainer(request);

    BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.RUNNING);

    StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
    stopRequest.setContainerId(cID);
    containerManager.stopContainer(stopRequest);
    BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.COMPLETE);

    containerManager.stop();
  }