Example #1
  // This tests that container tokens are generated when the containers are
  // acquired by the AM, not when the containers are allocated
  @Test
  public void testContainerTokenGeneratedOnPullRequest() throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    YarnAPIStorageFactory.setConfiguration(conf);
    RMStorageFactory.setConfiguration(conf);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    MockRM rm1 = new MockRM(conf);
    try {
      rm1.start();

      MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 8000);
      RMApp app1 = rm1.submitApp(200);
      MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
      // request a container.
      am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>());
      ContainerId containerId2 = ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
      rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);

      RMContainer container = rm1.getResourceScheduler().getRMContainer(containerId2);
      // no container token is generated.
      Assert.assertEquals(containerId2, container.getContainerId());
      Assert.assertNull(container.getContainer().getContainerToken());

      // acquire the container.
      List<Container> containers =
          am1.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>())
              .getAllocatedContainers();
      Assert.assertEquals(containerId2, containers.get(0).getId());
      // container token is generated.
      Assert.assertNotNull(containers.get(0).getContainerToken());
    } finally {
      rm1.stop();
    }
  }
  @Test
  public void testAdminAclsWithFileSystemBasedConfigurationProvider()
      throws IOException, YarnException {
    configuration.set(
        YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");

    // upload default configurations
    uploadDefaultConfiguration();

    try {
      rm = new MockRM(configuration);
      rm.init(configuration);
      rm.start();
    } catch (Exception ex) {
      fail("Should not get any exceptions");
    }

    String aclStringBefore = rm.adminService.getAccessControlList().getAclString().trim();

    YarnConfiguration yarnConf = new YarnConfiguration();
    yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL, "world:anyone:rwcda");
    uploadConfiguration(yarnConf, "yarn-site.xml");

    rm.adminService.refreshAdminAcls(RefreshAdminAclsRequest.newInstance());

    String aclStringAfter = rm.adminService.getAccessControlList().getAclString().trim();

    Assert.assertTrue(!aclStringAfter.equals(aclStringBefore));
    Assert.assertEquals(aclStringAfter, "world:anyone:rwcda");
  }
Example #3
  @Before
  public void setUp() throws Exception {
    InlineDispatcher rmDispatcher = new InlineDispatcher();

    YarnConfiguration conf = new YarnConfiguration();
    YarnAPIStorageFactory.setConfiguration(conf);
    RMStorageFactory.setConfiguration(conf);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    RMUtilities.InitializeDB();
    TransactionStateManager tsm = new TransactionStateManager();
    tsm.init(conf);
    tsm.start();
    rmContext =
        new RMContextImpl(
            rmDispatcher,
            null,
            null,
            null,
            mock(DelegationTokenRenewer.class),
            null,
            null,
            null,
            conf,
            tsm);
    NodesListManager nodesListManager = mock(NodesListManager.class);
    HostsFileReader reader = mock(HostsFileReader.class);
    when(nodesListManager.getHostsReader()).thenReturn(reader);
    ((RMContextImpl) rmContext).setNodesListManager(nodesListManager);
    scheduler = mock(YarnScheduler.class);
    doAnswer(
            new Answer<Void>() {
              @Override
              public Void answer(InvocationOnMock invocation) throws Throwable {
                final SchedulerEvent event = (SchedulerEvent) (invocation.getArguments()[0]);
                eventType = event.getType();
                if (eventType == SchedulerEventType.NODE_UPDATE) {
                  List<UpdatedContainerInfo> latestContainersInfoList =
                      ((NodeUpdateSchedulerEvent) event)
                          .getRMNode()
                          .pullContainerUpdates(event.getTransactionState());
                  for (UpdatedContainerInfo latestContainersInfo : latestContainersInfoList) {
                    completedContainers.addAll(latestContainersInfo.getCompletedContainers());
                  }
                }
                return null;
              }
            })
        .when(scheduler)
        .handle(any(SchedulerEvent.class));

    rmDispatcher.register(SchedulerEventType.class, new TestSchedulerEventDispatcher());

    rmDispatcher.register(
        NodesListManagerEventType.class, new TestNodeListManagerEventDispatcher());

    NodeId nodeId = BuilderUtils.newNodeId("localhost", 0);
    node = new RMNodeImpl(nodeId, rmContext, nodeId.getHost(), 0, 0, null, null, null);
    nodesListManagerEvent = null;
  }
Example #4
 @Before
 public void setup() {
   Logger rootLogger = LogManager.getRootLogger();
   rootLogger.setLevel(Level.DEBUG);
   ExitUtil.disableSystemExit();
   conf = new YarnConfiguration();
   UserGroupInformation.setConfiguration(conf);
   conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
   conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
 }
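The test body that consumes this setup is not part of the excerpt; a minimal sketch of how it would typically look, reusing the MockRM helper seen in the other examples (the test name is illustrative only):
 @Test
 public void testRMStartsWithFairSchedulerAndMemoryStore() throws Exception {
   // hedged sketch: spin up an RM against the configuration built in setup() above
   MockRM rm = new MockRM(conf);
   try {
     rm.start();
     // assertions against rm.getRMContext() would go here
   } finally {
     rm.stop();
   }
 }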
Example #5
  /**
   * Sets up this class for use in unit testing. It spins up the YARN mini-cluster and also sets up
   * various default classes.
   *
   * @throws IOException if there's an error accessing the local filesystem
   * @throws SQLException if there's an error querying the embedded DB
   */
  @BeforeClass
  public static void setupClass() throws IOException, SQLException {
    conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    miniCluster = new MiniYARNCluster("test", 1, 1, 1);
    miniCluster.init(conf);
    miniCluster.start();

    conflictHandler = new ObjectConflictHandler();
    conflictHandler.setConf(conf);

    destinationObjectFactory = new DestinationObjectFactory();
    destinationObjectFactory.setConf(conf);
  }
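The matching teardown is not shown above; a minimal sketch, assuming the mini-cluster is the only resource that needs to be released:
  @AfterClass
  public static void teardownClass() {
    // hedged sketch: stop the YARN mini-cluster started in setupClass()
    if (miniCluster != null) {
      miniCluster.stop();
    }
  }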
 @Before
 public void setUp() throws Exception {
   conf = new YarnConfiguration();
   conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
   mgr = new NullRMNodeLabelsManager();
   mgr.init(conf);
 }
Example #7
  @Before
  public void setUp() throws Exception {
    resourceMgrDelegate = mock(ResourceMgrDelegate.class);
    conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
    clientCache = new ClientCache(conf, resourceMgrDelegate);
    clientCache = spy(clientCache);
    yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache);
    yarnRunner = spy(yarnRunner);
    submissionContext = mock(ApplicationSubmissionContext.class);
    doAnswer(
            new Answer<ApplicationSubmissionContext>() {
              @Override
              public ApplicationSubmissionContext answer(InvocationOnMock invocation)
                  throws Throwable {
                return submissionContext;
              }
            })
        .when(yarnRunner)
        .createApplicationSubmissionContext(
            any(Configuration.class), any(String.class), any(Credentials.class));

    appId = recordFactory.newRecordInstance(ApplicationId.class);
    appId.setClusterTimestamp(System.currentTimeMillis());
    appId.setId(1);
    jobId = TypeConverter.fromYarn(appId);
    if (testWorkDir.exists()) {
      FileContext.getLocalFSFileContext().delete(new Path(testWorkDir.toString()), true);
    }
    testWorkDir.mkdirs();
  }
Example #8
  @SuppressWarnings("rawtypes")
  static Path createConfigurationFileInFs(
      FileSystem fs, String appHome, Map stormConf, YarnConfiguration yarnConf) throws IOException {
    // write the storm configuration to conf/storm.yaml on the FS
    Path confDst =
        new Path(fs.getHomeDirectory(), appHome + Path.SEPARATOR + STORM_CONF_PATH_STRING);
    Path dirDst = confDst.getParent();
    fs.mkdirs(dirDst);

    // storm.yaml
    FSDataOutputStream out = fs.create(confDst);
    Yaml yaml = new Yaml();
    OutputStreamWriter writer = new OutputStreamWriter(out);
    rmNulls(stormConf);
    yaml.dump(stormConf, writer);
    writer.close();
    out.close();

    // yarn-site.xml
    Path yarn_site_xml = new Path(dirDst, "yarn-site.xml");
    out = fs.create(yarn_site_xml);
    writer = new OutputStreamWriter(out);
    yarnConf.writeXml(writer);
    writer.close();
    out.close();

    // logback.xml
    Path logback_xml = new Path(dirDst, "logback.xml");
    out = fs.create(logback_xml);
    CreateLogbackXML(out);
    out.close();

    return dirDst;
  }
  public ClientRMProtocol getClientResourceManager() {
    if (clientResourceManager != null) return clientResourceManager;

    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    YarnRPC rpc = YarnRPC.create(yarnConf);
    InetSocketAddress rmAddress =
        NetUtils.createSocketAddr(
            yarnConf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS));

    LOG.info("Connecting to the resource manager (client) at " + rmAddress);

    clientResourceManager =
        (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, rmAddress, conf);

    return clientResourceManager;
  }
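A hedged usage sketch for the proxy returned above, assuming the GetClusterMetrics request/response records that accompany this generation of ClientRMProtocol:
  // hedged sketch: query cluster metrics through the cached client proxy
  public void logClusterMetrics() throws Exception {
    GetClusterMetricsRequest metricsRequest =
        Records.newRecord(GetClusterMetricsRequest.class);
    GetClusterMetricsResponse metricsResponse =
        getClientResourceManager().getClusterMetrics(metricsRequest);
    LOG.info("NodeManagers in the cluster: "
        + metricsResponse.getClusterMetrics().getNumNodeManagers());
  }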
  private void uploadDefaultConfiguration() throws IOException {
    Configuration conf = new Configuration();
    uploadConfiguration(conf, "core-site.xml");

    YarnConfiguration yarnConf = new YarnConfiguration();
    yarnConf.set(
        YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
    uploadConfiguration(yarnConf, "yarn-site.xml");

    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    uploadConfiguration(csConf, "capacity-scheduler.xml");

    Configuration hadoopPolicyConf = new Configuration(false);
    hadoopPolicyConf.addResource(YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);
    uploadConfiguration(hadoopPolicyConf, "hadoop-policy.xml");
  }
  public AMRMProtocol getAMResourceManager() {
    if (amResourceManager != null) return amResourceManager;

    LOG.debug("Using configuration: " + conf);

    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    YarnRPC rpc = YarnRPC.create(yarnConf);
    InetSocketAddress rmAddress =
        NetUtils.createSocketAddr(
            yarnConf.get(
                YarnConfiguration.RM_SCHEDULER_ADDRESS,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS));

    LOG.info("Connecting to the resource manager (scheduling) at " + rmAddress);
    amResourceManager = (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress, conf);

    return amResourceManager;
  }
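For completeness, a hedged sketch of registering an application master through this scheduler proxy; the request fields follow the older AMRMProtocol record style, and the application attempt id is assumed to already be known to the caller:
  // hedged sketch: register an AM through the scheduler proxy obtained above
  public RegisterApplicationMasterResponse registerAppMaster(ApplicationAttemptId appAttemptId)
      throws Exception {
    RegisterApplicationMasterRequest registerRequest =
        Records.newRecord(RegisterApplicationMasterRequest.class);
    registerRequest.setApplicationAttemptId(appAttemptId);
    registerRequest.setHost("localhost"); // placeholder host, normally the AM's own host
    registerRequest.setRpcPort(0);
    registerRequest.setTrackingUrl("");
    RegisterApplicationMasterResponse registration =
        getAMResourceManager().registerApplicationMaster(registerRequest);
    LOG.info("Max capability: " + registration.getMaximumResourceCapability());
    return registration;
  }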
Example #12
  @Test(timeout = 20000)
  public void testWarnCommandOpts() throws Exception {
    Logger logger = Logger.getLogger(YARNRunner.class);

    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    Layout layout = new SimpleLayout();
    Appender appender = new WriterAppender(layout, bout);
    logger.addAppender(appender);

    JobConf jobConf = new JobConf();

    jobConf.set(
        MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
        "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
    jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar");

    YARNRunner yarnRunner = new YARNRunner(jobConf);

    File jobxml = new File(testWorkDir, MRJobConfig.JOB_CONF_FILE);
    OutputStream out = new FileOutputStream(jobxml);
    conf.writeXml(out);
    out.close();

    File jobsplit = new File(testWorkDir, MRJobConfig.JOB_SPLIT);
    out = new FileOutputStream(jobsplit);
    out.close();

    File jobsplitmetainfo = new File(testWorkDir, MRJobConfig.JOB_SPLIT_METAINFO);
    out = new FileOutputStream(jobsplitmetainfo);
    out.close();

    File appTokens = new File(testWorkDir, MRJobConfig.APPLICATION_TOKENS_FILE);
    out = new FileOutputStream(appTokens);
    out.close();

    @SuppressWarnings("unused")
    ApplicationSubmissionContext submissionContext =
        yarnRunner.createApplicationSubmissionContext(
            jobConf, testWorkDir.toString(), new Credentials());

    String logMsg = bout.toString();
    assertTrue(
        logMsg.contains(
            "WARN - Usage of -Djava.library.path in "
                + "yarn.app.mapreduce.am.admin-command-opts can cause programs to no "
                + "longer function if hadoop native libraries are used. These values "
                + "should be set as part of the LD_LIBRARY_PATH in the app master JVM "
                + "env using yarn.app.mapreduce.am.admin.user.env config settings."));
    assertTrue(
        logMsg.contains(
            "WARN - Usage of -Djava.library.path in "
                + "yarn.app.mapreduce.am.command-opts can cause programs to no longer "
                + "function if hadoop native libraries are used. These values should "
                + "be set as part of the LD_LIBRARY_PATH in the app master JVM env "
                + "using yarn.app.mapreduce.am.env config settings."));
  }
Example #13
  /**
   * Entry point of the DragonAppMaster. Reads and validates the required parameters from the
   * system environment, e.g. container_Id, host, port, http_port and submitTime.
   *
   * @param args command-line arguments
   */
  public static void main(String[] args) {
    try {
      String containerIdStr = System.getenv(ApplicationConstants.AM_CONTAINER_ID_ENV);
      String nodeHostString = System.getenv(ApplicationConstants.NM_HOST_ENV);
      String nodePortString = System.getenv(ApplicationConstants.NM_PORT_ENV);
      String nodeHttpPortString = System.getenv(ApplicationConstants.NM_HTTP_PORT_ENV);
      String appSubmitTimeStr = System.getenv(ApplicationConstants.APP_SUBMIT_TIME_ENV);

      validateInputParam(containerIdStr, ApplicationConstants.AM_CONTAINER_ID_ENV);
      validateInputParam(nodeHostString, ApplicationConstants.NM_HOST_ENV);
      validateInputParam(nodePortString, ApplicationConstants.NM_PORT_ENV);
      validateInputParam(nodeHttpPortString, ApplicationConstants.NM_HTTP_PORT_ENV);
      validateInputParam(appSubmitTimeStr, ApplicationConstants.APP_SUBMIT_TIME_ENV);
      ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
      ApplicationAttemptId applicationAttemptId = containerId.getApplicationAttemptId();
      long appSubmitTime = Long.parseLong(appSubmitTimeStr);

      DragonAppMaster appMaster =
          new DragonAppMaster(
              applicationAttemptId,
              containerId,
              nodeHostString,
              Integer.parseInt(nodePortString),
              Integer.parseInt(nodeHttpPortString),
              appSubmitTime);
      Runtime.getRuntime().addShutdownHook(new CompositeServiceShutdownHook(appMaster));
      YarnConfiguration conf = new YarnConfiguration(new DragonConfiguration());
      conf.addResource(new Path(DragonJobConfig.JOB_CONF_FILE));
      String jobUserName = System.getenv(ApplicationConstants.Environment.USER.name());
      conf.set(DragonJobConfig.USER_NAME, jobUserName);

      // Do not automatically close FileSystem objects so that in case of
      // SIGTERM I have a chance to write out the job history. I'll be closing
      // the objects myself.
      conf.setBoolean("fs.automatic.close", false);
      initAndStartAppMaster(appMaster, conf, jobUserName);
    } catch (Throwable t) {
      LOG.fatal("Error starting MRAppMaster", t);
      System.exit(1);
    }
  }
  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    // set if node labels enabled
    nodeLabelsEnabled =
        conf.getBoolean(
            YarnConfiguration.NODE_LABELS_ENABLED, YarnConfiguration.DEFAULT_NODE_LABELS_ENABLED);

    isDistributedNodeLabelConfiguration =
        YarnConfiguration.isDistributedNodeLabelConfiguration(conf);

    labelCollections.put(NO_LABEL, new RMNodeLabel(NO_LABEL));
  }
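A minimal sketch of the configuration a caller would pass to take the node-label path above, using only the key this serviceInit reads:
    // hedged sketch: the only knob this path requires
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);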
  @Before
  public void configure() throws IOException {
    admin = mock(ResourceManagerAdministrationProtocol.class);

    haadmin = mock(HAServiceProtocol.class);
    when(haadmin.getServiceStatus())
        .thenReturn(new HAServiceStatus(HAServiceProtocol.HAServiceState.INITIALIZING));

    final HAServiceTarget haServiceTarget = mock(HAServiceTarget.class);
    when(haServiceTarget.getProxy(any(Configuration.class), anyInt())).thenReturn(haadmin);
    rmAdminCLI =
        new RMAdminCLI(new Configuration()) {

          @Override
          protected ResourceManagerAdministrationProtocol createAdminProtocol() throws IOException {
            return admin;
          }

          @Override
          protected HAServiceTarget resolveTarget(String rmId) {
            return haServiceTarget;
          }
        };

    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    rmAdminCLIWithHAEnabled =
        new RMAdminCLI(conf) {

          @Override
          protected ResourceManagerAdministrationProtocol createAdminProtocol() throws IOException {
            return admin;
          }

          @Override
          protected HAServiceTarget resolveTarget(String rmId) {
            return haServiceTarget;
          }
        };
  }
 public YarnClientRMConnection(YarnConfiguration config) {
   this.config = config;
   InetSocketAddress remoteAddress =
       NetUtils.createSocketAddr(
           config.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS));
   Configuration appsManagerServerConf = new Configuration(config);
   appsManagerServerConf.setClass(
       YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CLIENT_RESOURCEMANAGER,
       ClientRMSecurityInfo.class,
       SecurityInfo.class);
   YarnRPC rpc = YarnRPC.create(appsManagerServerConf);
   crmp =
       ((ClientRMProtocol)
           rpc.getProxy(ClientRMProtocol.class, remoteAddress, appsManagerServerConf));
 }
Example #17
  @Test(timeout = 20000)
  public void testHistoryServerToken() throws Exception {
    // Set the master principal in the config
    conf.set(YarnConfiguration.RM_PRINCIPAL, "foo@LOCAL");

    final String masterPrincipal = Master.getMasterPrincipal(conf);

    final MRClientProtocol hsProxy = mock(MRClientProtocol.class);
    when(hsProxy.getDelegationToken(any(GetDelegationTokenRequest.class)))
        .thenAnswer(
            new Answer<GetDelegationTokenResponse>() {
              public GetDelegationTokenResponse answer(InvocationOnMock invocation) {
                GetDelegationTokenRequest request =
                    (GetDelegationTokenRequest) invocation.getArguments()[0];
                // check that the renewer matches the cluster's RM principal
                assertEquals(masterPrincipal, request.getRenewer());

                DelegationToken token = recordFactory.newRecordInstance(DelegationToken.class);
                // none of these fields matter for the sake of the test
                token.setKind("");
                token.setService("");
                token.setIdentifier(ByteBuffer.allocate(0));
                token.setPassword(ByteBuffer.allocate(0));
                GetDelegationTokenResponse tokenResponse =
                    recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
                tokenResponse.setDelegationToken(token);
                return tokenResponse;
              }
            });

    UserGroupInformation.createRemoteUser("someone")
        .doAs(
            new PrivilegedExceptionAction<Void>() {
              @Override
              public Void run() throws Exception {
                yarnRunner = new YARNRunner(conf, null, null);
                yarnRunner.getDelegationTokenFromHS(hsProxy);
                verify(hsProxy).getDelegationToken(any(GetDelegationTokenRequest.class));
                return null;
              }
            });
  }
Example #18
 @Test(timeout = 20000)
 public void testJobSubmissionFailure() throws Exception {
   when(resourceMgrDelegate.submitApplication(any(ApplicationSubmissionContext.class)))
       .thenReturn(appId);
   ApplicationReport report = mock(ApplicationReport.class);
   when(report.getApplicationId()).thenReturn(appId);
   when(report.getDiagnostics()).thenReturn(failString);
   when(report.getYarnApplicationState()).thenReturn(YarnApplicationState.FAILED);
   when(resourceMgrDelegate.getApplicationReport(appId)).thenReturn(report);
   Credentials credentials = new Credentials();
   File jobxml = new File(testWorkDir, "job.xml");
   OutputStream out = new FileOutputStream(jobxml);
   conf.writeXml(out);
   out.close();
   try {
     yarnRunner.submitJob(jobId, testWorkDir.getAbsolutePath().toString(), credentials);
   } catch (IOException io) {
     LOG.info("Logging exception:", io);
     assertTrue(io.getLocalizedMessage().contains(failString));
   }
 }
Example #19
  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    String auth = conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);
    if (auth == null || "simple".equals(auth)) {
      isSecurityEnabled = false;
    } else if ("kerberos".equals(auth)) {
      isSecurityEnabled = true;
    } else {
      LOG.warn(
          "Unrecongized attribute value for "
              + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
              + " of "
              + auth);
    }
    String proxy = YarnConfiguration.getProxyHostAndPort(conf);
    String[] proxyParts = proxy.split(":");
    proxyHost = proxyParts[0];

    fetcher = new AppReportFetcher(conf);
    bindAddress = conf.get(YarnConfiguration.PROXY_ADDRESS);
    if (bindAddress == null || bindAddress.isEmpty()) {
      throw new YarnRuntimeException(
          YarnConfiguration.PROXY_ADDRESS + " is not set so the proxy will not run.");
    }
    LOG.info("Instantiating Proxy at " + bindAddress);
    String[] parts = StringUtils.split(bindAddress, ':');
    port = 0;
    if (parts.length == 2) {
      bindAddress = parts[0];
      port = Integer.parseInt(parts[1]);
    }
    acl =
        new AccessControlList(
            conf.get(YarnConfiguration.YARN_ADMIN_ACL, YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
    super.serviceInit(conf);
  }
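A minimal sketch of the configuration this serviceInit requires, using the same keys it reads; proxy stands in for a hypothetical proxy service instance and is illustrative only:
    // hedged sketch: the proxy throws during init unless a bind address is configured
    YarnConfiguration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.PROXY_ADDRESS, "0.0.0.0:8089"); // illustrative address
    conf.set(YarnConfiguration.YARN_ADMIN_ACL, "*");
    proxy.init(conf); // proxy: hypothetical service instance containing this serviceInit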
Example #20
  @Test
  public void testSuccessfulContainerLaunch()
      throws InterruptedException, IOException, YarnException {

    FileContext localFS = FileContext.getLocalFSFileContext();

    localFS.delete(new Path(localDir.getAbsolutePath()), true);
    localFS.delete(new Path(localLogDir.getAbsolutePath()), true);
    localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true);
    localDir.mkdir();
    localLogDir.mkdir();
    remoteLogDir.mkdir();

    YarnConfiguration conf = new YarnConfiguration();

    Context context =
        new NMContext(
            new NMContainerTokenSecretManager(conf),
            new NMTokenSecretManagerInNM(),
            null,
            null,
            new NMNullStateStoreService()) {
          @Override
          public int getHttpPort() {
            return 1234;
          }
        };

    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogDir.getAbsolutePath());

    ContainerExecutor exec = new DefaultContainerExecutor();
    exec.setConf(conf);

    DeletionService del = new DeletionService(exec);
    Dispatcher dispatcher = new AsyncDispatcher();
    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
    NodeHealthCheckerService healthChecker =
        new NodeHealthCheckerService(NodeManager.getNodeHealthScriptRunner(conf), dirsHandler);
    healthChecker.init(conf);
    NodeManagerMetrics metrics = NodeManagerMetrics.create();
    NodeStatusUpdater nodeStatusUpdater =
        new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics) {
          @Override
          protected ResourceTracker getRMClient() {
            return new LocalRMInterface();
          };

          @Override
          protected void stopRMProxy() {
            return;
          }

          @Override
          protected void startStatusUpdater() {
            return; // Don't start any updating thread.
          }

          @Override
          public long getRMIdentifier() {
            return SIMULATED_RM_IDENTIFIER;
          }
        };

    DummyContainerManager containerManager =
        new DummyContainerManager(
            context,
            exec,
            del,
            nodeStatusUpdater,
            metrics,
            new ApplicationACLsManager(conf),
            dirsHandler);
    nodeStatusUpdater.init(conf);
    ((NMContext) context).setContainerManager(containerManager);
    nodeStatusUpdater.start();
    containerManager.init(conf);
    containerManager.start();

    ContainerLaunchContext launchContext =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);
    ApplicationId applicationId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 0);
    ContainerId cID = ContainerId.newContainerId(applicationAttemptId, 0);

    String user = "******";
    StartContainerRequest scRequest =
        StartContainerRequest.newInstance(
            launchContext,
            TestContainerManager.createContainerToken(
                cID,
                SIMULATED_RM_IDENTIFIER,
                context.getNodeId(),
                user,
                context.getContainerTokenSecretManager()));
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);

    BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.RUNNING);

    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cID);
    StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
    containerManager.stopContainers(stopRequest);
    BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.COMPLETE);

    containerManager.stop();
  }
Example #21
  public static void main(String[] args) throws Exception {
    System.err.println("Inside the AM!!!");
    LOG.info("Starting the AM!!!!");

    Options opts = new Options();
    opts.addOption(
        "app_attempt_id", true, "App Attempt ID. Not to be used " + "unless for testing purposes");

    CommandLine cl = new GnuParser().parse(opts, args);

    Map<String, String> envs = System.getenv();
    _appAttemptID = Records.newRecord(ApplicationAttemptId.class);
    if (cl.hasOption("app_attempt_id")) {
      String appIdStr = cl.getOptionValue("app_attempt_id", "");
      _appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr);
    } else if (envs.containsKey(ApplicationConstants.AM_CONTAINER_ID_ENV)) {
      ContainerId containerId =
          ConverterUtils.toContainerId(envs.get(ApplicationConstants.AM_CONTAINER_ID_ENV));
      _appAttemptID = containerId.getApplicationAttemptId();
    } else {
      throw new IllegalArgumentException(
          "Container and application attempt IDs not set in the environment");
    }

    @SuppressWarnings("rawtypes")
    Map storm_conf = Config.readStormConfig(null);
    YarnConfiguration hadoopConf = new YarnConfiguration();

    StormAMRMClient client = new StormAMRMClient(_appAttemptID, storm_conf, hadoopConf);
    client.init(hadoopConf);
    client.start();

    BlockingQueue<Container> launcherQueue = new LinkedBlockingQueue<Container>();

    try {
      InetSocketAddress addr =
          NetUtils.createSocketAddr(
              hadoopConf.get(
                  YarnConfiguration.RM_SCHEDULER_ADDRESS,
                  YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS));
      int port = Utils.getInt(storm_conf.get(Config.MASTER_THRIFT_PORT));
      RegisterApplicationMasterResponse resp =
          client.registerApplicationMaster(addr.getHostName(), port, null);
      LOG.info("Got a registration response " + resp);
      LOG.info("Max Capability " + resp.getMaximumResourceCapability());
      client.setMaxResource(resp.getMaximumResourceCapability());
      MasterServer server = new MasterServer(storm_conf, client);
      LOG.info("Starting HB thread");
      initAndStartHeartbeat(
          client, launcherQueue, (Integer) storm_conf.get(Config.MASTER_HEARTBEAT_INTERVAL_MILLIS));
      LOG.info("Starting launcher");
      initAndStartLauncher(client, launcherQueue);
      client.startAllSupervisors();
      server.serve();
      client.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "AllDone", null);
    } finally {
      client.stop();
    }
    System.err.println("See Ya!!!");
    System.exit(0);
  }
 static {
   YarnConfiguration.addDefaultResource(YarnConfiguration.CS_CONFIGURATION_FILE);
 }
  @Test
  public void testRMInitialsWithFileSystemBasedConfigurationProvider() throws Exception {
    configuration.set(
        YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");

    // upload configurations
    final File excludeHostsFile = new File(tmpDir.toString(), "excludeHosts");
    if (excludeHostsFile.exists()) {
      excludeHostsFile.delete();
    }
    if (!excludeHostsFile.createNewFile()) {
      Assert.fail("Can not create " + "excludeHosts");
    }
    PrintWriter fileWriter = new PrintWriter(excludeHostsFile);
    fileWriter.write("0.0.0.0:123");
    fileWriter.close();
    uploadToRemoteFileSystem(new Path(excludeHostsFile.getAbsolutePath()));

    YarnConfiguration yarnConf = new YarnConfiguration();
    yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL, "world:anyone:rwcda");
    yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, this.workingPath + "/excludeHosts");
    uploadConfiguration(yarnConf, "yarn-site.xml");

    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
    uploadConfiguration(csConf, "capacity-scheduler.xml");

    String aclsString = "alice,bob users,wheel";
    Configuration newConf = new Configuration();
    newConf.set("security.applicationclient.protocol.acl", aclsString);
    uploadConfiguration(newConf, "hadoop-policy.xml");

    Configuration conf = new Configuration();
    conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
    conf.set("hadoop.proxyuser.test.groups", "test_groups");
    conf.set("hadoop.proxyuser.test.hosts", "test_hosts");
    conf.setClass(
        CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
        MockUnixGroupsMapping.class,
        GroupMappingServiceProvider.class);
    uploadConfiguration(conf, "core-site.xml");

    // update the groups
    MockUnixGroupsMapping.updateGroups();

    ResourceManager resourceManager = null;
    try {
      try {
        resourceManager = new ResourceManager();
        resourceManager.init(configuration);
        resourceManager.start();
      } catch (Exception ex) {
        fail("Should not get any exceptions");
      }

      // validate values for excludeHosts
      Set<String> excludeHosts =
          resourceManager.getRMContext().getNodesListManager().getHostsReader().getExcludedHosts();
      Assert.assertTrue(excludeHosts.size() == 1);
      Assert.assertTrue(excludeHosts.contains("0.0.0.0:123"));

      // validate values for admin-acls
      String aclStringAfter =
          resourceManager.adminService.getAccessControlList().getAclString().trim();
      Assert.assertEquals(aclStringAfter, "world:anyone:rwcda");

      // validate values for queue configuration
      CapacityScheduler cs = (CapacityScheduler) resourceManager.getRMContext().getScheduler();
      int maxAppsAfter = cs.getConfiguration().getMaximumSystemApplications();
      Assert.assertEquals(maxAppsAfter, 5000);

      // verify service Acls for AdminService
      ServiceAuthorizationManager adminServiceServiceManager =
          resourceManager.adminService.getServer().getServiceAuthorizationManager();
      verifyServiceACLsRefresh(
          adminServiceServiceManager,
          org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
          aclsString);

      // verify service ACLs for ClientRMService
      ServiceAuthorizationManager clientRMServiceServiceManager =
          resourceManager
              .getRMContext()
              .getClientRMService()
              .getServer()
              .getServiceAuthorizationManager();
      verifyServiceACLsRefresh(
          clientRMServiceServiceManager,
          org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
          aclsString);

      // verify service ACLs for ApplicationMasterService
      ServiceAuthorizationManager appMasterService =
          resourceManager
              .getRMContext()
              .getApplicationMasterService()
              .getServer()
              .getServiceAuthorizationManager();
      verifyServiceACLsRefresh(
          appMasterService,
          org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,
          aclsString);

      // verify service ACLs for ResourceTrackerService
      ServiceAuthorizationManager RTService =
          resourceManager
              .getRMContext()
              .getResourceTrackerService()
              .getServer()
              .getServiceAuthorizationManager();
      verifyServiceACLsRefresh(
          RTService, org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class, aclsString);

      // verify ProxyUsers and ProxyHosts
      Assert.assertTrue(
          ProxyUsers.getProxyGroups().get("hadoop.proxyuser.test.groups").size() == 1);
      Assert.assertTrue(
          ProxyUsers.getProxyGroups().get("hadoop.proxyuser.test.groups").contains("test_groups"));

      Assert.assertTrue(ProxyUsers.getProxyHosts().get("hadoop.proxyuser.test.hosts").size() == 1);
      Assert.assertTrue(
          ProxyUsers.getProxyHosts().get("hadoop.proxyuser.test.hosts").contains("test_hosts"));

      // verify UserToGroupsMappings
      List<String> groupAfter =
          Groups.getUserToGroupsMappingService(configuration)
              .getGroups(UserGroupInformation.getCurrentUser().getUserName());
      Assert.assertTrue(
          groupAfter.contains("test_group_D")
              && groupAfter.contains("test_group_E")
              && groupAfter.contains("test_group_F")
              && groupAfter.size() == 3);
    } finally {
      if (resourceManager != null) {
        resourceManager.stop();
      }
    }
  }
  @Test
  public void testRMHAWithFileSystemBasedConfiguration() throws IOException, YarnException {
    StateChangeRequestInfo requestInfo =
        new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
    configuration.set(
        YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
    configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
    configuration.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
    int base = 100;
    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(configuration)) {
      configuration.set(HAUtil.addSuffix(confKey, "rm1"), "0.0.0.0:" + (base + 20));
      configuration.set(HAUtil.addSuffix(confKey, "rm2"), "0.0.0.0:" + (base + 40));
      base = base * 2;
    }
    Configuration conf1 = new Configuration(configuration);
    conf1.set(YarnConfiguration.RM_HA_ID, "rm1");
    Configuration conf2 = new Configuration(configuration);
    conf2.set(YarnConfiguration.RM_HA_ID, "rm2");

    // upload default configurations
    uploadDefaultConfiguration();

    MockRM rm1 = null;
    MockRM rm2 = null;
    try {
      rm1 = new MockRM(conf1);
      rm1.init(conf1);
      rm1.start();
      Assert.assertTrue(rm1.getRMContext().getHAServiceState() == HAServiceState.STANDBY);

      rm2 = new MockRM(conf2);
      rm2.init(conf1);
      rm2.start();
      Assert.assertTrue(rm2.getRMContext().getHAServiceState() == HAServiceState.STANDBY);

      rm1.adminService.transitionToActive(requestInfo);
      Assert.assertTrue(rm1.getRMContext().getHAServiceState() == HAServiceState.ACTIVE);

      CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
      csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
      uploadConfiguration(csConf, "capacity-scheduler.xml");

      rm1.adminService.refreshQueues(RefreshQueuesRequest.newInstance());

      int maxApps =
          ((CapacityScheduler) rm1.getRMContext().getScheduler())
              .getConfiguration()
              .getMaximumSystemApplications();
      Assert.assertEquals(maxApps, 5000);

      // Before failover happens, the maxApps is
      // still the default value on the standby rm : rm2
      int maxAppsBeforeFailOver =
          ((CapacityScheduler) rm2.getRMContext().getScheduler())
              .getConfiguration()
              .getMaximumSystemApplications();
      Assert.assertEquals(maxAppsBeforeFailOver, 10000);

      // Do the failover
      rm1.adminService.transitionToStandby(requestInfo);
      rm2.adminService.transitionToActive(requestInfo);
      Assert.assertTrue(rm1.getRMContext().getHAServiceState() == HAServiceState.STANDBY);
      Assert.assertTrue(rm2.getRMContext().getHAServiceState() == HAServiceState.ACTIVE);

      int maxAppsAfter =
          ((CapacityScheduler) rm2.getRMContext().getScheduler())
              .getConfiguration()
              .getMaximumSystemApplications();

      Assert.assertEquals(maxAppsAfter, 5000);
    } finally {
      if (rm1 != null) {
        rm1.stop();
      }
      if (rm2 != null) {
        rm2.stop();
      }
    }
  }
  @Test
  public void testNonLeaderKeyReception()
      throws InterruptedException, StorageInitializtionException, Exception {
    // create a groupMembershipService that will be leader
    RMContextImpl rmContext = new RMContextImpl();
    rmContext.setDistributedEnabled(true);
    rmContext.setHAEnabled(true);
    GroupMembershipService groupMembershipService = new GroupMembershipService(null, rmContext);
    groupMembershipService.init(conf);
    NMTokenSecretManagerInRM nmTokenSecretManager = new NMTokenSecretManagerInRM(conf, rmContext);
    // create a resource tracker
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    conf.setBoolean(YarnConfiguration.DISTRIBUTED_RM, true);
    MockRM mockRM = new MockRM(conf);
    mockRM.init(conf);

    try {
      groupMembershipService.start();
      rmContext.setRMGroupMembershipService(groupMembershipService);
      rmContext.setStateStore(new NDBRMStateStore());

      while (!rmContext.isLeader()) {
        Thread.sleep(1000);
      }

      mockRM.start();

      if (mockRM.getRMContext().isDistributedEnabled() && !mockRM.getRMContext().isLeader()) {
        conf.set(
            YarnConfiguration.EVENT_RT_CONFIG_PATH, "target/test-classes/RT_EventAPIConfig.ini");
        NdbRtStreamingProcessor rtStreamingProcessor =
            new NdbRtStreamingProcessor(mockRM.getRMContext());
        RMStorageFactory.kickTheNdbEventStreamingAPI(false, conf);
        new Thread(rtStreamingProcessor).start();
      }

      // this should be a resource tracker not a scheduler
      assertFalse(mockRM.getRMContext().isLeader());

      // simulate creation of a token on the scheduler
      nmTokenSecretManager.start();

      assertNotNull("did not roll master key", nmTokenSecretManager.getCurrentKey());
      Thread.sleep(1000);
      dummyUpdate();
      Thread.sleep(1000);
      RMStateStore.RMState state = rmContext.getStateStore().loadState(rmContext);
      assertEquals(
          "key not persisted to the database",
          state.getSecretTokenMamagerKey(RMStateStore.KeyType.CURRENTNMTOKENMASTERKEY),
          nmTokenSecretManager.getCurrentKey());

      assertEquals(
          nmTokenSecretManager.getCurrentKey(),
          mockRM.getRMContext().getNMTokenSecretManager().getCurrentKey());
      assertEquals(
          nmTokenSecretManager.getNextKey(),
          mockRM.getRMContext().getNMTokenSecretManager().getNextKey());
    } finally {
      groupMembershipService.stop();
      mockRM.stop();
      nmTokenSecretManager.stop();
      DefaultMetricsSystem.shutdown();
    }
  }
Example #26
  @Test(timeout = 30000)
  public void testExcessReservationThanNodeManagerCapacity() throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    YarnAPIStorageFactory.setConfiguration(conf);
    RMStorageFactory.setConfiguration(conf);
    MockRM rm = new MockRM(conf);
    try {
      rm.start();

      // Register node1
      MockNM nm1 = rm.registerNode("127.0.0.1:1234", 2 * GB, 4);
      MockNM nm2 = rm.registerNode("127.0.0.1:2234", 3 * GB, 4);

      nm1.nodeHeartbeat(true);
      nm2.nodeHeartbeat(true);
      // HOP :: Sleep to allow previous events to be processed
      Thread.sleep(
          conf.getInt(
                  YarnConfiguration.HOPS_PENDING_EVENTS_RETRIEVAL_PERIOD,
                  YarnConfiguration.DEFAULT_HOPS_PENDING_EVENTS_RETRIEVAL_PERIOD)
              * 2);
      // wait..
      int waitCount = 20;
      int size = rm.getRMContext().getActiveRMNodes().size();
      while ((size = rm.getRMContext().getActiveRMNodes().size()) != 2 && waitCount-- > 0) {
        LOG.info("Waiting for node managers to register : " + size);
        Thread.sleep(100);
      }
      Assert.assertEquals(2, rm.getRMContext().getActiveRMNodes().size());
      // Submit an application
      RMApp app1 = rm.submitApp(128);

      // kick the scheduling
      nm1.nodeHeartbeat(true);
      RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
      MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId(), nm1);
      am1.registerAppAttempt();

      LOG.info("sending container requests ");
      am1.addRequests(new String[] {"*"}, 3 * GB, 1, 1);
      AllocateResponse alloc1Response = am1.schedule(); // send the request

      // kick the scheduler
      nm1.nodeHeartbeat(true);
      int waitCounter = 20;
      LOG.info("heartbeating nm1");
      while (alloc1Response.getAllocatedContainers().size() < 1 && waitCounter-- > 0) {
        LOG.info("Waiting for containers to be created for app 1...");
        Thread.sleep(500);
        alloc1Response = am1.schedule();
      }
      LOG.info("received container : " + alloc1Response.getAllocatedContainers().size());

      // No container should be allocated.
      // Internally it should not have been reserved.
      Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 0);

      LOG.info("heartbeating nm2");
      waitCounter = 20;
      nm2.nodeHeartbeat(true);
      while (alloc1Response.getAllocatedContainers().size() < 1 && waitCounter-- > 0) {
        LOG.info("Waiting for containers to be created for app 1...");
        Thread.sleep(500);
        alloc1Response = am1.schedule();
      }
      LOG.info("received container : " + alloc1Response.getAllocatedContainers().size());
      Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 1);
    } finally {
      rm.stop();
    }
  }
Example #27
  @Test(timeout = 20000)
  public void testAMAdminCommandOpts() throws Exception {
    JobConf jobConf = new JobConf();

    jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true");
    jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m");

    YARNRunner yarnRunner = new YARNRunner(jobConf);

    File jobxml = new File(testWorkDir, MRJobConfig.JOB_CONF_FILE);
    OutputStream out = new FileOutputStream(jobxml);
    conf.writeXml(out);
    out.close();

    File jobsplit = new File(testWorkDir, MRJobConfig.JOB_SPLIT);
    out = new FileOutputStream(jobsplit);
    out.close();

    File jobsplitmetainfo = new File(testWorkDir, MRJobConfig.JOB_SPLIT_METAINFO);
    out = new FileOutputStream(jobsplitmetainfo);
    out.close();

    File appTokens = new File(testWorkDir, MRJobConfig.APPLICATION_TOKENS_FILE);
    out = new FileOutputStream(appTokens);
    out.close();

    ApplicationSubmissionContext submissionContext =
        yarnRunner.createApplicationSubmissionContext(
            jobConf, testWorkDir.toString(), new Credentials());

    ContainerLaunchContext containerSpec = submissionContext.getAMContainerSpec();
    List<String> commands = containerSpec.getCommands();

    int index = 0;
    int adminIndex = 0;
    int adminPos = -1;
    int userIndex = 0;
    int userPos = -1;

    for (String command : commands) {
      if (command != null) {
        adminPos = command.indexOf("-Djava.net.preferIPv4Stack=true");
        if (adminPos >= 0) adminIndex = index;

        userPos = command.indexOf("-Xmx1024m");
        if (userPos >= 0) userIndex = index;
      }

      index++;
    }

    // Check both admin java opts and user java opts are in the commands
    assertTrue("AM admin command opts not in the commands.", adminPos > 0);
    assertTrue("AM user command opts not in the commands.", userPos > 0);

    // Check the admin java opts is before user java opts in the commands
    if (adminIndex == userIndex) {
      assertTrue("AM admin command opts is after user command opts.", adminPos < userPos);
    } else {
      assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex);
    }
  }
Example #28
  @Test
  public void testSuccessfulContainerLaunch() throws InterruptedException, IOException {

    FileContext localFS = FileContext.getLocalFSFileContext();

    localFS.delete(new Path(localDir.getAbsolutePath()), true);
    localFS.delete(new Path(localLogDir.getAbsolutePath()), true);
    localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true);
    localDir.mkdir();
    localLogDir.mkdir();
    remoteLogDir.mkdir();

    Context context = new NMContext();

    YarnConfiguration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogDir.getAbsolutePath());

    ContainerExecutor exec = new DefaultContainerExecutor();
    exec.setConf(conf);

    DeletionService del = new DeletionService(exec);
    Dispatcher dispatcher = new AsyncDispatcher();
    NodeHealthCheckerService healthChecker = new NodeHealthCheckerService();
    healthChecker.init(conf);
    LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
    NodeManagerMetrics metrics = NodeManagerMetrics.create();
    ContainerTokenSecretManager containerTokenSecretManager = new ContainerTokenSecretManager();
    NodeStatusUpdater nodeStatusUpdater =
        new NodeStatusUpdaterImpl(
            context, dispatcher, healthChecker, metrics, containerTokenSecretManager) {
          @Override
          protected ResourceTracker getRMClient() {
            return new LocalRMInterface();
          };

          @Override
          protected void startStatusUpdater() {
            return; // Don't start any updating thread.
          }
        };

    DummyContainerManager containerManager =
        new DummyContainerManager(
            context,
            exec,
            del,
            nodeStatusUpdater,
            metrics,
            containerTokenSecretManager,
            new ApplicationACLsManager(conf),
            dirsHandler);
    containerManager.init(conf);
    containerManager.start();

    ContainerLaunchContext launchContext =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);
    ContainerId cID = recordFactory.newRecordInstance(ContainerId.class);
    ApplicationId applicationId = recordFactory.newRecordInstance(ApplicationId.class);
    applicationId.setClusterTimestamp(0);
    applicationId.setId(0);
    ApplicationAttemptId applicationAttemptId =
        recordFactory.newRecordInstance(ApplicationAttemptId.class);
    applicationAttemptId.setApplicationId(applicationId);
    applicationAttemptId.setAttemptId(0);
    cID.setApplicationAttemptId(applicationAttemptId);
    launchContext.setContainerId(cID);
    launchContext.setUser("testing");
    launchContext.setResource(recordFactory.newRecordInstance(Resource.class));
    StartContainerRequest request = recordFactory.newRecordInstance(StartContainerRequest.class);
    request.setContainerLaunchContext(launchContext);
    containerManager.startContainer(request);

    BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.RUNNING);

    StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
    stopRequest.setContainerId(cID);
    containerManager.stopContainer(stopRequest);
    BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.COMPLETE);

    containerManager.stop();
  }
 @Before
 public void setUp() throws Exception {
   conf = new YarnConfiguration();
   conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
 }