@Override
public void start() {
  // Create the AMInfo for the current AppMaster
  if (amInfos == null) {
    amInfos = new LinkedList<AMInfo>();
  }
  AMInfo amInfo =
      DragonBuilderUtils.newAMInfo(appAttemptId, startTime, containerID,
          nmHost, nmPort, nmHttpPort);
  amInfos.add(amInfo);

  job = createJob(getConfig());

  // Metrics system init is really init & start.
  // It's more test friendly to put it here.
  DefaultMetricsSystem.initialize("DragonAppMaster");

  // Create a job event for job initialization
  JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
  // Send init to the job (this does NOT trigger job execution).
  // This is a synchronous call, not an event through the dispatcher. We want
  // job-init to be done completely here.
  jobEventDispatcher.handle(initJobEvent);

  super.start();

  // All components have started; start the job.
  startJobs();
}
/** Start listening for edits via RPC. */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");

  validateAndCreateJournalDir(localDir);

  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName());

  registerJNMXBean();

  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
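// Usage sketch (assumption, not part of the original snippets): one plausible way to drive the
// start() method above from an embedding process. JournalNode implements
// org.apache.hadoop.util.Tool in stock Hadoop, so setConf(...) is assumed to be available;
// verify against the actual class before relying on this.
static JournalNode startJournalNode(Configuration conf) throws IOException {
  JournalNode jn = new JournalNode();   // plain construction; no arguments required
  jn.setConf(conf);                     // configuration supplies journal dirs, addresses, keytabs
  jn.start();                           // brings up the HTTP and RPC servers as shown above
  return jn;
}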
@SuppressWarnings("unchecked") @Override public void serviceStart() throws Exception { // start all the components startServices(); super.serviceStart(); this.state = DAGAppMasterState.IDLE; // metrics system init is really init & start. // It's more test friendly to put it here. DefaultMetricsSystem.initialize("DAGAppMaster"); this.appsStartTime = clock.getTime(); AMStartedEvent startEvent = new AMStartedEvent(appAttemptID, startTime, appsStartTime, appSubmitTime); dispatcher.getEventHandler().handle(new DAGHistoryEvent(startEvent)); this.lastDAGCompletionTime = clock.getTime(); if (!isSession) { startDAG(); } else { LOG.info("In Session mode. Waiting for DAG over RPC"); this.dagSubmissionTimer = new Timer(true); this.dagSubmissionTimer.scheduleAtFixedRate( new TimerTask() { @Override public void run() { checkAndHandleSessionTimeout(); } }, sessionTimeoutInterval, sessionTimeoutInterval / 10); } }
public static NameNode createNameNode(String argv[], Configuration conf)
    throws IOException {
  if (conf == null) {
    conf = new HdfsConfiguration();
  }
  StartupOption startOpt = parseArguments(argv);
  if (startOpt == null) {
    printUsage(System.err);
    return null;
  }
  setStartupOption(conf, startOpt);

  switch (startOpt) {
    // HOP
    case DROP_AND_CREATE_DB: {
      // Delete everything other than the inode and blocks tables.
      // This is a temporary fix for safe mode.
      dropAndCreateDB(conf);
      return null;
    }
    case FORMAT: {
      boolean aborted = format(conf, startOpt.getForceFormat(),
          startOpt.getInteractiveFormat());
      terminate(aborted ? 1 : 0);
      return null; // avoid javac warning
    }
    case GENCLUSTERID: {
      System.err.println("Generating new cluster id:");
      System.out.println(StorageInfo.newClusterID());
      terminate(0);
      return null;
    }
    case FINALIZE: {
      throw new UnsupportedOperationException(
          "HOP: FINALIZE is not supported anymore");
    }
    case BOOTSTRAPSTANDBY: {
      throw new UnsupportedOperationException(
          "HOP: BOOTSTRAPSTANDBY is not supported anymore");
    }
    case INITIALIZESHAREDEDITS: {
      throw new UnsupportedOperationException(
          "HOP: INITIALIZESHAREDEDITS is not supported anymore");
    }
    case BACKUP:
    case CHECKPOINT: {
      throw new UnsupportedOperationException(
          "HOP: BACKUP/CHECKPOINT is not supported anymore");
    }
    case RECOVER: {
      throw new UnsupportedOperationException(
          "Hops. Metadata recovery is not supported");
    }
    default: {
      DefaultMetricsSystem.initialize("NameNode");
      return new NameNode(conf);
    }
  }
}
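// Entry-point sketch (assumption, not from the source): how createNameNode(...) above is
// typically invoked. In stock HDFS the main method creates the node and then joins its RPC
// server; the simplified error handling below is illustrative only.
public static void main(String argv[]) throws Exception {
  NameNode namenode = createNameNode(argv, null);   // null conf: an HdfsConfiguration is built internally
  if (namenode != null) {
    namenode.join();                                // block until the NameNode shuts down
  }
}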