/**
 * Creates a software management task that delegates to an external application
 * manager (e.g. Cloudera Manager or Ambari) for the given cluster. The cluster
 * blueprint is read from the job execution context if present; otherwise it is
 * built from the cluster entity and cached back into the context.
 */
private ISoftwareManagementTask createExternalTask(ChunkContext chunkContext,
      String targetName, String clusterName, StatusUpdater statusUpdater) {
   ISoftwareManagementTask task;
   SoftwareManager softwareMgr =
         softwareMgrs.getSoftwareManagerByClusterName(clusterName);
   ClusterBlueprint clusterBlueprint =
         getFromJobExecutionContext(chunkContext,
               JobConstants.CLUSTER_BLUEPRINT_JOB_PARAM, ClusterBlueprint.class);
   if (clusterBlueprint == null) {
      clusterBlueprint =
            lockClusterEntityMgr.getClusterEntityMgr()
                  .toClusterBluePrint(clusterName);
      putIntoJobExecutionContext(chunkContext,
            JobConstants.CLUSTER_BLUEPRINT_JOB_PARAM, clusterBlueprint);
   }
   task =
         SoftwareManagementTaskFactory.createExternalMgtTask(targetName,
               managementOperation, clusterBlueprint, statusUpdater,
               lockClusterEntityMgr, softwareMgr, chunkContext);
   return task;
}
/**
 * Configures a local yum repository on each cluster node so that external
 * application managers (Cloudera Manager or Ambari) can install packages from
 * the local repo URL defined in the cluster configuration. The step is a no-op
 * for Ironfan-managed clusters or when no local repo URL is configured.
 */
@Override
public RepeatStatus executeStep(ChunkContext chunkContext,
      JobExecutionStatusHolder jobExecutionStatusHolder) throws Exception {
   // This step is only for app managers like ClouderaMgr and Ambari.
   String clusterName =
         getJobParameters(chunkContext).getString(
               JobConstants.CLUSTER_NAME_JOB_PARAM);
   SoftwareManager softwareMgr =
         softwareMgrs.getSoftwareManagerByClusterName(clusterName);
   String appMgrName = softwareMgr.getName();
   if (Constants.IRONFAN.equals(appMgrName)) {
      // We do not configure any local repo for Ironfan.
      return RepeatStatus.FINISHED;
   }

   ClusterCreate clusterConfig =
         clusterManager.getClusterConfigMgr().getClusterConfig(clusterName);
   String localRepoURL = clusterConfig.getLocalRepoURL();
   logger.info("Use the following URL as the local yum server: " + localRepoURL);

   if (!CommonUtil.isBlank(localRepoURL)) {
      // Set up the local repo file on each node for ClouderaMgr/Ambari.
      logger.info("ConfigLocalRepoStep: start to setup local repo on each node for ClouderaMgr/Ambari.");
      List<NodeEntity> nodes = getNodesToBeSetLocalRepo(chunkContext, clusterName);
      String appMgrRepoID =
            Configuration.getString(
                  Constants.SERENGETI_NODE_YUM_CLOUDERA_MANAGER_REPO_ID,
                  Constants.NODE_APPMANAGER_YUM_CLOUDERA_MANAGER_REPO_ID);
      if (appMgrName.equals(Constants.AMBARI_PLUGIN_TYPE)) {
         appMgrRepoID =
               Configuration.getString(
                     Constants.SERENGETI_NODE_YUM_AMBARI_REPO_ID,
                     Constants.NODE_APPMANAGER_YUM_AMBARI_REPO_ID);
      }
      setLocalRepoService.setLocalRepoForNodes(clusterName, nodes, appMgrRepoID,
            localRepoURL);
   }
   return RepeatStatus.FINISHED;
}
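/*
 * Illustrative sketch only, not taken from this codebase: on RHEL/CentOS nodes
 * a yum repository is normally described by a file under /etc/yum.repos.d/.
 * Assuming setLocalRepoForNodes writes such a file from the repo ID and URL
 * resolved above, the result on each node would look roughly like:
 *
 *    [cloudera-manager-repo]        <- appMgrRepoID (assumed example value)
 *    name=cloudera-manager-repo
 *    baseurl=<localRepoURL>
 *    enabled=1
 *    gpgcheck=0
 *
 * The exact file name, path, and fields are assumptions; only the repo ID and
 * the base URL come from the step above.
 */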
/**
 * Runs the configured software management operation against the target (the
 * whole cluster when no explicit target is given): syncs up service status,
 * optionally checks host time, validates the management user, and then
 * delegates to either an external app manager task or the Ironfan thrift task.
 * Fails the step when the delegated task reports an error.
 */
@Override
public RepeatStatus executeStep(ChunkContext chunkContext,
      JobExecutionStatusHolder jobExecutionStatusHolder) throws Exception {
   String targetName =
         getJobParameters(chunkContext).getString(
               JobConstants.TARGET_NAME_JOB_PARAM);
   String clusterName =
         getJobParameters(chunkContext).getString(
               JobConstants.CLUSTER_NAME_JOB_PARAM);
   if (targetName == null) {
      targetName = clusterName;
   }
   String jobName = chunkContext.getStepContext().getJobName();
   logger.info("target: " + targetName + ", operation: " + managementOperation
         + ", job name: " + jobName);

   serviceSyncup.syncUp(clusterName);
   logger.debug("Try to sync up service status for cluster " + clusterName);

   boolean vmPowerOn = false;
   String vmPowerOnStr =
         getJobParameters(chunkContext).getString(JobConstants.IS_VM_POWER_ON);
   if (vmPowerOnStr != null) {
      logger.info("vm original status is power on? " + vmPowerOnStr);
      vmPowerOn = Boolean.parseBoolean(vmPowerOnStr);
   }

   // A three-part target name (cluster-group-index) identifies a single node;
   // skip the software operation if that node's VM was not originally powered on.
   if (checkVMStatus && targetName.split("-").length == 3 && !vmPowerOn) {
      return RepeatStatus.FINISHED;
   }

   SoftwareManager softwareMgr = null;
   try {
      softwareMgr = softwareMgrs.getSoftwareManagerByClusterName(clusterName);
   } catch (SoftwareManagerCollectorException e) {
      if (ManagementOperation.PRE_DESTROY.equals(managementOperation)
            || ManagementOperation.DESTROY.equals(managementOperation)) {
         return RepeatStatus.FINISHED;
      }
      throw e;
   }

   // Only check host time for cluster config, disk fix and scale up
   // (management operation CONFIGURE), start (management operation START),
   // and create (resume only).
   if (ManagementOperation.CONFIGURE.equals(managementOperation)
         || ManagementOperation.START.equals(managementOperation)
         || JobConstants.RESUME_CLUSTER_JOB_NAME.equals(jobName)) {
      logger.info("Start to check host time.");
      List<NodeEntity> nodes =
            lockClusterEntityMgr.getClusterEntityMgr().findAllNodes(clusterName);
      Set<String> hostnames = new HashSet<String>();
      for (NodeEntity node : nodes) {
         // For software operations we can only handle VMs that are already
         // VM_READY. This filter tolerates some VM failures during cluster start.
         boolean force =
               JobUtils.getJobParameterForceClusterOperation(chunkContext);
         if (force && (node.getStatus() != NodeStatus.VM_READY)) {
            continue;
         }
         hostnames.add(node.getHostName());
      }
      ClusterCreate clusterSpec = clusterManager.getClusterSpec(clusterName);
      SyncHostsUtils.SyncHosts(clusterSpec, hostnames, softwareMgr);
   }

   StatusUpdater statusUpdater =
         new DefaultStatusUpdater(jobExecutionStatusHolder,
               getJobExecutionId(chunkContext));

   ISoftwareManagementTask task = null;
   String appMgrName = softwareMgr.getName();
   validateUserExistense();
   if (!Constants.IRONFAN.equals(appMgrName)) {
      task = createExternalTask(chunkContext, targetName, clusterName, statusUpdater);
   } else {
      task = createThriftTask(chunkContext, targetName, statusUpdater);
   }

   if (task != null) {
      Map<String, Object> ret = task.call();
      if (!(Boolean) ret.get("succeed")) {
         String errorMessage = (String) ret.get("errorMessage");
         putIntoJobExecutionContext(chunkContext,
               JobConstants.CURRENT_ERROR_MESSAGE, errorMessage);
         putIntoJobExecutionContext(chunkContext,
               JobConstants.SOFTWARE_MANAGEMENT_STEP_FAILE, true);
         throw TaskException.EXECUTION_FAILED(errorMessage);
      }
   }
   return RepeatStatus.FINISHED;
}
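/*
 * Illustrative sketch only, not from this codebase: the step above relies on
 * ISoftwareManagementTask.call() returning a Map that carries a Boolean under
 * "succeed" and, on failure, a String under "errorMessage". A task honoring
 * that contract would build its result roughly like this (method name and
 * parameters are hypothetical):
 */
private Map<String, Object> buildTaskResult(boolean succeeded, String error) {
   Map<String, Object> result = new HashMap<String, Object>();
   result.put("succeed", succeeded);   // read back as (Boolean) ret.get("succeed")
   result.put("errorMessage", error);  // consulted only when "succeed" is false
   return result;
}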