private boolean manageYarnServices(String action) {
    boolean status = true;
    try {
        HadoopServiceManager hadoopServiceManager =
                HadoopUtils.getServiceManagerInstance(this.clusterConfig, this.compConfig);

        // Manage ResourceManager Process
        if (!hadoopServiceManager.manageServiceOnNode(
                HadoopUtils.getResourceManagerHost(this.compConfig),
                HadoopConstants.Roles.RESOURCEMANAGER, action)) {
            status = false;
        }

        // Manage NodeManager Process on each Slave Node
        if (!hadoopServiceManager.manageServiceOnNodes(
                HadoopUtils.getSlaveHosts(this.compConfig),
                HadoopConstants.Roles.NODEMANAGER, action)) {
            status = false;
        }

        boolean webAppProxyServerEnabled = (Boolean) this.compConfig.getAdvanceConfProperty(
                HadoopConstants.AdvanceConfKeys.WEBAPPPROXYSERVER_ENABLED);
        if (webAppProxyServerEnabled) {
            if (!hadoopServiceManager.manageServiceOnNode(
                    HadoopUtils.getWebAppProxyServerHost(this.compConfig),
                    HadoopConstants.Roles.WEBAPPPROXYSERVER, action)) {
                status = false;
            }
        }

        boolean jobHistoryServerEnabled = (Boolean) this.compConfig.getAdvanceConfProperty(
                HadoopConstants.AdvanceConfKeys.JOBHISTORYSERVER_ENABLED);
        if (jobHistoryServerEnabled) {
            if (!hadoopServiceManager.manageServiceOnNode(
                    HadoopUtils.getJobHistoryServerHost(this.compConfig),
                    HadoopConstants.Roles.JOBHISTORYSERVER, action)) {
                status = false;
            }
        }
    } catch (Exception e) {
        HadoopUtils.addAndLogError(LOG, clusterConfig,
                "Could not " + action + " Hadoop YARN services",
                Constant.Component.Name.HADOOP, e);
        status = false;
    }
    return status;
}
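// Hypothetical helper, not part of the original class: the (Boolean) casts on
// getAdvanceConfProperty() above assume the configuration key is always present; a
// missing key would surface as a NullPointerException inside the try block. A guarded
// accessor along these lines could centralize that read. The method name
// getBooleanAdvanceConf is an illustrative assumption.
private boolean getBooleanAdvanceConf(String key) {
    // Treat an absent or null property as "disabled" rather than failing.
    Object value = this.compConfig.getAdvanceConfProperty(key);
    return value != null && (Boolean) value;
}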
private boolean manageHdfsServices(String action) {
    boolean status = true;
    try {
        HadoopServiceManager hadoopServiceManager =
                HadoopUtils.getServiceManagerInstance(this.clusterConfig, this.compConfig);
        boolean isHaEnabled = (Boolean) this.compConfig.getAdvanceConfProperty(
                HadoopConstants.AdvanceConfKeys.HA_ENABLED);

        // Manage JournalNode Process on each JournalNode host
        if (!hadoopServiceManager.manageServiceOnNodes(
                HadoopUtils.getJournalNodeHosts(this.compConfig),
                HadoopConstants.Roles.JOURNALNODE, action)) {
            status = false;
        }

        if (isHaEnabled) {
            boolean isAutomaticFailoverEnabled = (Boolean) this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_AUTOMATIC_FAILOVER_ENABLED);

            // Manage the active and standby NameNode Processes
            if (!hadoopServiceManager.manageServiceOnNode(
                    HadoopUtils.getActiveNameNodeHost(this.compConfig),
                    HadoopConstants.Roles.NAMENODE, action)) {
                status = false;
            }
            if (!hadoopServiceManager.manageServiceOnNode(
                    HadoopUtils.getStandByNameNodeHost(this.compConfig),
                    HadoopConstants.Roles.NAMENODE, action)) {
                status = false;
            }

            // Manage the ZKFailoverController Processes when automatic failover is enabled
            if (isAutomaticFailoverEnabled) {
                if (!hadoopServiceManager.manageServiceOnNodes(
                        HadoopUtils.getHaNameNodeHosts(this.compConfig),
                        HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER, action)) {
                    status = false;
                }
            }
        } else {
            // Non-HA cluster: manage the single NameNode Process
            if (!hadoopServiceManager.manageServiceOnNode(
                    HadoopUtils.getNameNodeHost(this.compConfig),
                    HadoopConstants.Roles.NAMENODE, action)) {
                status = false;
            }
        }

        // Manage SecondaryNameNode Process, if one is configured
        if (HadoopUtils.getSecondaryNameNodeHost(this.compConfig) != null) {
            if (!hadoopServiceManager.manageServiceOnNode(
                    HadoopUtils.getSecondaryNameNodeHost(this.compConfig),
                    HadoopConstants.Roles.SECONDARYNAMENODE, action)) {
                status = false;
            }
        }

        // Manage DataNode Process on each Slave Node
        if (!hadoopServiceManager.manageServiceOnNodes(
                HadoopUtils.getSlaveHosts(this.compConfig),
                HadoopConstants.Roles.DATANODE, action)) {
            status = false;
        }
    } catch (Exception e) {
        HadoopUtils.addAndLogError(LOG, clusterConfig,
                "Could not " + action + " Hadoop HDFS services",
                Constant.Component.Name.HADOOP, e);
        status = false;
    }
    return status;
}
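// Hypothetical sketch, not in the original source: with both action-based helpers in
// place, a shutdown entry point would typically stop YARN before HDFS so no work is
// scheduled against a filesystem that is going down. It assumes a STOP action constant
// exists alongside the START constant used in initializeAndStartHdfs() below.
private boolean stopCluster() {
    boolean status = true;
    // Stop the YARN daemons first (ResourceManager, NodeManagers, optional servers).
    if (!manageYarnServices(HadoopConstants.Command.Action.STOP)) {
        status = false;
    }
    // Then stop the HDFS daemons (NameNode(s), DataNodes, JournalNodes, ZKFCs).
    if (!manageHdfsServices(HadoopConstants.Command.Action.STOP)) {
        status = false;
    }
    return status;
}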
private boolean initializeAndStartHdfs() {
    try {
        HadoopCommandsManager hadoopCmdManager =
                HadoopUtils.getCommandsManagerInstance(this.clusterConfig, this.compConfig);
        HadoopServiceManager hadoopServiceManager =
                HadoopUtils.getServiceManagerInstance(this.clusterConfig, this.compConfig);
        boolean isHaEnabled = (Boolean) this.compConfig.getAdvanceConfProperty(
                HadoopConstants.AdvanceConfKeys.HA_ENABLED);

        if (isHaEnabled) {
            if (!hadoopServiceManager.manageServiceOnNodes(
                    HadoopUtils.getJournalNodeHosts(this.compConfig),
                    HadoopConstants.Roles.JOURNALNODE,
                    HadoopConstants.Command.Action.START)) {
                return false;
            }
        }

        // Format NameNode and return false if the format command fails
        if (!hadoopCmdManager.formatNameNode()) {
            return false;
        }

        hadoopServiceManager.manageServiceOnNode(
                HadoopUtils.getNameNodeHost(this.compConfig),
                HadoopConstants.Roles.NAMENODE,
                HadoopConstants.Command.Action.START);

        boolean isAutomaticFailoverEnabled = false;
        if (isHaEnabled) {
            Hadoop2CommandsManager hadoop2CmdManager = (Hadoop2CommandsManager) hadoopCmdManager;
            isAutomaticFailoverEnabled = (Boolean) this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_AUTOMATIC_FAILOVER_ENABLED);

            hadoop2CmdManager.initializeStandByNameNode();
            hadoopServiceManager.manageServiceOnNode(
                    HadoopUtils.getStandByNameNodeHost(this.compConfig),
                    HadoopConstants.Roles.NAMENODE,
                    HadoopConstants.Command.Action.START);

            if (isAutomaticFailoverEnabled) {
                hadoop2CmdManager.initializeHAInZooKeeper();
                hadoopServiceManager.manageServiceOnNode(
                        HadoopUtils.getActiveNameNodeHost(this.compConfig),
                        HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
                        HadoopConstants.Command.Action.START);
                hadoopServiceManager.manageServiceOnNode(
                        HadoopUtils.getStandByNameNodeHost(this.compConfig),
                        HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
                        HadoopConstants.Command.Action.START);
            }
        } else if (HadoopUtils.getSecondaryNameNodeHost(this.compConfig) != null) {
            hadoopServiceManager.manageServiceOnNode(
                    HadoopUtils.getSecondaryNameNodeHost(this.compConfig),
                    HadoopConstants.Roles.SECONDARYNAMENODE,
                    HadoopConstants.Command.Action.START);
        }

        // Start DataNode Process on each Slave Node
        hadoopServiceManager.manageServiceOnNodes(
                HadoopUtils.getSlaveHosts(this.compConfig),
                HadoopConstants.Roles.DATANODE,
                HadoopConstants.Command.Action.START);

        return true;
    } catch (Exception e) {
        HadoopUtils.addAndLogError(LOG, clusterConfig,
                "Could not start Hadoop HDFS services",
                Constant.Component.Name.HADOOP, e);
        return false;
    }
}
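// Hypothetical sketch, not in the original source: a first-time cluster bring-up would
// typically run the HDFS format-and-start sequence above before starting YARN, since
// the ResourceManager and JobHistoryServer expect HDFS to be reachable. The method name
// initializeAndStartCluster is an illustrative assumption.
private boolean initializeAndStartCluster() {
    // Format the NameNode and bring up all HDFS daemons; abort on failure.
    if (!initializeAndStartHdfs()) {
        return false;
    }
    // Then start the YARN daemons via the shared action-based helper.
    return manageYarnServices(HadoopConstants.Command.Action.START);
}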