Code Example #1
 /*
  * (non-Javadoc)
  *
  * @see com.impetus.ankush2.hadoop.deployer.configurator.HadoopConfigurator#
  * configureNode(com.impetus.ankush2.framework.config.NodeConfig)
  */
 @Override
 public boolean configureNode(NodeConfig nodeConfig) {
   return configureEnvironmentVariables(nodeConfig, false)
       && configureSiteXmlFiles(nodeConfig)
       && configureSlavesFile(nodeConfig, HadoopUtils.getSlaveHosts(this.compConfig))
       && configureGangliaMetrics(nodeConfig);
 }
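
Note the && chain above: it short-circuits, so if environment-variable setup fails, the site XML, slaves-file, and Ganglia steps never run for that node. A minimal self-contained sketch of the same pattern (all names here are illustrative, not from the Ankush codebase):

import java.util.List;

// Minimal sketch of the short-circuit pattern in configureNode above: each
// step runs only if every previous step succeeded. Illustrative names only.
public class ConfigureNodeSketch {

  interface ConfigStep {
    boolean apply(String node);
  }

  static boolean configureNode(String node, List<ConfigStep> steps) {
    for (ConfigStep step : steps) {
      if (!step.apply(node)) {
        return false; // remaining steps are skipped, mirroring the && chain
      }
    }
    return true;
  }

  public static void main(String[] args) {
    List<ConfigStep> steps =
        List.of(
            node -> { System.out.println("env vars on " + node); return true; },
            node -> { System.out.println("site XMLs on " + node); return false; },
            node -> { System.out.println("slaves file on " + node); return true; });
    // Prints the first two steps, then reports failure; step three never runs.
    System.out.println("configured: " + configureNode("node-1", steps));
  }
}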
Code Example #2
  private boolean manageYarnServices(String action) {
    boolean status = true;
    try {
      HadoopServiceManager hadoopServiceManager =
          HadoopUtils.getServiceManagerInstance(this.clusterConfig, this.compConfig);

      // Manage ResourceManager Process
      if (!hadoopServiceManager.manageServiceOnNode(
          HadoopUtils.getResourceManagerHost(this.compConfig),
          HadoopConstants.Roles.RESOURCEMANAGER,
          action)) {
        status = false;
      }

      // Manage NodeManager Process on each Slave Node
      if (!hadoopServiceManager.manageServiceOnNodes(
          HadoopUtils.getSlaveHosts(this.compConfig), HadoopConstants.Roles.NODEMANAGER, action)) {
        status = false;
      }

      boolean webAppProxyServerEnabled =
          (Boolean)
              this.compConfig.getAdvanceConfProperty(
                  HadoopConstants.AdvanceConfKeys.WEBAPPPROXYSERVER_ENABLED);

      if (webAppProxyServerEnabled) {
        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getWebAppProxyServerHost(this.compConfig),
            HadoopConstants.Roles.WEBAPPPROXYSERVER,
            action)) {
          status = false;
        }
      }

      boolean jobHistoryServerEnabled =
          (Boolean)
              this.compConfig.getAdvanceConfProperty(
                  HadoopConstants.AdvanceConfKeys.JOBHISTORYSERVER_ENABLED);
      if (jobHistoryServerEnabled) {
        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getJobHistoryServerHost(this.compConfig),
            HadoopConstants.Roles.JOBHISTORYSERVER,
            action)) {
          status = false;
        }
      }
    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not " + action + " Hadoop YARN services",
          Constant.Component.Name.HADOOP,
          e);
      status = false;
    }
    return status;
  }
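
Unlike configureNode in example #1, this method does not return at the first failure: every YARN daemon is attempted and a single aggregate status comes back. A self-contained sketch of that pattern (hosts, roles, and the simulated failure are made up):

import java.util.LinkedHashMap;
import java.util.Map;

// Sketch of the "attempt every service, aggregate one status" pattern used
// by manageYarnServices above. All names and the failure are illustrative.
public class ManageServicesSketch {

  static boolean manageService(String host, String role, String action) {
    System.out.printf("%s %s on %s%n", action, role, host);
    return !"nodemanager".equals(role); // simulate one failing service
  }

  public static void main(String[] args) {
    Map<String, String> hostByRole = new LinkedHashMap<>();
    hostByRole.put("resourcemanager", "rm-host");
    hostByRole.put("nodemanager", "slave-1");
    hostByRole.put("jobhistoryserver", "jhs-host");

    boolean status = true;
    for (Map.Entry<String, String> e : hostByRole.entrySet()) {
      // every service is attempted, even after an earlier failure
      if (!manageService(e.getValue(), e.getKey(), "stop")) {
        status = false;
      }
    }
    System.out.println("overall status: " + status); // false
  }
}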
Code Example #3
 @Override
 protected void addNodeRolesToMap(NodeRolesMap nodeRolesMap) {
   nodeRolesMap.put(
       this.compConfig.getAdvanceConfStringProperty(HadoopConstants.AdvanceConfKeys.NAMENODE),
       HadoopConstants.Roles.NAMENODE);
   nodeRolesMap.put(
       this.compConfig.getAdvanceConfStringProperty(
           HadoopConstants.AdvanceConfKeys.RESOURCE_MANAGER),
       HadoopConstants.Roles.RESOURCEMANAGER);
   Set<String> slavesList = HadoopUtils.getSlaveHosts(compConfig);
   for (String slave : slavesList) {
     nodeRolesMap.put(slave, HadoopConstants.Roles.DATANODE);
     nodeRolesMap.put(slave, HadoopConstants.Roles.NODEMANAGER);
   }
 }
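
Each slave host is registered under two roles here (DATANODE and NODEMANAGER), so NodeRolesMap cannot be a plain java.util.Map, where the second put would overwrite the first. A hypothetical multimap-style stand-in, assuming that behavior:

import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical stand-in for NodeRolesMap: it must keep several roles per
// host for both put(slave, ...) calls above to survive. This models that
// with a Map<String, Set<String>>; it is an assumption, not the real class.
public class NodeRolesMapSketch {

  private final Map<String, Set<String>> rolesByHost = new LinkedHashMap<>();

  public void put(String host, String role) {
    rolesByHost.computeIfAbsent(host, h -> new LinkedHashSet<>()).add(role);
  }

  public Set<String> get(String host) {
    return rolesByHost.getOrDefault(host, Set.of());
  }

  public static void main(String[] args) {
    NodeRolesMapSketch map = new NodeRolesMapSketch();
    map.put("slave-1", "datanode");
    map.put("slave-1", "nodemanager"); // does not overwrite the first role
    System.out.println(map.get("slave-1")); // [datanode, nodemanager]
  }
}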
Code Example #4
  private boolean manageHdfsServices(String action) {
    boolean status = true;
    try {
      HadoopServiceManager hadoopServiceManager =
          HadoopUtils.getServiceManagerInstance(this.clusterConfig, this.compConfig);

      boolean isHaEnabled =
          (Boolean)
              this.compConfig.getAdvanceConfProperty(HadoopConstants.AdvanceConfKeys.HA_ENABLED);

      // Manage JournalNode process on each JournalNode host
      if (!hadoopServiceManager.manageServiceOnNodes(
          HadoopUtils.getJournalNodeHosts(this.compConfig),
          HadoopConstants.Roles.JOURNALNODE,
          action)) {
        status = false;
      }

      if (isHaEnabled) {
        boolean isAutomaticFailoverEnabled =
            (Boolean)
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_AUTOMATIC_FAILOVER_ENABLED);

        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getActiveNameNodeHost(this.compConfig),
            HadoopConstants.Roles.NAMENODE,
            action)) {
          status = false;
        }

        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getStandByNameNodeHost(this.compConfig),
            HadoopConstants.Roles.NAMENODE,
            action)) {
          status = false;
        }

        if (isAutomaticFailoverEnabled) {
          if (!hadoopServiceManager.manageServiceOnNodes(
              HadoopUtils.getHaNameNodeHosts(this.compConfig),
              HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
              action)) {
            status = false;
          }
        }
      } else {
        // Manage the single NameNode process (non-HA deployment)
        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getNameNodeHost(this.compConfig), HadoopConstants.Roles.NAMENODE, action)) {
          status = false;
        }
      }

      if (HadoopUtils.getSecondaryNameNodeHost(this.compConfig) != null) {
        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getSecondaryNameNodeHost(this.compConfig),
            HadoopConstants.Roles.SECONDARYNAMENODE,
            action)) {
          status = false;
        }
      }

      // Manage DataNode process on each slave node
      if (!hadoopServiceManager.manageServiceOnNodes(
          HadoopUtils.getSlaveHosts(this.compConfig), HadoopConstants.Roles.DATANODE, action)) {
        status = false;
      }
    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not format NameNode and start Hadoop services",
          Constant.Component.Name.HADOOP,
          e);
      status = false;
    }
    return status;
  }
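
One hazard shared by examples #2 and #4: getAdvanceConfProperty evidently returns Object, and the (Boolean) cast is unboxed in the following if-test, so a missing key throws a NullPointerException that the catch block then reports as a generic service failure. A defensive wrapper along these lines (illustrative, not part of the Ankush API) avoids that:

// Illustrative null-safe read of an Object-typed config value; not part of
// the Ankush API. Unboxing null in "if ((Boolean) raw)" throws a
// NullPointerException when the key is absent.
public class BooleanConfSketch {

  static boolean asBoolean(Object raw, boolean defaultValue) {
    return (raw instanceof Boolean) ? (Boolean) raw : defaultValue;
  }

  public static void main(String[] args) {
    System.out.println(asBoolean(Boolean.TRUE, false)); // true
    System.out.println(asBoolean(null, false));         // false, no NPE
  }
}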
Code Example #5
  private boolean initializeAndStartHdfs() {
    try {
      HadoopCommandsManager hadoopCmdManager =
          HadoopUtils.getCommandsManagerInstance(this.clusterConfig, this.compConfig);
      HadoopServiceManager hadoopServiceManager =
          HadoopUtils.getServiceManagerInstance(this.clusterConfig, this.compConfig);

      boolean isHaEnabled =
          (Boolean)
              this.compConfig.getAdvanceConfProperty(HadoopConstants.AdvanceConfKeys.HA_ENABLED);

      if (isHaEnabled) {
        // Start JournalNodes first: formatting the NameNode writes the
        // shared edits directory to them
        if (!hadoopServiceManager.manageServiceOnNodes(
            HadoopUtils.getJournalNodeHosts(this.compConfig),
            HadoopConstants.Roles.JOURNALNODE,
            HadoopConstants.Command.Action.START)) {
          return false;
        }
      }

      // Format NameNode and return false if format command fails
      if (!hadoopCmdManager.formatNameNode()) {
        return false;
      }

      // Start the freshly formatted NameNode
      hadoopServiceManager.manageServiceOnNode(
          HadoopUtils.getNameNodeHost(this.compConfig),
          HadoopConstants.Roles.NAMENODE,
          HadoopConstants.Command.Action.START);

      boolean isAutomaticFailoverEnabled = false;
      if (isHaEnabled) {
        Hadoop2CommandsManager hadoop2CmdManager = (Hadoop2CommandsManager) hadoopCmdManager;
        isAutomaticFailoverEnabled =
            (Boolean)
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_AUTOMATIC_FAILOVER_ENABLED);

        // Bootstrap the standby NameNode from the active NameNode's metadata
        hadoop2CmdManager.initializeStandByNameNode();

        hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getStandByNameNodeHost(this.compConfig),
            HadoopConstants.Roles.NAMENODE,
            HadoopConstants.Command.Action.START);

        if (isAutomaticFailoverEnabled) {
          // Initialize the automatic-failover state in ZooKeeper, then start
          // a ZKFC beside each NameNode
          hadoop2CmdManager.initializeHAInZooKeeper();
          hadoopServiceManager.manageServiceOnNode(
              HadoopUtils.getActiveNameNodeHost(this.compConfig),
              HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
              HadoopConstants.Command.Action.START);

          hadoopServiceManager.manageServiceOnNode(
              HadoopUtils.getStandByNameNodeHost(this.compConfig),
              HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
              HadoopConstants.Command.Action.START);
        }
      } else if (HadoopUtils.getSecondaryNameNodeHost(this.compConfig) != null) {
        hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getSecondaryNameNodeHost(this.compConfig),
            HadoopConstants.Roles.SECONDARYNAMENODE,
            HadoopConstants.Command.Action.START);
      }

      // Start DataNode Process on each Slave Node
      hadoopServiceManager.manageServiceOnNodes(
          HadoopUtils.getSlaveHosts(this.compConfig),
          HadoopConstants.Roles.DATANODE,
          HadoopConstants.Command.Action.START);

      return true;
    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not start Hadoop HDFS services",
          Constant.Component.Name.HADOOP,
          e);
      return false;
    }
  }
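
The method encodes the usual Hadoop 2 HA bring-up order: JournalNodes before the format, then the active NameNode, the standby bootstrap, the ZooKeeper failover state, the ZKFCs, and finally the DataNodes. As a usage sketch, a hypothetical orchestration method inside the same deployer class could combine it with example #2; the "stop" action token and the rollback policy are assumptions, since only HadoopConstants.Command.Action.START appears in these examples:

  // Hypothetical orchestration combining the private methods shown above.
  // The "stop" action string and the rollback policy are assumptions.
  private boolean startCluster() {
    if (!initializeAndStartHdfs()) {
      return false; // HDFS never came up; YARN is not attempted
    }
    if (!manageYarnServices(HadoopConstants.Command.Action.START)) {
      manageHdfsServices("stop"); // best-effort rollback of the HDFS half
      return false;
    }
    return true;
  }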