Example #1
  @Override
  public boolean setupPasswordlessSSH(Set<String> nodes) {
    try {
      LOG.info(
          "Configuring Passwordless SSH for Hadoop 2.x cluster: {}",
          Constant.Component.Name.HADOOP);

      if (!this.generateRsaKeysForHadoopNodes(nodes)) {
        return false;
      }

      Set<String> sourceHosts = new HashSet<>();
      sourceHosts.add(HadoopUtils.getNameNodeHost(this.compConfig));
      sourceHosts.add(HadoopUtils.getResourceManagerHost(this.compConfig));

      boolean isHaEnabled =
          Boolean.TRUE.equals(
              this.compConfig.getAdvanceConfProperty(HadoopConstants.AdvanceConfKeys.HA_ENABLED));

      // In HA mode the active and standby NameNodes must also reach the other
      // nodes without a password (HDFS HA supports SSH-based fencing).
      if (isHaEnabled) {
        sourceHosts.add(HadoopUtils.getActiveNameNodeHost(this.compConfig));
        sourceHosts.add(HadoopUtils.getStandByNameNodeHost(this.compConfig));
      }

      for (String sourceHost : sourceHosts) {
        if (this.clusterConfig.getNodes().containsKey(sourceHost)) {
          if (!HadoopUtils.setupPasswordlessSSH(LOG, this.clusterConfig, sourceHost, nodes)) {
            return false;
          }
        } else {
          SSHExec connection = SSHUtils.connectToNode(sourceHost, this.clusterConfig.getAuthConf());
          try {
            if (!HadoopUtils.setupPasswordlessSSH(
                LOG, this.clusterConfig, sourceHost, nodes, connection)) {
              return false;
            }
          } finally {
            // Always release the SSH connection, even if setup fails.
            if (connection != null) {
              connection.disconnect();
            }
          }
        }
      }

      return true;
    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not configure passwordless SSH for Hadoop",
          Constant.Component.Name.HADOOP,
          e);
      return false;
    }
  }
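The heavy lifting here is delegated to HadoopUtils.setupPasswordlessSSH, whose body is not shown. As a rough, hypothetical sketch of the per-node step it presumably performs (ssh-copy-id, the user parameter, and the class name below are illustrative assumptions, not part of the original code):

import java.io.IOException;
import java.util.Set;

public final class PasswordlessSshSketch {

  // Append the local public key to ~/.ssh/authorized_keys on each target node;
  // that is essentially all passwordless SSH setup amounts to.
  public static boolean copyKeyToNodes(Set<String> nodes, String user) {
    for (String node : nodes) {
      try {
        Process p = new ProcessBuilder("ssh-copy-id", user + "@" + node).inheritIO().start();
        if (p.waitFor() != 0) {
          return false; // key could not be installed on this node
        }
      } catch (IOException e) {
        return false;
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return false;
      }
    }
    return true;
  }
}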
Example #2
  private boolean manageHdfsServices(String action) {
    boolean status = true;
    try {
      HadoopServiceManager hadoopServiceManager =
          HadoopUtils.getServiceManagerInstance(this.clusterConfig, this.compConfig);

      boolean isHaEnabled =
          Boolean.TRUE.equals(
              this.compConfig.getAdvanceConfProperty(HadoopConstants.AdvanceConfKeys.HA_ENABLED));

      // JournalNodes exist only in HA deployments; with a non-HA config this is
      // assumed to be a no-op because getJournalNodeHosts returns no hosts.
      if (!hadoopServiceManager.manageServiceOnNodes(
          HadoopUtils.getJournalNodeHosts(this.compConfig),
          HadoopConstants.Roles.JOURNALNODE,
          action)) {
        status = false;
      }

      if (isHaEnabled) {
        boolean isAutomaticFailoverEnabled =
            Boolean.TRUE.equals(
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_AUTOMATIC_FAILOVER_ENABLED));

        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getActiveNameNodeHost(this.compConfig),
            HadoopConstants.Roles.NAMENODE,
            action)) {
          status = false;
        }

        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getStandByNameNodeHost(this.compConfig),
            HadoopConstants.Roles.NAMENODE,
            action)) {
          status = false;
        }

        if (isAutomaticFailoverEnabled) {
          if (!hadoopServiceManager.manageServiceOnNodes(
              HadoopUtils.getHaNameNodeHosts(this.compConfig),
              HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
              action)) {
            status = false;
          }
        }
      } else {
        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getNameNodeHost(this.compConfig), HadoopConstants.Roles.NAMENODE, action)) {
          status = false;
        }
      }

      String secondaryNameNodeHost = HadoopUtils.getSecondaryNameNodeHost(this.compConfig);
      if (secondaryNameNodeHost != null) {
        if (!hadoopServiceManager.manageServiceOnNode(
            secondaryNameNodeHost, HadoopConstants.Roles.SECONDARYNAMENODE, action)) {
          status = false;
        }
      }

      // Manage the DataNode process on each slave node per the requested action
      if (!hadoopServiceManager.manageServiceOnNodes(
          HadoopUtils.getSlaveHosts(this.compConfig), HadoopConstants.Roles.DATANODE, action)) {
        status = false;
      }
    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not format NameNode and start Hadoop services",
          Constant.Component.Name.HADOOP,
          e);
      status = false;
    }
    return status;
  }
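For orientation, manageServiceOnNodes presumably fans the same action out over manageServiceOnNode. A minimal, hypothetical sketch of that contract (the method names match the calls above; the bodies and class name are assumptions):

import java.util.List;

abstract class ServiceManagerSketch {

  // Start/stop one role (e.g. NAMENODE, DATANODE) on one host; the real
  // implementation presumably runs a daemon script over SSH.
  abstract boolean manageServiceOnNode(String host, String role, String action);

  // Fan the same action out to every host, continuing past failures so one
  // bad node does not skip the rest -- the same accumulate-then-report
  // pattern manageHdfsServices uses with its `status` flag.
  boolean manageServiceOnNodes(List<String> hosts, String role, String action) {
    boolean ok = true;
    for (String host : hosts) {
      ok &= manageServiceOnNode(host, role, action);
    }
    return ok;
  }
}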
Example #3
  private boolean initializeAndStartHdfs() {
    try {
      HadoopCommandsManager hadoopCmdManager =
          HadoopUtils.getCommandsManagerInstance(this.clusterConfig, this.compConfig);
      HadoopServiceManager hadoopServiceManager =
          HadoopUtils.getServiceManagerInstance(this.clusterConfig, this.compConfig);

      boolean isHaEnabled =
          Boolean.TRUE.equals(
              this.compConfig.getAdvanceConfProperty(HadoopConstants.AdvanceConfKeys.HA_ENABLED));

      if (isHaEnabled) {
        if (!hadoopServiceManager.manageServiceOnNodes(
            HadoopUtils.getJournalNodeHosts(this.compConfig),
            HadoopConstants.Roles.JOURNALNODE,
            HadoopConstants.Command.Action.START)) {
          return false;
        }
      }

      // Format NameNode and return false if format command fails
      if (!hadoopCmdManager.formatNameNode()) {
        return false;
      }

      if (!hadoopServiceManager.manageServiceOnNode(
          HadoopUtils.getNameNodeHost(this.compConfig),
          HadoopConstants.Roles.NAMENODE,
          HadoopConstants.Command.Action.START)) {
        return false;
      }

      boolean isAutomaticFailoverEnabled = false;
      if (isHaEnabled) {
        Hadoop2CommandsManager hadoop2CmdManager = (Hadoop2CommandsManager) hadoopCmdManager;
        isAutomaticFailoverEnabled =
            Boolean.TRUE.equals(
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_AUTOMATIC_FAILOVER_ENABLED));

        hadoop2CmdManager.initializeStandByNameNode();

        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getStandByNameNodeHost(this.compConfig),
            HadoopConstants.Roles.NAMENODE,
            HadoopConstants.Command.Action.START)) {
          return false;
        }

        if (isAutomaticFailoverEnabled) {
          hadoop2CmdManager.initializeHAInZooKeeper();
          hadoopServiceManager.manageServiceOnNode(
              HadoopUtils.getActiveNameNodeHost(this.compConfig),
              HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
              HadoopConstants.Command.Action.START);

          hadoopServiceManager.manageServiceOnNode(
              HadoopUtils.getStandByNameNodeHost(this.compConfig),
              HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
              HadoopConstants.Command.Action.START);
        }
      } else if (HadoopUtils.getSecondaryNameNodeHost(this.compConfig) != null) {
        hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getSecondaryNameNodeHost(this.compConfig),
            HadoopConstants.Roles.SECONDARYNAMENODE,
            HadoopConstants.Command.Action.START);
      }

      // Start DataNode Process on each Slave Node
      hadoopServiceManager.manageServiceOnNodes(
          HadoopUtils.getSlaveHosts(this.compConfig),
          HadoopConstants.Roles.DATANODE,
          HadoopConstants.Command.Action.START);

      return true;
    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not start Hadoop HDFS services",
          Constant.Component.Name.HADOOP,
          e);
      return false;
    }
  }
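The initialization order in this method (journal nodes up, format the active NameNode, bootstrap the standby, then format the failover znode) mirrors the stock HDFS HA bootstrap sequence. As a hedged sketch of the underlying commands the managers presumably wrap (the class name, ProcessBuilder usage, and -nonInteractive flag are illustrative; the hdfs subcommands are standard Hadoop 2.x):

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

public final class HdfsHaBootstrapSketch {

  public static void main(String[] args) throws IOException, InterruptedException {
    List<List<String>> steps =
        Arrays.asList(
            // 1. Format the active NameNode (formatNameNode() above).
            Arrays.asList("hdfs", "namenode", "-format", "-nonInteractive"),
            // 2. Copy the formatted metadata to the standby (initializeStandByNameNode()).
            Arrays.asList("hdfs", "namenode", "-bootstrapStandby"),
            // 3. Create the failover znode in ZooKeeper (initializeHAInZooKeeper()).
            Arrays.asList("hdfs", "zkfc", "-formatZK"));
    for (List<String> step : steps) {
      Process p = new ProcessBuilder(step).inheritIO().start();
      if (p.waitFor() != 0) {
        throw new IOException("HA bootstrap step failed: " + step);
      }
    }
  }
}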