Example #1
  @Override
  public boolean setupPasswordlessSSH(Set<String> nodes) {
    try {
      LOG.info(
          "Configuring Passwordless SSH for Hadoop 2.x cluster", Constant.Component.Name.HADOOP);

      if (!this.generateRsaKeysForHadoopNodes(nodes)) {
        return false;
      }

      Set<String> sourceHosts = new HashSet<String>();
      sourceHosts.add(HadoopUtils.getNameNodeHost(this.compConfig));
      sourceHosts.add(HadoopUtils.getResourceManagerHost(this.compConfig));

      boolean isHaEnabled =
          (Boolean)
              this.compConfig.getAdvanceConfProperty(HadoopConstants.AdvanceConfKeys.HA_ENABLED);

      if (isHaEnabled) {
        sourceHosts.add(HadoopUtils.getActiveNameNodeHost(this.compConfig));
        sourceHosts.add(HadoopUtils.getStandByNameNodeHost(this.compConfig));
      }

      for (String sourceHost : sourceHosts) {
        if (this.clusterConfig.getNodes().containsKey(sourceHost)) {
          if (!HadoopUtils.setupPasswordlessSSH(LOG, this.clusterConfig, sourceHost, nodes)) {
            return false;
          }
        } else {
          SSHExec connection = SSHUtils.connectToNode(sourceHost, this.clusterConfig.getAuthConf());
          try {
            if (!HadoopUtils.setupPasswordlessSSH(
                LOG, this.clusterConfig, sourceHost, nodes, connection)) {
              return false;
            }
          } finally {
            // Release the SSH connection even when key distribution fails
            if (connection != null) {
              connection.disconnect();
            }
          }
        }
      }

      return true;
    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not configure passwordless SSH for Hadoop",
          Constant.Component.Name.HADOOP,
          e);
      return false;
    }
  }
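
Hedged aside: the example builds its set of SSH source hosts from whichever master roles are configured, so a host that carries several roles is handled only once. The standalone sketch below reproduces just that selection step with plain Java collections; the host names, the flags, and the SshSourceHostsSketch class are illustrative assumptions, not part of the Ankush API.

import java.util.LinkedHashSet;
import java.util.Set;

public class SshSourceHostsSketch {

  // Collect the hosts that will act as SSH sources; the Set removes duplicates
  // when one host holds both the NameNode and ResourceManager roles.
  static Set<String> collectSourceHosts(
      String nameNodeHost,
      String resourceManagerHost,
      boolean isHaEnabled,
      String activeNameNodeHost,
      String standbyNameNodeHost) {
    Set<String> sourceHosts = new LinkedHashSet<String>();
    sourceHosts.add(nameNodeHost);
    sourceHosts.add(resourceManagerHost);
    if (isHaEnabled) {
      sourceHosts.add(activeNameNodeHost);
      sourceHosts.add(standbyNameNodeHost);
    }
    return sourceHosts;
  }

  public static void main(String[] args) {
    // With HA disabled and both roles on the same host, the set holds a single entry,
    // so passwordless SSH would be configured from that host only once.
    Set<String> hosts =
        collectSourceHosts("master.example.com", "master.example.com", false, null, null);
    System.out.println(hosts); // [master.example.com]
  }
}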
Example #2
  /*
   * (non-Javadoc)
   *
   * @see com.impetus.ankush2.hadoop.deployer.configurator.HadoopConfigurator#
   * configureHdfsSiteXml(com.impetus.ankush2.framework.config.NodeConfig)
   */
  @Override
  protected boolean configureHdfsSiteXml(NodeConfig nodeConfig) {
    boolean status = true;
    try {
      String hdfsSiteXmlPath =
          HadoopUtils.getHadoopConfDir(this.compConfig)
              + HadoopConstants.FileName.ConfigurationFile.XML_HDFS_SITE;

      LOG.info(
          "Configuring " + HadoopConstants.FileName.ConfigurationFile.XML_HDFS_SITE + " file",
          Constant.Component.Name.HADOOP,
          nodeConfig.getHost());

      Map<String, String> paramList = new HashMap<String, String>();
      paramList.put(
          "dfs.replication",
          (String)
              this.compConfig.getAdvanceConfProperty(
                  HadoopConstants.AdvanceConfKeys.DFS_REPLICATION_FACTOR));
      paramList.put("dfs.permissions.enabled", "true");
      paramList.put(
          "dfs.namenode.name.dir",
          HadoopConstants.URI_FILE_PREFIX
              + (String)
                  this.compConfig.getAdvanceConfProperty(
                      HadoopConstants.AdvanceConfKeys.DFS_NAME_DIR));
      paramList.put(
          "dfs.datanode.data.dir",
          HadoopConstants.URI_FILE_PREFIX
              + (String)
                  this.compConfig.getAdvanceConfProperty(
                      HadoopConstants.AdvanceConfKeys.DFS_DATA_DIR));

      boolean isHaEnabled =
          (Boolean)
              this.compConfig.getAdvanceConfProperty(HadoopConstants.AdvanceConfKeys.HA_ENABLED);

      if (isHaEnabled) {
        String nameserviceId =
            (String)
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_NAMESERVICEID);
        String namenodeId1 =
            (String)
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_NAMENODEID1);
        String namenodeId2 =
            (String)
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_NAMENODEID2);

        String activeNameNodeHost = HadoopUtils.getActiveNameNodeHost(this.compConfig);
        String standByNameNodeHost = HadoopUtils.getStandByNameNodeHost(this.compConfig);

        paramList.put("dfs.nameservices", nameserviceId);
        paramList.put("dfs.ha.namenodes." + nameserviceId, namenodeId1 + "," + namenodeId2);
        paramList.put(
            "dfs.namenode.rpc-address." + nameserviceId + "." + namenodeId1,
            activeNameNodeHost + ":" + Hadoop2Configurator.DEFAULT_PORT_RPC_NAMENODE);

        paramList.put(
            "dfs.namenode.rpc-address." + nameserviceId + "." + namenodeId2,
            standByNameNodeHost + ":" + Hadoop2Configurator.DEFAULT_PORT_RPC_NAMENODE);

        paramList.put(
            "dfs.namenode.http-address." + nameserviceId + "." + namenodeId1,
            activeNameNodeHost + ":" + Hadoop2Configurator.DEFAULT_PORT_HTTP_NAMENODE);

        paramList.put(
            "dfs.namenode.http-address." + nameserviceId + "." + namenodeId2,
            standByNameNodeHost + ":" + Hadoop2Configurator.DEFAULT_PORT_HTTP_NAMENODE);

        StringBuilder journalNodeList = new StringBuilder("qjournal://");
        for (String journalNodeHost : HadoopUtils.getJournalNodeHosts(this.compConfig)) {
          journalNodeList.append(
              journalNodeHost + ":" + Hadoop2Configurator.DEFAULT_PORT_RPC_JOURNAL_NODE);
          journalNodeList.append(";");
        }
        String valJournalNodeProp =
            journalNodeList.toString().substring(0, journalNodeList.length() - 1)
                + "/"
                + nameserviceId;
        paramList.put("dfs.namenode.shared.edits.dir", valJournalNodeProp);
        paramList.put(
            "dfs.journalnode.edits.dir",
            (String)
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_JOURNALNODE_EDITS_DIR));
        paramList.put(
            "dfs.client.failover.proxy.provider." + nameserviceId,
            "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
        paramList.put("dfs.ha.fencing.methods", "shell(/bin/true)");

        boolean isAutomaticFailoverEnabled =
            (Boolean)
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_AUTOMATIC_FAILOVER_ENABLED);

        if (isAutomaticFailoverEnabled) {
          paramList.put("dfs.ha.automatic-failover.enabled." + nameserviceId, "true");

          String zkEnsembleId =
              (String)
                  this.compConfig.getAdvanceConfProperty(
                      HadoopConstants.AdvanceConfKeys.HA_ZK_ENSEMBLEID);

          String zkQuorumValue =
              ZookeeperUtils.getZookeeperConnectionString(this.clusterConfig, zkEnsembleId);
          paramList.put("ha.zookeeper.quorum", zkQuorumValue);
        } else {
          paramList.put("dfs.ha.automatic-failover.enabled." + nameserviceId, "false");
        }
      } else if (HadoopUtils.getSecondaryNameNodeHost(this.compConfig) != null) {
        String valAddrSNNIp = HadoopUtils.getSecondaryNameNodeHost(this.compConfig);
        paramList.put(
            "dfs.namenode.secondary.http-address",
            valAddrSNNIp + ":" + Hadoop2Configurator.DEFAULT_PORT_HTTP_SECONDARYNAMENODE);
      }

      for (Map.Entry<String, String> property : paramList.entrySet()) {
        status =
            HadoopUtils.addPropertyToFile(
                clusterConfig,
                nodeConfig,
                hdfsSiteXmlPath,
                property.getKey(),
                property.getValue());

        if (!status) {
          HadoopUtils.addAndLogError(
              LOG,
              clusterConfig,
              "Could not add " + property.getKey() + " property to " + hdfsSiteXmlPath + " file",
              Constant.Component.Name.HADOOP,
              nodeConfig.getHost());
          return false;
        }
      }
      return true;

    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not update Hadoop yarn-site.xml  file",
          Constant.Component.Name.HADOOP,
          nodeConfig.getHost(),
          e);
      return false;
    }
  }
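
As an aside, the dfs.namenode.shared.edits.dir value above is a qjournal:// URI that lists every JournalNode as host:port, separated by semicolons and suffixed with the nameservice id. The sketch below is a minimal, self-contained way to assemble the same string without trimming a trailing separator; the host names, port 8485, the nameservice id "mycluster", and the SharedEditsDirSketch class are assumptions for illustration.

import java.util.Arrays;
import java.util.List;

public class SharedEditsDirSketch {

  // Build qjournal://host1:port;host2:port;host3:port/nameserviceId
  static String buildSharedEditsDir(
      List<String> journalNodeHosts, int rpcPort, String nameserviceId) {
    StringBuilder uri = new StringBuilder("qjournal://");
    for (int i = 0; i < journalNodeHosts.size(); i++) {
      if (i > 0) {
        uri.append(";"); // separator goes between entries, so nothing to trim afterwards
      }
      uri.append(journalNodeHosts.get(i)).append(":").append(rpcPort);
    }
    return uri.append("/").append(nameserviceId).toString();
  }

  public static void main(String[] args) {
    List<String> hosts = Arrays.asList("jn1.example.com", "jn2.example.com", "jn3.example.com");
    // Prints: qjournal://jn1.example.com:8485;jn2.example.com:8485;jn3.example.com:8485/mycluster
    System.out.println(buildSharedEditsDir(hosts, 8485, "mycluster"));
  }
}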
Example #3
  private boolean manageHdfsServices(String action) {
    boolean status = true;
    try {
      HadoopServiceManager hadoopServiceManager =
          HadoopUtils.getServiceManagerInstance(this.clusterConfig, this.compConfig);

      boolean isHaEnabled =
          (Boolean)
              this.compConfig.getAdvanceConfProperty(HadoopConstants.AdvanceConfKeys.HA_ENABLED);

      if (!hadoopServiceManager.manageServiceOnNodes(
          HadoopUtils.getJournalNodeHosts(this.compConfig),
          HadoopConstants.Roles.JOURNALNODE,
          action)) {
        status = false;
      }

      if (isHaEnabled) {
        boolean isAutomaticFailoverEnabled =
            (Boolean)
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_AUTOMATIC_FAILOVER_ENABLED);

        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getActiveNameNodeHost(this.compConfig),
            HadoopConstants.Roles.NAMENODE,
            action)) {
          status = false;
        }

        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getStandByNameNodeHost(this.compConfig),
            HadoopConstants.Roles.NAMENODE,
            action)) {
          status = false;
        }

        if (isAutomaticFailoverEnabled) {
          if (!hadoopServiceManager.manageServiceOnNodes(
              HadoopUtils.getHaNameNodeHosts(this.compConfig),
              HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
              action)) {

            status = false;
          }
        }
      } else {
        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getNameNodeHost(this.compConfig), HadoopConstants.Roles.NAMENODE, action)) {
          status = false;
        }
      }

      if (HadoopUtils.getSecondaryNameNodeHost(this.compConfig) != null) {
        if (!hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getSecondaryNameNodeHost(this.compConfig),
            HadoopConstants.Roles.SECONDARYNAMENODE,
            action)) {
          status = false;
        }
      }

      // Manage (start/stop) the DataNode process on each slave node
      if (!hadoopServiceManager.manageServiceOnNodes(
          HadoopUtils.getSlaveHosts(this.compConfig), HadoopConstants.Roles.DATANODE, action)) {
        status = false;
      }
    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not format NameNode and start Hadoop services",
          Constant.Component.Name.HADOOP,
          e);
      status = false;
    }
    return status;
  }
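
A note on the control flow: rather than returning on the first failure, the method attempts every service action and only reports the combined result at the end, so one failed role does not prevent the rest from being started or stopped. Below is a minimal sketch of that accumulate-and-continue pattern, with a hypothetical ServiceAction interface standing in for the HadoopServiceManager calls; the sample lambdas are illustrative assumptions only.

import java.util.Arrays;
import java.util.List;

public class AggregateStatusSketch {

  // Stand-in for a single manageServiceOnNode/manageServiceOnNodes call
  interface ServiceAction {
    boolean run();
  }

  static boolean runAll(List<ServiceAction> actions) {
    boolean status = true;
    for (ServiceAction action : actions) {
      if (!action.run()) {
        status = false; // record the failure but continue with the remaining services
      }
    }
    return status;
  }

  public static void main(String[] args) {
    List<ServiceAction> actions =
        Arrays.asList(
            () -> true,   // e.g. JournalNodes handled successfully
            () -> false,  // e.g. one NameNode failed to stop
            () -> true);  // e.g. DataNodes handled successfully
    System.out.println("all services handled: " + runAll(actions)); // false
  }
}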
Example #4
  private boolean initializeAndStartHdfs() {
    try {
      HadoopCommandsManager hadoopCmdManager =
          HadoopUtils.getCommandsManagerInstance(this.clusterConfig, this.compConfig);
      HadoopServiceManager hadoopServiceManager =
          HadoopUtils.getServiceManagerInstance(this.clusterConfig, this.compConfig);

      boolean isHaEnabled =
          (Boolean)
              this.compConfig.getAdvanceConfProperty(HadoopConstants.AdvanceConfKeys.HA_ENABLED);

      if (isHaEnabled) {
        // The JournalNodes must be running before the NameNode can be formatted
        // against the qjournal shared edits directory
        if (!hadoopServiceManager.manageServiceOnNodes(
            HadoopUtils.getJournalNodeHosts(this.compConfig),
            HadoopConstants.Roles.JOURNALNODE,
            HadoopConstants.Command.Action.START)) {
          return false;
        }
      }

      // Format NameNode and return false if format command fails
      if (!hadoopCmdManager.formatNameNode()) {
        return false;
      }

      hadoopServiceManager.manageServiceOnNode(
          HadoopUtils.getNameNodeHost(this.compConfig),
          HadoopConstants.Roles.NAMENODE,
          HadoopConstants.Command.Action.START);

      boolean isAutomaticFailoverEnabled = false;
      if (isHaEnabled) {
        Hadoop2CommandsManager hadoop2CmdManager = (Hadoop2CommandsManager) hadoopCmdManager;
        isAutomaticFailoverEnabled =
            (Boolean)
                this.compConfig.getAdvanceConfProperty(
                    HadoopConstants.AdvanceConfKeys.HA_AUTOMATIC_FAILOVER_ENABLED);

        // Initialize the standby NameNode before starting it
        hadoop2CmdManager.initializeStandByNameNode();

        hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getStandByNameNodeHost(this.compConfig),
            HadoopConstants.Roles.NAMENODE,
            HadoopConstants.Command.Action.START);

        if (isAutomaticFailoverEnabled) {

          // Initialize the automatic-failover state in ZooKeeper, then start a
          // DFSZKFailoverController on both NameNode hosts
          hadoop2CmdManager.initializeHAInZooKeeper();
          hadoopServiceManager.manageServiceOnNode(
              HadoopUtils.getActiveNameNodeHost(this.compConfig),
              HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
              HadoopConstants.Command.Action.START);

          hadoopServiceManager.manageServiceOnNode(
              HadoopUtils.getStandByNameNodeHost(this.compConfig),
              HadoopConstants.Roles.DFSZKFAILOVERCONTROLLER,
              HadoopConstants.Command.Action.START);
        }
      } else if (HadoopUtils.getSecondaryNameNodeHost(this.compConfig) != null) {
        hadoopServiceManager.manageServiceOnNode(
            HadoopUtils.getSecondaryNameNodeHost(this.compConfig),
            HadoopConstants.Roles.SECONDARYNAMENODE,
            HadoopConstants.Command.Action.START);
      }

      // Start DataNode Process on each Slave Node
      hadoopServiceManager.manageServiceOnNodes(
          HadoopUtils.getSlaveHosts(this.compConfig),
          HadoopConstants.Roles.DATANODE,
          HadoopConstants.Command.Action.START);

      return true;
    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not start Hadoop HDFS services",
          Constant.Component.Name.HADOOP,
          e);
      return false;
    }
  }
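
The bring-up order matters here: JournalNodes (HA only) come up before the NameNode is formatted, the active NameNode starts before the standby is bootstrapped, ZKFC is initialized and started only when automatic failover is enabled, and DataNodes start last. The sketch below restates that ordering as a plain, self-contained plan builder; the step strings and the HdfsStartupPlanSketch class are illustrative only, while the real work above is delegated to HadoopCommandsManager and HadoopServiceManager.

import java.util.ArrayList;
import java.util.List;

public class HdfsStartupPlanSketch {

  // Produce the ordered list of bring-up steps for a given configuration
  static List<String> plan(
      boolean haEnabled, boolean automaticFailover, boolean hasSecondaryNameNode) {
    List<String> steps = new ArrayList<String>();
    if (haEnabled) {
      steps.add("start JournalNodes");
    }
    steps.add("format NameNode");
    steps.add("start NameNode");
    if (haEnabled) {
      steps.add("initialize standby NameNode");
      steps.add("start standby NameNode");
      if (automaticFailover) {
        steps.add("initialize HA state in ZooKeeper");
        steps.add("start ZKFC on active NameNode");
        steps.add("start ZKFC on standby NameNode");
      }
    } else if (hasSecondaryNameNode) {
      steps.add("start SecondaryNameNode");
    }
    steps.add("start DataNodes on slave hosts");
    return steps;
  }

  public static void main(String[] args) {
    // Example: HA cluster with automatic failover enabled
    plan(true, true, false).forEach(System.out::println);
  }
}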