Example #1
  private boolean setJavaInHadoopEnvScript(
      NodeConfig nodeConfig, String filePath, String targetText) {
    try {
      String javaHome = this.clusterConfig.getJavaConf().getHomeDir();

      String replacementText =
          AppStoreWrapper.getAnkushConfReader()
              .getStringValue("hadoop2.javahome.hadoopenv.replacementtext");

      // Use a literal replacement here; replaceAll() would treat the template
      // token as a regex and the Java home path as a regex replacement string.
      replacementText =
          replacementText.replace(
              AppStoreWrapper.getAnkushConfReader()
                  .getStringValue("hadoop2.template.javahome.value"),
              javaHome);

      AnkushTask updateJavaHome =
          new ReplaceText(
              targetText,
              replacementText,
              filePath,
              false,
              this.clusterConfig.getAuthConf().getPassword());

      Result res = nodeConfig.getConnection().exec(updateJavaHome);
      if (!res.isSuccess) {
        String errMsg = "Could not update Java in Hadoop environment script " + filePath;
        HadoopUtils.addAndLogError(
            LOG, clusterConfig, errMsg, Constant.Component.Name.HADOOP, nodeConfig.getHost());
        return false;
      }
      return true;
    } catch (Exception e) {
      String errMsg = "Could not update Java in Hadoop environment script " + filePath;
      HadoopUtils.addAndLogError(
          LOG, clusterConfig, errMsg, Constant.Component.Name.HADOOP, nodeConfig.getHost(), e);
      return false;
    }
  }
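The ReplaceText task above swaps a templated JAVA_HOME line in a Hadoop environment script for a concrete one. A minimal sketch of that substitution, using hypothetical target, template, and path values (the real ones come from the Ankush configuration reader):

public class JavaHomeTemplateDemo {
  public static void main(String[] args) {
    // Hypothetical values; the real ones are read from the Ankush conf file.
    String targetText = "export JAVA_HOME=";                      // line to locate in hadoop-env.sh
    String template = "export JAVA_HOME=_JAVA_HOME_PLACEHOLDER_"; // replacement template
    String javaHome = "/usr/lib/jvm/java-7-openjdk-amd64";

    // Literal placeholder substitution, mirroring the method above.
    String replacementText = template.replace("_JAVA_HOME_PLACEHOLDER_", javaHome);

    System.out.println(replacementText);
    // -> export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
  }
}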
Example #2
  private boolean setJavaInHadoop(NodeConfig nodeConfig) {
    try {
      LOG.info(
          "Configuring Java in Hadoop environment scripts",
          Constant.Component.Name.HADOOP,
          nodeConfig.getHost());

      LinkedHashMap<String, String> scriptTargetTextMap = new LinkedHashMap<String, String>();
      scriptTargetTextMap.put(
          HadoopUtils.getHadoopConfDir(this.compConfig)
              + HadoopConstants.FileName.ConfigurationFile.ENV_HADOOP,
          AppStoreWrapper.getAnkushConfReader()
              .getStringValue("hadoop2.javahome.hadoopenv.targettext"));
      scriptTargetTextMap.put(
          HadoopUtils.getHadoopConfDir(this.compConfig)
              + HadoopConstants.FileName.ConfigurationFile.ENV_YARN,
          AppStoreWrapper.getAnkushConfReader()
              .getStringValue("hadoop2.javahome.yarnenv.targettext"));

      scriptTargetTextMap.put(
          HadoopUtils.getHadoopConfDir(this.compConfig)
              + HadoopConstants.FileName.ConfigurationFile.ENV_MAPRED,
          // Assumed key name: the original repeated the yarn-env key here.
          AppStoreWrapper.getAnkushConfReader()
              .getStringValue("hadoop2.javahome.mapredenv.targettext"));

      for (Map.Entry<String, String> entry : scriptTargetTextMap.entrySet()) {
        if (!this.setJavaInHadoopEnvScript(nodeConfig, entry.getKey(), entry.getValue())) {
          return false;
        }
      }
      return true;
    } catch (Exception e) {
      String errMsg = "Could not update Java in Hadoop environment scripts";
      HadoopUtils.addAndLogError(
          LOG, clusterConfig, errMsg, Constant.Component.Name.HADOOP, nodeConfig.getHost(), e);
      return false;
    }
  }
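The method walks an insertion-ordered map of script paths and stops at the first failed edit. A stripped-down sketch of that fail-fast pattern, with a hypothetical editFile helper standing in for setJavaInHadoopEnvScript:

import java.util.LinkedHashMap;
import java.util.Map;

public class FailFastMapWalk {
  // Hypothetical stand-in for setJavaInHadoopEnvScript.
  static boolean editFile(String path, String targetText) {
    System.out.println("editing " + path + " (target: " + targetText + ")");
    return true; // pretend the edit succeeded
  }

  public static void main(String[] args) {
    // LinkedHashMap keeps insertion order, so scripts are edited deterministically.
    Map<String, String> scripts = new LinkedHashMap<>();
    scripts.put("hadoop-env.sh", "export JAVA_HOME=");
    scripts.put("yarn-env.sh", "export JAVA_HOME=");
    scripts.put("mapred-env.sh", "export JAVA_HOME=");

    for (Map.Entry<String, String> e : scripts.entrySet()) {
      if (!editFile(e.getKey(), e.getValue())) {
        System.out.println("stopping at first failure");
        return; // fail fast, as the method above does
      }
    }
    System.out.println("all scripts updated");
  }
}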
Example #3
/** @author hokam */
public class ServerCrashManager {

  // Ankush logger.
  private AnkushLogger log = new AnkushLogger(ServerCrashManager.class);

  // cluster manager.
  private static GenericManager<Cluster, Long> clusterManager =
      AppStoreWrapper.getManager(Constant.Manager.CLUSTER, Cluster.class);

  // node manager.
  private static GenericManager<Node, Long> nodeManager =
      AppStoreWrapper.getManager(Constant.Manager.NODE, Node.class);

  // operation manager.
  private static GenericManager<Operation, Long> operationManager =
      AppStoreWrapper.getManager(
          com.impetus.ankush2.constant.Constant.Manager.OPERATION, Operation.class);

  /** Method to handle clusters left in the DEPLOYING or REMOVING state after a server crash. */
  public void handleDeployingRemovingClusters() {

    // Creating property map for deploying state.
    Map<String, Object> deployingStateMap = new HashMap<String, Object>();
    deployingStateMap.put(
        com.impetus.ankush2.constant.Constant.Keys.STATE,
        com.impetus.ankush2.constant.Constant.Cluster.State.DEPLOYING.toString());
    // Creating property map for removing state.
    Map<String, Object> removingStateMap = new HashMap<String, Object>();
    removingStateMap.put(
        com.impetus.ankush2.constant.Constant.Keys.STATE,
        com.impetus.ankush2.constant.Constant.Cluster.State.REMOVING.toString());

    // Map<String, Object> registeringStateMap = new HashMap<String, Object>();
    // registeringStateMap.put(com.impetus.ankush2.constant.Constant.Keys.STATE,
    //     com.impetus.ankush2.constant.Constant.Cluster.State.REGISTERING);

    // making list of maps
    List<Map<String, Object>> maps = new ArrayList<Map<String, Object>>();
    maps.add(deployingStateMap);
    maps.add(removingStateMap);
    // maps.add(registeringStateMap);

    // list of deploying + removing state clusters.
    List<Cluster> clusters = clusterManager.getAllByDisjunctionveNormalQuery(maps);

    // iterating over the all deploying/removing state clusters.
    for (Cluster cluster : clusters) {
      // getting clusterable object.
      try {
        // setting state as crashed.
        cluster.setState(Constant.Cluster.State.SERVER_CRASHED.toString());
        // getting cluster conf.
        ClusterConfig conf = cluster.getClusterConfig();
        // setting id of cluster inside conf.
        conf.setClusterId(cluster.getId());
        // setting state as error.
        conf.setState(com.impetus.ankush2.constant.Constant.Cluster.State.ERROR);
        // adding error message.
        conf.addError("Deploy", "Server crashed unexpectedly.");
        // saving cluster conf.
        cluster.setClusterConf(conf);
        // saving cluster.
        clusterManager.save(cluster);
      } catch (Exception e) {
        log.error(e.getMessage());
        try {
          // setting server crashed as state.
          cluster.setState(Constant.Cluster.State.SERVER_CRASHED.toString());
          // saving in db.
          clusterManager.save(cluster);
        } catch (Exception subExe) {
          log.error(subExe.getMessage());
        }
      }
    }
  }

  /** Method to handle nodes left in the DEPLOYING, REMOVING, or ADDING state after a server crash. */
  public void handleDeployingRemovingNodes() {

    // Creating property map for deploying state.
    Map<String, Object> deployingStateMap = new HashMap<String, Object>();
    deployingStateMap.put(
        com.impetus.ankush2.constant.Constant.Keys.STATE,
        com.impetus.ankush2.constant.Constant.Node.State.DEPLOYING.toString());
    // Creating property map for removing state.
    Map<String, Object> removingStateMap = new HashMap<String, Object>();
    removingStateMap.put(
        com.impetus.ankush2.constant.Constant.Keys.STATE,
        com.impetus.ankush2.constant.Constant.Node.State.REMOVING.toString());

    Map<String, Object> addingStateMap = new HashMap<String, Object>();
    addingStateMap.put(
        com.impetus.ankush2.constant.Constant.Keys.STATE,
        com.impetus.ankush2.constant.Constant.Node.State.ADDING.toString());

    // making list of maps
    List<Map<String, Object>> maps = new ArrayList<Map<String, Object>>();
    maps.add(deployingStateMap);
    maps.add(removingStateMap);
    maps.add(addingStateMap);

    try {
      // list of deploying/removing/adding state nodes.
      List<Node> nodes = nodeManager.getAllByDisjunctionveNormalQuery(maps);
      // setting node state as Server_Crashed
      for (Node node : nodes) {
        node.setState(Constant.Node.State.SERVER_CRASHED.toString());
        nodeManager.save(node);
      }
    } catch (Exception e) {
      log.error(e.getMessage());
    }
  }

  /** Method that marks all in-progress operations as ERROR after a server crash. */
  public void handleInProgressOperations() {
    try {
      // Creating property map for the in-progress status.
      Map<String, Object> inProgressStatusMap = new HashMap<String, Object>();
      inProgressStatusMap.put(
          com.impetus.ankush2.constant.Constant.Keys.STATUS,
          com.impetus.ankush2.constant.Constant.Operation.Status.INPROGRESS.toString());

      List<Operation> operations = operationManager.getAllByPropertyValue(inProgressStatusMap);

      for (Operation operation : operations) {
        try {
          operation.setStatus(
              com.impetus.ankush2.constant.Constant.Operation.Status.ERROR.toString());
          operation.setCompletedAt(new Date());
          operation.getData().put("Error", "Server crashed unexpectedly.");
          operationManager.save(operation);
        } catch (Exception e) {
          log.error(e.getMessage());
          try {
            operation.setStatus(
                com.impetus.ankush2.constant.Constant.Operation.Status.ERROR.toString());
            operationManager.save(operation);
          } catch (Exception subExe) {
            log.error(subExe.getMessage());
          }
        }
      }
    } catch (Exception e) {
      log.error(e.getMessage());
    }
  }
}
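These handlers only make sense when run once at server startup, before any new operations begin. A hypothetical wiring sketch (the listener class and its registration are assumptions for illustration, not part of the Ankush source):

import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;

// Hypothetical startup hook; would be registered in web.xml as a <listener>.
public class CrashRecoveryListener implements ServletContextListener {

  @Override
  public void contextInitialized(ServletContextEvent event) {
    // Sweep anything the previous server instance left half-finished.
    ServerCrashManager crashManager = new ServerCrashManager();
    crashManager.handleDeployingRemovingClusters();
    crashManager.handleDeployingRemovingNodes();
    crashManager.handleInProgressOperations();
  }

  @Override
  public void contextDestroyed(ServletContextEvent event) {
    // Nothing to clean up on shutdown.
  }
}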
Example #4
  /**
   * Configure yarn site xml.
   *
   * @param nodeConfig the node config
   * @return true, if successful
   */
  protected boolean configureYarnSiteXml(NodeConfig nodeConfig) {
    boolean status = true;
    try {
      String yarnSiteXmlPath =
          HadoopUtils.getHadoopConfDir(this.compConfig)
              + HadoopConstants.FileName.ConfigurationFile.XML_YARN_SITE;

      LOG.info(
          "Configuring " + HadoopConstants.FileName.ConfigurationFile.XML_YARN_SITE + " file",
          Constant.Component.Name.HADOOP,
          nodeConfig.getHost());

      String valAuxServices = "mapreduce_shuffle";
      String keyMRShuffleClass = "yarn.nodemanager.aux-services.mapreduce_shuffle.class";

      if (this.isCdh4_4_0()) {
        valAuxServices =
            AppStoreWrapper.getAnkushConfReader()
                .getStringValue("hadoop2.propval.auxservices." + this.compConfig.getVersion());

        keyMRShuffleClass =
            AppStoreWrapper.getAnkushConfReader()
                .getStringValue("hadoop2.propname.mrshuffleclass." + this.compConfig.getVersion());
      }

      Map<String, String> paramList = new HashMap<String, String>();
      String resourceManagerNode = HadoopUtils.getResourceManagerHost(this.compConfig);

      paramList.put("yarn.nodemanager.aux-services", valAuxServices);
      paramList.put(keyMRShuffleClass, "org.apache.hadoop.mapred.ShuffleHandler");

      paramList.put("yarn.resourcemanager.hostname", resourceManagerNode);

      paramList.put(
          "yarn.resourcemanager.address",
          resourceManagerNode + ":" + Hadoop2Configurator.DEFAULT_PORT_RPC_RESOURCEMANAGER);
      paramList.put(
          "yarn.resourcemanager.resource-tracker.address",
          resourceManagerNode + ":" + Hadoop2Configurator.DEFAULT_PORT_RPC_RM_RESOURCETRACKER);
      paramList.put(
          "yarn.resourcemanager.scheduler.address",
          resourceManagerNode + ":" + Hadoop2Configurator.DEFAULT_PORT_RPC_RM_SCHEDULER);
      paramList.put(
          "yarn.resourcemanager.scheduler.class",
          "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");

      // Guard against a missing advance-conf entry; unboxing a null Boolean would NPE.
      Boolean webAppProxyServerEnabled =
          (Boolean)
              this.compConfig.getAdvanceConfProperty(
                  HadoopConstants.AdvanceConfKeys.WEBAPPPROXYSERVER_ENABLED);

      if (Boolean.TRUE.equals(webAppProxyServerEnabled)) {
        paramList.put(
            "yarn.web-proxy.address",
            HadoopUtils.getWebAppProxyServerHost(this.compConfig)
                + ":"
                + Hadoop2Configurator.DEFAULT_PORT_RPC_RM_WEBAPPPROXYSERVER);
      }

      for (Map.Entry<String, String> entry : paramList.entrySet()) {
        String propertyName = entry.getKey();
        status =
            HadoopUtils.addPropertyToFile(
                clusterConfig, nodeConfig, yarnSiteXmlPath, propertyName, entry.getValue());

        if (!status) {
          HadoopUtils.addAndLogError(
              LOG,
              clusterConfig,
              "Could not add " + propertyName + " property to " + yarnSiteXmlPath + " file",
              Constant.Component.Name.HADOOP,
              nodeConfig.getHost());
          return false;
        }
      }
      return true;

    } catch (Exception e) {
      HadoopUtils.addAndLogError(
          LOG,
          clusterConfig,
          "Could not update Hadoop yarn-site.xml  file",
          Constant.Component.Name.HADOOP,
          nodeConfig.getHost(),
          e);
      return false;
    }
  }
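Each entry in paramList presumably ends up as a <property> element in yarn-site.xml. A minimal sketch of that rendering, assuming the standard Hadoop configuration XML layout (the real serialization is done by HadoopUtils.addPropertyToFile, and the hostname here is hypothetical):

import java.util.LinkedHashMap;
import java.util.Map;

public class YarnSitePropertyDemo {
  // Renders one name/value pair in the standard Hadoop configuration layout.
  static String toPropertyXml(String name, String value) {
    return "  <property>\n"
        + "    <name>" + name + "</name>\n"
        + "    <value>" + value + "</value>\n"
        + "  </property>";
  }

  public static void main(String[] args) {
    // Hypothetical host; the real value comes from HadoopUtils.getResourceManagerHost().
    String rmHost = "rm-node.example.com";

    Map<String, String> params = new LinkedHashMap<>();
    params.put("yarn.nodemanager.aux-services", "mapreduce_shuffle");
    params.put("yarn.resourcemanager.hostname", rmHost);
    params.put("yarn.resourcemanager.address", rmHost + ":8032"); // 8032 is the stock RM RPC port

    System.out.println("<configuration>");
    params.forEach((name, value) -> System.out.println(toPropertyXml(name, value)));
    System.out.println("</configuration>");
  }
}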