Code Example #1
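This method (apparently from JStorm's nimbus-side rebalance callback, judging by the StormClusterState and TaskInfo types) diffs the spout parallelism hints of an old and a new StormTopology: when a hint shrinks it removes the highest-numbered task ids for that spout, and when it grows it registers new task ids in cluster state. topologyid, newTasks, removeTask, and LOG are assumed to be members of the enclosing class.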
  private int setSpoutInfo(
      StormTopology oldTopology, StormTopology newTopology, int cnt, StormClusterState clusterState)
      throws Exception {
    Map<String, SpoutSpec> oldSpouts = oldTopology.get_spouts();
    Map<String, SpoutSpec> spouts = newTopology.get_spouts();
    for (Entry<String, SpoutSpec> entry : oldSpouts.entrySet()) {
      String spoutName = entry.getKey();
      SpoutSpec oldSpout = entry.getValue();
      // Assumes every old spout still exists in the new topology; a spout
      // dropped from newTopology would make "spout" null here.
      SpoutSpec spout = spouts.get(spoutName);
      int oldParallelism = oldSpout.get_common().get_parallelism_hint();
      int newParallelism = spout.get_common().get_parallelism_hint();
      if (oldParallelism > newParallelism) {
        // Scale-in: remove the highest-numbered task ids first.
        int removedTaskNum = oldParallelism - newParallelism;
        TreeSet<Integer> taskIds =
            new TreeSet<Integer>(clusterState.task_ids_by_componentId(topologyid, spoutName));
        Iterator<Integer> descendIterator = taskIds.descendingIterator();
        while (--removedTaskNum >= 0) {
          int taskId = descendIterator.next();
          removeTask(topologyid, taskId, clusterState);
          LOG.info("Remove spout task, taskId=" + taskId + " for " + spoutName);
        }
      } else if (oldParallelism == newParallelism) {
        // Parallelism unchanged: nothing to do for this spout.
        continue;
      } else {
        // Scale-out: allocate new task ids by bumping the global task counter.
        int delta = newParallelism - oldParallelism;
        Map<Integer, TaskInfo> taskInfoMap = new HashMap<Integer, TaskInfo>();
        for (int i = 1; i <= delta; i++) {
          cnt++;
          TaskInfo taskInfo = new TaskInfo(spoutName, "spout");
          taskInfoMap.put(cnt, taskInfo);
          newTasks.add(cnt);
          LOG.info("Setup new spout task, taskId=" + cnt + " for " + spoutName);
        }
        clusterState.add_task(topologyid, taskInfoMap);
      }
    }

    return cnt;
  }
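The scale-in branch above leans on TreeSet.descendingIterator() to pick the highest task ids for removal; in this scheme higher ids correspond to the most recently added tasks. A minimal, self-contained sketch of just that selection step, with made-up task ids (ScaleInDemo is illustrative, not JStorm code):

import java.util.Arrays;
import java.util.Iterator;
import java.util.TreeSet;

public class ScaleInDemo {
  public static void main(String[] args) {
    // Hypothetical task ids currently assigned to one spout.
    TreeSet<Integer> taskIds = new TreeSet<Integer>(Arrays.asList(3, 7, 12, 18, 25));
    int removedTaskNum = 2; // parallelism hint shrinks by two

    // Same pattern as setSpoutInfo: walk from the largest id downward.
    Iterator<Integer> descendIterator = taskIds.descendingIterator();
    while (--removedTaskNum >= 0) {
      System.out.println("would remove taskId=" + descendIterator.next());
    }
    // Prints 25, then 18: the newest tasks are dropped first.
  }
}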
Code Example #2
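This execute method drives the actual rebalance: it re-reads the stored topology conf and code, normalizes the new topology, updates acker and worker counts (and isolation-scheduler machines) in the conf, persists whatever changed, then pushes a TopologyAssignEvent and waits for it. If anything fails after task info was written, the catch block rolls that step back on ZK. topologyid, oldStatus, newTasks, and data are again assumed fields of the enclosing callback.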
  @Override
  public <T> Object execute(T... args) {
    // args[0]: delay, args[1]: reassign_flag, args[2]: conf
    boolean isSetTaskInfo = false;
    try {
      Boolean reassign = (Boolean) args[1];
      Map<Object, Object> conf = (Map<Object, Object>) args[2];
      if (conf != null) {
        boolean isConfUpdate = false;
        Map stormConf = data.getConf();

        // Update topology code
        Map topoConf = StormConfig.read_nimbus_topology_conf(stormConf, topologyid);
        StormTopology rawOldTopology = StormConfig.read_nimbus_topology_code(stormConf, topologyid);
        StormTopology rawNewTopology = NimbusUtils.normalizeTopology(conf, rawOldTopology, true);
        StormTopology sysOldTopology = rawOldTopology.deepCopy();
        StormTopology sysNewTopology = rawNewTopology.deepCopy();
        if (conf.get(Config.TOPOLOGY_ACKER_EXECUTORS) != null) {
          Common.add_acker(topoConf, sysOldTopology);
          Common.add_acker(conf, sysNewTopology);
          int ackerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
          int oldAckerNum = JStormUtils.parseInt(topoConf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
          LOG.info("Update acker from oldAckerNum=" + oldAckerNum + " to ackerNum=" + ackerNum);
          topoConf.put(Config.TOPOLOGY_ACKER_EXECUTORS, ackerNum);
          isConfUpdate = true;
        }

        // If scaling out, set up task info for the newly added tasks
        setTaskInfo(sysOldTopology, sysNewTopology);
        isSetTaskInfo = true;

        // If everything is OK, write the topology code to disk
        StormConfig.write_nimbus_topology_code(
            stormConf, topologyid, Utils.serialize(rawNewTopology));

        // Update the topology conf if the worker count has changed
        Set<Object> keys = conf.keySet();
        Integer workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS));
        if (workerNum != null) {
          Integer oldWorkerNum = JStormUtils.parseInt(topoConf.get(Config.TOPOLOGY_WORKERS));
          topoConf.put(Config.TOPOLOGY_WORKERS, workerNum);
          isConfUpdate = true;

          LOG.info("Update worker num from " + oldWorkerNum + " to " + workerNum);
        }

        if (keys.contains(Config.ISOLATION_SCHEDULER_MACHINES)) {
          topoConf.put(
              Config.ISOLATION_SCHEDULER_MACHINES, conf.get(Config.ISOLATION_SCHEDULER_MACHINES));
        }

        if (isConfUpdate) {
          StormConfig.write_nimbus_topology_conf(stormConf, topologyid, topoConf);
        }
      }

      TopologyAssignEvent event = new TopologyAssignEvent();

      event.setTopologyId(topologyid);
      event.setScratch(true);
      event.setOldStatus(oldStatus);
      event.setReassign(reassign);
      if (conf != null) event.setScaleTopology(true);
      TopologyAssign.push(event);
      event.waitFinish();
    } catch (Exception e) {
      LOG.error("do-rebalance error!", e);
      // Rollback the changes on ZK
      if (isSetTaskInfo) {
        try {
          StormClusterState clusterState = data.getStormClusterState();
          clusterState.remove_task(topologyid, newTasks);
        } catch (Exception e1) {
          LOG.error("Failed to rollback the changes on ZK for task-" + newTasks, e);
        }
      }
    }

    DelayStatusTransitionCallback delayCallback =
        new DelayStatusTransitionCallback(
            data, topologyid, oldStatus, StatusType.rebalancing, StatusType.done_rebalance);
    return delayCallback.execute();
  }
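The isSetTaskInfo flag implements a compensate-on-failure pattern: record which side effect landed, and undo exactly that step if a later one throws. A stripped-down, runnable sketch of the same shape (writeTaskInfo, doRemainingWork, and removeTaskInfo are hypothetical stand-ins, not JStorm API):

public class RollbackDemo {
  public static void main(String[] args) {
    boolean taskInfoWritten = false;
    try {
      writeTaskInfo();      // side effect on shared state (e.g., ZK)
      taskInfoWritten = true;
      doRemainingWork();    // may throw after the side effect landed
    } catch (Exception e) {
      System.err.println("operation failed: " + e.getMessage());
      if (taskInfoWritten) {
        try {
          removeTaskInfo(); // compensate only for the step that happened
        } catch (Exception e1) {
          // Log the rollback's own exception, not the original one.
          System.err.println("rollback failed: " + e1.getMessage());
        }
      }
    }
  }

  static void writeTaskInfo() { System.out.println("task info written"); }
  static void doRemainingWork() throws Exception { throw new Exception("boom"); }
  static void removeTaskInfo() { System.out.println("task info removed"); }
}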