  /**
   * Get the unstopped tasks from the alive task list: tasks that are themselves still alive
   * although the supervisor hosting them is already dead.
   */
  public Set<Integer> getUnstoppedSlots(
      Set<Integer> aliveTasks, Map<String, SupervisorInfo> supInfos, Assignment existAssignment) {
    Set<Integer> ret = new HashSet<Integer>();

    Set<ResourceWorkerSlot> oldWorkers = existAssignment.getWorkers();

    Set<String> aliveSupervisors = supInfos.keySet();

    for (ResourceWorkerSlot worker : oldWorkers) {
      for (Integer taskId : worker.getTasks()) {
        if (!aliveTasks.contains(taskId)) {
          // task is dead
          continue;
        }

        String oldTaskSupervisorId = worker.getNodeId();

        if (!aliveSupervisors.contains(oldTaskSupervisorId)) {
          // supervisor is dead
          ret.add(taskId);
          continue;
        }
      }
    }

    return ret;
  }
  /**
   * Get all task ids that should be reassigned, i.e. the tasks of every worker that is new or
   * changed compared with the old assignment.
   *
   * @param oldWorkers workers of the existing assignment
   * @param workers workers of the new assignment
   * @return task ids that should be reassigned
   */
  public static Set<Integer> getChangeTaskIds(
      Set<ResourceWorkerSlot> oldWorkers, Set<ResourceWorkerSlot> workers) {

    Set<Integer> rtn = new HashSet<Integer>();
    for (ResourceWorkerSlot worker : workers) {
      if (!oldWorkers.contains(worker)) {
        rtn.addAll(worker.getTasks());
      }
    }
    return rtn;
  }
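  /**
   * Get the workers of the existing assignment whose tasks are all still alive, i.e. workers that
   * have not been stopped.
   */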
  private Set<ResourceWorkerSlot> getUnstoppedWorkers(
      Set<Integer> aliveTasks, Assignment existAssignment) {
    Set<ResourceWorkerSlot> ret = new HashSet<ResourceWorkerSlot>();
    for (ResourceWorkerSlot worker : existAssignment.getWorkers()) {
      boolean alive = true;
      for (Integer task : worker.getTasks()) {
        if (!aliveTasks.contains(task)) {
          alive = false;
          break;
        }
      }
      if (alive) {
        ret.add(worker);
      }
    }
    return ret;
  }
  /**
   * Get the start time (in seconds) of every task in the new assignment. For a brand-new
   * assignment all tasks start now; otherwise tasks keep their old start time unless their worker
   * changed, in which case they are re-stamped and their stale heartbeat is removed.
   *
   * @param context topology assign context, used to tell a new assignment from a rebalance
   * @param nimbusData nimbus runtime data, used to reach the cluster state
   * @param topologyId id of the topology being assigned
   * @param existingAssignment the previous assignment, may be null
   * @param workers workers of the new assignment
   * @return map from task id to start time in seconds
   * @throws Exception if removing stale task heartbeats from the cluster state fails
   */
  public static Map<Integer, Integer> getTaskStartTimes(
      TopologyAssignContext context,
      NimbusData nimbusData,
      String topologyId,
      Assignment existingAssignment,
      Set<ResourceWorkerSlot> workers)
      throws Exception {

    Map<Integer, Integer> startTimes = new TreeMap<Integer, Integer>();

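    // For a brand-new assignment every task starts now, so stamp them all with the current time.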
    if (context.getAssignType() == TopologyAssignContext.ASSIGN_TYPE_NEW) {
      int nowSecs = TimeUtils.current_time_secs();
      for (ResourceWorkerSlot worker : workers) {
        for (Integer changedTaskId : worker.getTasks()) {
          startTimes.put(changedTaskId, nowSecs);
        }
      }

      return startTimes;
    }

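    // For an existing topology, start from the previously recorded start times and workers.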
    Set<ResourceWorkerSlot> oldWorkers = new HashSet<ResourceWorkerSlot>();

    if (existingAssignment != null) {
      Map<Integer, Integer> taskStartTimeSecs = existingAssignment.getTaskStartTimeSecs();
      if (taskStartTimeSecs != null) {
        startTimes.putAll(taskStartTimeSecs);
      }

      if (existingAssignment.getWorkers() != null) {
        oldWorkers = existingAssignment.getWorkers();
      }
    }

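    // Every task whose worker changed gets a fresh start time, and its stale heartbeat is removed.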
    StormClusterState zkClusterState = nimbusData.getStormClusterState();
    Set<Integer> changeTaskIds = getChangeTaskIds(oldWorkers, workers);
    int nowSecs = TimeUtils.current_time_secs();
    for (Integer changedTaskId : changeTaskIds) {
      startTimes.put(changedTaskId, nowSecs);

      zkClusterState.remove_task_heartbeat(topologyId, changedTaskId);
    }

    LOG.info("Task assignment has been changed " + changeTaskIds);
    return startTimes;
  }
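  /**
   * Build the assignment for a single-supervisor (local mode) cluster: all tasks of the topology
   * are packed into one worker on the only supervisor's first available port.
   */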
  private static Set<ResourceWorkerSlot> mkLocalAssignment(TopologyAssignContext context) {
    Set<ResourceWorkerSlot> result = new HashSet<ResourceWorkerSlot>();
    Map<String, SupervisorInfo> cluster = context.getCluster();
    if (cluster.size() != 1) {
      throw new RuntimeException(
          "Local mode assignment expects exactly one supervisor, but found " + cluster.size());
    }
    SupervisorInfo localSupervisor = null;
    String supervisorId = null;
    for (Entry<String, SupervisorInfo> entry : cluster.entrySet()) {
      supervisorId = entry.getKey();
      localSupervisor = entry.getValue();
    }
    int port = localSupervisor.getWorkerPorts().iterator().next();
    ResourceWorkerSlot worker = new ResourceWorkerSlot(supervisorId, port);
    worker.setTasks(new HashSet<Integer>(context.getAllTaskIds()));
    worker.setHostname(localSupervisor.getHostName());
    result.add(worker);
    return result;
  }
  /**
   * Compute the free slots: remove every port that is already used by an existing assignment from
   * the supervisors' port sets, so that only free ports remain in supervisorInfos.
   *
   * @param supervisorInfos alive supervisors; their worker port sets are modified in place
   * @param stormClusterState cluster state used to read all existing assignments
   * @throws Exception if reading the existing assignments fails
   */
  public static void getFreeSlots(
      Map<String, SupervisorInfo> supervisorInfos, StormClusterState stormClusterState)
      throws Exception {

    Map<String, Assignment> assignments = Cluster.get_all_assignment(stormClusterState, null);

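    // Walk every existing assignment and remove its used ports from the supervisors' port sets,
    // so that only free ports remain.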
    for (Entry<String, Assignment> entry : assignments.entrySet()) {
      String topologyId = entry.getKey();
      Assignment assignment = entry.getValue();

      Set<ResourceWorkerSlot> workers = assignment.getWorkers();

      for (ResourceWorkerSlot worker : workers) {

        SupervisorInfo supervisorInfo = supervisorInfos.get(worker.getNodeId());
        if (supervisorInfo == null) {
          // the supervisor is dead
          continue;
        }
        supervisorInfo.getWorkerPorts().remove(worker.getPort());
      }
    }
  }
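  /**
   * Build the supervisorId-to-hostname map for the nodes used by the new assignment, merging the
   * hosts recorded in the existing assignment with those of the currently alive supervisors.
   */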
  public static Map<String, String> getTopologyNodeHost(
      Map<String, SupervisorInfo> supervisorMap,
      Assignment existingAssignment,
      Set<ResourceWorkerSlot> workers) {

    // Collect the nodes used by the new assignment; nodes outside this set are dropped below.
    Set<String> usedNodes = new HashSet<String>();
    for (ResourceWorkerSlot worker : workers) {
      usedNodes.add(worker.getNodeId());
    }

    // map<supervisorId, hostname>
    Map<String, String> allNodeHost = new HashMap<String, String>();

    if (existingAssignment != null) {
      allNodeHost.putAll(existingAssignment.getNodeHost());
    }

    // get the alive supervisors: Map<supervisorId, hostname>
    Map<String, String> nodeHost = SupervisorInfo.getNodeHost(supervisorMap);
    if (nodeHost != null) {
      allNodeHost.putAll(nodeHost);
    }

    Map<String, String> ret = new HashMap<String, String>();

    for (String supervisorId : usedNodes) {
      if (allNodeHost.containsKey(supervisorId)) {
        ret.put(supervisorId, allNodeHost.get(supervisorId));
      } else {
        LOG.warn("Node " + supervisorId + " isn't in the supervisor list");
      }
    }

    return ret;
  }