/**
 * Make assignments for a topology. This is the nimbus core function; it has
 * been totally rewritten.
 *
 * @param event TopologyAssignEvent
 * @return the new Assignment
 * @throws Exception
 */
public Assignment mkAssignment(TopologyAssignEvent event) throws Exception {
    String topologyId = event.getTopologyId();
    LOG.info("Determining assignment for " + topologyId);

    TopologyAssignContext context = prepareTopologyAssign(event);

    Set<ResourceWorkerSlot> assignments = null;
    if (!StormConfig.local_mode(nimbusData.getConf())) {
        // Distributed mode: delegate task placement to the default scheduler
        IToplogyScheduler scheduler = schedulers.get(DEFAULT_SCHEDULER_NAME);
        assignments = scheduler.assignTasks(context);
    } else {
        // Local mode: pack all tasks into a single worker slot
        assignments = mkLocalAssignment(context);
    }

    Map<String, String> nodeHost =
            getTopologyNodeHost(context.getCluster(), context.getOldAssignment(), assignments);
    Map<Integer, Integer> startTimes =
            getTaskStartTimes(context, nimbusData, topologyId, context.getOldAssignment(), assignments);

    String codeDir = StormConfig.masterStormdistRoot(nimbusData.getConf(), topologyId);

    Assignment assignment = new Assignment(codeDir, assignments, nodeHost, startTimes);

    StormClusterState stormClusterState = nimbusData.getStormClusterState();
    stormClusterState.set_assignment(topologyId, assignment);

    // Update the task heartbeat's start time
    NimbusUtils.updateTaskHbStartTime(nimbusData, assignment, topologyId);

    // Update metrics information in ZK when rebalancing or reassigning;
    // only update the metrics monitor status when creating a topology
    if (context.getAssignType() == TopologyAssignContext.ASSIGN_TYPE_REBALANCE
            || context.getAssignType() == TopologyAssignContext.ASSIGN_TYPE_MONITOR) {
        NimbusUtils.updateMetricsInfo(nimbusData, topologyId, assignment);
    } else {
        metricsMonitor(event);
    }

    LOG.info("Successfully made assignment for topology id " + topologyId + ": " + assignment);

    return assignment;
}
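/*
 * Illustrative usage sketch (not part of the original source): how a caller
 * might drive mkAssignment. The TopologyAssignEvent setter shown is an
 * assumption inferred from the getTopologyId() call above, and the topology
 * id is a made-up example value.
 *
 *     TopologyAssignEvent event = new TopologyAssignEvent();
 *     event.setTopologyId("sequence-test-1-1419222232"); // hypothetical id
 *     Assignment assignment = mkAssignment(event);
 *     // assignment now carries the worker slots, node->host map,
 *     // task start times and the code directory written to ZK
 */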
private static Set<ResourceWorkerSlot> mkLocalAssignment(TopologyAssignContext context) {
    Set<ResourceWorkerSlot> result = new HashSet<ResourceWorkerSlot>();
    Map<String, SupervisorInfo> cluster = context.getCluster();
    if (cluster.size() != 1) {
        throw new RuntimeException("Local mode assignment expects exactly one supervisor, found " + cluster.size());
    }

    // In local mode the cluster contains a single supervisor; grab it
    SupervisorInfo localSupervisor = null;
    String supervisorId = null;
    for (Entry<String, SupervisorInfo> entry : cluster.entrySet()) {
        supervisorId = entry.getKey();
        localSupervisor = entry.getValue();
    }

    // Pack every task of the topology into one worker on the first free port
    int port = localSupervisor.getWorkerPorts().iterator().next();
    ResourceWorkerSlot worker = new ResourceWorkerSlot(supervisorId, port);
    worker.setTasks(new HashSet<Integer>(context.getAllTaskIds()));
    worker.setHostname(localSupervisor.getHostName());
    result.add(worker);
    return result;
}
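/*
 * Sketch of the local-mode result (illustrative values, not from the source):
 * for a cluster { "sup-1" -> localSupervisor } with worker ports [6700, 6701]
 * and task ids [1, 2, 3], mkLocalAssignment returns a single slot:
 *
 *     ResourceWorkerSlot("sup-1", 6700) with tasks {1, 2, 3}
 *
 * i.e. the whole topology runs inside one worker process.
 */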
/**
 * Note: a problem may exist here, because some dead slots may already have
 * been freed.
 *
 * @param context
 */
protected void freeUsed(TopologyAssignContext context) {
    // Tasks whose resources can be freed = all tasks minus the unstopped ones
    Set<Integer> canFree = new HashSet<Integer>();
    canFree.addAll(context.getAllTaskIds());
    canFree.removeAll(context.getUnstoppedTaskIds());

    Map<String, SupervisorInfo> cluster = context.getCluster();
    Map<Integer, ResourceAssignment> oldAssigns = context.getOldAssignment().getTaskToResource();
    for (Integer task : canFree) {
        ResourceAssignment oldAssign = oldAssigns.get(task);
        if (oldAssign == null) {
            LOG.warn("When freeing rebalance resources, no ResourceAssignment of task " + task);
            continue;
        }

        SupervisorInfo supervisorInfo = cluster.get(oldAssign.getSupervisorId());
        if (supervisorInfo == null) {
            continue;
        }

        // Return the task's CPU, memory, disk and network resources
        // to the supervisor's pools
        supervisorInfo.getCpuPool().free(oldAssign.getCpuSlotNum(), context);
        supervisorInfo.getMemPool().free(oldAssign.getMemSlotNum(), context);
        supervisorInfo.getDiskPool().free(oldAssign.getDiskSlot(), context);
        supervisorInfo.getNetPool().free(oldAssign.getPort(), context);
    }
}
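/*
 * Worked example of the canFree computation above, using plain java.util
 * (illustrative values, not from the source; requires java.util.Arrays):
 *
 *     Set<Integer> all = new HashSet<Integer>(Arrays.asList(1, 2, 3, 4));
 *     Set<Integer> unstopped = new HashSet<Integer>(Arrays.asList(3));
 *     all.removeAll(unstopped);   // all is now {1, 2, 4}
 *
 * Only tasks 1, 2 and 4 would have their old CPU, memory, disk and port
 * resources returned to the supervisor pools; task 3 is left untouched.
 */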