Example #1
  public TaskHeartbeatTrigger(
      Map conf,
      String name,
      DisruptorQueue queue,
      BlockingQueue<Object> controlQueue,
      int taskId,
      String componentId,
      TopologyContext sysTopologyCtx,
      ITaskReportErr reportError) {
    this.name = name;
    this.queue = queue;
    this.controlQueue = controlQueue;
    this.opCode = TimerConstants.TASK_HEARTBEAT;

    this.taskId = taskId;
    this.componentId = componentId;
    this.sysTopologyCtx = sysTopologyCtx;

    this.frequence = JStormUtils.parseInt(conf.get(Config.TASK_HEARTBEAT_FREQUENCY_SECS), 10);
    this.firstTime = frequence;

    this.executeThreadHbTime = TimeUtils.current_time_secs();
    this.taskHbTimeout = JStormUtils.parseInt(conf.get(Config.NIMBUS_TASK_TIMEOUT_SECS), 180);
    this.intervalCheck = new IntervalCheck();
    this.intervalCheck.setInterval(taskHbTimeout);
    this.intervalCheck.start();

    this.reportError = reportError;

    this.uptime = new UptimeComputer();
  }
Example #2
  @Override
  public void ack(Tuple input) {

    if (ackerNum > 0) {

      Long ack_val = Long.valueOf(0);
      Object pend_val = pending_acks.remove(input);
      if (pend_val != null) {
        ack_val = (Long) (pend_val);
      }

      for (Entry<Long, Long> e : input.getMessageId().getAnchorsToIds().entrySet()) {

        UnanchoredSend.send(
            topologyContext,
            sendTargets,
            taskTransfer,
            Acker.ACKER_ACK_STREAM_ID,
            JStormUtils.mk_list((Object) e.getKey(), JStormUtils.bit_xor(e.getValue(), ack_val)));
      }
    }

    Long delta = tuple_time_delta(tuple_start_times, input);
    if (delta != null) {
      task_stats.bolt_acked_tuple(input.getSourceComponent(), input.getSourceStreamId(), delta);
    }
  }
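Note on the acking math: pending_acks accumulates, per input tuple, the XOR of the ids of all tuples this bolt emitted anchored to it. On ack, each root's anchor id is XOR-ed with that accumulated value and sent to the acker in one message, which (in the standard Storm acking scheme) cancels the input tuple out of the acker's pending value while registering the newly anchored tuples; the tuple tree is complete once that value returns to zero. See the put_xor sketch after Example #8 below.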
  public DefaultTopologyAssignContext(TopologyAssignContext context) {
    super(context);

    try {
      sysTopology = Common.system_topology(stormConf, rawTopology);
    } catch (Exception e) {
      throw new FailedAssignTopologyException("Failed to generate system topology");
    }

    sidToHostname = generateSidToHost();
    hostToSid = JStormUtils.reverse_map(sidToHostname);

    if (oldAssignment != null && oldAssignment.getWorkers() != null) {
      oldWorkers = oldAssignment.getWorkers();
    } else {
      oldWorkers = new HashSet<ResourceWorkerSlot>();
    }

    refineDeadTasks();

    componentTasks = JStormUtils.reverse_map(context.getTaskToComponent());

    for (Entry<String, List<Integer>> entry : componentTasks.entrySet()) {
      List<Integer> componentTaskList = entry.getValue();

      Collections.sort(componentTaskList);
    }

    totalWorkerNum = computeWorkerNum();

    unstoppedWorkerNum = computeUnstoppedAssignments();
  }
  private Map<String, Integer> computeComponentWeight(
      StormTopology rawTopology, DefaultTopologyAssignContext context) {
    Map<String, Integer> ret = new HashMap<String, Integer>();

    Map<String, Object> components = ThriftTopologyUtils.getComponents(rawTopology);
    for (Entry<String, Object> entry : components.entrySet()) {
      String componentName = entry.getKey();
      Object component = entry.getValue();

      ComponentCommon common = null;
      if (component instanceof Bolt) {
        common = ((Bolt) component).get_common();
      }
      if (component instanceof SpoutSpec) {
        common = ((SpoutSpec) component).get_common();
      }
      if (component instanceof StateSpoutSpec) {
        common = ((StateSpoutSpec) component).get_common();
      }

      String jsonConfString = common.get_json_conf();
      if (jsonConfString == null) {
        ret.put(componentName, context.DEFAULT_WEIGHT);
        continue;
      }

      Map componentMap = new HashMap();
      componentMap.putAll((Map) JStormUtils.from_json(jsonConfString));

      int weight = computeWeight(componentMap, context);

      ret.put(componentName, weight);
    }
    return ret;
  }
  private int computeWorkerNum() {
    Integer settingNum = JStormUtils.parseInt(stormConf.get(Config.TOPOLOGY_WORKERS));

    int hintSum = 0;

    Map<String, Object> components = ThriftTopologyUtils.getComponents(sysTopology);
    for (Entry<String, Object> entry : components.entrySet()) {
      String componentName = entry.getKey();
      Object component = entry.getValue();

      ComponentCommon common = null;
      if (component instanceof Bolt) {
        common = ((Bolt) component).get_common();
      }
      if (component instanceof SpoutSpec) {
        common = ((SpoutSpec) component).get_common();
      }
      if (component instanceof StateSpoutSpec) {
        common = ((StateSpoutSpec) component).get_common();
      }

      int hint = common.get_parallelism_hint();
      hintSum += hint;
    }

    if (settingNum == null) {
      return hintSum;
    } else {
      return Math.min(settingNum, hintSum);
    }
  }
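For example, if the parallelism hints across the system topology sum to 10 and topology.workers is set to 6, the topology gets min(6, 10) = 6 workers; with topology.workers unset it gets all 10.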
  private ComponentAssignType getComponentType(
      DefaultTopologyAssignContext defaultContext, Integer taskId) {
    StormTopology sysTopology = defaultContext.getSysTopology();
    final Map<Integer, String> taskToComponent = defaultContext.getTaskToComponent();

    String componentName = taskToComponent.get(taskId);
    if (componentName == null) {
      LOG.warn("No component name of " + taskId);
      return ComponentAssignType.NORMAL;
    }

    ComponentCommon common = ThriftTopologyUtils.getComponentCommon(sysTopology, componentName);

    String jsonConfString = common.get_json_conf();
    if (jsonConfString == null) {
      return ComponentAssignType.NORMAL;
    }

    Map componentMap = new HashMap();
    componentMap.putAll((Map) JStormUtils.from_json(jsonConfString));

    if (JStormServerConfig.getUserDefineAssignmentFromJson(componentMap) != null) {
      // user define assignment
      return ComponentAssignType.USER_DEFINE;
    } else if (ConfigExtension.isUseOldAssignment(componentMap)) {
      // use old assignment
      return ComponentAssignType.USE_OLD;
    } else if (ConfigExtension.isUseOldAssignment(defaultContext.getStormConf())) {
      // use old assignment
      return ComponentAssignType.USE_OLD;
    } else {
      return ComponentAssignType.NORMAL;
    }
  }
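The precedence here is: a user-defined assignment configured on the component wins, then the component-level use-old-assignment flag, then the topology-level flag; everything else is treated as NORMAL.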
Example #7
  /**
   * Sort slots; the purpose is to ensure that tasks are assigned in a balanced way across nodes.
   *
   * @param allSlots all free worker slots
   * @param needSlotNum number of slots needed
   * @return List<WorkerSlot>
   */
  public static List<WorkerSlot> sortSlots(Set<WorkerSlot> allSlots, int needSlotNum) {

    Map<String, List<WorkerSlot>> nodeMap = new HashMap<String, List<WorkerSlot>>();

    // group by first
    for (WorkerSlot np : allSlots) {
      String node = np.getNodeId();

      List<WorkerSlot> list = nodeMap.get(node);
      if (list == null) {
        list = new ArrayList<WorkerSlot>();
        nodeMap.put(node, list);
      }

      list.add(np);
    }

    for (Entry<String, List<WorkerSlot>> entry : nodeMap.entrySet()) {
      List<WorkerSlot> ports = entry.getValue();

      Collections.sort(
          ports,
          new Comparator<WorkerSlot>() {

            @Override
            public int compare(WorkerSlot first, WorkerSlot second) {
              String firstNode = first.getNodeId();
              String secondNode = second.getNodeId();
              if (firstNode.equals(secondNode) == false) {
                return firstNode.compareTo(secondNode);
              } else {
                return first.getPort() - second.getPort();
              }
            }
          });
    }

    // interleave
    List<List<WorkerSlot>> splitup = new ArrayList<List<WorkerSlot>>(nodeMap.values());

    Collections.sort(
        splitup,
        new Comparator<List<WorkerSlot>>() {

          public int compare(List<WorkerSlot> o1, List<WorkerSlot> o2) {
            return o2.size() - o1.size();
          }
        });

    List<WorkerSlot> sortedFreeSlots = JStormUtils.interleave_all(splitup);

    if (sortedFreeSlots.size() <= needSlotNum) {
      return sortedFreeSlots;
    }

    // sortedFreeSlots > needSlotNum
    return sortedFreeSlots.subList(0, needSlotNum);
  }
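The balancing in sortSlots comes from grouping slots by node and then interleaving the per-node lists, so consecutive slots in the result come from different machines. Below is a minimal standalone sketch of that round-robin interleave, written here purely for illustration; it assumes JStormUtils.interleave_all behaves this way, which the snippet above does not show.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class InterleaveSketch {
  // Round-robin merge: take the i-th element of every list per pass until all lists are drained.
  static <T> List<T> interleaveAll(List<List<T>> lists) {
    List<T> result = new ArrayList<T>();
    int maxSize = 0;
    for (List<T> l : lists) {
      maxSize = Math.max(maxSize, l.size());
    }
    for (int i = 0; i < maxSize; i++) {
      for (List<T> l : lists) {
        if (i < l.size()) {
          result.add(l.get(i));
        }
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // node-a has 3 free ports, node-b has 2, node-c has 1 (already sorted by size, descending)
    List<List<String>> slotsByNode = Arrays.asList(
        Arrays.asList("a:6800", "a:6801", "a:6802"),
        Arrays.asList("b:6800", "b:6801"),
        Arrays.asList("c:6800"));
    // prints [a:6800, b:6800, c:6800, a:6801, b:6801, a:6802]
    System.out.println(interleaveAll(slotsByNode));
  }
}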
Example #8
 public static void put_xor(Map<Long, Long> pending, Long key, Long id) {
   // synchronized (pending) {
   Long curr = pending.get(key);
   if (curr == null) {
     curr = Long.valueOf(0);
   }
   pending.put(key, JStormUtils.bit_xor(curr, id));
   // }
 }
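put_xor implements the acker-style XOR bookkeeping: every emitted or acked tuple id is XOR-ed into the value stored for its root, so matching pairs cancel and the value drops back to zero once the whole tuple tree has been acked. A minimal standalone sketch of the idea, assuming JStormUtils.bit_xor is a plain long XOR (an assumption, not shown above):

import java.util.HashMap;
import java.util.Map;

public class XorPendingSketch {
  // Same shape as put_xor above, but using plain ^ instead of JStormUtils.bit_xor.
  static void putXor(Map<Long, Long> pending, Long key, Long id) {
    Long curr = pending.get(key);
    if (curr == null) {
      curr = Long.valueOf(0);
    }
    pending.put(key, curr ^ id);
  }

  public static void main(String[] args) {
    Map<Long, Long> pending = new HashMap<Long, Long>();
    long rootId = 100L;
    long tupleId = 42L;

    putXor(pending, rootId, tupleId); // tuple emitted: value becomes 42
    putXor(pending, rootId, tupleId); // tuple acked: 42 ^ 42 == 0, tree complete
    System.out.println(pending.get(rootId)); // prints 0
  }
}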
Example #9
 private void sendHbMsg() {
   List values = JStormUtils.mk_list(uptime.uptime());
   if (spoutOutputCollector != null) {
     spoutOutputCollector.emit(Common.TOPOLOGY_MASTER_HB_STREAM_ID, values);
   } else if (boltOutputCollector != null) {
     boltOutputCollector.emit(Common.TOPOLOGY_MASTER_HB_STREAM_ID, values);
   } else {
     LOG.warn("Failed to send hearbeat msg. OutputCollector has not been initialized!");
   }
 }
Example #10
  private void parseString(String input) {
    Map<String, JSONObject> map = (Map<String, JSONObject>) JStormUtils.from_json(input);

    for (JSONObject jobj : map.values()) {
      FileAttribute attribute = FileAttribute.fromJSONObject(jobj);
      if (attribute != null) {
        files.add(attribute);
      }
    }

    summary = "There are " + files.size() + " files";
  }
Example #11
 /**
  * get topology configuration
  *
  * @param id String: topology id
  * @return String
  */
 @Override
 public String getTopologyConf(String id) throws NotAliveException, TException {
   String rtn;
   try {
     Map<Object, Object> topologyConf = StormConfig.read_nimbus_topology_conf(conf, id);
     rtn = JStormUtils.to_json(topologyConf);
   } catch (IOException e) {
     LOG.info("Failed to get configuration of " + id, e);
     throw new TException(e);
   }
   return rtn;
 }
Example #12
  public SpoutCollector(
      Integer task_id,
      backtype.storm.spout.ISpout spout,
      CommonStatsRolling task_stats,
      TaskSendTargets sendTargets,
      Map _storm_conf,
      TaskTransfer _transfer_fn,
      TimeOutMap<Long, TupleInfo> pending,
      TopologyContext topology_context,
      DisruptorQueue disruptorAckerQueue,
      ITaskReportErr _report_error) {
    this.sendTargets = sendTargets;
    this.storm_conf = _storm_conf;
    this.transfer_fn = _transfer_fn;
    this.pending = pending;
    this.topology_context = topology_context;

    this.disruptorAckerQueue = disruptorAckerQueue;

    this.task_stats = task_stats;
    this.spout = spout;
    this.task_id = task_id;
    this.report_error = _report_error;

    ackerNum = JStormUtils.parseInt(storm_conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
    isDebug = JStormUtils.parseBoolean(storm_conf.get(Config.TOPOLOGY_DEBUG), false);

    random = new Random();
    random.setSeed(System.currentTimeMillis());

    String componentId = topology_context.getThisComponentId();
    emitTotalTimer =
        Metrics.registerTimer(
            JStormServerUtils.getName(componentId, task_id),
            MetricDef.EMIT_TIME,
            String.valueOf(task_id),
            Metrics.MetricType.TASK);
  }
Example #13
  @SuppressWarnings({"rawtypes", "unchecked"})
  public Integer mkTaskMaker(
      Map<Object, Object> stormConf,
      Map<String, ?> cidSpec,
      Map<Integer, String> rtn,
      Integer cnt) {
    if (cidSpec == null) {
      LOG.warn("Component map is empty");
      return cnt;
    }

    Set<?> entrySet = cidSpec.entrySet();
    for (Iterator<?> it = entrySet.iterator(); it.hasNext(); ) {
      Entry entry = (Entry) it.next();
      Object obj = entry.getValue();

      ComponentCommon common = null;
      if (obj instanceof Bolt) {
        common = ((Bolt) obj).get_common();

      } else if (obj instanceof SpoutSpec) {
        common = ((SpoutSpec) obj).get_common();

      } else if (obj instanceof StateSpoutSpec) {
        common = ((StateSpoutSpec) obj).get_common();
      }

      if (common == null) {
        throw new RuntimeException("No ComponentCommon of " + entry.getKey());
      }

      int declared = Thrift.parallelismHint(common);
      Integer parallelism = declared;
      // Map tmp = (Map) Utils_clj.from_json(common.get_json_conf());

      Map newStormConf = new HashMap(stormConf);
      // newStormConf.putAll(tmp);
      Integer maxParallelism =
          JStormUtils.parseInt(newStormConf.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM));
      if (maxParallelism != null) {
        parallelism = Math.min(maxParallelism, declared);
      }

      for (int i = 0; i < parallelism; i++) {
        cnt++;
        rtn.put(cnt, (String) entry.getKey());
      }
    }
    return cnt;
  }
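For example, a spout declared with parallelism 3 and a bolt with parallelism 2 (and no topology.max.task.parallelism cap), starting from cnt = 0, yields task ids 1 through 5 handed out sequentially across the two components; which component gets which ids depends on the map's iteration order.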
Example #14
  /**
   * Fields built/used here:
   *
   * <p>private ConcurrentHashMap<Integer, WorkerSlot> taskNodeport;
   * private HashMap<Integer, String> tasksToComponent;
   * private Map<String, List<Integer>> componentToSortedTasks;
   * private Map<String, Map<String, Fields>> componentToStreamToFields;
   * private Map<String, Object> defaultResources;
   * private Map<String, Object> userResources;
   * private Map<String, Object> executorData;
   * private Map registeredMetrics;
   *
   * @throws Exception
   */
  private void generateMaps() throws Exception {
    this.tasksToComponent = Cluster.topology_task_info(zkCluster, topologyId);
    LOG.info("Map<taskId, component>:" + tasksToComponent);

    this.componentToSortedTasks = JStormUtils.reverse_map(tasksToComponent);
    for (java.util.Map.Entry<String, List<Integer>> entry : componentToSortedTasks.entrySet()) {
      List<Integer> tasks = entry.getValue();

      Collections.sort(tasks);
    }

    this.defaultResources = new HashMap<String, Object>();
    this.userResources = new HashMap<String, Object>();
    this.executorData = new HashMap<String, Object>();
    this.registeredMetrics = new HashMap();
  }
Example #15
  @Override
  public void fail(Tuple input) {
    // if ackerNum == 0, we can just return
    if (ackerNum > 0) {
      pending_acks.remove(input);
      for (Entry<Long, Long> e : input.getMessageId().getAnchorsToIds().entrySet()) {
        UnanchoredSend.send(
            topologyContext,
            sendTargets,
            taskTransfer,
            Acker.ACKER_FAIL_STREAM_ID,
            JStormUtils.mk_list((Object) e.getKey()));
      }
    }

    task_stats.bolt_failed_tuple(input.getSourceComponent(), input.getSourceStreamId());
  }
  /**
   * Sort the task list; higher-priority tasks are assigned first.
   *
   * <p>1. Get the task list that needs to be assigned. 2. Sort it according to the configured
   * component weight.
   *
   * @param context
   * @return sorted list of task ids
   */
  protected List<Integer> sortAssignTasks(
      final DefaultTopologyAssignContext context, Set<Integer> needAssign) {
    List<Integer> ret = new ArrayList<Integer>();

    StormTopology rawTopology = context.getRawTopology();
    final Map<Integer, String> taskToComponent = context.getTaskToComponent();

    final Map<String, Integer> componentToWeight = computeComponentWeight(rawTopology, context);

    ret = JStormUtils.mk_list(needAssign);
    Collections.sort(
        ret,
        new Comparator<Integer>() {

          private int getWeight(int taskId) {
            String component = taskToComponent.get(taskId);
            if (component == null) {
              // this shouldn't occur
              return context.DEFAULT_WEIGHT;
            }

            Integer weight = componentToWeight.get(component);
            if (weight == null) {
              return context.DEFAULT_WEIGHT;
            } else {
              return weight;
            }
          }

          @Override
          public int compare(Integer first, Integer second) {
            int firstWeight = getWeight(first);
            int secondWeight = getWeight(second);

            if (firstWeight != secondWeight) {
              return (secondWeight - firstWeight);
            } else {
              return (second - first);
            }
          }
        });

    return ret;
  }
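The comparator orders tasks by descending component weight, breaking ties by descending task id, so tasks of heavier-weighted components are assigned first.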
Example #17
    byte[] getJSonFiles(String dir) throws Exception {
      Map<String, FileAttribute> fileMap = new HashMap<String, FileAttribute>();

      String path = logDir;
      if (dir != null) {
        path = path + File.separator + dir;
      }

      LOG.info("List dir " + path);

      File file = new File(path);

      String[] files = file.list();
      if (files == null) {
        // the directory may not exist or may not be readable
        files = new String[0];
      }

      for (String fileName : files) {
        String logFile = Joiner.on(File.separator).join(path, fileName);

        FileAttribute fileAttribute = new FileAttribute();
        fileAttribute.setFileName(fileName);

        File subFile = new File(logFile);

        Date modify = new Date(subFile.lastModified());
        fileAttribute.setModifyTime(TimeFormat.getSecond(modify));

        if (subFile.isFile()) {
          fileAttribute.setIsDir(String.valueOf(false));
          fileAttribute.setSize(String.valueOf(subFile.length()));

          fileMap.put(logFile, fileAttribute);
        } else if (subFile.isDirectory()) {
          fileAttribute.setIsDir(String.valueOf(true));
          fileAttribute.setSize(String.valueOf(4096));

          fileMap.put(logFile, fileAttribute);
        }
      }

      String fileJsonStr = JStormUtils.to_json(fileMap);
      return fileJsonStr.getBytes();
    }
Example #18
  public void mkdirs(CuratorFramework zk, String path) throws Exception {

    String npath = PathUtils.normalize_path(path);

    // the node is "/"
    if (npath.equals("/")) {
      return;
    }

    // the node already exists
    if (existsNode(zk, npath, false)) {
      return;
    }

    mkdirs(zk, PathUtils.parent_path(npath));
    try {
      createNode(zk, npath, JStormUtils.barr((byte) 7), org.apache.zookeeper.CreateMode.PERSISTENT);
    } catch (KeeperException e) {
      // this can happen when multiple clients do mkdirs for the same path at the same time
      LOG.warn("zookeeper mkdirs for path " + path, e);
    }
  }
Example #19
  public BoltCollector(
      int message_timeout_secs,
      ITaskReportErr report_error,
      TaskSendTargets _send_fn,
      Map _storm_conf,
      TaskTransfer _transfer_fn,
      TopologyContext _topology_context,
      Integer task_id,
      RotatingMap<Tuple, Long> tuple_start_times,
      CommonStatsRolling _task_stats) {

    this.rotateTime = 1000L * message_timeout_secs / (Acker.TIMEOUT_BUCKET_NUM - 1);
    this.reportError = report_error;
    this.sendTargets = _send_fn;
    this.storm_conf = _storm_conf;
    this.taskTransfer = _transfer_fn;
    this.topologyContext = _topology_context;
    this.task_id = task_id;
    this.task_stats = _task_stats;

    this.pending_acks = new RotatingMap<Tuple, Long>(Acker.TIMEOUT_BUCKET_NUM);
    // this.pending_acks = new TimeCacheMap<Tuple,
    // Long>(message_timeout_secs,
    // Acker.TIMEOUT_BUCKET_NUM);
    this.tuple_start_times = tuple_start_times;

    this.ackerNum = JStormUtils.parseInt(storm_conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));

    String componentId = topologyContext.getThisComponentId();
    timer =
        Metrics.registerTimer(
            JStormServerUtils.getName(componentId, task_id),
            MetricDef.EMIT_TIME,
            String.valueOf(task_id),
            Metrics.MetricType.TASK);
    random = new Random();
    random.setSeed(System.currentTimeMillis());
  }
Example #20
  public static void main(String[] args) throws Exception {

    LinearDRPCTopologyBuilder builder = construct();

    Config conf = new Config();
    conf.setNumWorkers(6);
    if (args.length != 0) {

      try {
        Map yamlConf = LoadConf.LoadYaml(args[0]);
        if (yamlConf != null) {
          conf.putAll(yamlConf);
        }
      } catch (Exception e) {
        System.out.println("Input " + args[0] + " isn't one yaml ");
      }

      StormSubmitter.submitTopology(TOPOLOGY_NAME, conf, builder.createRemoteTopology());
    } else {

      conf.setMaxTaskParallelism(3);
      LocalDRPC drpc = new LocalDRPC();
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology(TOPOLOGY_NAME, conf, builder.createLocalTopology(drpc));

      JStormUtils.sleepMs(50000);

      String[] urlsToTry =
          new String[] {"foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com"};
      for (String url : urlsToTry) {
        System.out.println("Reach of " + url + ": " + drpc.execute(TOPOLOGY_NAME, url));
      }

      cluster.shutdown();
      drpc.shutdown();
    }
  }
Example #21
  /**
   * Backup the topology's Assignment to ZK.
   *
   * <p>@@@ Question: do we need to do the backup operation every time?
   *
   * @param assignment
   * @param event
   */
  public void backupAssignment(Assignment assignment, TopologyAssignEvent event) {
    String topologyId = event.getTopologyId();
    String topologyName = event.getTopologyName();
    try {

      StormClusterState zkClusterState = nimbusData.getStormClusterState();
      // one little problem: tasks are fetched twice when assigning one topology
      HashMap<Integer, String> tasks = Cluster.topology_task_info(zkClusterState, topologyId);

      Map<String, List<Integer>> componentTasks = JStormUtils.reverse_map(tasks);

      for (Entry<String, List<Integer>> entry : componentTasks.entrySet()) {
        List<Integer> keys = entry.getValue();

        Collections.sort(keys);
      }

      AssignmentBak assignmentBak = new AssignmentBak(componentTasks, assignment);
      zkClusterState.backup_assignment(topologyName, assignmentBak);

    } catch (Exception e) {
      LOG.warn("Failed to backup " + topologyId + " assignment " + assignment, e);
    }
  }
  @Override
  public <T> Object execute(T... args) {
    boolean isSetTaskInfo = false;
    try {
      // args[0]: delay, args[1]: reassign_flag, args[2]: conf
      Boolean reassign = (Boolean) args[1];
      Map<Object, Object> conf = (Map<Object, Object>) args[2];
      if (conf != null) {
        boolean isConfUpdate = false;
        Map stormConf = data.getConf();

        // Update topology code
        Map topoConf = StormConfig.read_nimbus_topology_conf(stormConf, topologyid);
        StormTopology rawOldTopology = StormConfig.read_nimbus_topology_code(stormConf, topologyid);
        StormTopology rawNewTopology = NimbusUtils.normalizeTopology(conf, rawOldTopology, true);
        StormTopology sysOldTopology = rawOldTopology.deepCopy();
        StormTopology sysNewTopology = rawNewTopology.deepCopy();
        if (conf.get(Config.TOPOLOGY_ACKER_EXECUTORS) != null) {
          Common.add_acker(topoConf, sysOldTopology);
          Common.add_acker(conf, sysNewTopology);
          int ackerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
          int oldAckerNum = JStormUtils.parseInt(topoConf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
          LOG.info("Update acker from oldAckerNum=" + oldAckerNum + " to ackerNum=" + ackerNum);
          topoConf.put(Config.TOPOLOGY_ACKER_EXECUTORS, ackerNum);
          isConfUpdate = true;
        }

        // If scale-out, setup task info for new added tasks
        setTaskInfo(sysOldTopology, sysNewTopology);
        isSetTaskInfo = true;

        // If everything is OK, write topology code into disk
        StormConfig.write_nimbus_topology_code(
            stormConf, topologyid, Utils.serialize(rawNewTopology));

        // Update topology conf if worker num has been updated
        Set<Object> keys = conf.keySet();
        Integer workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS));
        if (workerNum != null) {
          Integer oldWorkerNum = JStormUtils.parseInt(topoConf.get(Config.TOPOLOGY_WORKERS));
          topoConf.put(Config.TOPOLOGY_WORKERS, workerNum);
          isConfUpdate = true;

          LOG.info("Update worker num from " + oldWorkerNum + " to " + workerNum);
        }

        if (keys.contains(Config.ISOLATION_SCHEDULER_MACHINES)) {
          topoConf.put(
              Config.ISOLATION_SCHEDULER_MACHINES, conf.get(Config.ISOLATION_SCHEDULER_MACHINES));
        }

        if (isConfUpdate) {
          StormConfig.write_nimbus_topology_conf(stormConf, topologyid, topoConf);
        }
      }

      TopologyAssignEvent event = new TopologyAssignEvent();

      event.setTopologyId(topologyid);
      event.setScratch(true);
      event.setOldStatus(oldStatus);
      event.setReassign(reassign);
      if (conf != null) event.setScaleTopology(true);
      TopologyAssign.push(event);
      event.waitFinish();
    } catch (Exception e) {
      LOG.error("do-rebalance error!", e);
      // Rollback the changes on ZK
      if (isSetTaskInfo) {
        try {
          StormClusterState clusterState = data.getStormClusterState();
          clusterState.remove_task(topologyid, newTasks);
        } catch (Exception e1) {
          LOG.error("Failed to rollback the changes on ZK for task-" + newTasks, e);
        }
      }
    }

    DelayStatusTransitionCallback delayCallback =
        new DelayStatusTransitionCallback(
            data, topologyid, oldStatus, StatusType.rebalancing, StatusType.done_rebalance);
    return delayCallback.execute();
  }
Example #23
  /**
   * Submit one topology
   *
   * @param topologyname String: topology name
   * @param uploadedJarLocation String: path of the already uploaded jar
   * @param jsonConf String: all topology configuration serialized to JSON
   * @param topology StormTopology: topology object
   */
  @SuppressWarnings("unchecked")
  @Override
  public void submitTopologyWithOpts(
      String topologyname,
      String uploadedJarLocation,
      String jsonConf,
      StormTopology topology,
      SubmitOptions options)
      throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException {
    LOG.info("Receive " + topologyname + ", uploadedJarLocation:" + uploadedJarLocation);
    // @@@ Move topology name validation to client code
    try {
      checkTopologyActive(data, topologyname, false);
    } catch (AlreadyAliveException e) {
      LOG.info(topologyname + " is already exist ");
      throw e;
    } catch (Exception e) {
      LOG.info("Failed to check whether topology is alive or not", e);
      throw new TException(e);
    }

    int counter = data.getSubmittedCount().incrementAndGet();
    String topologyId = topologyname + "-" + counter + "-" + TimeUtils.current_time_secs();

    Map<Object, Object> serializedConf = (Map<Object, Object>) JStormUtils.from_json(jsonConf);
    if (serializedConf == null) {
      LOG.warn("Failed to serialized Configuration");
      throw new InvalidTopologyException("Failed to serilaze topology configuration");
    }

    serializedConf.put(Config.TOPOLOGY_ID, topologyId);
    serializedConf.put(Config.TOPOLOGY_NAME, topologyname);

    try {
      Map<Object, Object> stormConf;

      stormConf = NimbusUtils.normalizeConf(conf, serializedConf, topology);

      Map<Object, Object> totalStormConf = new HashMap<Object, Object>(conf);
      totalStormConf.putAll(stormConf);

      StormTopology normalizedTopology = NimbusUtils.normalizeTopology(stormConf, topology);

      // this validates the structure of the topology
      Common.validate_basic(normalizedTopology, totalStormConf, topologyId);
      // don't need generate real topology, so skip Common.system_topology
      // Common.system_topology(totalStormConf, topology);

      StormClusterState stormClusterState = data.getStormClusterState();

      // create /local-dir/nimbus/topologyId/xxxx files
      setupStormCode(conf, topologyId, uploadedJarLocation, stormConf, normalizedTopology);

      // generate TaskInfo for every bolt or spout in ZK
      // /ZK/tasks/topologyId/xxx
      setupZkTaskInfo(conf, topologyId, stormClusterState);

      // make assignments for a topology
      TopologyAssignEvent assignEvent = new TopologyAssignEvent();
      assignEvent.setTopologyId(topologyId);
      assignEvent.setScratch(false);
      assignEvent.setTopologyName(topologyname);
      assignEvent.setOldStatus(
          Thrift.topologyInitialStatusToStormStatus(options.get_initial_status()));

      TopologyAssign.push(assignEvent);
      LOG.info("Submit for " + topologyname + " with conf " + serializedConf);

      boolean isSuccess = assignEvent.waitFinish();
      if (isSuccess == true) {
        LOG.info("Finish submit for " + topologyname);
      } else {
        throw new FailedAssignTopologyException(assignEvent.getErrorMsg());
      }

    } catch (FailedAssignTopologyException e) {
      StringBuilder sb = new StringBuilder();
      sb.append("Fail to sumbit topology, Root cause:");
      if (e.getMessage() == null) {
        sb.append("submit timeout");
      } else {
        sb.append(e.getMessage());
      }

      sb.append("\n\n");
      sb.append("topologyId:" + topologyId);
      sb.append(", uploadedJarLocation:" + uploadedJarLocation + "\n");
      LOG.error(sb.toString(), e);
      throw new TopologyAssignException(sb.toString());
    } catch (InvalidParameterException e) {
      StringBuilder sb = new StringBuilder();
      sb.append("Fail to sumbit topology ");
      sb.append(e.getMessage());
      sb.append(", cause:" + e.getCause());
      sb.append("\n\n");
      sb.append("topologyId:" + topologyId);
      sb.append(", uploadedJarLocation:" + uploadedJarLocation + "\n");
      LOG.error(sb.toString(), e);
      throw new InvalidParameterException(sb.toString());
    } catch (Throwable e) {
      StringBuilder sb = new StringBuilder();
      sb.append("Fail to sumbit topology ");
      sb.append(e.getMessage());
      sb.append(", cause:" + e.getCause());
      sb.append("\n\n");
      sb.append("topologyId:" + topologyId);
      sb.append(", uploadedJarLocation:" + uploadedJarLocation + "\n");
      LOG.error(sb.toString(), e);
      throw new TopologyAssignException(sb.toString());
    }
  }
 public static int getCpuSlotPerWorker(Map conf) {
   int slot = JStormUtils.parseInt(conf.get(CPU_SLOT_PER_WORKER), 1);
   return slot > 0 ? slot : 1;
 }
Example #25
  public List<Integer> grouper(List<Object> values) {
    int index = randomrange.nextInt();

    return JStormUtils.mk_list(index);
  }
  @Override
  public Map<Integer, ResourceAssignment> assignTasks(TopologyAssignContext context)
      throws FailedAssignTopologyException {

    int assignType = context.getAssignType();
    if (TopologyAssignContext.isAssignTypeValid(assignType) == false) {
      throw new FailedAssignTopologyException("Invalide Assign Type " + assignType);
    }

    DefaultTopologyAssignContext defaultContext = new DefaultTopologyAssignContext(context);
    if (assignType == TopologyAssignContext.ASSIGN_TYPE_REBALANCE) {
      freeUsed(defaultContext);
    }
    LOG.info("Dead tasks:" + defaultContext.getDeadTaskIds());
    LOG.info("Unstopped tasks:" + defaultContext.getUnstoppedTaskIds());

    Set<Integer> needAssignTasks = getNeedAssignTasks(defaultContext);

    Map<Integer, ResourceAssignment> keepAssigns = getKeepAssign(defaultContext, needAssignTasks);

    // use a TreeMap so tasks stay in ascending id order
    Map<Integer, ResourceAssignment> ret = new TreeMap<Integer, ResourceAssignment>();
    ret.putAll(keepAssigns);
    ret.putAll(defaultContext.getUnstoppedAssignments());

    Map<WorkerSlot, List<Integer>> keepAssignWorkers = Assignment.getWorkerTasks(keepAssigns);

    int allocWorkerNum =
        defaultContext.getTotalWorkerNum()
            - defaultContext.getUnstoppedWorkerNum()
            - keepAssignWorkers.size();
    if (allocWorkerNum <= 0) {
      LOG.warn(
          "No need to assign workers, all workers are fine " + defaultContext.toDetailString());
      throw new FailedAssignTopologyException("No need to assign workers, all workers are fine");
    }

    Set<String> outputConfigComponents = new HashSet<String>();

    Map<ComponentAssignType, Pair<Set<Integer>, IPreassignTask>> typeHandler =
        registerPreAssignHandler(defaultContext, needAssignTasks);

    Map<Integer, ResourceAssignment> newAssigns = new HashMap<Integer, ResourceAssignment>();
    Set<String> usedSupervisorIds = new HashSet<String>();
    List<Integer> lastFailed = new ArrayList<Integer>();

    for (Entry<ComponentAssignType, Pair<Set<Integer>, IPreassignTask>> entry :
        typeHandler.entrySet()) {
      ComponentAssignType type = entry.getKey();

      Set<Integer> tasks = entry.getValue().getFirst();
      IPreassignTask handler = entry.getValue().getSecond();

      tasks.addAll(lastFailed);
      lastFailed.clear();

      List<Integer> sortedTasks = sortAssignTasks(defaultContext, tasks);

      StormTopology sysTopology = defaultContext.getSysTopology();

      for (Integer task : sortedTasks) {
        Set<String> canUsedSupervisorIds =
            getCanUsedSupervisors(defaultContext, usedSupervisorIds, allocWorkerNum);

        String componentName = defaultContext.getTaskToComponent().get(task);
        ComponentCommon componentCommon =
            ThriftTopologyUtils.getComponentCommon(sysTopology, componentName);

        Map componentMap = (Map) JStormUtils.from_json(componentCommon.get_json_conf());
        if (componentMap == null) {
          componentMap = Maps.newHashMap();
        }

        if (outputConfigComponents.contains(componentName) == false) {
          LOG.info("Component map of " + componentName + "\n" + componentMap);
          outputConfigComponents.add(componentName);
        }

        ResourceAssignment preAssignment =
            handler.preAssign(
                task,
                defaultContext,
                componentMap,
                componentName,
                canUsedSupervisorIds,
                ret,
                newAssigns);
        if (preAssignment == null) {
          // pre-assign failed
          lastFailed.add(task);
        } else {
          // pre-assign succeeded
          SupervisorInfo supervisorInfo =
              defaultContext.getCluster().get(preAssignment.getSupervisorId());
          LOG.info("Task " + task + " has been assigned to " + supervisorInfo.getHostName());
          newAssigns.put(task, preAssignment);
          ret.put(task, preAssignment);
          usedSupervisorIds.add(preAssignment.getSupervisorId());
        }
      }
    }

    if (lastFailed.isEmpty() == false) {
      throw new FailedAssignTopologyException("Failed to assign tasks " + lastFailed);
    }

    // hardcoded post-assign handler for now
    IPostAssignTask postAssignHandler = new PostAssignTaskPort();
    postAssignHandler.postAssign(defaultContext, newAssigns, allocWorkerNum);

    LOG.info("Keep Alive slots:" + keepAssigns);
    LOG.info("Unstopped slots:" + defaultContext.getUnstoppedAssignments());
    LOG.info("New assign slots:" + newAssigns);

    return ret;
  }
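At a high level the flow is: validate the assign type, compute the tasks that need (re)assignment, keep the still-valid assignments, pre-assign the remaining tasks in weight order (tasks that fail in one pass are retried by the next handler), and finally run the post-assign step to pick worker ports.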
 public static boolean isUseOldAssignment(Map conf) {
   return JStormUtils.parseBoolean(conf.get(USE_OLD_ASSIGNMENT), false);
 }
Example #28
  public BoltExecutors(
      IBolt _bolt,
      TaskTransfer _transfer_fn,
      Map<Integer, DisruptorQueue> innerTaskTransfer,
      Map storm_conf,
      DisruptorQueue deserializeQueue,
      TaskSendTargets _send_fn,
      TaskStatus taskStatus,
      TopologyContext sysTopologyCxt,
      TopologyContext userTopologyCxt,
      CommonStatsRolling _task_stats,
      ITaskReportErr _report_error) {

    super(
        _transfer_fn,
        storm_conf,
        deserializeQueue,
        innerTaskTransfer,
        sysTopologyCxt,
        userTopologyCxt,
        _task_stats,
        taskStatus,
        _report_error);

    this.bolt = _bolt;

    // create RotatingMap for tuple start times

    this.tuple_start_times = new RotatingMap<Tuple, Long>(Acker.TIMEOUT_BUCKET_NUM);

    this.ackerNum = JStormUtils.parseInt(storm_conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));

    // don't use TimeoutQueue for recv_tuple_queue,
    // then other place should check the queue size
    // TimeCacheQueue.DefaultExpiredCallback<Tuple> logExpireCb = new
    // TimeCacheQueue.DefaultExpiredCallback<Tuple>(
    // idStr);
    // this.recv_tuple_queue = new
    // TimeCacheQueue<Tuple>(message_timeout_secs,
    // TimeCacheQueue.DEFAULT_NUM_BUCKETS, logExpireCb);

    // create BoltCollector
    IOutputCollector output_collector =
        new BoltCollector(
            message_timeout_secs,
            _report_error,
            _send_fn,
            storm_conf,
            _transfer_fn,
            sysTopologyCxt,
            taskId,
            tuple_start_times,
            _task_stats);

    outputCollector = new OutputCollector(output_collector);

    boltExeTimer =
        Metrics.registerTimer(
            idStr, MetricDef.EXECUTE_TIME, String.valueOf(taskId), Metrics.MetricType.TASK);

    Object tickFrequence = storm_conf.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
    if (tickFrequence != null) {
      Integer frequence = JStormUtils.parseInt(tickFrequence);
      TickTupleTrigger tickTupleTrigger =
          new TickTupleTrigger(
              sysTopologyCxt, frequence, idStr + Constants.SYSTEM_TICK_STREAM_ID, exeQueue);
      tickTupleTrigger.register();
    }

    try {
      // do prepare
      WorkerClassLoader.switchThreadContext();

      //			Method method = IBolt.class.getMethod("prepare", new Class[] {Map.class,
      // TopologyContext.class,
      //					OutputCollector.class});
      //			method.invoke(bolt, new Object[] {storm_conf, userTopologyCxt, outputCollector});
      bolt.prepare(storm_conf, userTopologyCtx, outputCollector);

    } catch (Throwable e) {
      error = e;
      LOG.error("bolt prepare error ", e);
      report_error.report(e);
    } finally {
      WorkerClassLoader.restoreThreadContext();
    }

    LOG.info("Successfully create BoltExecutors " + idStr);
  }
Example #29
    public LogHandler() {

      logDir = JStormUtils.getLogDir();

      LOG.info("logview logDir=" + logDir); // +++
    }
 public static long getMemSizePerWorker(Map conf) {
   long size = JStormUtils.parseLong(conf.get(MEMSIZE_PER_WORKER), JStormUtils.SIZE_1_G * 2);
   return size > 0 ? size : JStormUtils.SIZE_1_G * 2;
 }