@Override
 public void read(org.apache.thrift.protocol.TProtocol prot, ClusterWorkerHeartbeat struct)
     throws org.apache.thrift.TException {
   TTupleProtocol iprot = (TTupleProtocol) prot;
   struct.storm_id = iprot.readString();
   struct.set_storm_id_isSet(true);
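    // Tuple protocol: only the element count precedes the map, so the key and
    // value types (both STRUCT) are hard-coded here instead of read from the wire.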
   {
     org.apache.thrift.protocol.TMap _map614 =
         new org.apache.thrift.protocol.TMap(
             org.apache.thrift.protocol.TType.STRUCT,
             org.apache.thrift.protocol.TType.STRUCT,
             iprot.readI32());
     struct.executor_stats = new HashMap<ExecutorInfo, ExecutorStats>(2 * _map614.size);
     ExecutorInfo _key615;
     ExecutorStats _val616;
     for (int _i617 = 0; _i617 < _map614.size; ++_i617) {
       _key615 = new ExecutorInfo();
       _key615.read(iprot);
       _val616 = new ExecutorStats();
       _val616.read(iprot);
       struct.executor_stats.put(_key615, _val616);
     }
   }
   struct.set_executor_stats_isSet(true);
   struct.time_secs = iprot.readI32();
   struct.set_time_secs_isSet(true);
   struct.uptime_secs = iprot.readI32();
   struct.set_uptime_secs_isSet(true);
 }
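The tuple-scheme read above has a matching write half. The sketch below is reconstructed from the read logic rather than copied from the generated source (it assumes java.util.Map is imported), so the real generated class may differ in small details:

 @Override
 public void write(org.apache.thrift.protocol.TProtocol prot, ClusterWorkerHeartbeat struct)
     throws org.apache.thrift.TException {
   TTupleProtocol oprot = (TTupleProtocol) prot;
   oprot.writeString(struct.storm_id);
   {
     // Only the element count goes on the wire; the read side supplies the types.
     oprot.writeI32(struct.executor_stats.size());
     for (Map.Entry<ExecutorInfo, ExecutorStats> entry : struct.executor_stats.entrySet()) {
       entry.getKey().write(oprot);
       entry.getValue().write(oprot);
     }
   }
   oprot.writeI32(struct.time_secs);
   oprot.writeI32(struct.uptime_secs);
 }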
Example #2
  public static void printBoltStat(String topologyId) {
    try {
      Client client = ThriftClient.getClient();
      TopologyInfo topologyInfo = client.getTopologyInfo(topologyId);

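      // Walk every executor in the topology; only bolt executors are printed below.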
      Iterator<ExecutorSummary> executorSummaryIterator = topologyInfo.get_executors_iterator();
      while (executorSummaryIterator.hasNext()) {
        ExecutorSummary executorSummary = executorSummaryIterator.next();
        ExecutorStats executorStats = executorSummary.get_stats();

        if (executorStats != null) {
          ExecutorSpecificStats executorSpecificStats = executorStats.get_specific();
          String componentId = executorSummary.get_component_id();

          if (executorSpecificStats.is_set_bolt()) {
            printLine();
            BoltStats boltStats = executorSpecificStats.get_bolt();
            System.out.println("component id of bolt : " + componentId);
            System.out.println(
                "transferred : " + getAllTimeStat(executorStats.get_transferred(), ALL_TIME));
            System.out.println(
                "total tuples emitted : " + getAllTimeStat(executorStats.get_emitted(), ALL_TIME));
            System.out.println("acked : " + getBoltStats(boltStats.get_acked(), ALL_TIME));
            System.out.println("failed : " + getBoltStats(boltStats.get_failed(), ALL_TIME));
            System.out.println("executed : " + getBoltStats(boltStats.get_executed(), ALL_TIME));
            printLine();
          }
        }
      }
    } catch (Exception ex) {
      throw new RuntimeException("Error occurred while fetching the bolt information!", ex);
    }
  }
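printLine(), ALL_TIME, getAllTimeStat(), and getBoltStats() belong to the surrounding class and are not shown above. A plausible sketch of the two aggregation helpers, assuming Storm's usual window-then-stream map layout (windows such as ":all-time" map to per-stream counts) and the generated GlobalStreamId key type used by BoltStats:

  private static final String ALL_TIME = ":all-time";

  // Sums the per-stream counts for one time window of an ExecutorStats map
  // (window -> stream -> count).
  private static long getAllTimeStat(Map<String, Map<String, Long>> stat, String window) {
    long total = 0;
    Map<String, Long> byStream = stat.get(window);
    if (byStream != null) {
      for (long count : byStream.values()) {
        total += count;
      }
    }
    return total;
  }

  // Same aggregation for BoltStats maps, which are keyed by GlobalStreamId
  // rather than by a plain stream name.
  private static long getBoltStats(Map<String, Map<GlobalStreamId, Long>> stat, String window) {
    long total = 0;
    Map<GlobalStreamId, Long> byStream = stat.get(window);
    if (byStream != null) {
      for (long count : byStream.values()) {
        total += count;
      }
    }
    return total;
  }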
 public void read(org.apache.thrift.protocol.TProtocol iprot, ClusterWorkerHeartbeat struct)
     throws org.apache.thrift.TException {
   org.apache.thrift.protocol.TField schemeField;
   iprot.readStructBegin();
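    // Standard protocol: fields arrive tagged with an id and a type; read until
    // STOP and skip anything unknown or mistyped for forward compatibility.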
   while (true) {
     schemeField = iprot.readFieldBegin();
     if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
       break;
     }
     switch (schemeField.id) {
       case 1: // STORM_ID
         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
           struct.storm_id = iprot.readString();
           struct.set_storm_id_isSet(true);
         } else {
           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
         break;
       case 2: // EXECUTOR_STATS
         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
           {
             org.apache.thrift.protocol.TMap _map608 = iprot.readMapBegin();
             struct.executor_stats = new HashMap<ExecutorInfo, ExecutorStats>(2 * _map608.size);
             ExecutorInfo _key609;
             ExecutorStats _val610;
             for (int _i611 = 0; _i611 < _map608.size; ++_i611) {
               _key609 = new ExecutorInfo();
               _key609.read(iprot);
               _val610 = new ExecutorStats();
               _val610.read(iprot);
               struct.executor_stats.put(_key609, _val610);
             }
             iprot.readMapEnd();
           }
           struct.set_executor_stats_isSet(true);
         } else {
           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
         break;
       case 3: // TIME_SECS
         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
           struct.time_secs = iprot.readI32();
           struct.set_time_secs_isSet(true);
         } else {
           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
         break;
       case 4: // UPTIME_SECS
         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
           struct.uptime_secs = iprot.readI32();
           struct.set_uptime_secs_isSet(true);
         } else {
           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
         break;
       default:
         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
     }
     iprot.readFieldEnd();
   }
   iprot.readStructEnd();
   struct.validate();
 }
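This standard-scheme read() is normally driven indirectly through Thrift's serialization utilities rather than called by hand. A minimal round-trip sketch, assuming the heartbeat class from Storm's generated package is on the classpath (the HeartbeatRoundTrip wrapper name is made up for illustration):

import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TBinaryProtocol;

public final class HeartbeatRoundTrip {
  // Serializing and then deserializing ends up invoking the field-by-field
  // read() shown above on the freshly created copy.
  public static ClusterWorkerHeartbeat roundTrip(ClusterWorkerHeartbeat hb) throws TException {
    byte[] bytes = new TSerializer(new TBinaryProtocol.Factory()).serialize(hb);
    ClusterWorkerHeartbeat copy = new ClusterWorkerHeartbeat();
    new TDeserializer(new TBinaryProtocol.Factory()).deserialize(copy, bytes);
    return copy;
  }
}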
  public boolean metrics(Client client, long now, MetricsState state, String message)
      throws Exception {
    ClusterSummary summary = client.getClusterInfo();
    long time = now - state.lastTime;
    state.lastTime = now;
    int numSupervisors = summary.get_supervisors_size();
    int totalSlots = 0;
    int totalUsedSlots = 0;

    //////////
    // String namaSupervisor = "";
    for (SupervisorSummary sup : summary.get_supervisors()) {
      totalSlots += sup.get_num_workers();
      totalUsedSlots += sup.get_num_used_workers();
      // namaSupervisor = namaSupervisor + sup.get_host() + ",";
    }
    // System.out.println(namaSupervisor);

    int slotsUsedDiff = totalUsedSlots - state.slotsUsed;
    state.slotsUsed = totalUsedSlots;

    int numTopologies = summary.get_topologies_size();
    long totalTransferred = 0;
    int totalExecutors = 0;
    int executorsWithMetrics = 0;
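     // For each topology, count executors and sum the all-time counts reported
     // on the default stream by executors that already expose metrics.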
    for (TopologySummary ts : summary.get_topologies()) {
      String id = ts.get_id();
      TopologyInfo info = client.getTopologyInfo(id);

      //// SOE Addition
      PerftestWriter.print(summary, info, new HashMap<String, Long>());
      ////

      for (ExecutorSummary es : info.get_executors()) {
        ExecutorStats stats = es.get_stats();
        totalExecutors++;
        if (stats != null) {
          Map<String, Map<String, Long>> transferred = stats.get_emitted(); /* .get_transferred();*/
          if (transferred != null) {
            Map<String, Long> e2 = transferred.get(":all-time");
            if (e2 != null) {
              executorsWithMetrics++;
              // The SOL messages are always on the default stream, so just count those
              Long dflt = e2.get("default");
              if (dflt != null) {
                totalTransferred += dflt;
              }
            }
          }
        }
      }
    }
    // long transferredDiff = totalTransferred - state.transferred;
    state.transferred = totalTransferred;
    // double throughput = (transferredDiff == 0 || time == 0) ? 0.0 : (transferredDiff *
    // size)/(1024.0 * 1024.0)/(time/1000.0);
    // System.out.println(message+"\t"+numTopologies+"\t"+totalSlots+"\t"+totalUsedSlots+"\t"+totalExecutors+"\t"+executorsWithMetrics+"\t"+now+"\t"+time+"\t"+transferredDiff+"\t"+throughput);
    System.out.println(
        message
            + ","
            + totalSlots
            + ","
            + totalUsedSlots
            + ","
            + totalExecutors
            + ","
            + executorsWithMetrics
            + ","
            + time
            + ",NOLIMIT");
    if ("WAITING".equals(message)) {
      // System.err.println(" !("+totalUsedSlots+" > 0 && "+slotsUsedDiff+" == 0 &&
      // "+totalExecutors+" > 0 && "+executorsWithMetrics+" >= "+totalExecutors+")");
    }
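     // Still waiting (return true) until slots are in use and stable and every
     // executor reports metrics.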
    return !(totalUsedSlots > 0
        && slotsUsedDiff == 0
        && totalExecutors > 0
        && executorsWithMetrics >= totalExecutors);
  }
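MetricsState is not defined in this snippet. Judging from the fields read and written above (lastTime, slotsUsed, transferred), it is presumably a small mutable holder along these lines (a reconstruction, not the original class):

 public class MetricsState {
   long lastTime = 0;    // timestamp of the previous metrics() call
   int slotsUsed = 0;    // used worker slots observed at the previous call
   long transferred = 0; // cumulative all-time count recorded at the previous call
 }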