Example #1
    /**
     * Gets the list of pages in memory that have been changed and hence need to be written as
     * part of the flush operation that is being issued.
     *
     * @param ledgerId Ledger id
     * @return list of first-entry keys of the dirty in-memory pages for this ledger, or
     *         {@code null} if the ledger has no pages in memory.
     */
    private LinkedList<Long> getFirstEntryListToBeFlushed(long ledgerId) {
      ConcurrentMap<Long, LedgerEntryPage> pageMap = pages.get(ledgerId);
      if (pageMap == null || pageMap.isEmpty()) {
        return null;
      }

      LinkedList<Long> firstEntryList = new LinkedList<>();
      for (Map.Entry<Long, LedgerEntryPage> entry : pageMap.entrySet()) {
        LedgerEntryPage lep = entry.getValue();
        if (lep.isClean()) {
          if (!lep.inUse()) {
            addToCleanPagesList(lep);
          }
          if (LOG.isTraceEnabled()) {
            LOG.trace("Page is clean " + lep);
          }
        } else {
          firstEntryList.add(lep.getFirstEntry());
        }
      }
      return firstEntryList;
    }
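
Callers typically use the returned first-entry keys to look the dirty pages back up and write them out as part of the flush. A minimal sketch of such a caller, assuming it lives in the same class; getLedgerEntryPage and flushPage are hypothetical helpers introduced for illustration, not the actual BookKeeper API:

    private void flushDirtyPages(long ledgerId) throws IOException {
      LinkedList<Long> firstEntries = getFirstEntryListToBeFlushed(ledgerId);
      if (firstEntries == null || firstEntries.isEmpty()) {
        return; // nothing dirty for this ledger
      }
      for (Long firstEntry : firstEntries) {
        // hypothetical lookup of the page keyed by (ledgerId, firstEntry)
        LedgerEntryPage lep = getLedgerEntryPage(ledgerId, firstEntry);
        if (lep != null && !lep.isClean()) {
          flushPage(lep); // hypothetical write of the page to the index file
        }
      }
    }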
Example #2
  public TopologyMetric mergeMetrics() {
    long start = System.currentTimeMillis();

    if (getMemCache().size() == 0) {
      // LOG.info("topology:{}, metric size is 0, skip...", topologyId);
      return null;
    }
    if (isMerging()) {
      LOG.info("topology {} is already merging, skip...", topologyId);
      return null;
    }

    setMerging(true);

    try {
      Map<String, MetricInfo> workerMetricMap = this.memCache;
      // reset mem cache
      this.memCache = new ConcurrentHashMap<>();

      MetricInfo topologyMetrics = MetricUtils.mkMetricInfo();
      MetricInfo componentMetrics = MetricUtils.mkMetricInfo();
      MetricInfo taskMetrics = MetricUtils.mkMetricInfo();
      MetricInfo streamMetrics = MetricUtils.mkMetricInfo();
      MetricInfo workerMetrics = MetricUtils.mkMetricInfo();
      MetricInfo nettyMetrics = MetricUtils.mkMetricInfo();
      TopologyMetric tpMetric =
          new TopologyMetric(
              topologyMetrics,
              componentMetrics,
              workerMetrics,
              taskMetrics,
              streamMetrics,
              nettyMetrics);

      // metric name => worker count
      Map<String, Integer> metricNameCounters = new HashMap<>();

      // special-case histograms & timers: we merge the points to compute new snapshot data.
      Map<String, Map<Integer, Histogram>> histograms = new HashMap<>();
      Map<String, Map<Integer, Timer>> timers = new HashMap<>();

      // iterate metrics of all workers within the same topology
      for (Map.Entry<String, MetricInfo> metricEntry : workerMetricMap.entrySet()) {
        MetricInfo metricInfo = metricEntry.getValue();

        // merge counters: add old and new values. Note that we only add incoming new metrics
        // and overwrite existing data; the same applies to all merges below.
        Map<String, Map<Integer, MetricSnapshot>> metrics = metricInfo.get_metrics();
        for (Map.Entry<String, Map<Integer, MetricSnapshot>> metric : metrics.entrySet()) {
          String metricName = metric.getKey();
          Map<Integer, MetricSnapshot> data = metric.getValue();
          MetaType metaType = MetricUtils.metaType(metricName);

          MetricType metricType = MetricUtils.metricType(metricName);
          if (metricType == MetricType.COUNTER) {
            mergeCounters(tpMetric, metaType, metricName, data);
          } else if (metricType == MetricType.GAUGE) {
            mergeGauges(tpMetric, metaType, metricName, data);
          } else if (metricType == MetricType.METER) {
            mergeMeters(
                getMetricInfoByType(tpMetric, metaType), metricName, data, metricNameCounters);
          } else if (metricType == MetricType.HISTOGRAM) {
            mergeHistograms(
                getMetricInfoByType(tpMetric, metaType),
                metricName,
                data,
                metricNameCounters,
                histograms);
          } else if (metricType == MetricType.TIMER) {
            mergeTimers(
                getMetricInfoByType(tpMetric, metaType),
                metricName,
                data,
                metricNameCounters,
                timers);
          }
        }
      }
      adjustHistogramTimerMetrics(tpMetric, metricNameCounters, histograms, timers);
      // for counters, we only report delta data each time, so it needs to be summed with the old data
      // adjustCounterMetrics(tpMetric, oldTpMetric);

      LOG.info(
          "merge topology metrics:{}, cost:{}", topologyId, System.currentTimeMillis() - start);
      // debug logs
      // MetricUtils.printMetricWinSize(componentMetrics);

      return tpMetric;
    } finally {
      setMerging(false);
    }
  }
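
The comments above note that counters are merged by adding old and new values per time window. A simplified, self-contained sketch of that per-window summation, using plain longs instead of JStorm's MetricSnapshot structs; it illustrates the policy only and is not the actual mergeCounters implementation:

  import java.util.HashMap;
  import java.util.Map;
  import java.util.TreeMap;

  public class CounterMergeSketch {
    // Merge one worker's per-window counter values into the topology-level map.
    // Windows present in both maps are summed; windows only the worker reports are copied.
    static void mergeCounterWindows(Map<Integer, Long> target, Map<Integer, Long> incoming) {
      for (Map.Entry<Integer, Long> e : incoming.entrySet()) {
        target.merge(e.getKey(), e.getValue(), Long::sum);
      }
    }

    public static void main(String[] args) {
      Map<Integer, Long> topology = new TreeMap<>();
      topology.put(60, 10L);  // 60s window already holds 10 from another worker

      Map<Integer, Long> worker = new HashMap<>();
      worker.put(60, 5L);     // same window: values are added
      worker.put(600, 7L);    // new window: value is copied

      mergeCounterWindows(topology, worker);
      System.out.println(topology); // prints {60=15, 600=7}
    }
  }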