  private void performWrites() {
    final MeterInternalCallContext context = new MeterInternalCallContext();

    // This is the atomic operation: bulk insert the new aggregated TimelineChunk objects, and
    // delete or invalidate the ones that were aggregated.  This should be very fast.
    final long startWriteTime = System.currentTimeMillis();
    aggregatorSqlDao.begin();
    timelineDao.bulkInsertTimelineChunks(chunksToWrite, context);
    if (config.getDeleteAggregatedChunks()) {
      aggregatorSqlDao.deleteTimelineChunks(chunkIdsToInvalidateOrDelete, context);
    } else {
      aggregatorSqlDao.makeTimelineChunksInvalid(chunkIdsToInvalidateOrDelete, context);
    }
    aggregatorSqlDao.commit();
    msWritingDb.addAndGet(System.currentTimeMillis() - startWriteTime);

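    // Record what was written and reset the per-batch collections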
    timelineChunksWritten.addAndGet(chunksToWrite.size());
    timelineChunksInvalidatedOrDeleted.addAndGet(chunkIdsToInvalidateOrDelete.size());
    chunksToWrite.clear();
    chunkIdsToInvalidateOrDelete.clear();
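    // Sleep between batches if configured, tracking the time spent sleeping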
    final long sleepMs = config.getAggregationSleepBetweenBatches().getMillis();
    if (sleepMs > 0) {
      final long timeBeforeSleep = System.currentTimeMillis();
      try {
        Thread.sleep(sleepMs);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      msSpentSleeping.addAndGet(System.currentTimeMillis() - timeBeforeSleep);
    }
    timelineChunkBatchesProcessed.incrementAndGet();
  }

  /** Aggregates candidate timelines, one aggregation level at a time. */
  public void getAndProcessTimelineAggregationCandidates() {
    if (!isAggregating.compareAndSet(false, true)) {
      log.info("Asked to aggregate, but we're already aggregating!");
      return;
    }
    log.debug("Starting aggregation");

    aggregationRuns.incrementAndGet();
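    // getChunksToAggregate() is a comma-separated list of chunk counts, one entry per
    // aggregation level; when there are fewer entries than levels, the last entry is reused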
    final String[] chunkCountsToAggregate = config.getChunksToAggregate().split(",");
    for (int aggregationLevel = 0;
        aggregationLevel < config.getMaxAggregationLevel();
        aggregationLevel++) {
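      // Snapshot the counters so per-level deltas can be logged once this level completes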
      final long startingAggregatesCreated = aggregatesCreated.get();
      final Map<String, Long> initialCounters = captureAggregatorCounters();
      final int chunkCountIndex =
          aggregationLevel >= chunkCountsToAggregate.length
              ? chunkCountsToAggregate.length - 1
              : aggregationLevel;
      final int chunksToAggregate = Integer.parseInt(chunkCountsToAggregate[chunkCountIndex]);
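      // Aggregate eligible chunks at this level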
      streamingAggregateLevel(aggregationLevel, chunksToAggregate);
      final Map<String, Long> counterDeltas = subtractFromAggregatorCounters(initialCounters);
      final long netAggregatesCreated = aggregatesCreated.get() - startingAggregatesCreated;
      if (netAggregatesCreated == 0) {
        if (aggregationLevel == 0) {
          foundNothingRuns.incrementAndGet();
        }
        log.debug("Created no new aggregates, so skipping higher-level aggregations");
        break;
      } else {
        final StringBuilder builder = new StringBuilder();
        builder
            .append("For aggregation level ")
            .append(aggregationLevel)
            .append(", runs ")
            .append(aggregationRuns.get())
            .append(", foundNothingRuns ")
            .append(foundNothingRuns.get());
        for (final Map.Entry<String, Long> entry : counterDeltas.entrySet()) {
          builder.append(", ").append(entry.getKey()).append(": ").append(entry.getValue());
        }
        log.info(builder.toString());
      }
    }

    log.debug("Aggregation done");
    isAggregating.set(false);
  }
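
  /** Schedules recurring aggregation runs at the configured aggregation interval. */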
  public void runAggregationThread() {
    aggregatorThread.scheduleWithFixedDelay(
        new Runnable() {
          @Override
          public void run() {
            getAndProcessTimelineAggregationCandidates();
          }
        },
        config.getAggregationInterval().getMillis(),
        config.getAggregationInterval().getMillis(),
        TimeUnit.MILLISECONDS);
  }
  /**
   * The sequence of events is:
   *
   * <ul>
   *   <li>Build the aggregated TimelineChunk object and save it, setting not_valid to true and
   *       aggregation_level to 1. This means that it won't be noticed by any of the dashboard
   *       queries. The save operation returns the new timeline_times_id.
   *   <li>Then, in a single transaction, update the aggregated TimelineChunk object to have
   *       not_valid = 0, and also delete the TimelineChunk objects that were the basis of the
   *       aggregation, and flush any TimelineChunks that happen to be in the cache.
   * </ul>
   *
   * @param timelineChunks the TimelineChunks to be aggregated
   * @param aggregationLevel the aggregation level of the source chunks; the aggregated chunk is
   *     created at aggregationLevel + 1
   */
  private void aggregateHostSampleChunks(
      final List<TimelineChunk> timelineChunks, final int aggregationLevel) throws IOException {
    final TimelineChunk firstTimesChunk = timelineChunks.get(0);
    final TimelineChunk lastTimesChunk = timelineChunks.get(timelineChunks.size() - 1);
    final int chunkCount = timelineChunks.size();
    final int sourceId = firstTimesChunk.getSourceId();
    final DateTime startTime = firstTimesChunk.getStartTime();
    final DateTime endTime = lastTimesChunk.getEndTime();
    final List<byte[]> timeParts = new ArrayList<byte[]>(chunkCount);
    try {
      final List<byte[]> sampleParts = new ArrayList<byte[]>(chunkCount);
      final List<Long> timelineChunkIds = new ArrayList<Long>(chunkCount);
      int sampleCount = 0;
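      // Collect the encoded time bytes, sample bytes, sample counts and chunk ids of the
      // source chunks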
      for (final TimelineChunk timelineChunk : timelineChunks) {
        timeParts.add(timelineChunk.getTimeBytesAndSampleBytes().getTimeBytes());
        sampleParts.add(timelineChunk.getTimeBytesAndSampleBytes().getSampleBytes());
        sampleCount += timelineChunk.getSampleCount();
        timelineChunkIds.add(timelineChunk.getChunkId());
      }
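      // Combine the per-chunk encodings into a single aggregated timeline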
      final byte[] combinedTimeBytes = timelineCoder.combineTimelines(timeParts, sampleCount);
      final byte[] combinedSampleBytes = sampleCoder.combineSampleBytes(sampleParts);
      final int timeBytesLength = combinedTimeBytes.length;
      final int totalSize = 4 + timeBytesLength + combinedSampleBytes.length;
      log.debug(
          "For sourceId {}, aggregationLevel {}, aggregating {} timelines ({} bytes, {} samples)",
          new Object[] {
            firstTimesChunk.getSourceId(),
            firstTimesChunk.getAggregationLevel(),
            timelineChunks.size(),
            totalSize,
            sampleCount
          });
      timelineChunksBytesCreated.addAndGet(totalSize);
      final int totalSampleCount = sampleCount;
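      // Build the aggregated chunk one level above the source chunks, spanning their full
      // time range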
      final TimelineChunk chunk =
          new TimelineChunk(
              0,
              sourceId,
              firstTimesChunk.getMetricId(),
              startTime,
              endTime,
              combinedTimeBytes,
              combinedSampleBytes,
              totalSampleCount,
              aggregationLevel + 1,
              false,
              false);
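      // Queue the new chunk for writing and mark the source chunks for invalidation or deletion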
      chunksToWrite.add(chunk);
      chunkIdsToInvalidateOrDelete.addAll(timelineChunkIds);
      timelineChunksQueuedForCreation.incrementAndGet();

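      // Flush to the database once enough chunk ids have accumulated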
      if (chunkIdsToInvalidateOrDelete.size() >= config.getMaxChunkIdsToInvalidateOrDelete()) {
        performWrites();
      }
    } catch (Exception e) {
      log.error(
          String.format(
              "Exception aggregating level %d, sourceId %d, metricId %d, startTime %s, endTime %s",
              aggregationLevel, sourceId, firstTimesChunk.getMetricId(), startTime, endTime),
          e);
    }
  }