Example #1
 @Override
 protected DruidServer addInnerInventory(
     DruidServer container, String inventoryKey, DataSegment inventory) {
   Predicate<Pair<DruidServerMetadata, DataSegment>> predicate =
       Predicates.or(defaultFilter, Predicates.or(segmentPredicates.values()));
   if (predicate.apply(Pair.of(container.getMetadata(), inventory))) {
     addSingleInventory(container, inventory);
   }
   return container;
 }
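
Example #1 admits a segment only when either the default filter or one of the dynamically registered per-segment predicates matches. Below is a minimal, self-contained sketch of that Guava Predicates.or composition; the String-based predicates are illustrative stand-ins, not Druid types.

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import java.util.Arrays;
import java.util.List;

public class PredicateOrSketch {
  public static void main(String[] args) {
    // A base filter plus dynamically registered filters, mirroring
    // defaultFilter and segmentPredicates.values() above.
    Predicate<String> defaultFilter = input -> input.startsWith("keep:");
    List<Predicate<String>> registered = Arrays.asList(
        input -> input.contains("hot"),
        input -> input.endsWith("-realtime"));

    // Accept when the default filter OR any registered predicate matches.
    Predicate<String> combined = Predicates.or(defaultFilter, Predicates.or(registered));

    System.out.println(combined.apply("keep:segment-1")); // true  (default filter)
    System.out.println(combined.apply("cold-batch"));     // false (nothing matches)
  }
}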
Example #2
 @Override
 public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
   final CallbackAction action;
   if (filter.apply(Pair.of(server, segment))) {
     action = callback.segmentRemoved(server, segment);
   } else {
     action = CallbackAction.CONTINUE;
   }
   return action;
 }
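
Example #2 is the same gate-the-delegate idea applied to a callback: consult a filter, forward the event on a match, otherwise answer CONTINUE without touching the delegate. A hypothetical generic wrapper sketching the pattern (FilteringListener and Listener are invented names, not Druid classes):

import com.google.common.base.Predicate;

public class FilteringListener<E> {
  public interface Listener<E> {
    void onEvent(E event);
  }

  private final Predicate<E> filter;
  private final Listener<E> delegate;

  public FilteringListener(Predicate<E> filter, Listener<E> delegate) {
    this.filter = filter;
    this.delegate = delegate;
  }

  public void onEvent(E event) {
    if (filter.apply(event)) {
      delegate.onEvent(event); // matched: forward to the wrapped listener
    }                          // no match: swallow the event, like CONTINUE above
  }
}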
Example #3
    protected Pair<Integer, Integer> computeStartEnd(int cardinality) {
      int startIndex = ignoreFirstN;

      if (previousStop != null) {
        // Resume just past the previous stop; a negative value (apparently a
        // binary-search-style "not found" encoding) is negated to land on the
        // insertion point.
        int lookupId = dimSelector.lookupId(previousStop) + 1;
        if (lookupId < 0) {
          lookupId *= -1;
        }
        if (lookupId > ignoreFirstN + keepOnlyN) {
          startIndex = ignoreFirstN + keepOnlyN;
        } else {
          startIndex = Math.max(lookupId, startIndex);
        }
      }

      int endIndex = Math.min(ignoreFirstN + keepOnlyN, cardinality);

      if (ignoreAfterThreshold && query.getDimensionsFilter() == null) {
        endIndex = Math.min(endIndex, startIndex + query.getThreshold());
      }

      return Pair.of(startIndex, endIndex);
    }
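
Example #3 clamps a [startIndex, endIndex) window against the ignored prefix, the keep-only cap, the dictionary cardinality, and, when no dimension filter is present, the query threshold. A worked sketch of the arithmetic under assumed values (ignoreFirstN = 2, keepOnlyN = 5, cardinality = 100, threshold = 3):

public class StartEndSketch {
  public static void main(String[] args) {
    int ignoreFirstN = 2, keepOnlyN = 5, cardinality = 100, threshold = 3;

    // No previousStop: start right after the ignored prefix.
    int startIndex = ignoreFirstN; // 2

    // The window never extends past ignoreFirstN + keepOnlyN or the dictionary size.
    int endIndex = Math.min(ignoreFirstN + keepOnlyN, cardinality); // min(7, 100) = 7

    // With no dimension filter, also cap the window at the query threshold.
    endIndex = Math.min(endIndex, startIndex + threshold); // min(7, 5) = 5

    System.out.println("[" + startIndex + ", " + endIndex + ")"); // prints [2, 5)
  }
}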
Example #4
  @Override
  public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);

    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();

    final List<Pair<Interval, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();

    final boolean useCache =
        query.getContextUseCache(true)
            && strategy != null
            && cacheConfig.isUseCache()
            && cacheConfig.isQueryCacheable(query);
    final boolean populateCache =
        query.getContextPopulateCache(true)
            && strategy != null
            && cacheConfig.isPopulateCache()
            && cacheConfig.isQueryCacheable(query);
    final boolean isBySegment = query.getContextBySegment(false);

    final ImmutableMap.Builder<String, Object> contextBuilder = new ImmutableMap.Builder<>();

    final int priority = query.getContextPriority(0);
    contextBuilder.put("priority", priority);

    if (populateCache) {
      // prevent down-stream nodes from caching results as well if we are populating the cache
      contextBuilder.put(CacheConfig.POPULATE_CACHE, false);
      contextBuilder.put("bySegment", true);
    }
    contextBuilder.put("intermediate", true);

    TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());

    if (timeline == null) {
      return Sequences.empty();
    }

    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();

    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();

    for (Interval interval : query.getIntervals()) {
      Iterables.addAll(serversLookup, timeline.lookup(interval));
    }

    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup =
        toolChest.filterSegments(query, serversLookup);

    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
      for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
        ServerSelector selector = chunk.getObject();
        final SegmentDescriptor descriptor =
            new SegmentDescriptor(
                holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());

        segments.add(Pair.of(selector, descriptor));
      }
    }

    final byte[] queryCacheKey;

    if ((populateCache || useCache) // implies strategy != null
        && !isBySegment) // explicit bySegment queries are never cached
    {
      queryCacheKey = strategy.computeCacheKey(query);
    } else {
      queryCacheKey = null;
    }

    if (queryCacheKey != null) {
      // cacheKeys map must preserve segment ordering, so that shards are always combined in
      // the same order
      Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys =
          Maps.newLinkedHashMap();
      for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final Cache.NamedKey segmentCacheKey =
            CacheUtil.computeSegmentCacheKey(
                segment.lhs.getSegment().getIdentifier(), segment.rhs, queryCacheKey);
        cacheKeys.put(segment, segmentCacheKey);
      }

      // Pull cached segments from cache and remove from set of segments to query
      final Map<Cache.NamedKey, byte[]> cachedValues;
      if (useCache) {
        cachedValues =
            cache.getBulk(
                Iterables.limit(cacheKeys.values(), cacheConfig.getCacheBulkMergeLimit()));
      } else {
        cachedValues = ImmutableMap.of();
      }

      for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry :
          cacheKeys.entrySet()) {
        Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
        Cache.NamedKey segmentCacheKey = entry.getValue();
        final Interval segmentQueryInterval = segment.rhs.getInterval();

        final byte[] cachedValue = cachedValues.get(segmentCacheKey);
        if (cachedValue != null) {
          // remove cached segment from set of segments to query
          segments.remove(segment);
          cachedResults.add(Pair.of(segmentQueryInterval, cachedValue));
        } else if (populateCache) {
          // otherwise, if populating cache, add segment to list of segments to cache
          final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
          cachePopulatorMap.put(
              String.format("%s_%s", segmentIdentifier, segmentQueryInterval),
              new CachePopulator(cache, objectMapper, segmentCacheKey));
        }
      }
    }

    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
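      // pick() chooses which replica of the segment the query will be routed to.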
      final QueryableDruidServer queryableDruidServer = segment.lhs.pick();

      if (queryableDruidServer == null) {
        log.makeAlert(
                "No servers found for SegmentDescriptor[%s] for DataSource[%s]?! How can this be?!",
                segment.rhs, query.getDataSource())
            .emit();
      } else {
        final DruidServer server = queryableDruidServer.getServer();
        List<SegmentDescriptor> descriptors = serverSegments.get(server);

        if (descriptors == null) {
          descriptors = Lists.newArrayList();
          serverSegments.put(server, descriptors);
        }

        descriptors.add(segment.rhs);
      }
    }

    return new LazySequence<>(
        new Supplier<Sequence<T>>() {
          @Override
          public Sequence<T> get() {
            ArrayList<Sequence<T>> sequencesByInterval = Lists.newArrayList();
            addSequencesFromCache(sequencesByInterval);
            addSequencesFromServer(sequencesByInterval);

            return mergeCachedAndUncachedSequences(sequencesByInterval, toolChest);
          }

          private void addSequencesFromCache(ArrayList<Sequence<T>> listOfSequences) {
            if (strategy == null) {
              return;
            }

            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<Interval, byte[]> cachedResultPair : cachedResults) {
              final byte[] cachedResult = cachedResultPair.rhs;
              Sequence<Object> cachedSequence =
                  new BaseSequence<>(
                      new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {
                        @Override
                        public Iterator<Object> make() {
                          try {
                            if (cachedResult.length == 0) {
                              return Iterators.emptyIterator();
                            }

                            return objectMapper.readValues(
                                objectMapper.getFactory().createParser(cachedResult),
                                cacheObjectClazz);
                          } catch (IOException e) {
                            throw Throwables.propagate(e);
                          }
                        }

                        @Override
                        public void cleanup(Iterator<Object> iterFromMake) {}
                      });
              listOfSequences.add(Sequences.map(cachedSequence, pullFromCacheFunction));
            }
          }

          private void addSequencesFromServer(ArrayList<Sequence<T>> listOfSequences) {
            listOfSequences.ensureCapacity(listOfSequences.size() + serverSegments.size());

            final Query<Result<BySegmentResultValueClass<T>>> rewrittenQuery =
                (Query<Result<BySegmentResultValueClass<T>>>)
                    query.withOverriddenContext(contextBuilder.build());

            // Loop through each server, setting up the query and initiating it.
            // The data gets handled as a Future and parsed in the long Sequence chain in the
            // resultSeqToAdd setter.
            for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry :
                serverSegments.entrySet()) {
              final DruidServer server = entry.getKey();
              final List<SegmentDescriptor> descriptors = entry.getValue();

              final QueryRunner clientQueryable = serverView.getQueryRunner(server);

              if (clientQueryable == null) {
                log.error("WTF!? server[%s] doesn't have a client Queryable?", server);
                continue;
              }

              final MultipleSpecificSegmentSpec segmentSpec =
                  new MultipleSpecificSegmentSpec(descriptors);

              final Sequence<T> resultSeqToAdd;
              if (!server.isAssignable()
                  || !populateCache
                  || isBySegment) { // Direct server queryable
                if (!isBySegment) {
                  resultSeqToAdd =
                      clientQueryable.run(query.withQuerySegmentSpec(segmentSpec), responseContext);
                } else {
                  // bySegment queries need to be de-serialized, see DirectDruidClient.run()

                  @SuppressWarnings("unchecked")
                  final Query<Result<BySegmentResultValueClass<T>>> bySegmentQuery =
                      (Query<Result<BySegmentResultValueClass<T>>>) query;

                  @SuppressWarnings("unchecked")
                  final Sequence<Result<BySegmentResultValueClass<T>>> resultSequence =
                      clientQueryable.run(
                          bySegmentQuery.withQuerySegmentSpec(segmentSpec), responseContext);

                  resultSeqToAdd =
                      (Sequence)
                          Sequences.map(
                              resultSequence,
                              new Function<
                                  Result<BySegmentResultValueClass<T>>,
                                  Result<BySegmentResultValueClass<T>>>() {
                                @Override
                                public Result<BySegmentResultValueClass<T>> apply(
                                    Result<BySegmentResultValueClass<T>> input) {
                                  final BySegmentResultValueClass<T> bySegmentValue =
                                      input.getValue();
                                  return new Result<>(
                                      input.getTimestamp(),
                                      new BySegmentResultValueClass<T>(
                                          Lists.transform(
                                              bySegmentValue.getResults(),
                                              toolChest.makePreComputeManipulatorFn(
                                                  query, MetricManipulatorFns.deserializing())),
                                          bySegmentValue.getSegmentId(),
                                          bySegmentValue.getInterval()));
                                }
                              });
                }
              } else { // Requires some manipulation on broker side
                @SuppressWarnings("unchecked")
                final Sequence<Result<BySegmentResultValueClass<T>>> runningSequence =
                    clientQueryable.run(
                        rewrittenQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                resultSeqToAdd =
                    toolChest.mergeSequencesUnordered(
                        Sequences.<Result<BySegmentResultValueClass<T>>, Sequence<T>>map(
                            runningSequence,
                            new Function<Result<BySegmentResultValueClass<T>>, Sequence<T>>() {
                              private final Function<T, Object> cacheFn =
                                  strategy.prepareForCache();

                              // Actually do something with the results
                              @Override
                              public Sequence<T> apply(Result<BySegmentResultValueClass<T>> input) {
                                final BySegmentResultValueClass<T> value = input.getValue();
                                final CachePopulator cachePopulator =
                                    cachePopulatorMap.get(
                                        String.format(
                                            "%s_%s", value.getSegmentId(), value.getInterval()));

                                final Queue<ListenableFuture<Object>> cacheFutures =
                                    new ConcurrentLinkedQueue<>();

                                return Sequences.<T>withEffect(
                                    Sequences.<T, T>map(
                                        Sequences.<T, T>map(
                                            Sequences.<T>simple(value.getResults()),
                                            new Function<T, T>() {
                                              @Override
                                              public T apply(final T input) {
                                                if (cachePopulator != null) {
                                                  // only compute cache data if populating cache
                                                  cacheFutures.add(
                                                      backgroundExecutorService.submit(
                                                          new Callable<Object>() {
                                                            @Override
                                                            public Object call() {
                                                              return cacheFn.apply(input);
                                                            }
                                                          }));
                                                }
                                                return input;
                                              }
                                            }),
                                        toolChest.makePreComputeManipulatorFn(
                                            // Ick... most makePreComputeManipulatorFn directly cast
                                            // to their ToolChest query type of choice
                                            // This casting is sub-optimal, but hasn't caused any
                                            // major problems yet...
                                            (Query) rewrittenQuery,
                                            MetricManipulatorFns.deserializing())),
                                    new Runnable() {
                                      @Override
                                      public void run() {
                                        if (cachePopulator != null) {
                                          Futures.addCallback(
                                              Futures.allAsList(cacheFutures),
                                              new FutureCallback<List<Object>>() {
                                                @Override
                                                public void onSuccess(List<Object> cacheData) {
                                                  cachePopulator.populate(cacheData);
                                                  // Help out GC by making sure all references are
                                                  // gone
                                                  cacheFutures.clear();
                                                }

                                                @Override
                                                public void onFailure(Throwable throwable) {
                                                  log.error(throwable, "Background caching failed");
                                                }
                                              },
                                              backgroundExecutorService);
                                        }
                                      }
                                    },
                                    MoreExecutors.sameThreadExecutor()); // End withEffect
                              }
                            }));
              }

              listOfSequences.add(resultSeqToAdd);
            }
          }
        } // End of Supplier
        );
  }
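
The background cache population in Example #4 hinges on queueing one future per result as the sequence streams by, then flushing them all once the sequence is exhausted. A stripped-down sketch of that Futures.allAsList plus addCallback pattern; the println stands in for Druid's CachePopulator.populate():

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;

public class BackgroundCacheSketch {
  public static void main(String[] args) {
    ListeningExecutorService exec =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
    Queue<ListenableFuture<Object>> cacheFutures = new ConcurrentLinkedQueue<>();

    // As each result streams past, kick off its cache serialization off-thread.
    for (final String row : new String[] {"r1", "r2", "r3"}) {
      cacheFutures.add(exec.submit(() -> (Object) ("serialized:" + row)));
    }

    // Once the whole sequence has been consumed, write all entries at once.
    Futures.addCallback(
        Futures.allAsList(cacheFutures),
        new FutureCallback<List<Object>>() {
          @Override
          public void onSuccess(List<Object> cacheData) {
            System.out.println("populate cache with " + cacheData);
            cacheFutures.clear(); // drop references so the GC can reclaim them
          }

          @Override
          public void onFailure(Throwable throwable) {
            System.err.println("Background caching failed: " + throwable);
          }
        },
        MoreExecutors.directExecutor());

    exec.shutdown();
  }
}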
Example #5
  private void becomeLeader() {
    synchronized (lock) {
      if (!started) {
        return;
      }

      log.info("I am the leader of the coordinators, all must bow!");
      try {
        leader = true;
        databaseSegmentManager.start();
        databaseRuleManager.start();
        serverInventoryView.start();
        serviceAnnouncer.announce(self);

        final List<Pair<? extends CoordinatorRunnable, Duration>> coordinatorRunnables =
            Lists.newArrayList();
        dynamicConfigs =
            configManager.watch(
                CoordinatorDynamicConfig.CONFIG_KEY,
                CoordinatorDynamicConfig.class,
                new CoordinatorDynamicConfig.Builder().build());
        coordinatorRunnables.add(
            Pair.of(new CoordinatorHistoricalManagerRunnable(), config.getCoordinatorPeriod()));
        if (indexingServiceClient != null) {
          coordinatorRunnables.add(
              Pair.of(
                  new CoordinatorIndexingServiceRunnable(
                      makeIndexingServiceHelpers(
                          configManager.watch(
                              DatasourceWhitelist.CONFIG_KEY, DatasourceWhitelist.class))),
                  config.getCoordinatorIndexingPeriod()));
        }

        for (final Pair<? extends CoordinatorRunnable, Duration> coordinatorRunnable :
            coordinatorRunnables) {
          ScheduledExecutors.scheduleWithFixedDelay(
              exec,
              config.getCoordinatorStartDelay(),
              coordinatorRunnable.rhs,
              new Callable<ScheduledExecutors.Signal>() {
                private final CoordinatorRunnable theRunnable = coordinatorRunnable.lhs;

                @Override
                public ScheduledExecutors.Signal call() {
                  if (leader) {
                    theRunnable.run();
                  }
                  if (leader) { // (We might no longer be coordinator)
                    return ScheduledExecutors.Signal.REPEAT;
                  } else {
                    return ScheduledExecutors.Signal.STOP;
                  }
                }
              });
        }
      } catch (Exception e) {
        log.makeAlert(e, "Unable to become leader").emit();
        final LeaderLatch oldLatch = createNewLeaderLatch();
        Closeables.closeQuietly(oldLatch);
        try {
          leaderLatch.get().start();
        } catch (Exception e1) {
          // If an exception gets thrown out here, the coordinator will zombie out because it
          // won't be looking for the latch anymore. I don't believe it's actually possible
          // for an Exception to be thrown here, but Curator likes to have "throws Exception"
          // on its methods, so it might happen...
          log.makeAlert(e1, "I am a zombie").emit();
        }
      }
    }
  }
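
The scheduling in Example #5 reduces to: repeat a duty cycle on a fixed delay while the leader flag holds, and stop once leadership is lost. A plain ScheduledExecutorService sketch of that shape, with an explicit cancel() standing in for Druid's ScheduledExecutors.Signal.STOP:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class LeaderGatedTask {
  public static void main(String[] args) throws InterruptedException {
    final AtomicBoolean leader = new AtomicBoolean(true);
    ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();

    // One-element array so the task can cancel its own schedule; the initial
    // delay ensures the handle is assigned before the first run.
    final ScheduledFuture<?>[] handle = new ScheduledFuture<?>[1];
    handle[0] = exec.scheduleWithFixedDelay(
        () -> {
          if (leader.get()) {
            System.out.println("coordinator duty cycle"); // theRunnable.run()
          } else {
            handle[0].cancel(false); // we might no longer be coordinator: STOP
          }
        },
        /* initialDelay */ 50, /* delay */ 100, TimeUnit.MILLISECONDS);

    Thread.sleep(350);
    leader.set(false); // simulate losing the leader latch
    Thread.sleep(200); // let the task observe the flag and cancel itself
    exec.shutdown();
    exec.awaitTermination(1, TimeUnit.SECONDS);
  }
}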
Example #6
  @Override
  public Sequence<T> run(final Query<T> query) {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);

    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();

    final List<Pair<DateTime, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();

    final boolean useCache =
        Boolean.parseBoolean(query.getContextValue("useCache", "true")) && strategy != null;
    final boolean populateCache =
        Boolean.parseBoolean(query.getContextValue("populateCache", "true")) && strategy != null;
    final boolean isBySegment = Boolean.parseBoolean(query.getContextValue("bySegment", "false"));

    ImmutableMap.Builder<String, String> contextBuilder = new ImmutableMap.Builder<>();

    final String priority = query.getContextValue("priority", "0");
    contextBuilder.put("priority", priority);

    if (populateCache) {
      contextBuilder.put("bySegment", "true");
    }
    contextBuilder.put("intermediate", "true");

    final Query<T> rewrittenQuery = query.withOverriddenContext(contextBuilder.build());

    VersionedIntervalTimeline<String, ServerSelector> timeline =
        serverView.getTimeline(query.getDataSource());
    if (timeline == null) {
      return Sequences.empty();
    }

    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();

    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();

    for (Interval interval : rewrittenQuery.getIntervals()) {
      serversLookup.addAll(timeline.lookup(interval));
    }

    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup =
        toolChest.filterSegments(query, serversLookup);

    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
      for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
        ServerSelector selector = chunk.getObject();
        final SegmentDescriptor descriptor =
            new SegmentDescriptor(
                holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());

        segments.add(Pair.of(selector, descriptor));
      }
    }

    final byte[] queryCacheKey;
    if (strategy != null) {
      queryCacheKey = strategy.computeCacheKey(query);
    } else {
      queryCacheKey = null;
    }

    if (queryCacheKey != null) {
      Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys = Maps.newHashMap();
      for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final Cache.NamedKey segmentCacheKey =
            computeSegmentCacheKey(
                segment.lhs.getSegment().getIdentifier(), segment.rhs, queryCacheKey);
        cacheKeys.put(segment, segmentCacheKey);
      }

      // Pull cached segments from cache and remove from set of segments to query
      final Map<Cache.NamedKey, byte[]> cachedValues;
      if (useCache) {
        cachedValues = cache.getBulk(cacheKeys.values());
      } else {
        cachedValues = ImmutableMap.of();
      }

      for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry :
          cacheKeys.entrySet()) {
        Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
        Cache.NamedKey segmentCacheKey = entry.getValue();
        final Interval segmentQueryInterval = segment.rhs.getInterval();

        final byte[] cachedValue = cachedValues.get(segmentCacheKey);
        if (cachedValue != null) {
          // remove cached segment from set of segments to query
          segments.remove(segment);
          cachedResults.add(Pair.of(segmentQueryInterval.getStart(), cachedValue));
        } else if (populateCache) {
          final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
          cachePopulatorMap.put(
              String.format("%s_%s", segmentIdentifier, segmentQueryInterval),
              new CachePopulator(cache, objectMapper, segmentCacheKey));
        }
      }
    }

    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
      final QueryableDruidServer queryableDruidServer = segment.lhs.pick();

      if (queryableDruidServer == null) {
        log.error("No servers found for %s?! How can this be?!", segment.rhs);
      } else {
        final DruidServer server = queryableDruidServer.getServer();
        List<SegmentDescriptor> descriptors = serverSegments.get(server);

        if (descriptors == null) {
          descriptors = Lists.newArrayList();
          serverSegments.put(server, descriptors);
        }

        descriptors.add(segment.rhs);
      }
    }

    return new LazySequence<>(
        new Supplier<Sequence<T>>() {
          @Override
          public Sequence<T> get() {
            ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences = Lists.newArrayList();

            addSequencesFromServer(listOfSequences);
            addSequencesFromCache(listOfSequences);

            Collections.sort(
                listOfSequences,
                Ordering.natural().onResultOf(Pair.<DateTime, Sequence<T>>lhsFn()));

            final Sequence<Sequence<T>> seq =
                Sequences.simple(
                    Iterables.transform(listOfSequences, Pair.<DateTime, Sequence<T>>rhsFn()));
            if (strategy == null) {
              return toolChest.mergeSequences(seq);
            } else {
              return strategy.mergeSequences(seq);
            }
          }

          private void addSequencesFromCache(
              ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences) {
            if (strategy == null) {
              return;
            }

            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<DateTime, byte[]> cachedResultPair : cachedResults) {
              final byte[] cachedResult = cachedResultPair.rhs;
              Sequence<Object> cachedSequence =
                  new BaseSequence<>(
                      new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {
                        @Override
                        public Iterator<Object> make() {
                          try {
                            if (cachedResult.length == 0) {
                              return Iterators.emptyIterator();
                            }

                            return objectMapper.readValues(
                                objectMapper.getFactory().createParser(cachedResult),
                                cacheObjectClazz);
                          } catch (IOException e) {
                            throw Throwables.propagate(e);
                          }
                        }

                        @Override
                        public void cleanup(Iterator<Object> iterFromMake) {}
                      });
              listOfSequences.add(
                  Pair.of(
                      cachedResultPair.lhs, Sequences.map(cachedSequence, pullFromCacheFunction)));
            }
          }

          @SuppressWarnings("unchecked")
          private void addSequencesFromServer(
              ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences) {
            for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry :
                serverSegments.entrySet()) {
              final DruidServer server = entry.getKey();
              final List<SegmentDescriptor> descriptors = entry.getValue();

              final QueryRunner clientQueryable = serverView.getQueryRunner(server);
              if (clientQueryable == null) {
                log.makeAlert("WTF!? server[%s] doesn't have a client Queryable?", server).emit();
                continue;
              }

              final Sequence<T> resultSeqToAdd;
              final MultipleSpecificSegmentSpec segmentSpec =
                  new MultipleSpecificSegmentSpec(descriptors);
              List<Interval> intervals = segmentSpec.getIntervals();

              if (!server.isAssignable() || !populateCache || isBySegment) {
                resultSeqToAdd = clientQueryable.run(query.withQuerySegmentSpec(segmentSpec));
              } else {
                resultSeqToAdd =
                    toolChest.mergeSequences(
                        Sequences.map(
                            clientQueryable.run(rewrittenQuery.withQuerySegmentSpec(segmentSpec)),
                            new Function<Object, Sequence<T>>() {
                              private final Function<T, Object> prepareForCache =
                                  strategy.prepareForCache();

                              @Override
                              public Sequence<T> apply(Object input) {
                                Result<Object> result = (Result<Object>) input;
                                final BySegmentResultValueClass<T> value =
                                    (BySegmentResultValueClass<T>) result.getValue();
                                String segmentIdentifier = value.getSegmentId();
                                final Iterable<T> segmentResults = value.getResults();

                                CachePopulator cachePopulator =
                                    cachePopulatorMap.get(
                                        String.format(
                                            "%s_%s", segmentIdentifier, value.getInterval()));
                                if (cachePopulator != null) {
                                  cachePopulator.populate(
                                      Iterables.transform(segmentResults, prepareForCache));
                                }

                                return Sequences.simple(
                                    Iterables.transform(
                                        segmentResults,
                                        toolChest.makeMetricManipulatorFn(
                                            rewrittenQuery,
                                            new MetricManipulationFn() {
                                              @Override
                                              public Object manipulate(
                                                  AggregatorFactory factory, Object object) {
                                                return factory.deserialize(object);
                                              }
                                            })));
                              }
                            }));
              }

              listOfSequences.add(Pair.of(intervals.get(0).getStart(), resultSeqToAdd));
            }
          }
        });
  }
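
Example #6 sorts the per-server and cached result sequences by interval start before merging, via Ordering.natural().onResultOf(...) keyed on Pair.lhsFn(). The same idiom, with Map.Entry standing in for Druid's Pair:

import com.google.common.base.Function;
import com.google.common.collect.Ordering;
import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

public class SortByLhsSketch {
  public static void main(String[] args) {
    List<Map.Entry<Integer, String>> pairs = new ArrayList<>();
    pairs.add(new SimpleEntry<>(3, "third"));
    pairs.add(new SimpleEntry<>(1, "first"));
    pairs.add(new SimpleEntry<>(2, "second"));

    // Sort by the left-hand key, as the code above does with Pair.lhsFn().
    Collections.sort(
        pairs,
        Ordering.natural().onResultOf(
            new Function<Map.Entry<Integer, String>, Integer>() {
              @Override
              public Integer apply(Map.Entry<Integer, String> entry) {
                return entry.getKey();
              }
            }));

    System.out.println(pairs); // [1=first, 2=second, 3=third]
  }
}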