@Setup
  public void setup() throws IOException {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());

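    // Register the serde for the "hyperUnique" complex metric; the null check keeps
    // repeated setup() calls in the same JVM from registering it twice.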
    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
      ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(Hashing.murmur3_128()));
    }

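    // Size the pool to the segment count so per-segment query tasks can run concurrently.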
    executorService = Execs.multiThreaded(numSegments, "TimeseriesThreadPool");

    setupQueries();

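    // schemaAndQuery is expected in "<schema>.<query>" form, e.g. "basic.A", keying
    // into BenchmarkSchemas and the query map built by setupQueries() below.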
    String[] schemaQuery = schemaAndQuery.split("\\.");
    String schemaName = schemaQuery[0];
    String queryName = schemaQuery[1];

    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schemaName);
    query = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);

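    // Build one in-memory incremental index per segment; seeding the generator with
    // RNG_SEED + i makes each segment's rows distinct but reproducible across runs.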
    incIndexes = new ArrayList<>();
    for (int i = 0; i < numSegments; i++) {
      log.info("Generating rows for segment " + i);
      BenchmarkDataGenerator gen =
          new BenchmarkDataGenerator(
              schemaInfo.getColumnSchemas(),
              RNG_SEED + i,
              schemaInfo.getDataInterval(),
              rowsPerSegment);

      IncrementalIndex incIndex = makeIncIndex();

      for (int j = 0; j < rowsPerSegment; j++) {
        InputRow row = gen.nextRow();
        if (j % 10000 == 0) {
          log.info(j + " rows generated.");
        }
        incIndex.add(row);
      }
      log.info(rowsPerSegment + " rows generated.");
      incIndexes.add(incIndex);
    }

    File tmpDir = Files.createTempDir();
    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
    // Note: File.deleteOnExit() only removes a directory that is empty at JVM exit,
    // so the segment files persisted below are not actually cleaned up by this call.
    tmpDir.deleteOnExit();

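    // Persist each incremental index as an immutable segment under the temp dir,
    // then load it back as a memory-mapped QueryableIndex for querying.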
    qIndexes = new ArrayList<>();
    for (int i = 0; i < numSegments; i++) {
      File indexFile = INDEX_MERGER_V9.persist(incIndexes.get(i), tmpDir, new IndexSpec());

      QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile);
      qIndexes.add(qIndex);
    }

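    // The factory that benchmark methods use to build per-segment runners; interval
    // chunking and query watching are stubbed out since neither is under measurement.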
    factory =
        new TimeseriesQueryRunnerFactory(
            new TimeseriesQueryQueryToolChest(
                QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()),
            new TimeseriesQueryEngine(),
            QueryBenchmarkUtil.NOOP_QUERYWATCHER);
  }
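
  // Sketch (an assumption, not part of the original class): one way a JMH benchmark
  // method could consume the state built in setup(). It presumes the
  // QueryBenchmarkUtil.makeQueryRunner helper available in this benchmark package;
  // the method name and the "qIndex" segment id are illustrative.
  //
  // @Benchmark
  // public void timeseriesSingleIndexSketch(Blackhole blackhole) {
  //   QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(
  //       factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)));
  //   Sequence<Result<TimeseriesResultValue>> results = runner.run(query, new HashMap<String, Object>());
  //   blackhole.consume(Sequences.toList(results, Lists.<Result<TimeseriesResultValue>>newArrayList()));
  // }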

  private void setupQueries() {
    // queries for the basic schema
    Map<String, TimeseriesQuery> basicQueries = new LinkedHashMap<>();
    BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");

    { // basic.A
      QuerySegmentSpec intervalSpec =
          new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));

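      // One aggregator per value distribution in the basic schema (sequential,
      // uniform, normal, zipf), plus an HLL cardinality sketch over the "hyper"
      // column whose serde is registered in setup().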
      List<AggregatorFactory> queryAggs = new ArrayList<>();
      queryAggs.add(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"));
      queryAggs.add(new LongMaxAggregatorFactory("maxLongUniform", "maxLongUniform"));
      queryAggs.add(new DoubleSumAggregatorFactory("sumFloatNormal", "sumFloatNormal"));
      queryAggs.add(new DoubleMinAggregatorFactory("minFloatZipf", "minFloatZipf"));
      queryAggs.add(new HyperUniquesAggregatorFactory("hyperUniquesMet", "hyper"));

      TimeseriesQuery queryA =
          Druids.newTimeseriesQueryBuilder()
              .dataSource("blah")
              .granularity(QueryGranularities.ALL)
              .intervals(intervalSpec)
              .aggregators(queryAggs)
              .descending(false)
              .build();

      basicQueries.put("A", queryA);
    }
    { // basic.timeFilterNumeric
      QuerySegmentSpec intervalSpec =
          new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));

      List<AggregatorFactory> queryAggs = new ArrayList<>();
      LongSumAggregatorFactory lsaf =
          new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
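      // Inclusive bound filter on the __time column with the bounds compared
      // numerically; only rows in [200000, 300000] feed the filtered long sum.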
      BoundDimFilter timeFilter =
          new BoundDimFilter(
              Column.TIME_COLUMN_NAME,
              "200000",
              "300000",
              false,
              false,
              null,
              null,
              StringComparators.NUMERIC);
      queryAggs.add(new FilteredAggregatorFactory(lsaf, timeFilter));

      TimeseriesQuery timeFilterQuery =
          Druids.newTimeseriesQueryBuilder()
              .dataSource("blah")
              .granularity(QueryGranularities.ALL)
              .intervals(intervalSpec)
              .aggregators(queryAggs)
              .descending(false)
              .build();

      basicQueries.put("timeFilterNumeric", timeFilterQuery);
    }
    { // basic.timeFilterAlphanumeric
      QuerySegmentSpec intervalSpec =
          new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));

      List<AggregatorFactory> queryAggs = new ArrayList<>();
      LongSumAggregatorFactory lsaf =
          new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
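      // Same bounds as basic.timeFilterNumeric, but compared with the ALPHANUMERIC
      // comparator, exercising the string-based comparison path for contrast.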
      BoundDimFilter timeFilter =
          new BoundDimFilter(
              Column.TIME_COLUMN_NAME,
              "200000",
              "300000",
              false,
              false,
              null,
              null,
              StringComparators.ALPHANUMERIC);
      queryAggs.add(new FilteredAggregatorFactory(lsaf, timeFilter));

      TimeseriesQuery timeFilterQuery =
          Druids.newTimeseriesQueryBuilder()
              .dataSource("blah")
              .granularity(QueryGranularities.ALL)
              .intervals(intervalSpec)
              .aggregators(queryAggs)
              .descending(false)
              .build();

      basicQueries.put("timeFilterAlphanumeric", timeFilterQuery);
    }
    { // basic.timeFilterByInterval
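      // Restricts to the same time range as the two filter variants, but through the
      // query interval itself (end-exclusive), so out-of-range rows are never scanned.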
      QuerySegmentSpec intervalSpec =
          new MultipleIntervalSegmentSpec(Arrays.asList(new Interval(200000, 300000)));
      List<AggregatorFactory> queryAggs = new ArrayList<>();
      LongSumAggregatorFactory lsaf =
          new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
      queryAggs.add(lsaf);

      TimeseriesQuery timeFilterQuery =
          Druids.newTimeseriesQueryBuilder()
              .dataSource("blah")
              .granularity(QueryGranularities.ALL)
              .intervals(intervalSpec)
              .aggregators(queryAggs)
              .descending(false)
              .build();

      basicQueries.put("timeFilterByInterval", timeFilterQuery);
    }

    SCHEMA_QUERY_MAP.put("basic", basicQueries);
  }