@Before
  public void setUp() throws Exception {
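    // Point the indexer's metadata-update step at the test's Derby instance
    // provided by derbyConnectorRule.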
    final MetadataStorageUpdaterJobSpec metadataStorageUpdaterJobSpec =
        new MetadataStorageUpdaterJobSpec() {
          @Override
          public String getSegmentTable() {
            return derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable();
          }

          @Override
          public MetadataStorageConnectorConfig get() {
            return derbyConnectorRule.getMetadataConnectorConfig();
          }
        };
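    // Scratch dir for the Hadoop job's working files; segment dir acts as deep storage.
    // The previous value of the storage property is saved so it can be restored after the test.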
    final File scratchFileDir = temporaryFolder.newFolder();
    storageLocProperty = System.getProperty(STORAGE_PROPERTY_KEY);
    tmpSegmentDir = temporaryFolder.newFolder();
    System.setProperty(STORAGE_PROPERTY_KEY, tmpSegmentDir.getAbsolutePath());

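    // Copy the sample TSV from the test classpath to a local temp file so it can be
    // referenced by the "static" path spec below.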
    final URL url =
        Preconditions.checkNotNull(Query.class.getClassLoader().getResource("druid.sample.tsv"));
    final File tmpInputFile = temporaryFolder.newFile();
    FileUtils.retryCopy(
        new ByteSource() {
          @Override
          public InputStream openStream() throws IOException {
            return url.openStream();
          }
        },
        tmpInputFile,
        FileUtils.IS_EXCEPTION,
        3);
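    // Build the full ingestion spec: data schema (parser, aggregators, granularity),
    // IO config (static input path, metadata update spec, segment output dir),
    // and a mostly-default tuning config rooted at the scratch directory.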
    final HadoopDruidIndexerConfig hadoopDruidIndexerConfig =
        new HadoopDruidIndexerConfig(
            new HadoopIngestionSpec(
                new DataSchema(
                    DATASOURCE,
                    HadoopDruidIndexerConfig.JSON_MAPPER.convertValue(
                        new StringInputRowParser(
                            new DelimitedParseSpec(
                                new TimestampSpec("ts", "iso", null),
                                new DimensionsSpec(Arrays.asList(TestIndex.DIMENSIONS), null, null),
                                "\t",
                                "\u0001",
                                Arrays.asList(TestIndex.COLUMNS))),
                        Map.class),
                    new AggregatorFactory[] {
                      new DoubleSumAggregatorFactory(TestIndex.METRICS[0], TestIndex.METRICS[0]),
                      new HyperUniquesAggregatorFactory("quality_uniques", "quality")
                    },
                    new UniformGranularitySpec(
                        Granularity.MONTH,
                        QueryGranularity.DAY,
                        ImmutableList.<Interval>of(interval)),
                    HadoopDruidIndexerConfig.JSON_MAPPER),
                new HadoopIOConfig(
                    ImmutableMap.<String, Object>of(
                        "type", "static", "paths", tmpInputFile.getAbsolutePath()),
                    metadataStorageUpdaterJobSpec,
                    tmpSegmentDir.getAbsolutePath()),
                new HadoopTuningConfig(
                    scratchFileDir.getAbsolutePath(),
                    null,
                    null,
                    null,
                    null,
                    null,
                    false,
                    false,
                    false,
                    false,
                    null,
                    false,
                    false,
                    false,
                    null,
                    null,
                    false)));
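    // Grab the Derby metadata tables config and connector from the test rule.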
    metadataStorageTablesConfigSupplier = derbyConnectorRule.metadataTablesConfigSupplier();
    connector = derbyConnectorRule.getConnector();
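    // Drop any segments table left over from a previous run so this run starts clean.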
    try {
      connector
          .getDBI()
          .withHandle(
              new HandleCallback<Void>() {
                @Override
                public Void withHandle(Handle handle) throws Exception {
                  handle.execute("DROP TABLE druid_segments");
                  return null;
                }
              });
    } catch (CallbackFailedException e) {
      // The table may not exist yet; the failure is safe to ignore.
    }
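    // Run three jobs in order: create the segments table, determine the Hadoop job
    // configuration (partitioning/shard specs), then run the indexer, publishing
    // segment metadata through the SQL updater handler.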
    List<Jobby> jobs =
        ImmutableList.of(
            new Jobby() {
              @Override
              public boolean run() {
                connector.createSegmentTable(metadataStorageUpdaterJobSpec.getSegmentTable());
                return true;
              }
            },
            new HadoopDruidDetermineConfigurationJob(hadoopDruidIndexerConfig),
            new HadoopDruidIndexerJob(
                hadoopDruidIndexerConfig, new SQLMetadataStorageUpdaterJobHandler(connector)));
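    // Execute the job chain; the sample data is indexed into the temp segment directory here.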
    JobHelper.runJobs(jobs, hadoopDruidIndexerConfig);
  }