Example #1
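A query-builder helper from a Druid SelectQuery test: it assembles a select query over the full test interval with empty dimension and metric lists, ALL granularity, a page size of 3, and the enclosing test class's descending flag (defined outside this excerpt).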
 private Druids.SelectQueryBuilder newTestQuery() {
   return Druids.newSelectQueryBuilder()
       .dataSource(new TableDataSource(QueryRunnerTestHelper.dataSource))
       .dimensionSpecs(DefaultDimensionSpec.toSpec(Arrays.<String>asList()))
       .metrics(Arrays.<String>asList())
       .intervals(QueryRunnerTestHelper.fullOnInterval)
       .granularity(QueryRunnerTestHelper.allGran)
       .pagingSpec(PagingSpec.newSpec(3))
       .descending(descending);
 }
Example #2
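A ServerManager test helper (serverManager, serverManagerExec, and factory are fields of the enclosing test class): it runs a SearchQuery on a background thread and asserts that the segments actually queried match the expected (version, interval) pairs, in order.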
  private Future<?> assertQueryable(
      QueryGranularity granularity,
      String dataSource,
      Interval interval,
      List<Pair<String, Interval>> expected) {
    final Iterator<Pair<String, Interval>> expectedIter = expected.iterator();
    final List<Interval> intervals = Arrays.asList(interval);
    final SearchQuery query =
        Druids.newSearchQueryBuilder()
            .dataSource(dataSource)
            .intervals(intervals)
            .granularity(granularity)
            .limit(10000)
            .query("wow")
            .build();
    final QueryRunner<Result<SearchResultValue>> runner =
        serverManager.getQueryRunnerForIntervals(query, intervals);

    return serverManagerExec.submit(
        new Runnable() {
          @Override
          public void run() {
            Sequence<Result<SearchResultValue>> seq = runner.run(query);
            Sequences.toList(seq, Lists.<Result<SearchResultValue>>newArrayList());
            Iterator<SegmentForTesting> adaptersIter = factory.getAdapters().iterator();

            while (expectedIter.hasNext() && adaptersIter.hasNext()) {
              Pair<String, Interval> expectedVals = expectedIter.next();
              SegmentForTesting value = adaptersIter.next();

              Assert.assertEquals(expectedVals.lhs, value.getVersion());
              Assert.assertEquals(expectedVals.rhs, value.getInterval());
            }

            Assert.assertFalse(expectedIter.hasNext());
            Assert.assertFalse(adaptersIter.hasNext());
          }
        });
  }
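Example #3
A benchmark setup method that registers four TimeseriesQuery variants for the "basic" schema: a plain multi-aggregator query (basic.A), two queries filtered on the __time column through a BoundDimFilter (numeric vs. alphanumeric comparator), and one that narrows the query interval itself.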
  private void setupQueries() {
    // queries for the basic schema
    Map<String, TimeseriesQuery> basicQueries = new LinkedHashMap<>();
    BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");

    { // basic.A
      QuerySegmentSpec intervalSpec =
          new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));

      List<AggregatorFactory> queryAggs = new ArrayList<>();
      queryAggs.add(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"));
      queryAggs.add(new LongMaxAggregatorFactory("maxLongUniform", "maxLongUniform"));
      queryAggs.add(new DoubleSumAggregatorFactory("sumFloatNormal", "sumFloatNormal"));
      queryAggs.add(new DoubleMinAggregatorFactory("minFloatZipf", "minFloatZipf"));
      queryAggs.add(new HyperUniquesAggregatorFactory("hyperUniquesMet", "hyper"));

      TimeseriesQuery queryA =
          Druids.newTimeseriesQueryBuilder()
              .dataSource("blah")
              .granularity(QueryGranularities.ALL)
              .intervals(intervalSpec)
              .aggregators(queryAggs)
              .descending(false)
              .build();

      basicQueries.put("A", queryA);
    }
    { // basic.timeFilterNumeric
      QuerySegmentSpec intervalSpec =
          new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));

      List<AggregatorFactory> queryAggs = new ArrayList<>();
      LongSumAggregatorFactory lsaf =
          new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
      BoundDimFilter timeFilter =
          new BoundDimFilter(
              Column.TIME_COLUMN_NAME,
              "200000",
              "300000",
              false,
              false,
              null,
              null,
              StringComparators.NUMERIC);
      queryAggs.add(new FilteredAggregatorFactory(lsaf, timeFilter));

      TimeseriesQuery timeFilterQuery =
          Druids.newTimeseriesQueryBuilder()
              .dataSource("blah")
              .granularity(QueryGranularities.ALL)
              .intervals(intervalSpec)
              .aggregators(queryAggs)
              .descending(false)
              .build();

      basicQueries.put("timeFilterNumeric", timeFilterQuery);
    }
    { // basic.timeFilterAlphanumeric
      QuerySegmentSpec intervalSpec =
          new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));

      List<AggregatorFactory> queryAggs = new ArrayList<>();
      LongSumAggregatorFactory lsaf =
          new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
      BoundDimFilter timeFilter =
          new BoundDimFilter(
              Column.TIME_COLUMN_NAME,
              "200000",
              "300000",
              false,
              false,
              null,
              null,
              StringComparators.ALPHANUMERIC);
      queryAggs.add(new FilteredAggregatorFactory(lsaf, timeFilter));

      TimeseriesQuery timeFilterQuery =
          Druids.newTimeseriesQueryBuilder()
              .dataSource("blah")
              .granularity(QueryGranularities.ALL)
              .intervals(intervalSpec)
              .aggregators(queryAggs)
              .descending(false)
              .build();

      basicQueries.put("timeFilterAlphanumeric", timeFilterQuery);
    }
    { // basic.timeFilterByInterval
      QuerySegmentSpec intervalSpec =
          new MultipleIntervalSegmentSpec(Arrays.asList(new Interval(200000, 300000)));
      List<AggregatorFactory> queryAggs = new ArrayList<>();
      LongSumAggregatorFactory lsaf =
          new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
      queryAggs.add(lsaf);

      TimeseriesQuery timeFilterQuery =
          Druids.newTimeseriesQueryBuilder()
              .dataSource("blah")
              .granularity(QueryGranularities.ALL)
              .intervals(intervalSpec)
              .aggregators(queryAggs)
              .descending(false)
              .build();

      basicQueries.put("timeFilterByInterval", timeFilterQuery);
    }

    SCHEMA_QUERY_MAP.put("basic", basicQueries);
  }
Example #4
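A DirectDruidClient test that mocks the HttpClient so the first call returns a completable result, the second a future that is failed to simulate a read timeout, and later calls never resolve; it then checks the open-connection counts and that the ServerSelector picks the less-loaded server.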
  @Test
  public void testRun() throws Exception {
    HttpClient httpClient = EasyMock.createMock(HttpClient.class);
    final URL url = new URL("http://foo/druid/v2/");

    SettableFuture<InputStream> futureResult = SettableFuture.create();
    Capture<Request> capturedRequest = EasyMock.newCapture();
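    // Three stubbed calls follow: the first returns futureResult (completed with a result
    // payload further down), the second returns futureException (failed with a read
    // timeout), and any later call gets a fresh future that never resolves.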
    EasyMock.expect(
            httpClient.go(
                EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(futureResult)
        .times(1);

    SettableFuture futureException = SettableFuture.create();
    EasyMock.expect(
            httpClient.go(
                EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(futureException)
        .times(1);

    EasyMock.expect(
            httpClient.go(
                EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(SettableFuture.create())
        .atLeastOnce();

    EasyMock.replay(httpClient);

    final ServerSelector serverSelector =
        new ServerSelector(
            new DataSegment(
                "test",
                new Interval("2013-01-01/2013-01-02"),
                new DateTime("2013-01-01").toString(),
                Maps.<String, Object>newHashMap(),
                Lists.<String>newArrayList(),
                Lists.<String>newArrayList(),
                NoneShardSpec.instance(),
                0,
                0L),
            new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy()));

    DirectDruidClient client1 =
        new DirectDruidClient(
            new ReflectionQueryToolChestWarehouse(),
            QueryRunnerTestHelper.NOOP_QUERYWATCHER,
            new DefaultObjectMapper(),
            httpClient,
            "foo",
            new NoopServiceEmitter());
    DirectDruidClient client2 =
        new DirectDruidClient(
            new ReflectionQueryToolChestWarehouse(),
            QueryRunnerTestHelper.NOOP_QUERYWATCHER,
            new DefaultObjectMapper(),
            httpClient,
            "foo2",
            new NoopServiceEmitter());

    QueryableDruidServer queryableDruidServer1 =
        new QueryableDruidServer(
            new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
            client1);
    serverSelector.addServerAndUpdateSegment(queryableDruidServer1, serverSelector.getSegment());
    QueryableDruidServer queryableDruidServer2 =
        new QueryableDruidServer(
            new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
            client2);
    serverSelector.addServerAndUpdateSegment(queryableDruidServer2, serverSelector.getSegment());

    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
    HashMap<String, List> context = Maps.newHashMap();
    Sequence s1 = client1.run(query, context);
    Assert.assertTrue(capturedRequest.hasCaptured());
    Assert.assertEquals(url, capturedRequest.getValue().getUrl());
    Assert.assertEquals(HttpMethod.POST, capturedRequest.getValue().getMethod());
    Assert.assertEquals(1, client1.getNumOpenConnections());

    // simulate read timeout
    Sequence s2 = client1.run(query, context);
    Assert.assertEquals(2, client1.getNumOpenConnections());
    futureException.setException(new ReadTimeoutException());
    Assert.assertEquals(1, client1.getNumOpenConnections());

    // subsequent connections should work
    Sequence s3 = client1.run(query, context);
    Sequence s4 = client1.run(query, context);
    Sequence s5 = client1.run(query, context);

    Assert.assertEquals(4, client1.getNumOpenConnections());

    // produce result for first connection
    futureResult.set(
        new ByteArrayInputStream(
            "[{\"timestamp\":\"2014-01-01T01:02:03Z\", \"result\": 42.0}]".getBytes()));
    List<Result> results = Sequences.toList(s1, Lists.<Result>newArrayList());
    Assert.assertEquals(1, results.size());
    Assert.assertEquals(new DateTime("2014-01-01T01:02:03Z"), results.get(0).getTimestamp());
    Assert.assertEquals(3, client1.getNumOpenConnections());

    client2.run(query, context);
    client2.run(query, context);

    Assert.assertEquals(2, client2.getNumOpenConnections());

    Assert.assertSame(queryableDruidServer2, serverSelector.pick());

    EasyMock.verify(httpClient);
  }
Example #5
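Verifies error propagation in DirectDruidClient: an error payload in the response body should surface as a QueryInterruptedException whose cause message and host match the server that produced it.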
  @Test
  public void testQueryInterruptionExceptionLogMessage() throws JsonProcessingException {
    HttpClient httpClient = EasyMock.createMock(HttpClient.class);
    SettableFuture<Object> interruptionFuture = SettableFuture.create();
    Capture<Request> capturedRequest = EasyMock.newCapture();
    String hostName = "localhost:8080";
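    // Every request receives the same future; it is completed below with an error payload.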
    EasyMock.expect(
            httpClient.go(
                EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(interruptionFuture)
        .anyTimes();

    EasyMock.replay(httpClient);

    DataSegment dataSegment =
        new DataSegment(
            "test",
            new Interval("2013-01-01/2013-01-02"),
            new DateTime("2013-01-01").toString(),
            Maps.<String, Object>newHashMap(),
            Lists.<String>newArrayList(),
            Lists.<String>newArrayList(),
            NoneShardSpec.instance(),
            0,
            0L);
    final ServerSelector serverSelector =
        new ServerSelector(
            dataSegment,
            new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy()));

    DirectDruidClient client1 =
        new DirectDruidClient(
            new ReflectionQueryToolChestWarehouse(),
            QueryRunnerTestHelper.NOOP_QUERYWATCHER,
            new DefaultObjectMapper(),
            httpClient,
            hostName,
            new NoopServiceEmitter());

    QueryableDruidServer queryableDruidServer =
        new QueryableDruidServer(
            new DruidServer("test1", hostName, 0, "historical", DruidServer.DEFAULT_TIER, 0),
            client1);

    serverSelector.addServerAndUpdateSegment(queryableDruidServer, dataSegment);

    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
    HashMap<String, List> context = Maps.newHashMap();
    interruptionFuture.set(new ByteArrayInputStream("{\"error\":\"testing\"}".getBytes()));
    Sequence results = client1.run(query, context);

    QueryInterruptedException actualException = null;
    try {
      Sequences.toList(results, Lists.newArrayList());
    } catch (QueryInterruptedException e) {
      actualException = e;
    }
    Assert.assertNotNull(actualException);
    Assert.assertEquals(QueryInterruptedException.UNKNOWN_EXCEPTION, actualException.getMessage());
    Assert.assertEquals("testing", actualException.getCauseMessage());
    Assert.assertEquals(hostName, actualException.getHost());
    EasyMock.verify(httpClient);
  }
Example #6
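Exercises query cancellation in DirectDruidClient: the query's future comes back already cancelled, the client issues a DELETE cancellation request, and consuming the result sequence throws QueryInterruptedException.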
  @Test
  public void testCancel() throws Exception {
    HttpClient httpClient = EasyMock.createStrictMock(HttpClient.class);

    Capture<Request> capturedRequest = EasyMock.newCapture();
    ListenableFuture<Object> cancelledFuture = Futures.immediateCancelledFuture();
    SettableFuture<Object> cancellationFuture = SettableFuture.create();
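    // The first call (the query itself) returns an already-cancelled future; the second
    // (the cancellation request) returns a future completed below with an OK response.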

    EasyMock.expect(
            httpClient.go(
                EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(cancelledFuture)
        .once();

    EasyMock.expect(
            httpClient.go(
                EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
        .andReturn(cancellationFuture)
        .once();

    EasyMock.replay(httpClient);

    final ServerSelector serverSelector =
        new ServerSelector(
            new DataSegment(
                "test",
                new Interval("2013-01-01/2013-01-02"),
                new DateTime("2013-01-01").toString(),
                Maps.<String, Object>newHashMap(),
                Lists.<String>newArrayList(),
                Lists.<String>newArrayList(),
                NoneShardSpec.instance(),
                0,
                0L),
            new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy()));

    DirectDruidClient client1 =
        new DirectDruidClient(
            new ReflectionQueryToolChestWarehouse(),
            QueryRunnerTestHelper.NOOP_QUERYWATCHER,
            new DefaultObjectMapper(),
            httpClient,
            "foo",
            new NoopServiceEmitter());

    QueryableDruidServer queryableDruidServer1 =
        new QueryableDruidServer(
            new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
            client1);
    serverSelector.addServerAndUpdateSegment(queryableDruidServer1, serverSelector.getSegment());

    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
    HashMap<String, List> context = Maps.newHashMap();
    cancellationFuture.set(
        new StatusResponseHolder(HttpResponseStatus.OK, new StringBuilder("cancelled")));
    Sequence results = client1.run(query, context);
    Assert.assertEquals(HttpMethod.DELETE, capturedRequest.getValue().getMethod());
    Assert.assertEquals(0, client1.getNumOpenConnections());

    QueryInterruptedException exception = null;
    try {
      Sequences.toList(results, Lists.newArrayList());
    } catch (QueryInterruptedException e) {
      exception = e;
    }
    Assert.assertNotNull(exception);

    EasyMock.verify(httpClient);
  }
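Example #7
Tests a JavaScript-based tiered-broker selection strategy (jsStrategy is a field of the enclosing test class): simple queries map to no specific broker, a TopN query with three aggregators routes to the "slow" broker, and once the tier map is cleared the default broker service name is returned.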
  @Test
  public void testGetBrokerServiceName() throws Exception {
    final LinkedHashMap<String, String> tierBrokerMap = new LinkedHashMap<>();
    tierBrokerMap.put("fast", "druid/fastBroker");
    // note: this second put overwrites the "fast" entry above, leaving two tiers in the map
    tierBrokerMap.put("fast", "druid/broker");
    tierBrokerMap.put("slow", "druid/slowBroker");

    final TieredBrokerConfig tieredBrokerConfig =
        new TieredBrokerConfig() {
          @Override
          public String getDefaultBrokerServiceName() {
            return "druid/broker";
          }

          @Override
          public LinkedHashMap<String, String> getTierToBrokerMap() {
            return tierBrokerMap;
          }
        };

    final TopNQueryBuilder queryBuilder =
        new TopNQueryBuilder()
            .dataSource("test")
            .intervals("2014/2015")
            .dimension("bigdim")
            .metric("count")
            .threshold(1)
            .aggregators(ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("count")));

    Assert.assertEquals(
        Optional.absent(),
        jsStrategy.getBrokerServiceName(tieredBrokerConfig, queryBuilder.build()));

    Assert.assertEquals(
        Optional.absent(),
        jsStrategy.getBrokerServiceName(
            tieredBrokerConfig,
            Druids.newTimeBoundaryQueryBuilder().dataSource("test").bound("maxTime").build()));

    Assert.assertEquals(
        Optional.of("druid/slowBroker"),
        jsStrategy.getBrokerServiceName(
            tieredBrokerConfig,
            queryBuilder
                .aggregators(
                    ImmutableList.of(
                        new CountAggregatorFactory("count"),
                        new LongSumAggregatorFactory("longSum", "a"),
                        new DoubleSumAggregatorFactory("doubleSum", "b")))
                .build()));

    // in absence of tiers, expect the default
    tierBrokerMap.clear();
    Assert.assertEquals(
        Optional.of("druid/broker"),
        jsStrategy.getBrokerServiceName(
            tieredBrokerConfig,
            queryBuilder
                .aggregators(
                    ImmutableList.of(
                        new CountAggregatorFactory("count"),
                        new LongSumAggregatorFactory("longSum", "a"),
                        new DoubleSumAggregatorFactory("doubleSum", "b")))
                .build()));
  }