Example 1
  /**
   * Guice provider: builds the ServiceEmitter from this node's @Self config and the injected
   * Emitter, registers it with EmittingLogger, and hands the instance to the Druid lifecycle
   * via @ManageLifecycle.
   */
  @Provides
  @ManageLifecycle
  public ServiceEmitter getServiceEmitter(
      @Self Supplier<DruidNode> configSupplier, Emitter emitter) {
    final DruidNode config = configSupplier.get();
    final ServiceEmitter retVal =
        new ServiceEmitter(config.getServiceName(), config.getHost(), emitter);
    EmittingLogger.registerEmitter(retVal);
    return retVal;
  }
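
For orientation only, here is a minimal wiring sketch for the provider above: a @Provides method takes effect once it is declared in a Guice module that is installed into an injector. The module name EmitterModule, the stubbed bindings, and the hand-rolled injector below are assumptions for illustration, not part of the original code; in Druid itself the provider is installed through the server's own module bootstrap.

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;

// Hypothetical module that would host the @Provides @ManageLifecycle method shown above.
public class EmitterModule extends AbstractModule {
  @Override
  protected void configure() {
    // The Emitter and @Self Supplier<DruidNode> bindings are assumed to be supplied
    // by other modules; the getServiceEmitter(...) provider would be declared in this class.
  }
}

// Usage sketch (assumption): resolve the emitter from an injector built over the module(s).
// Injector injector = Guice.createInjector(new EmitterModule() /*, other Druid modules */);
// ServiceEmitter emitter = injector.getInstance(ServiceEmitter.class);
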
  @Test
  public void testRun() throws Exception {
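    // Stand up an embedded "local" ZooKeeper test cluster and a Curator client for it.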
    TestingCluster localCluster = new TestingCluster(1);
    localCluster.start();

    CuratorFramework localCf =
        CuratorFrameworkFactory.builder()
            .connectString(localCluster.getConnectString())
            .retryPolicy(new ExponentialBackoffRetry(1, 10))
            .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
            .build();
    localCf.start();

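    // Stand up a second embedded ZooKeeper cluster representing the remote cluster the bridge announces into.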
    TestingCluster remoteCluster = new TestingCluster(1);
    remoteCluster.start();

    CuratorFramework remoteCf =
        CuratorFrameworkFactory.builder()
            .connectString(remoteCluster.getConnectString())
            .retryPolicy(new ExponentialBackoffRetry(1, 10))
            .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
            .build();
    remoteCf.start();

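    // Bridge configuration: default tier, no start delay, an effectively infinite period, and a test broker service name.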
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    DruidClusterBridgeConfig config =
        new DruidClusterBridgeConfig() {
          @Override
          public String getTier() {
            return DruidServer.DEFAULT_TIER;
          }

          @Override
          public Duration getStartDelay() {
            return new Duration(0);
          }

          @Override
          public Duration getPeriod() {
            return new Duration(Long.MAX_VALUE);
          }

          @Override
          public String getBrokerServiceName() {
            return "testz0rz";
          }

          @Override
          public int getPriority() {
            return 0;
          }
        };

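    // Scheduled executor factory, this node's identity, the leader latch, and the ZK base path used by the bridge.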
    ScheduledExecutorFactory factory = ScheduledExecutors.createFactory(new Lifecycle());

    DruidNode me = new DruidNode("me", "localhost", 8080);

    AtomicReference<LeaderLatch> leaderLatch =
        new AtomicReference<>(new LeaderLatch(localCf, "test"));

    ZkPathsConfig zkPathsConfig =
        new ZkPathsConfig() {
          @Override
          public String getZkBasePath() {
            return "/druid";
          }
        };
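    // Server metadata describing this bridge node, plus mocked segment publisher, segment manager, and server view.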
    DruidServerMetadata metadata =
        new DruidServerMetadata("test", "localhost", 1000, "bridge", DruidServer.DEFAULT_TIER, 0);
    DbSegmentPublisher dbSegmentPublisher = EasyMock.createMock(DbSegmentPublisher.class);
    EasyMock.replay(dbSegmentPublisher);
    DatabaseSegmentManager databaseSegmentManager =
        EasyMock.createMock(DatabaseSegmentManager.class);
    EasyMock.replay(databaseSegmentManager);
    ServerView serverView = EasyMock.createMock(ServerView.class);
    EasyMock.replay(serverView);

    BridgeZkCoordinator bridgeZkCoordinator =
        new BridgeZkCoordinator(
            jsonMapper,
            zkPathsConfig,
            new SegmentLoaderConfig(),
            metadata,
            remoteCf,
            dbSegmentPublisher,
            databaseSegmentManager,
            serverView);

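    // Announce this node's metadata under the remote cluster's announcements path.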
    Announcer announcer = new Announcer(remoteCf, Executors.newSingleThreadExecutor());
    announcer.start();
    announcer.announce(
        zkPathsConfig.getAnnouncementsPath() + "/" + me.getHost(),
        jsonMapper.writeValueAsBytes(me));

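    // Mock the segment announcer and the inventory view; the view reports two historical servers and expects
    // callback registration plus start/stop calls.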
    BatchDataSegmentAnnouncer batchDataSegmentAnnouncer =
        EasyMock.createMock(BatchDataSegmentAnnouncer.class);
    BatchServerInventoryView batchServerInventoryView =
        EasyMock.createMock(BatchServerInventoryView.class);
    EasyMock.expect(batchServerInventoryView.getInventory())
        .andReturn(
            Arrays.asList(
                new DruidServer("1", "localhost", 117, "historical", DruidServer.DEFAULT_TIER, 0),
                new DruidServer("2", "localhost", 1, "historical", DruidServer.DEFAULT_TIER, 0)));
    batchServerInventoryView.registerSegmentCallback(
        EasyMock.<Executor>anyObject(), EasyMock.<ServerView.SegmentCallback>anyObject());
    batchServerInventoryView.registerServerCallback(
        EasyMock.<Executor>anyObject(), EasyMock.<ServerView.ServerCallback>anyObject());
    EasyMock.expectLastCall();
    batchServerInventoryView.start();
    EasyMock.expectLastCall();
    batchServerInventoryView.stop();
    EasyMock.expectLastCall();
    EasyMock.replay(batchServerInventoryView);

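    // Assemble the DruidClusterBridge under test from the pieces above and start it.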
    DruidClusterBridge bridge =
        new DruidClusterBridge(
            jsonMapper,
            config,
            factory,
            me,
            localCf,
            leaderLatch,
            bridgeZkCoordinator,
            announcer,
            batchDataSegmentAnnouncer,
            batchServerInventoryView);

    bridge.start();

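    // Poll until this bridge instance becomes leader, failing after a handful of 100 ms retries.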
    int retry = 0;
    while (!bridge.isLeader()) {
      if (retry > 5) {
        throw new ISE("Unable to become leader");
      }

      Thread.sleep(100);
      retry++;
    }

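    // Wait until the bridge's announcement shows up under the remote cluster's announcements path.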
    String path = "/druid/announcements/localhost:8080";
    retry = 0;
    while (remoteCf.checkExists().forPath(path) == null) {
      if (retry > 5) {
        throw new ISE("Unable to announce");
      }

      Thread.sleep(100);
      retry++;
    }

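    // Poll until verifyUpdate sees the expected bridge node data on the remote cluster.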
    boolean verified = verifyUpdate(jsonMapper, path, remoteCf);
    retry = 0;
    while (!verified) {
      if (retry > 5) {
        throw new ISE("No updates to bridge node occurred");
      }

      Thread.sleep(100);
      retry++;

      verified = verifyUpdate(jsonMapper, path, remoteCf);
    }

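    // Shut everything down and verify that all mock expectations were met.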
    announcer.stop();
    bridge.stop();

    remoteCf.close();
    remoteCluster.close();
    localCf.close();
    localCluster.close();

    EasyMock.verify(batchServerInventoryView);
    EasyMock.verify(dbSegmentPublisher);
    EasyMock.verify(databaseSegmentManager);
    EasyMock.verify(serverView);
  }
Example 3
  @Test(timeout = 2000L)
  public void testOverlordRun() throws Exception {
    // basic task master lifecycle test
    taskMaster.start();
    announcementLatch.await();
    while (!taskMaster.isLeading()) {
      // Control should never actually reach this loop, but sleep briefly just to be safe.
      Thread.sleep(10);
    }
    Assert.assertEquals(taskMaster.getLeader(), druidNode.getHostAndPort());
    // Test Overlord resource stuff
    overlordResource =
        new OverlordResource(
            taskMaster,
            new TaskStorageQueryAdapter(taskStorage),
            null,
            null,
            null,
            new AuthConfig());
    Response response = overlordResource.getLeader();
    Assert.assertEquals(druidNode.getHostAndPort(), response.getEntity());

    final String taskId_0 = "0";
    NoopTask task_0 = new NoopTask(taskId_0, 0, 0, null, null, null);
    response = overlordResource.taskPost(task_0, req);
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(ImmutableMap.of("task", taskId_0), response.getEntity());

    // Duplicate task - should fail
    response = overlordResource.taskPost(task_0, req);
    Assert.assertEquals(400, response.getStatus());

    // Task payload for task_0 should be present in taskStorage
    response = overlordResource.getTaskPayload(taskId_0);
    Assert.assertEquals(task_0, ((Map) response.getEntity()).get("payload"));

    // Task not present in taskStorage - should fail
    response = overlordResource.getTaskPayload("whatever");
    Assert.assertEquals(404, response.getStatus());

    // Task status of the submitted task should be running
    response = overlordResource.getTaskStatus(taskId_0);
    Assert.assertEquals(taskId_0, ((Map) response.getEntity()).get("task"));
    Assert.assertEquals(
        TaskStatus.running(taskId_0).getStatusCode(),
        ((TaskStatus) ((Map) response.getEntity()).get("status")).getStatusCode());

    // Simulate completion of task_0
    taskCompletionCountDownLatches[Integer.parseInt(taskId_0)].countDown();
    // Wait for taskQueue to handle success status of task_0
    waitForTaskStatus(taskId_0, TaskStatus.Status.SUCCESS);

    // Manually insert task in taskStorage
    // Verifies sync from storage
    final String taskId_1 = "1";
    NoopTask task_1 = new NoopTask(taskId_1, 0, 0, null, null, null);
    taskStorage.insert(task_1, TaskStatus.running(taskId_1));
    // Wait for task runner to run task_1
    runTaskCountDownLatches[Integer.parseInt(taskId_1)].await();

    response = overlordResource.getRunningTasks(req);
    // 1 task that was manually inserted should be in running state
    Assert.assertEquals(1, (((List) response.getEntity()).size()));
    final OverlordResource.TaskResponseObject taskResponseObject =
        ((List<OverlordResource.TaskResponseObject>) response.getEntity()).get(0);
    Assert.assertEquals(taskId_1, taskResponseObject.toJson().get("id"));
    Assert.assertEquals(TASK_LOCATION, taskResponseObject.toJson().get("location"));

    // Simulate completion of task_1
    taskCompletionCountDownLatches[Integer.parseInt(taskId_1)].countDown();
    // Wait for taskQueue to handle success status of task_1
    waitForTaskStatus(taskId_1, TaskStatus.Status.SUCCESS);

    // getCompleteTasks should return the tasks that are no longer running (task_0 and task_1)
    response = overlordResource.getCompleteTasks(req);
    Assert.assertEquals(2, (((List) response.getEntity()).size()));
    taskMaster.stop();
    Assert.assertFalse(taskMaster.isLeading());
    EasyMock.verify(taskLockbox, taskActionClientFactory);
  }