void doEvaluate(Statement base) throws Throwable
{
  try {
    cluster = new TestingCluster(3);
    cluster.start();

    client = newClient(cluster.getConnectString(), new RetryOneTime(200 /* ms */));
    client.start();

    checkState(
        client.blockUntilConnected(5, TimeUnit.SECONDS),
        "failed to connect to ZooKeeper in 5 seconds"
    );

    base.evaluate();
  }
  catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new IllegalStateException("Interrupted while connecting to ZooKeeper", e);
  }
  finally {
    // Close in reverse order of creation, and guard against a failure partway
    // through setup leaving either field null (otherwise an NPE here would
    // mask the original exception).
    if (client != null) {
      client.close();
    }
    if (cluster != null) {
      cluster.close();
    }
  }
}
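/*
 * A minimal sketch of how doEvaluate() above is typically wired up; this
 * enclosing class is assumed context, not part of the original snippet. The
 * idea is that a JUnit TestRule stands up a fresh three-node TestingCluster
 * around every test. newClient(...) is assumed to be a static import of
 * CuratorFrameworkFactory.newClient, and checkState(...) of Guava's
 * Preconditions.checkState.
 */
class ZooKeeperClusterRule implements TestRule
{
  private TestingCluster cluster;
  private CuratorFramework client;

  @Override
  public Statement apply(final Statement base, Description description)
  {
    return new Statement()
    {
      @Override
      public void evaluate() throws Throwable
      {
        // Delegates to doEvaluate() above: start the cluster, connect,
        // run the wrapped test, then tear everything down.
        doEvaluate(base);
      }
    };
  }

  // doEvaluate(Statement) as defined above lives here.
}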
@Test
public void testRun() throws Exception
{
  // Local ("bridge-side") ZooKeeper cluster and client.
  TestingCluster localCluster = new TestingCluster(1);
  localCluster.start();

  CuratorFramework localCf = CuratorFrameworkFactory.builder()
      .connectString(localCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
      .build();
  localCf.start();

  // Remote ZooKeeper cluster and client that the bridge announces to.
  TestingCluster remoteCluster = new TestingCluster(1);
  remoteCluster.start();

  CuratorFramework remoteCf = CuratorFrameworkFactory.builder()
      .connectString(remoteCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
      .build();
  remoteCf.start();

  ObjectMapper jsonMapper = new DefaultObjectMapper();
  DruidClusterBridgeConfig config = new DruidClusterBridgeConfig()
  {
    @Override
    public String getTier()
    {
      return DruidServer.DEFAULT_TIER;
    }

    @Override
    public Duration getStartDelay()
    {
      return new Duration(0);
    }

    @Override
    public Duration getPeriod()
    {
      return new Duration(Long.MAX_VALUE);
    }

    @Override
    public String getBrokerServiceName()
    {
      return "testz0rz";
    }

    @Override
    public int getPriority()
    {
      return 0;
    }
  };

  ScheduledExecutorFactory factory = ScheduledExecutors.createFactory(new Lifecycle());

  DruidNode me = new DruidNode("me", "localhost", 8080);
  AtomicReference<LeaderLatch> leaderLatch = new AtomicReference<>(new LeaderLatch(localCf, "test"));

  ZkPathsConfig zkPathsConfig = new ZkPathsConfig()
  {
    @Override
    public String getZkBasePath()
    {
      return "/druid";
    }
  };
  DruidServerMetadata metadata = new DruidServerMetadata(
      "test",
      "localhost",
      1000,
      "bridge",
      DruidServer.DEFAULT_TIER,
      0
  );

  // Collaborators not exercised directly in this test are mocked out.
  DbSegmentPublisher dbSegmentPublisher = EasyMock.createMock(DbSegmentPublisher.class);
  EasyMock.replay(dbSegmentPublisher);
  DatabaseSegmentManager databaseSegmentManager = EasyMock.createMock(DatabaseSegmentManager.class);
  EasyMock.replay(databaseSegmentManager);
  ServerView serverView = EasyMock.createMock(ServerView.class);
  EasyMock.replay(serverView);

  BridgeZkCoordinator bridgeZkCoordinator = new BridgeZkCoordinator(
      jsonMapper,
      zkPathsConfig,
      new SegmentLoaderConfig(),
      metadata,
      remoteCf,
      dbSegmentPublisher,
      databaseSegmentManager,
      serverView
  );

  Announcer announcer = new Announcer(remoteCf, Executors.newSingleThreadExecutor());
  announcer.start();
  announcer.announce(
      zkPathsConfig.getAnnouncementsPath() + "/" + me.getHost(),
      jsonMapper.writeValueAsBytes(me)
  );

  BatchDataSegmentAnnouncer batchDataSegmentAnnouncer = EasyMock.createMock(BatchDataSegmentAnnouncer.class);
  BatchServerInventoryView batchServerInventoryView = EasyMock.createMock(BatchServerInventoryView.class);
  // The bridge should aggregate the sizes of the inventory below (117 + 1).
  EasyMock.expect(batchServerInventoryView.getInventory()).andReturn(
      Arrays.asList(
          new DruidServer("1", "localhost", 117, "historical", DruidServer.DEFAULT_TIER, 0),
          new DruidServer("2", "localhost", 1, "historical", DruidServer.DEFAULT_TIER, 0)
      )
  );
  batchServerInventoryView.registerSegmentCallback(
      EasyMock.<Executor>anyObject(),
      EasyMock.<ServerView.SegmentCallback>anyObject()
  );
  batchServerInventoryView.registerServerCallback(
      EasyMock.<Executor>anyObject(),
      EasyMock.<ServerView.ServerCallback>anyObject()
  );
  EasyMock.expectLastCall();
  batchServerInventoryView.start();
  EasyMock.expectLastCall();
  batchServerInventoryView.stop();
  EasyMock.expectLastCall();
  EasyMock.replay(batchServerInventoryView);

  DruidClusterBridge bridge = new DruidClusterBridge(
      jsonMapper,
      config,
      factory,
      me,
      localCf,
      leaderLatch,
      bridgeZkCoordinator,
      announcer,
      batchDataSegmentAnnouncer,
      batchServerInventoryView
  );

  bridge.start();
  // Wait (up to ~600 ms) for the bridge to win leader election.
  int retry = 0;
  while (!bridge.isLeader()) {
    if (retry > 5) {
      throw new ISE("Unable to become leader");
    }
    Thread.sleep(100);
    retry++;
  }

  // Wait for the bridge to announce itself on the remote cluster.
  String path = "/druid/announcements/localhost:8080";
  retry = 0;
  while (remoteCf.checkExists().forPath(path) == null) {
    if (retry > 5) {
      throw new ISE("Unable to announce");
    }
    Thread.sleep(100);
    retry++;
  }

  // Wait for the announced node to reflect the aggregated inventory.
  boolean verified = verifyUpdate(jsonMapper, path, remoteCf);
  retry = 0;
  while (!verified) {
    if (retry > 5) {
      throw new ISE("No updates to bridge node occurred");
    }
    Thread.sleep(100);
    retry++;
    verified = verifyUpdate(jsonMapper, path, remoteCf);
  }

  announcer.stop();
  bridge.stop();

  remoteCf.close();
  remoteCluster.close();
  localCf.close();
  localCluster.close();

  EasyMock.verify(batchServerInventoryView);
  EasyMock.verify(dbSegmentPublisher);
  EasyMock.verify(databaseSegmentManager);
  EasyMock.verify(serverView);
}
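/*
 * verifyUpdate() is not shown in this snippet. A plausible sketch, assuming
 * the bridge serializes its DruidServerMetadata into the announcement node:
 * re-read the node and check that the reported max size equals the aggregate
 * of the mocked inventory above (117 + 1 = 118). The exact assertion is an
 * assumption for illustration.
 */
private boolean verifyUpdate(ObjectMapper jsonMapper, String path, CuratorFramework remoteCf)
    throws Exception
{
  DruidServerMetadata announced = jsonMapper.readValue(
      remoteCf.getData().forPath(path),
      DruidServerMetadata.class
  );
  return announced.getMaxSize() == 118;
}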
@After
public void tearDown() throws Exception
{
  workerTaskMonitor.stop();
  cf.close();
  testingCluster.stop();
}
/**
 * Tests starting the Kaa node server application.
 *
 * @throws Exception the exception
 */
@Test
public void testStartKaaNodeServerApplication() throws Exception {
  TestingCluster zkCluster = null;
  TTransport transport = null;
  Thread kaaNodeServerLauncherThread = null;
  KaaNodeThriftService.Client client = null;
  CuratorFramework zkClient = null;
  CountDownLatch latch = new CountDownLatch(1);
  boolean kaaNodeStarted = false;
  TestKaaNodeLauncher launcher = new TestKaaNodeLauncher();
  try {
    zkCluster = new TestingCluster(new InstanceSpec(null, 2185, -1, -1, true, -1, -1, -1));
    zkCluster.start();

    zkClient = CuratorFrameworkFactory.newClient(zkCluster.getConnectString(), new RetryOneTime(100));
    zkClient.start();

    kaaNodeServerLauncherThread = new Thread(launcher);
    kaaNodeServerLauncherThread.start();

    // Watch the operations server ZooKeeper path and wait for the node to register.
    OperationsNodeStartupListener operationsNodeStartupListener = new OperationsNodeStartupListener();
    zkClient.getCuratorListenable().addListener(operationsNodeStartupListener);
    zkClient.getChildren().inBackground(latch).forPath(OPERATIONS_SERVER_NODE_PATH);

    // Wait for the operations service to start.
    kaaNodeStarted = latch.await(KAA_NODE_START_TIMEOUT_SEC, TimeUnit.SECONDS);
    zkClient.getCuratorListenable().removeListener(operationsNodeStartupListener);

    // Connect over Thrift and ask the node to shut itself down.
    transport = new TSocket(HOST, PORT);
    TProtocol protocol = new TBinaryProtocol(transport);
    TMultiplexedProtocol mp = new TMultiplexedProtocol(
        protocol, KaaThriftService.KAA_NODE_SERVICE.getServiceName());
    client = new KaaNodeThriftService.Client(mp);
    transport.open();
    client.shutdown();
  } finally {
    boolean shutdownFailed = false;
    Closeables.close(zkClient, true);
    if (transport != null && transport.isOpen()) {
      Closeables.close(transport, true);
    }
    if (kaaNodeServerLauncherThread != null) {
      kaaNodeServerLauncherThread.join(30000); // 30 s; should match KAA_NODE_STOP_TIMEOUT_SEC
      shutdownFailed = kaaNodeServerLauncherThread.isAlive();
    }
    Closeables.close(zkCluster, true);
    ConfigurableApplicationContext appContext = launcher.getApplicationContext();
    if (appContext.isActive()) {
      Closeables.close(appContext, true);
    }
    if (!kaaNodeStarted) {
      throw new TimeoutException(
          "Timeout (" + KAA_NODE_START_TIMEOUT_SEC
              + " sec) occurred while waiting for the Kaa node server to start!");
    } else if (shutdownFailed) {
      throw new TimeoutException(
          "Timeout (" + KAA_NODE_STOP_TIMEOUT_SEC
              + " sec) occurred while waiting for the Kaa node server shutdown thread!");
    }
  }
}
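/*
 * OperationsNodeStartupListener is not shown in this snippet. A minimal
 * sketch of what it plausibly does, assuming the latch passed via
 * inBackground(latch) above comes back as the event context: on each
 * CHILDREN event it counts the latch down once an operations node has
 * registered, and otherwise re-issues the background getChildren() call to
 * keep polling. The details are an assumption for illustration.
 */
private static class OperationsNodeStartupListener implements CuratorListener {
  @Override
  public void eventReceived(CuratorFramework client, CuratorEvent event) throws Exception {
    if (event.getType() == CuratorEventType.CHILDREN) {
      if (event.getChildren() == null || event.getChildren().isEmpty()) {
        // No operations node yet; poll the same path again.
        client.getChildren().inBackground(event.getContext()).forPath(event.getPath());
      } else {
        ((CountDownLatch) event.getContext()).countDown();
      }
    }
  }
}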
@Before
public void setUp() throws Exception
{
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  cf = CuratorFrameworkFactory.builder()
      .connectString(testingCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
      .build();
  cf.start();
  cf.create().creatingParentsIfNeeded().forPath(basePath);

  worker = new Worker("worker", "localhost", 3, "0");

  workerCuratorCoordinator = new WorkerCuratorCoordinator(
      jsonMapper,
      new IndexerZkConfig(
          new ZkPathsConfig()
          {
            @Override
            public String getBase()
            {
              return basePath;
            }
          },
          null,
          null,
          null,
          null,
          null
      ),
      new TestRemoteTaskRunnerConfig(new Period("PT1S")),
      cf,
      worker
  );
  workerCuratorCoordinator.start();

  // Start a task monitor
  workerTaskMonitor = createTaskMonitor();
  jsonMapper.registerSubtypes(new NamedType(TestMergeTask.class, "test"));
  jsonMapper.registerSubtypes(new NamedType(TestRealtimeTask.class, "test_realtime"));
  workerTaskMonitor.start();

  task = TestMergeTask.createDummyTask("test");
}
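/*
 * The fields referenced in setUp()/tearDown() are assumed to be declared on
 * the enclosing test class; a minimal sketch. The basePath value and the
 * createTaskMonitor() helper are assumptions, their real definitions are not
 * shown in this snippet.
 */
private static final String basePath = "/test/druid";
private final ObjectMapper jsonMapper = new DefaultObjectMapper();
private TestingCluster testingCluster;
private CuratorFramework cf;
private Worker worker;
private WorkerCuratorCoordinator workerCuratorCoordinator;
private WorkerTaskMonitor workerTaskMonitor;
private TestMergeTask task;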