@Inject
public SingleServerInventoryView(
    final ZkPathsConfig zkPaths,
    final CuratorFramework curator,
    final ObjectMapper jsonMapper,
    final Predicate<Pair<DruidServerMetadata, DataSegment>> defaultFilter
)
{
  super(
      log,
      zkPaths.getAnnouncementsPath(),
      zkPaths.getServedSegmentsPath(),
      curator,
      jsonMapper,
      new TypeReference<DataSegment>()
      {
      }
  );

  Preconditions.checkNotNull(defaultFilter);
  this.defaultFilter = defaultFilter;
}
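// Hedged usage sketch (assumed wiring, not from the source): constructing the view
// directly with a filter that accepts every (server, segment) pair. In production the
// constructor is @Inject-ed by Guice; the alwaysTrue() filter here is purely illustrative.
SingleServerInventoryView inventoryView = new SingleServerInventoryView(
    zkPaths,
    curator,
    jsonMapper,
    Predicates.<Pair<DruidServerMetadata, DataSegment>>alwaysTrue()
);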
private LeaderLatch createNewLeaderLatch()
{
  final LeaderLatch newLeaderLatch = new LeaderLatch(
      curator,
      ZKPaths.makePath(zkPaths.getCoordinatorPath(), COORDINATOR_OWNER_NODE),
      config.getHost()
  );

  newLeaderLatch.addListener(
      new LeaderLatchListener()
      {
        @Override
        public void isLeader()
        {
          DruidCoordinator.this.becomeLeader();
        }

        @Override
        public void notLeader()
        {
          DruidCoordinator.this.stopBeingLeader();
        }
      },
      Execs.singleThreaded("CoordinatorLeader-%s")
  );

  // getAndSet swaps in the new latch and returns the previous one, so the caller
  // can close the latch that was just replaced.
  return leaderLatch.getAndSet(newLeaderLatch);
}
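// Hedged caller sketch (assumed, not from this excerpt): swapping in a fresh latch and
// quietly closing the one it replaced, e.g. when re-contending for leadership.
final LeaderLatch oldLatch = createNewLeaderLatch();
if (oldLatch != null) {
  try {
    oldLatch.close();
  }
  catch (IOException e) {
    log.warn(e, "Failed to close old LeaderLatch");
  }
}
leaderLatch.get().start();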
@Test
public void testRun() throws Exception
{
  TestingCluster localCluster = new TestingCluster(1);
  localCluster.start();

  CuratorFramework localCf = CuratorFrameworkFactory
      .builder()
      .connectString(localCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
      .build();
  localCf.start();

  TestingCluster remoteCluster = new TestingCluster(1);
  remoteCluster.start();

  CuratorFramework remoteCf = CuratorFrameworkFactory
      .builder()
      .connectString(remoteCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
      .build();
  remoteCf.start();

  ObjectMapper jsonMapper = new DefaultObjectMapper();
  DruidClusterBridgeConfig config = new DruidClusterBridgeConfig()
  {
    @Override
    public String getTier()
    {
      return DruidServer.DEFAULT_TIER;
    }

    @Override
    public Duration getStartDelay()
    {
      return new Duration(0);
    }

    @Override
    public Duration getPeriod()
    {
      return new Duration(Long.MAX_VALUE);
    }

    @Override
    public String getBrokerServiceName()
    {
      return "testz0rz";
    }

    @Override
    public int getPriority()
    {
      return 0;
    }
  };

  ScheduledExecutorFactory factory = ScheduledExecutors.createFactory(new Lifecycle());

  DruidNode me = new DruidNode("me", "localhost", 8080);

  AtomicReference<LeaderLatch> leaderLatch = new AtomicReference<>(new LeaderLatch(localCf, "test"));

  ZkPathsConfig zkPathsConfig = new ZkPathsConfig()
  {
    @Override
    public String getZkBasePath()
    {
      return "/druid";
    }
  };

  DruidServerMetadata metadata = new DruidServerMetadata(
      "test",
      "localhost",
      1000,
      "bridge",
      DruidServer.DEFAULT_TIER,
      0
  );

  DbSegmentPublisher dbSegmentPublisher = EasyMock.createMock(DbSegmentPublisher.class);
  EasyMock.replay(dbSegmentPublisher);
  DatabaseSegmentManager databaseSegmentManager = EasyMock.createMock(DatabaseSegmentManager.class);
  EasyMock.replay(databaseSegmentManager);
  ServerView serverView = EasyMock.createMock(ServerView.class);
  EasyMock.replay(serverView);

  BridgeZkCoordinator bridgeZkCoordinator = new BridgeZkCoordinator(
      jsonMapper,
      zkPathsConfig,
      new SegmentLoaderConfig(),
      metadata,
      remoteCf,
      dbSegmentPublisher,
      databaseSegmentManager,
      serverView
  );

  Announcer announcer = new Announcer(remoteCf, Executors.newSingleThreadExecutor());
  announcer.start();
  announcer.announce(
      zkPathsConfig.getAnnouncementsPath() + "/" + me.getHost(),
      jsonMapper.writeValueAsBytes(me)
  );

  BatchDataSegmentAnnouncer batchDataSegmentAnnouncer = EasyMock.createMock(BatchDataSegmentAnnouncer.class);
  BatchServerInventoryView batchServerInventoryView = EasyMock.createMock(BatchServerInventoryView.class);
  EasyMock.expect(batchServerInventoryView.getInventory()).andReturn(
      Arrays.asList(
          new DruidServer("1", "localhost", 117, "historical", DruidServer.DEFAULT_TIER, 0),
          new DruidServer("2", "localhost", 1, "historical", DruidServer.DEFAULT_TIER, 0)
      )
  );
  batchServerInventoryView.registerSegmentCallback(
      EasyMock.<Executor>anyObject(),
      EasyMock.<ServerView.SegmentCallback>anyObject()
  );
  batchServerInventoryView.registerServerCallback(
      EasyMock.<Executor>anyObject(),
      EasyMock.<ServerView.ServerCallback>anyObject()
  );
  EasyMock.expectLastCall();
  batchServerInventoryView.start();
  EasyMock.expectLastCall();
  batchServerInventoryView.stop();
  EasyMock.expectLastCall();
  EasyMock.replay(batchServerInventoryView);

  DruidClusterBridge bridge = new DruidClusterBridge(
      jsonMapper,
      config,
      factory,
      me,
      localCf,
      leaderLatch,
      bridgeZkCoordinator,
      announcer,
      batchDataSegmentAnnouncer,
      batchServerInventoryView
  );

  bridge.start();

  int retry = 0;
  while (!bridge.isLeader()) {
    if (retry > 5) {
      throw new ISE("Unable to become leader");
    }
    Thread.sleep(100);
    retry++;
  }

  String path = "/druid/announcements/localhost:8080";
  retry = 0;
  while (remoteCf.checkExists().forPath(path) == null) {
    if (retry > 5) {
      throw new ISE("Unable to announce");
    }
    Thread.sleep(100);
    retry++;
  }

  boolean verified = verifyUpdate(jsonMapper, path, remoteCf);
  retry = 0;
  while (!verified) {
    if (retry > 5) {
      throw new ISE("No updates to bridge node occurred");
    }
    Thread.sleep(100);
    retry++;
    verified = verifyUpdate(jsonMapper, path, remoteCf);
  }

  announcer.stop();
  bridge.stop();

  remoteCf.close();
  remoteCluster.close();
  localCf.close();
  localCluster.close();

  EasyMock.verify(batchServerInventoryView);
  EasyMock.verify(dbSegmentPublisher);
  EasyMock.verify(databaseSegmentManager);
  EasyMock.verify(serverView);
}
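// verifyUpdate() is referenced above but not shown in this excerpt. A plausible sketch,
// assuming it checks that the bridge node announced at `path` reflects the combined
// maxSize of the two mocked historicals (117 + 1 = 118); the exact check is an assumption.
private static boolean verifyUpdate(ObjectMapper jsonMapper, String path, CuratorFramework remoteCf)
    throws Exception
{
  DruidServerMetadata announced = jsonMapper.readValue(
      remoteCf.getData().forPath(path),
      DruidServerMetadata.class
  );
  return announced.getMaxSize() == 118;
}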
public void moveSegment(String from, String to, String segmentName, final LoadPeonCallback callback)
{
  try {
    final DruidServer fromServer = serverInventoryView.getInventoryValue(from);
    if (fromServer == null) {
      throw new IAE("Unable to find server [%s]", from);
    }

    final DruidServer toServer = serverInventoryView.getInventoryValue(to);
    if (toServer == null) {
      throw new IAE("Unable to find server [%s]", to);
    }

    if (to.equalsIgnoreCase(from)) {
      throw new IAE("Redundant command to move segment [%s] from [%s] to [%s]", segmentName, from, to);
    }

    final DataSegment segment = fromServer.getSegment(segmentName);
    if (segment == null) {
      throw new IAE("Unable to find segment [%s] on server [%s]", segmentName, from);
    }

    final LoadQueuePeon loadPeon = loadManagementPeons.get(to);
    if (loadPeon == null) {
      throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", to);
    }

    final LoadQueuePeon dropPeon = loadManagementPeons.get(from);
    if (dropPeon == null) {
      throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", from);
    }

    final ServerHolder toHolder = new ServerHolder(toServer, loadPeon);
    if (toHolder.getAvailableSize() < segment.getSize()) {
      throw new IAE(
          "Not enough capacity on server [%s] for segment [%s]. Required: %,d, available: %,d.",
          to,
          segment,
          segment.getSize(),
          toHolder.getAvailableSize()
      );
    }

    final String toLoadQueueSegPath =
        ZKPaths.makePath(ZKPaths.makePath(zkPaths.getLoadQueuePath(), to), segmentName);
    final String toServedSegPath = ZKPaths.makePath(
        ZKPaths.makePath(serverInventoryView.getInventoryManagerConfig().getInventoryPath(), to),
        segmentName
    );

    loadPeon.loadSegment(
        segment,
        new LoadPeonCallback()
        {
          // Must be public: the callback is invoked from outside this anonymous class.
          @Override
          public void execute()
          {
            try {
              // Drop from the source server only once the segment is served by the
              // target and no longer pending in the target's load queue.
              if (curator.checkExists().forPath(toServedSegPath) != null &&
                  curator.checkExists().forPath(toLoadQueueSegPath) == null &&
                  !dropPeon.getSegmentsToDrop().contains(segment)) {
                dropPeon.dropSegment(segment, callback);
              } else if (callback != null) {
                callback.execute();
              }
            }
            catch (Exception e) {
              throw Throwables.propagate(e);
            }
          }
        }
    );
  }
  catch (Exception e) {
    log.makeAlert(e, "Exception moving segment %s", segmentName).emit();
    // Guard against a null callback, mirroring the check in the load-completion path.
    if (callback != null) {
      callback.execute();
    }
  }
}
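// Hedged usage sketch (assumed caller, not from the source): moving a segment between
// two historicals and logging on completion. The `coordinator` variable, server names,
// and segment id are all illustrative.
coordinator.moveSegment(
    "localhost:8081",
    "localhost:8082",
    "wikipedia_2013-01-01T00:00:00.000Z_2013-01-02T00:00:00.000Z_2013-01-01T00:00:00.000Z",
    new LoadPeonCallback()
    {
      @Override
      public void execute()
      {
        log.info("Segment move completed");
      }
    }
);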
@LifecycleStart
public void start() throws IOException
{
  synchronized (lock) {
    if (started) {
      return;
    }

    log.info("Starting zkCoordinator for server[%s]", me.getName());

    final String loadQueueLocation = ZKPaths.makePath(zkPaths.getLoadQueuePath(), me.getName());
    final String servedSegmentsLocation = ZKPaths.makePath(zkPaths.getServedSegmentsPath(), me.getName());
    final String liveSegmentsLocation = ZKPaths.makePath(zkPaths.getLiveSegmentsPath(), me.getName());

    loadQueueCache = new PathChildrenCache(
        curator,
        loadQueueLocation,
        true,
        true,
        loadingExec
    );

    try {
      curator.newNamespaceAwareEnsurePath(loadQueueLocation).ensure(curator.getZookeeperClient());
      curator.newNamespaceAwareEnsurePath(servedSegmentsLocation).ensure(curator.getZookeeperClient());
      curator.newNamespaceAwareEnsurePath(liveSegmentsLocation).ensure(curator.getZookeeperClient());

      loadLocalCache();

      loadQueueCache.getListenable().addListener(
          new PathChildrenCacheListener()
          {
            @Override
            public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception
            {
              final ChildData child = event.getData();
              switch (event.getType()) {
                case CHILD_ADDED:
                  final String path = child.getPath();
                  final DataSegmentChangeRequest request = jsonMapper.readValue(
                      child.getData(), DataSegmentChangeRequest.class
                  );

                  log.info("New request[%s] with zNode[%s].", request.asString(), path);

                  try {
                    request.go(
                        getDataSegmentChangeHandler(),
                        new DataSegmentChangeCallback()
                        {
                          boolean hasRun = false;

                          @Override
                          public void execute()
                          {
                            try {
                              if (!hasRun) {
                                curator.delete().guaranteed().forPath(path);
                                log.info("Completed request [%s]", request.asString());
                                hasRun = true;
                              }
                            }
                            catch (Exception e) {
                              try {
                                curator.delete().guaranteed().forPath(path);
                              }
                              catch (Exception e1) {
                                log.error(e1, "Failed to delete zNode[%s], but ignoring exception.", path);
                              }
                              log.error(e, "Exception while removing zNode[%s]", path);
                              throw Throwables.propagate(e);
                            }
                          }
                        }
                    );
                  }
                  catch (Exception e) {
                    try {
                      curator.delete().guaranteed().forPath(path);
                    }
                    catch (Exception e1) {
                      log.error(e1, "Failed to delete zNode[%s], but ignoring exception.", path);
                    }

                    log.makeAlert(e, "Segment load/unload: uncaught exception.")
                       .addData("node", path)
                       .addData("nodeProperties", request)
                       .emit();
                  }

                  break;
                case CHILD_REMOVED:
                  log.info("zNode[%s] was removed", event.getData().getPath());
                  break;
                default:
                  log.info("Ignoring event[%s]", event);
              }
            }
          }
      );
      loadQueueCache.start();
    }
    catch (Exception e) {
      Throwables.propagateIfPossible(e, IOException.class);
      throw Throwables.propagate(e);
    }

    started = true;
  }
}
@LifecycleStart
public void start() throws IOException
{
  log.info("Starting zkCoordinator for server[%s]", me);
  synchronized (lock) {
    if (started) {
      return;
    }

    final String loadQueueLocation = ZKPaths.makePath(zkPaths.getLoadQueuePath(), me.getName());
    final String servedSegmentsLocation = ZKPaths.makePath(zkPaths.getServedSegmentsPath(), me.getName());
    final String liveSegmentsLocation = ZKPaths.makePath(zkPaths.getLiveSegmentsPath(), me.getName());

    loadQueueCache = new PathChildrenCache(
        curator,
        loadQueueLocation,
        true,
        true,
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("ZkCoordinator-%s").build()
    );

    try {
      config.getInfoDir().mkdirs();
      curator.newNamespaceAwareEnsurePath(loadQueueLocation).ensure(curator.getZookeeperClient());
      curator.newNamespaceAwareEnsurePath(servedSegmentsLocation).ensure(curator.getZookeeperClient());
      curator.newNamespaceAwareEnsurePath(liveSegmentsLocation).ensure(curator.getZookeeperClient());

      loadCache();

      loadQueueCache.getListenable().addListener(
          new PathChildrenCacheListener()
          {
            @Override
            public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception
            {
              final ChildData child = event.getData();
              switch (event.getType()) {
                case CHILD_ADDED:
                  final String path = child.getPath();
                  final DataSegmentChangeRequest segment = jsonMapper.readValue(
                      child.getData(), DataSegmentChangeRequest.class
                  );

                  log.info("New node[%s] with segmentClass[%s]", path, segment.getClass());

                  try {
                    segment.go(ZkCoordinator.this);
                    curator.delete().guaranteed().forPath(path);

                    log.info("Completed processing for node[%s]", path);
                  }
                  catch (Exception e) {
                    try {
                      curator.delete().guaranteed().forPath(path);
                    }
                    catch (Exception e1) {
                      log.info(e1, "Failed to delete node[%s], but ignoring exception.", path);
                    }

                    log.makeAlert(e, "Segment load/unload: uncaught exception.")
                       .addData("node", path)
                       .addData("nodeProperties", segment)
                       .emit();
                  }

                  break;
                case CHILD_REMOVED:
                  log.info("%s was removed", event.getData().getPath());
                  break;
                default:
                  log.info("Ignoring event[%s]", event);
              }
            }
          }
      );
      loadQueueCache.start();
    }
    catch (Exception e) {
      Throwables.propagateIfPossible(e, IOException.class);
      throw Throwables.propagate(e);
    }

    started = true;
  }
}
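// A plausible counterpart (assumed, not shown in this excerpt): the @LifecycleStop
// method that tears down the PathChildrenCache created in start().
@LifecycleStop
public void stop()
{
  log.info("Stopping zkCoordinator for server[%s]", me);
  synchronized (lock) {
    if (!started) {
      return;
    }

    try {
      loadQueueCache.close();
    }
    catch (Exception e) {
      throw Throwables.propagate(e);
    }
    finally {
      loadQueueCache = null;
      started = false;
    }
  }
}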