@Test
public void testStateTransition() throws InterruptedException, ExecutionException, TimeoutException {
  // Start an in-memory ZooKeeper server for the test.
  InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
  zkServer.startAndWait();

  try {
    final String namespace = Joiner.on('/').join("/twill", RunIds.generate(), "runnables", "Runner1");

    final ZKClientService zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
    zkClient.startAndWait();
    zkClient.create(namespace, null, CreateMode.PERSISTENT).get();

    try {
      JsonObject content = new JsonObject();
      content.addProperty("containerId", "container-123");
      content.addProperty("host", "localhost");

      RunId runId = RunIds.generate();
      final Semaphore semaphore = new Semaphore(0);

      // Decorate a simple service whose startUp/shutDown block until the state watcher
      // releases the semaphore, so the test verifies the published state transitions.
      ZKServiceDecorator service = new ZKServiceDecorator(
        ZKClients.namespace(zkClient, namespace), runId, Suppliers.ofInstance(content),
        new AbstractIdleService() {
          @Override
          protected void startUp() throws Exception {
            Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to start");
          }

          @Override
          protected void shutDown() throws Exception {
            Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to stop");
          }
        });

      final String runnablePath = namespace + "/" + runId.getId();
      final AtomicReference<String> stateMatch = new AtomicReference<String>("STARTING");
      // Watch the state node and release the semaphore when its content matches stateMatch.
      watchDataChange(zkClient, runnablePath + "/state", semaphore, stateMatch);

      Assert.assertEquals(Service.State.RUNNING, service.start().get(5, TimeUnit.SECONDS));

      stateMatch.set("STOPPING");
      Assert.assertEquals(Service.State.TERMINATED, service.stop().get(5, TimeUnit.SECONDS));
    } finally {
      zkClient.stopAndWait();
    }
  } finally {
    zkServer.stopAndWait();
  }
}
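// The test above relies on a watchDataChange helper that is not included in this snippet.
// Below is a minimal sketch of what such a helper could look like, assuming Twill's
// ZKOperations.watchData utility: it keeps watching the state node and releases the
// semaphore whenever the node's content contains the expected state string held in
// stateMatch. The actual helper in the original test may be implemented differently.
private void watchDataChange(ZKClient zkClient, String path,
                             final Semaphore semaphore, final AtomicReference<String> stateMatch) {
  ZKOperations.watchData(zkClient, path, new ZKOperations.DataCallback() {
    @Override
    public void updated(NodeData nodeData) {
      if (nodeData != null && nodeData.getData() != null) {
        String content = new String(nodeData.getData(), Charsets.UTF_8);
        // The state node content is JSON; a substring check is sufficient for the test.
        if (content.contains(stateMatch.get())) {
          semaphore.release();
        }
      }
    }
  });
}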
BasicFlowletContext(Program program, final String flowletId, int instanceId, RunId runId,
                    int instanceCount, Set<String> datasets, Arguments runtimeArguments,
                    FlowletSpecification flowletSpec, MetricsCollectionService metricsCollectionService,
                    DiscoveryServiceClient discoveryServiceClient, DatasetFramework dsFramework) {
  super(program, runId, runtimeArguments, datasets,
        getMetricCollector(metricsCollectionService, program, flowletId, runId.getId(), instanceId),
        dsFramework, discoveryServiceClient);
  this.namespaceId = program.getNamespaceId();
  this.flowId = program.getName();
  this.flowletId = flowletId;
  this.groupId = FlowUtils.generateConsumerGroupId(program, flowletId);
  this.instanceId = instanceId;
  this.instanceCount = instanceCount;
  this.flowletSpec = flowletSpec;
  this.userMetrics = new ProgramUserMetrics(
    getMetricCollector(metricsCollectionService, program, flowletId, runId.getId(), instanceId));

  // Per-queue and per-producer metrics contexts are cached and expire after an hour of inactivity.
  // TODO - does this have to cache the metric collectors? Metrics framework itself has a cache [CDAP-2334]
  this.queueMetrics = CacheBuilder.newBuilder()
    .expireAfterAccess(1, TimeUnit.HOURS)
    .build(new CacheLoader<String, MetricsContext>() {
      @Override
      public MetricsContext load(String key) throws Exception {
        return getProgramMetrics().childContext(Constants.Metrics.Tag.FLOWLET_QUEUE, key);
      }
    });

  this.producerMetrics = CacheBuilder.newBuilder()
    .expireAfterAccess(1, TimeUnit.HOURS)
    .build(new CacheLoader<ImmutablePair<String, String>, MetricsContext>() {
      @Override
      public MetricsContext load(ImmutablePair<String, String> key) throws Exception {
        return getProgramMetrics().childContext(ImmutableMap.of(
          Constants.Metrics.Tag.PRODUCER, key.getFirst(),
          Constants.Metrics.Tag.FLOWLET_QUEUE, key.getSecond(),
          Constants.Metrics.Tag.CONSUMER, BasicFlowletContext.this.flowletId));
      }
    });
}
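// The queueMetrics and producerMetrics caches built above would typically be exposed through
// small accessors so callers never construct MetricsContext children themselves. The sketch
// below shows how such accessors might look; the method names are illustrative, not
// necessarily the ones used by the surrounding class.
public MetricsContext getQueueMetrics(String queueName) {
  // Loads (or reuses) the child context tagged with the flowlet queue name.
  return queueMetrics.getUnchecked(queueName);
}

public MetricsContext getProducerMetrics(ImmutablePair<String, String> producerAndQueue) {
  // Loads (or reuses) the child context tagged with producer, queue and consuming flowlet.
  return producerMetrics.getUnchecked(producerAndQueue);
}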
private OperationFuture<String> removeServiceNode() {
  String serviceNode = String.format("/%s", id.getId());
  LOG.info("Remove service node {}{}", zkClient.getConnectString(), serviceNode);
  return ZKOperations.recursiveDelete(zkClient, serviceNode);
}
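// removeServiceNode returns an asynchronous OperationFuture rather than blocking, so callers
// decide how to wait for or react to the deletion. A minimal usage sketch, assuming Guava's
// Futures utility; the callback bodies and log messages here are illustrative only.
Futures.addCallback(removeServiceNode(), new FutureCallback<String>() {
  @Override
  public void onSuccess(String path) {
    LOG.info("Service node removed: {}", path);
  }

  @Override
  public void onFailure(Throwable t) {
    LOG.warn("Failed to remove service node", t);
  }
});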