@Test
public void test() throws Exception {
  startDefaultMaster();

  final HeliosClient client = defaultClient();

  final CreateJobResponse createIdMismatch = client.createJob(
      new Job(JobId.fromString("bad:job:deadbeef"), BUSYBOX, EMPTY_HOSTNAME, IDLE_COMMAND,
          EMPTY_ENV, EMPTY_RESOURCES, EMPTY_PORTS, EMPTY_REGISTRATION, EMPTY_GRACE_PERIOD,
          EMPTY_VOLUMES, EMPTY_EXPIRES, EMPTY_REGISTRATION_DOMAIN, EMPTY_CREATING_USER,
          EMPTY_TOKEN, EMPTY_HEALTH_CHECK, EMPTY_SECURITY_OPT, EMPTY_NETWORK_MODE))
      .get();

  // TODO (dano): Maybe this should be ID_MISMATCH, but then JobValidator must become able to
  // TODO (dano): communicate that.
  assertEquals(CreateJobResponse.Status.INVALID_JOB_DEFINITION, createIdMismatch.getStatus());
}
@Before
public void setUp() {
  // Use a real, dummy Subparser impl to avoid having to mock out every single call.
  final ArgumentParser parser = ArgumentParsers.newArgumentParser("test");
  final Subparser subparser = parser.addSubparsers().addParser("inspect");
  command = new DeploymentGroupInspectCommand(subparser);

  when(client.deploymentGroup(NAME)).thenReturn(Futures.immediateFuture(DEPLOYMENT_GROUP));
  final ListenableFuture<DeploymentGroup> nullFuture = Futures.immediateFuture(null);
  when(client.deploymentGroup(NON_EXISTENT_NAME)).thenReturn(nullFuture);
}
@After
public void baseTeardown() throws Exception {
  for (final HeliosClient client : clients) {
    client.close();
  }
  clients.clear();

  for (final Service service : services) {
    try {
      service.stopAsync();
    } catch (Exception e) {
      log.error("Uncaught exception", e);
    }
  }
  for (final Service service : services) {
    try {
      service.awaitTerminated();
    } catch (Exception e) {
      log.error("Service failed", e);
    }
  }
  services.clear();

  // Clean up docker: kill any containers created by this test run, identified by the test tag.
  try (final DockerClient dockerClient = getNewDockerClient()) {
    final List<Container> containers = dockerClient.listContainers();
    for (final Container container : containers) {
      for (final String name : container.names()) {
        if (name.contains(testTag)) {
          try {
            dockerClient.killContainer(container.id());
          } catch (DockerException e) {
            log.error("Failed to kill container " + container.id(), e);
          }
          break;
        }
      }
    }
  } catch (Exception e) {
    log.error("Docker client exception", e);
  }

  if (zk != null) {
    zk.close();
  }

  listThreads();
}
@Override
protected int runWithJobId(final Namespace options, final HeliosClient client,
                           final PrintStream out, final boolean json, final JobId jobId)
    throws ExecutionException, InterruptedException, IOException {
  final List<String> hosts = options.getList(hostsArg.getDest());
  final Deployment deployment = new Deployment.Builder()
      .setGoal(Goal.STOP)
      .setJobId(jobId)
      .build();

  out.printf("Stopping %s on %s%n", jobId, hosts);

  int code = 0;
  for (final String host : hosts) {
    out.printf("%s: ", host);
    final SetGoalResponse result = client.setGoal(deployment, host).get();
    if (result.getStatus() == SetGoalResponse.Status.OK) {
      out.printf("done%n");
    } else {
      out.printf("failed: %s%n", result);
      code = 1;
    }
  }
  return code;
}
public void assertVolumes(final JobId jobId) throws Exception {
  // Wait for agent to come up
  awaitHostRegistered(client, testHost(), LONG_WAIT_SECONDS, SECONDS);
  awaitHostStatus(client, testHost(), UP, LONG_WAIT_SECONDS, SECONDS);

  // Deploy the job on the agent
  final Deployment deployment = Deployment.of(jobId, START);
  final JobDeployResponse deployed = client.deploy(deployment, testHost()).get();
  assertEquals(JobDeployResponse.Status.OK, deployed.getStatus());

  // Wait for the job to run
  final TaskStatus taskStatus =
      awaitJobState(client, testHost(), jobId, RUNNING, LONG_WAIT_SECONDS, SECONDS);
  assertJobEquals(job, taskStatus.getJob());

  final Integer barPort = taskStatus.getPorts().get("bar").getExternalPort();
  final Integer hostnamePort = taskStatus.getPorts().get("hostname").getExternalPort();
  // Use JUnit assertions rather than Java asserts, which are disabled unless -ea is set.
  assertNotNull(barPort);
  assertNotNull(hostnamePort);

  // Read "foo" from /volume/bar
  final String foo = recvUtf8(barPort, 3);
  assertEquals("foo", foo);

  // Read hostname from /hostname. Close the docker client instead of leaking it.
  try (final DockerClient dockerClient = getNewDockerClient()) {
    final String hostname = dockerClient.info().name();
    final String mountedHostname = recvUtf8(hostnamePort, hostname.length());
    assertEquals(hostname, mountedHostname);
  }
}
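// Note: recvUtf8 is a helper defined elsewhere in this suite. A minimal sketch of its
// assumed behavior, for illustration only -- dockerHostAddress is a placeholder for
// however the suite resolves the Docker host, and the implementation below is not the
// original. Assumes java.net.Socket, java.nio.charset.StandardCharsets, and Guava's
// com.google.common.io.ByteStreams.
private String recvUtf8(final int port, final int numBytes) throws Exception {
  try (final Socket socket = new Socket(dockerHostAddress, port)) {
    // Block until exactly numBytes bytes have been read, then decode them as UTF-8.
    final byte[] bytes = new byte[numBytes];
    ByteStreams.readFully(socket.getInputStream(), bytes);
    return new String(bytes, StandardCharsets.UTF_8);
  }
}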
HeliosSoloDeployment(final Builder builder) {
  final String username = Optional.fromNullable(builder.heliosUsername).or(randomString());

  this.dockerClient = checkNotNull(builder.dockerClient, "dockerClient");
  this.dockerHost = Optional.fromNullable(builder.dockerHost).or(DockerHost.fromEnv());
  this.containerDockerHost = Optional.fromNullable(builder.containerDockerHost)
      .or(containerDockerHostFromEnv());
  this.namespace = Optional.fromNullable(builder.namespace).or(randomString());
  this.env = containerEnv();
  this.binds = containerBinds();

  final String heliosHost;
  final String heliosPort;
  // TODO(negz): Determine and propagate NetworkManager DNS servers?
  try {
    assertDockerReachableFromContainer();
    if (dockerHost.address().equals("localhost") || dockerHost.address().equals("127.0.0.1")) {
      heliosHost = containerGateway();
    } else {
      heliosHost = dockerHost.address();
    }
    this.heliosContainerId = deploySolo(heliosHost);
    heliosPort = getHostPort(this.heliosContainerId, HELIOS_MASTER_PORT);
  } catch (HeliosDeploymentException e) {
    throw new AssertionError("Unable to deploy helios-solo container.", e);
  }

  // Running the String host:port through HostAndPort does some validation for us.
  this.heliosClient = HeliosClient.newBuilder()
      .setUser(username)
      .setEndpoints("http://" + HostAndPort.fromString(dockerHost.address() + ":" + heliosPort))
      .build();
}
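// Illustrative only: a minimal sketch of how this constructor is typically reached,
// assuming a fromEnv() builder factory plus client()/close() accessors on the resulting
// deployment (names inferred from context, not verified against this file).
final HeliosDeployment solo = HeliosSoloDeployment.fromEnv().build();
try {
  final HeliosClient soloClient = solo.client();
  // ... exercise the helios-solo master via soloClient ...
} finally {
  solo.close(); // assumed to tear down the helios-solo container
}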
protected HeliosClient client(final String user, final String endpoint) {
  final HeliosClient client = HeliosClient.newBuilder()
      .setUser(user)
      .setEndpoints(singletonList(URI.create(endpoint)))
      .build();
  // Track the client so that baseTeardown() can close it.
  clients.add(client);
  return client;
}
@Test
public void test() throws Exception {
  startDefaultMaster();
  startDefaultAgent(testHost());

  final HeliosClient client = defaultClient();

  // Create a job using an image exposing port 11211 but without mapping it
  final Job job1 = Job.newBuilder()
      .setName(testTag + "memcached")
      .setVersion("v1")
      .setImage("rohan/memcached-mini")
      .build();
  final JobId jobId1 = job1.getId();
  client.createJob(job1).get();

  // Create a job using an image exposing port 11211 and map it to a specific external port
  final Job job2 = Job.newBuilder()
      .setName(testTag + "memcached")
      .setVersion("v2")
      .setImage("rohan/memcached-mini")
      .setPorts(ImmutableMap.of("tcp", PortMapping.of(11211, externalPort)))
      .build();
  final JobId jobId2 = job2.getId();
  client.createJob(job2).get();

  // Wait for agent to come up
  awaitHostRegistered(client, testHost(), LONG_WAIT_MINUTES, MINUTES);
  awaitHostStatus(client, testHost(), UP, LONG_WAIT_MINUTES, MINUTES);

  // Deploy the jobs on the agent
  client.deploy(Deployment.of(jobId1, START), testHost()).get();
  client.deploy(Deployment.of(jobId2, START), testHost()).get();

  // Wait for the jobs to run
  awaitJobState(client, testHost(), jobId1, RUNNING, LONG_WAIT_MINUTES, MINUTES);
  awaitJobState(client, testHost(), jobId2, RUNNING, LONG_WAIT_MINUTES, MINUTES);
}
@Override
int run(Namespace options, HeliosClient client, PrintStream out, final boolean json)
    throws ExecutionException, InterruptedException {
  final String host = options.getString(hostArg.getDest());
  final String id = options.getString(idArg.getDest());
  out.printf("Registering host %s with id %s%n", host, id);

  int code = 0;
  out.printf("%s: ", host);
  final int result = client.registerHost(host, id).get();
  if (result == 200) {
    out.printf("done%n");
  } else {
    out.printf("failed: %s%n", result);
    code = 1;
  }
  return code;
}
/**
 * Create a new helios client as a specific user, connecting to a helios master cluster in a
 * specific domain.
 *
 * @param domain The target domain.
 * @param user   The user to identify as.
 * @return A helios client.
 */
public static HeliosClient create(final String domain, final String user) {
  return HeliosClient.newBuilder().setDomain(domain).setUser(user).build();
}
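// Example usage. The domain and user are illustrative, and jobs() is assumed to be the
// client method returning a future map of jobs that must be resolved with get().
final HeliosClient client = HeliosClient.create("example.net", "alice");
final Map<JobId, Job> jobs = client.jobs().get();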
@Test
public void testClient() throws Exception {
  final CreateJobResponse created = client.createJob(job).get();
  assertEquals(CreateJobResponse.Status.OK, created.getStatus());

  assertVolumes(job.getId());
}
@Override
protected int runWithJobId(final Namespace options, final HeliosClient client,
                           final PrintStream out, final boolean json, final JobId jobId,
                           final BufferedReader stdin)
    throws ExecutionException, InterruptedException, IOException {
  final String name = options.getString(nameArg.getDest());
  final long timeout = options.getLong(timeoutArg.getDest());
  final int parallelism = options.getInt(parallelismArg.getDest());
  final boolean async = options.getBoolean(asyncArg.getDest());
  final long rolloutTimeout = options.getLong(rolloutTimeoutArg.getDest());
  final boolean migrate = options.getBoolean(migrateArg.getDest());
  final boolean overlap = options.getBoolean(overlapArg.getDest());
  final String token = options.getString(tokenArg.getDest());

  checkArgument(timeout > 0, "Timeout must be greater than 0");
  checkArgument(parallelism > 0, "Parallelism must be greater than 0");
  checkArgument(rolloutTimeout > 0, "Rollout timeout must be greater than 0");

  final long startTime = timeSupplier.get();

  final RolloutOptions rolloutOptions = RolloutOptions.newBuilder()
      .setTimeout(timeout)
      .setParallelism(parallelism)
      .setMigrate(migrate)
      .setOverlap(overlap)
      .setToken(token)
      .build();
  final RollingUpdateResponse response = client.rollingUpdate(name, jobId, rolloutOptions).get();

  if (response.getStatus() != RollingUpdateResponse.Status.OK) {
    if (!json) {
      out.println("Failed: " + response);
    } else {
      out.println(response.toJsonString());
    }
    return 1;
  }

  if (!json) {
    out.println(format("Rolling update%s started: %s -> %s "
                       + "(parallelism=%d, timeout=%d, overlap=%b, token=%s)%s",
        async ? " (async)" : "",
        name, jobId.toShortString(), parallelism, timeout, overlap, token,
        async ? "" : "\n"));
  }

  final Map<String, Object> jsonOutput = Maps.newHashMap();
  jsonOutput.put("parallelism", parallelism);
  jsonOutput.put("timeout", timeout);
  jsonOutput.put("overlap", overlap);
  jsonOutput.put("token", token);

  if (async) {
    if (json) {
      jsonOutput.put("status", response.getStatus());
      out.println(Json.asStringUnchecked(jsonOutput));
    }
    return 0;
  }

  String error = "";
  boolean failed = false;
  boolean timedOut = false;
  final Set<String> reported = Sets.newHashSet();
  while (true) {
    final DeploymentGroupStatusResponse status = client.deploymentGroupStatus(name).get();

    if (status == null) {
      failed = true;
      error = "Failed to fetch deployment-group status";
      break;
    }

    if (!jobId.equals(status.getDeploymentGroup().getJobId())) {
      // Another rolling-update was started, overriding this one -- exit
      failed = true;
      error = "Deployment-group job id changed during rolling-update";
      break;
    }

    if (!json) {
      for (final DeploymentGroupStatusResponse.HostStatus hostStatus : status.getHostStatuses()) {
        final JobId hostJobId = hostStatus.getJobId();
        final String host = hostStatus.getHost();
        final TaskStatus.State state = hostStatus.getState();
        final boolean done = hostJobId != null
                             && hostJobId.equals(jobId)
                             && state == TaskStatus.State.RUNNING;

        if (done && reported.add(host)) {
          out.println(format("%s -> %s (%d/%d)", host, state, reported.size(),
              status.getHostStatuses().size()));
        }
      }
    }

    if (status.getStatus() != DeploymentGroupStatusResponse.Status.ROLLING_OUT) {
      if (status.getStatus() == DeploymentGroupStatusResponse.Status.FAILED) {
        failed = true;
        error = status.getError();
      }
      break;
    }

    if (timeSupplier.get() - startTime > TimeUnit.MINUTES.toMillis(rolloutTimeout)) {
      // Rollout timed out
      timedOut = true;
      break;
    }

    sleepFunction.sleep(POLL_INTERVAL_MILLIS);
  }

  final double duration = (timeSupplier.get() - startTime) / 1000.0;

  if (json) {
    if (failed) {
      jsonOutput.put("status", "FAILED");
      jsonOutput.put("error", error);
    } else if (timedOut) {
      jsonOutput.put("status", "TIMEOUT");
    } else {
      jsonOutput.put("status", "DONE");
    }
    jsonOutput.put("duration", duration);
    out.println(Json.asStringUnchecked(jsonOutput));
  } else {
    out.println();
    if (failed) {
      out.println(format("Failed: %s", error));
    } else if (timedOut) {
      out.println("Timed out! (rolling-update still in progress)");
    } else {
      out.println("Done.");
    }
    out.println(format("Duration: %.2f s", duration));
  }

  return (failed || timedOut) ? 1 : 0;
}