Example #1
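  /**
   * Returns the deployments configured for all jobs on {@code host}, or {@code null} if the
   * host's job configuration node does not exist in ZooKeeper.
   */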
  private Map<JobId, Deployment> getTasks(final ZooKeeperClient client, final String host) {
    final Map<JobId, Deployment> jobs = Maps.newHashMap();
    try {
      final String folder = Paths.configHostJobs(host);
      final List<String> jobIds;
      try {
        jobIds = client.getChildren(folder);
      } catch (KeeperException.NoNodeException e) {
        return null;
      }

      for (final String jobIdString : jobIds) {
        final JobId jobId = JobId.fromString(jobIdString);
        final String containerPath = Paths.configHostJob(host, jobId);
        try {
          final byte[] data = client.getData(containerPath);
          final Task task = parse(data, Task.class);
          jobs.put(jobId, Deployment.of(jobId, task.getGoal()));
        } catch (KeeperException.NoNodeException ignored) {
          log.debug("deployment config node disappeared: {}", jobIdString);
        }
      }
    } catch (KeeperException | IOException e) {
      throw new HeliosRuntimeException("getting deployment config failed", e);
    }

    return jobs;
  }
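A minimal caller sketch (hypothetical, not from the source): the null return distinguishes a missing host jobs node from a host that simply has no deployments, so callers should check for it before iterating.

    // Hypothetical caller sketch: null means the host's jobs node is absent in ZooKeeper,
    // while an empty map means the node exists but holds no deployments.
    final Map<JobId, Deployment> deployments = getTasks(client, host);
    if (deployments == null) {
      log.warn("no job configuration node for host {}", host);
    } else {
      for (final Map.Entry<JobId, Deployment> entry : deployments.entrySet()) {
        log.info("job {} has goal {}", entry.getKey(), entry.getValue().getGoal());
      }
    }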
Example #2
  /** Used to update the existing deployment of a job. */
  @Override
  public void updateDeployment(final String host, final Deployment deployment)
      throws HostNotFoundException, JobNotDeployedException {
    log.info("updating deployment {}: {}", deployment, host);

    final ZooKeeperClient client = provider.get("updateDeployment");

    final JobId jobId = deployment.getJobId();
    final Job job = getJob(client, jobId);

    if (job == null) {
      throw new JobNotDeployedException(host, jobId);
    }

    assertHostExists(client, host);
    assertTaskExists(client, host, jobId);

    final String path = Paths.configHostJob(host, jobId);
    final Task task = new Task(job, deployment.getGoal());
    try {
      client.setData(path, task.toJsonBytes());
    } catch (Exception e) {
      throw new HeliosRuntimeException(
          "updating deployment " + deployment + " on host " + host + " failed", e);
    }
  }
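A short usage sketch, hypothetical and not from the source: the `model` reference and the `Goal.STOP` constant are assumptions, but the call pattern follows the method's declared contract.

    // Hypothetical usage sketch: flip an existing deployment's goal to STOP.
    // `model` is an assumed reference to this master model implementation.
    final Deployment stopped = Deployment.of(jobId, Goal.STOP);
    try {
      model.updateDeployment(host, stopped);
    } catch (HostNotFoundException | JobNotDeployedException e) {
      log.warn("could not update deployment {} on host {}", stopped, host, e);
    }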
Example #3
  /** Undeploys the job specified by {@code jobId} on {@code host}. */
  @Override
  public Deployment undeployJob(final String host, final JobId jobId)
      throws HostNotFoundException, JobNotDeployedException {
    log.info("undeploying {}: {}", jobId, host);
    final ZooKeeperClient client = provider.get("undeployJob");

    assertHostExists(client, host);

    final Deployment deployment = getDeployment(host, jobId);
    if (deployment == null) {
      throw new JobNotDeployedException(host, jobId);
    }

    // TODO (dano): Is this safe? Can the ports of an undeployed job collide with a new deployment?
    // TODO (drewc): If the task is still in UNDEPLOY, the agent hasn't gotten to it yet, which
    //    means it probably won't see the new job yet either. However, it may spin up a new
    //    supervisor for the new job before the old one is done being torn down, so it can race
    //    and lose. With a small change to how the Agent manages supervisors, plus some
    //    coordination, we could remove the race. Specifically, we currently create new
    //    Supervisors before updating existing ones. If we swapped that order, part of the
    //    problem would go away, but we'd also need coordination between the Supervisor and the
    //    Agent such that the Agent could wait until the Supervisor had handled the Goal change.
    //    Since ZK guarantees the writes are seen in order, we then wouldn't need to deal with
    //    seeing the new job before the UNDEPLOY.

    final Job job = getJob(client, jobId);
    final String path = Paths.configHostJob(host, jobId);
    final Task task = new Task(job, UNDEPLOY);
    final List<ZooKeeperOperation> operations =
        Lists.newArrayList(set(path, task.toJsonBytes()), delete(Paths.configJobHost(jobId, host)));

    final List<Integer> staticPorts = staticPorts(job);
    for (int port : staticPorts) {
      operations.add(delete(Paths.configHostPort(host, port)));
    }

    try {
      client.transaction(operations);
    } catch (NoNodeException e) {
      if (e.getPath().equals(path)) {
        // NoNodeException on updating the deployment node may happen due to retry failures.
        // If the deployment isn't there anymore, we're done.
        return deployment;
      } else {
        // The relation node deletes should not fail unless there is a programming error.
        throw new HeliosRuntimeException("Removing deployment failed", e);
      }
    } catch (KeeperException e) {
      throw new HeliosRuntimeException("Removing deployment failed", e);
    }
    return deployment;
  }
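A hedged usage sketch (the `model` reference is an assumption) showing that the method returns the deployment as it existed before removal:

    // Hypothetical usage sketch: undeploy and report the goal the job had before removal.
    try {
      final Deployment removed = model.undeployJob(host, jobId);
      log.info("undeployed {} from {} (previous goal: {})", jobId, host, removed.getGoal());
    } catch (HostNotFoundException | JobNotDeployedException e) {
      log.info("nothing to undeploy for {} on {}", jobId, host);
    }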
Example #4
  /** Returns the current deployment state of {@code jobId} on {@code host}. */
  @Override
  public Deployment getDeployment(final String host, final JobId jobId) {
    final String path = Paths.configHostJob(host, jobId);
    final ZooKeeperClient client = provider.get("getDeployment");
    try {
      final byte[] data = client.getData(path);
      final Task task = parse(data, Task.class);
      return Deployment.of(jobId, task.getGoal());
    } catch (KeeperException.NoNodeException e) {
      return null;
    } catch (KeeperException | IOException e) {
      throw new HeliosRuntimeException("getting deployment failed", e);
    }
  }
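A minimal caller sketch (hypothetical `model` reference), relying on the documented null-on-missing-node behavior:

    // Hypothetical usage sketch: null signals that the job is not deployed on the host.
    final Deployment current = model.getDeployment(host, jobId);
    if (current == null) {
      log.info("{} is not deployed on {}", jobId, host);
    } else {
      log.info("{} is deployed on {} with goal {}", jobId, host, current.getGoal());
    }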
Example #5
  // TODO(drewc): this kinda screams "long method"
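  /**
   * Attempts to deploy {@code deployment} on {@code host} in a single ZooKeeper transaction,
   * retrying on concurrent modification and giving up after three failed attempts.
   */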
  private void deployJobRetry(
      final ZooKeeperClient client, final String host, final Deployment deployment, int count)
      throws JobDoesNotExistException, JobAlreadyDeployedException, HostNotFoundException,
          JobPortAllocationConflictException {
    if (count == 3) {
      throw new HeliosRuntimeException(
          "3 failures (possibly concurrent modifications) while deploying. Giving up.");
    }
    log.info("deploying {}: {} (retry={})", deployment, host, count);

    final JobId id = deployment.getJobId();
    final Job job = getJob(id);

    if (job == null) {
      throw new JobDoesNotExistException(id);
    }

    final UUID operationId = UUID.randomUUID();
    final String jobPath = Paths.configJob(id);
    final String taskPath = Paths.configHostJob(host, id);
    final String taskCreationPath = Paths.configHostJobCreation(host, id, operationId);

    final List<Integer> staticPorts = staticPorts(job);
    final Map<String, byte[]> portNodes = Maps.newHashMap();
    final byte[] idJson = id.toJsonBytes();
    for (final int port : staticPorts) {
      final String path = Paths.configHostPort(host, port);
      portNodes.put(path, idJson);
    }

    final Task task = new Task(job, deployment.getGoal());
    final List<ZooKeeperOperation> operations =
        Lists.newArrayList(
            check(jobPath), create(portNodes), create(Paths.configJobHost(id, host)));

    // Attempt to read the existing task here. If its goal is UNDEPLOY, it's as good as not
    // existing.
    try {
      final Node existing = client.getNode(taskPath);
      final byte[] bytes = existing.getBytes();
      final Task readTask = Json.read(bytes, Task.class);
      if (readTask.getGoal() != Goal.UNDEPLOY) {
        throw new JobAlreadyDeployedException(host, id);
      }
      operations.add(check(taskPath, existing.getStat().getVersion()));
      operations.add(set(taskPath, task));
    } catch (NoNodeException e) {
      operations.add(create(taskPath, task));
      operations.add(create(taskCreationPath));
    } catch (IOException | KeeperException e) {
      throw new HeliosRuntimeException("reading existing task description failed", e);
    }

    // TODO (dano): Failure handling is racy wrt agent and job modifications.
    try {
      client.transaction(operations);
      log.info("deployed {}: {} (retry={})", deployment, host, count);
    } catch (NoNodeException e) {
      // Either the job, the host, or the task went away
      assertJobExists(client, id);
      assertHostExists(client, host);
      // If the job and host still exist, we likely tried to redeploy a job that had an UNDEPLOY
      // goal and lost the race with the agent removing the task before we could set it. Retry.
      deployJobRetry(client, host, deployment, count + 1);
    } catch (NodeExistsException e) {
      // Check for conflict due to transaction retry
      try {
        if (client.exists(taskCreationPath) != null) {
          // Our creation operation node existed, we're done here
          return;
        }
      } catch (KeeperException ex) {
        throw new HeliosRuntimeException("checking job deployment failed", ex);
      }
      try {
        // Check if the job was already deployed
        if (client.stat(taskPath) != null) {
          throw new JobAlreadyDeployedException(host, id);
        }
      } catch (KeeperException ex) {
        throw new HeliosRuntimeException("checking job deployment failed", ex);
      }

      // Check for static port collisions
      for (final int port : staticPorts) {
        final String path = Paths.configHostPort(host, port);
        try {
          if (client.stat(path) == null) {
            continue;
          }
          final byte[] b = client.getData(path);
          final JobId existingJobId = parse(b, JobId.class);
          throw new JobPortAllocationConflictException(id, existingJobId, host, port);
        } catch (KeeperException | IOException ex) {
          throw new HeliosRuntimeException("checking port allocations failed", ex);
        }
      }

      // Catch all for logic and ephemeral issues
      throw new HeliosRuntimeException("deploying job failed", e);
    } catch (KeeperException e) {
      throw new HeliosRuntimeException("deploying job failed", e);
    }
  }
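A minimal sketch of a public entry point that could start the retry chain. This wrapper is an assumption, not shown in the source, but it matches the `provider.get(...)` pattern and the three-attempt limit above.

  // Sketch (assumption): how deployJobRetry would typically be invoked.
  @Override
  public void deployJob(final String host, final Deployment deployment)
      throws JobDoesNotExistException, JobAlreadyDeployedException, HostNotFoundException,
          JobPortAllocationConflictException {
    final ZooKeeperClient client = provider.get("deployJob");
    deployJobRetry(client, host, deployment, 0); // first attempt; gives up when count reaches 3
  }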