/** Invoked when a Mesos task reaches a terminal status. */
  private void taskTerminated(Protos.TaskID taskID, Protos.TaskStatus status) {
    // this callback occurs for failed tasks and for deliberately released tasks alike

    final ResourceID id = extractResourceID(taskID);
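    // the ResourceID serves as the key under which this worker is tracked in the in-memory
    // bookkeeping maps (workersInLaunch / workersBeingReturned) consulted below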

    boolean existed;
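    // remove the worker from the persistent worker store first; if the store cannot be updated,
    // the tracked worker set would diverge from reality, so the failure is escalated as fatal
    // (the store is assumed to back recovery, e.g. ZooKeeper in high-availability setups)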
    try {
      existed = workerStore.removeWorker(taskID);
    } catch (Exception ex) {
      fatalError("unable to remove worker", ex);
      return;
    }

    if (!existed) {
      LOG.info("Received a termination notice for an unrecognized worker: {}", id);
      return;
    }

    // check if this is a failed task or a released task
    if (workersBeingReturned.remove(id) != null) {
      // regular finished worker that we released
      LOG.info("Worker {} finished successfully with diagnostics: {}", id, status.getMessage());
    } else {
      // failed worker, either at startup, or running
      final MesosWorkerStore.Worker launched = workersInLaunch.remove(id);
      if (launched != null) {
        LOG.info(
            "Mesos task {} failed, with a TaskManager in launch or registration. "
                + "State: {} Reason: {} ({})",
            id,
            status.getState(),
            status.getReason(),
            status.getMessage());
        // re-acquisition of replacement workers is triggered at the end of this method
      } else {
        // failed registered worker
        LOG.info(
            "Mesos task {} failed, with a registered TaskManager. State: {} Reason: {} ({})",
            id,
            status.getState(),
            status.getReason(),
            status.getMessage());

        // notify the generic logic, which notifies the JobManager, etc.
        notifyWorkerFailed(id, "Mesos task " + id + " failed. State: " + status.getState());
      }

      // general failure logging
      failedTasksSoFar++;

      String diagMessage =
          String.format(
              "Diagnostics for task %s in state %s: reason=%s message=%s",
              id, status.getState(), status.getReason(), status.getMessage());
      sendInfoMessage(diagMessage);
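      // sendInfoMessage presumably forwards the diagnostics to registered info-message
      // listeners (e.g. the submitting client), in addition to the local log output below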

      LOG.info(diagMessage);
      LOG.info("Total number of failed tasks so far: {}", failedTasksSoFar);

      // a negative maxFailedTasks (e.g. -1) means failed tasks are tolerated without limit
      if (maxFailedTasks >= 0 && failedTasksSoFar > maxFailedTasks) {
        String msg =
            "Stopping Mesos session because the number of failed tasks ("
                + failedTasksSoFar
                + ") exceeded the maximum number of failed tasks ("
                + maxFailedTasks
                + "). This number is controlled by the '"
                + ConfigConstants.MESOS_MAX_FAILED_TASKS
                + "' configuration setting. "
                + "By default, it is the number of requested tasks.";

        LOG.error(msg);
        self()
            .tell(
                decorateMessage(new StopCluster(ApplicationStatus.FAILED, msg)),
                ActorRef.noSender());

        // the cluster is being stopped, so there is no need to check for replacement workers
        return;
      }
    }

    // in case failed tasks were among the terminated tasks, make
    // sure we re-examine the worker set and request new workers
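    // (triggerCheckWorkers presumably compares the number of pending and registered workers
    // against the designated target and requests additional Mesos resources if workers are
    // now missing)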
    triggerCheckWorkers();
  }