@Override
 public void onReceive(final Object message) throws Exception {
   if (message instanceof AskParam) {
     AskParam askParam = (AskParam) message;
     timeout = askParam.timeout;
     caller = sender();
     targetActor = context().actorOf(askParam.props);
     context().watch(targetActor);
     targetActor.forward(askParam.message, context());
     final Scheduler scheduler = context().system().scheduler();
     timeoutMessage =
         scheduler.scheduleOnce(
             askParam.timeout.duration(), self(), new AskTimeout(), context().dispatcher(), null);
   } else if (message instanceof Terminated) {
     sendFailureToCaller(new ActorKilledException("Target actor terminated."));
     timeoutMessage.cancel();
     context().stop(self());
   } else if (message instanceof AskTimeout) {
     sendFailureToCaller(
         new TimeoutException("Target actor timed out after " + timeout.toString()));
     context().stop(self());
   } else {
     unhandled(message);
   }
 }
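This proxy actor wraps the ask pattern: it creates the target actor from the supplied Props, forwards the original message on the caller's behalf, watches the target so abnormal termination is reported back, and schedules an AskTimeout so the exchange cannot hang forever. The AskParam and AskTimeout message types are not part of the snippet; a minimal sketch of what they might look like (field names are assumptions, not the original definitions):

// Hypothetical message types assumed by the proxy above.
final class AskParam {
  final Props props;     // how to create the target actor
  final Object message;  // the request to forward
  final Timeout timeout; // akka.util.Timeout: how long to wait before giving up

  AskParam(Props props, Object message, Timeout timeout) {
    this.props = props;
    this.message = message;
    this.timeout = timeout;
  }
}

// Marker message the proxy schedules to itself when the deadline expires.
final class AskTimeout {}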
Example #2
  @Override
  public void postStop() {
    LOG.info("Stopping Shard {}", persistenceId());

    super.postStop();

    messageRetrySupport.close();

    if (txCommitTimeoutCheckSchedule != null) {
      txCommitTimeoutCheckSchedule.cancel();
    }

    commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

    shardMBean.unregisterMBean();
  }
  /**
   * The real brains of this scheduler: using the memory capacity reported in each work request,
   * determine how to spread the load of unscheduled jobs efficiently across the requesting agents.
   *
   * <p>The algorithm works by taking the collection of work requests and sorting them from smallest
   * to largest free memory capacity.
   *
   * <p>The scheduler then iterates over each job, handing it to the first agent that has enough
   * capacity to complete it. In this sense, each agent is "greedy", grabbing as many jobs as it can
   * manage.
   *
   * <p>If a job is too big to fit into the remaining capacity of any of the agents, it will remain
   * unscheduled.
   *
   * <p><b>WARNING:</b> If a job is too big for any of the agents, it will sit on the backlog
   * forever!
   */
  private void scheduleJobs() {

    Map<ActorRef, List<Job>> agentJobs = new HashMap<>();
    List<Job> scheduledJobs = new ArrayList<>();
    Iterator<Job> iterator = unscheduledJobs.iterator();
    while (iterator.hasNext()) {
      Job job = iterator.next();
      long jobSize = Long.parseLong(job.getParams().get(JVMCapacityScheduler.JOB_SIZE));
      sortWorkRequestsByFreeMemory();

      for (JVMCapacityWorkRequest workRequest : workRequests) {
        long freeMemory = workRequest.getFreeMemory();
        ActorRef agent = workRequest.getAgent();
        if (freeMemory >= jobSize) {
          // Add the job to the agent, creating its job list on first use
          agentJobs.computeIfAbsent(agent, key -> new ArrayList<>()).add(job);
          scheduledJobs.add(job);

          // Decrease agent's available memory
          workRequest.setFreeMemory(freeMemory - jobSize);
          break;
        }
      }
    }

    // Remove the jobs we managed to place so they no longer count as backlog
    unscheduledJobs.removeAll(scheduledJobs);

    // Create a schedule
    Schedule schedule = new Schedule();
    for (ActorRef agent : agentJobs.keySet()) {
      schedule.setJobs(agent, agentJobs.get(agent));
    }

    // Dispatch the schedule
    dispatchJobs(schedule);

    // Clear old work requests
    workRequests.clear();

    // Cancel the scheduling
    scheduleJobs.cancel();
  }
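scheduleJobs() relies on sortWorkRequestsByFreeMemory(), which is not part of the snippet. A minimal sketch of that helper, assuming workRequests is a List<JVMCapacityWorkRequest> and using java.util.Comparator:

  // Hypothetical implementation of the helper called above: orders pending work
  // requests from smallest to largest reported free memory, so each job is
  // offered to the least-capable agent that can still accommodate it.
  private void sortWorkRequestsByFreeMemory() {
    workRequests.sort(Comparator.comparingLong(JVMCapacityWorkRequest::getFreeMemory));
  }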
Example #4
 @Override
 public void onReceive(Object msg) throws Exception {
   if (msg.equals(Msg.TICK)) {
     if (probes.timeout()) {
       timer.cancel(); // Stop timer
       getContext().become(SPLITTING);
       getSelf().tell(Msg.DONE, getSelf()); // Start splitting
       logger.info("Latency Monitor Start Splitting");
       return;
     }
     // Send probes to all targets
     for (Pair<ActorRef, ActorRef> targetPort : targetPorts) {
       ActorRef target = targetPort.first();
       ActorRef port = targetPort.second();
       port.tell(probes.newProbe(target), getSelf());
     }
    } else if (msg instanceof Probe) {
      Probe probe = (Probe) msg;
      probes.fill(probe);
      long now = System.nanoTime();
      // TODO Temporary log here, remove or format this later
      logger.info(
          "SubOperator: " + probe.target
              + " Current latency: " + (now - probe.now) / 1000 + "us");
    } else if (msg instanceof Target) {
      Target target = (Target) msg;
      if (target.toAdd) {
        probes.addTarget(target.target);
        targetPorts.add(new Pair<>(target.target, target.port));
      } else {
        probes.removeTarget(target.target);
        targetPorts.removeIf(targetPort -> targetPort.first().equals(target.target));
      }
    } else {
      unhandled(msg);
    }
 }
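The onReceive above assumes a timer field that periodically delivers Msg.TICK until it is cancelled once the probes time out. A minimal sketch of how such a timer could be started in preStart() with the classic scheduler API (the 100 ms interval is an assumption):

 @Override
 public void preStart() throws Exception {
   super.preStart();
   // Illustrative only: deliver Msg.TICK to ourselves every 100 ms until cancelled.
   timer =
       getContext()
           .system()
           .scheduler()
           .schedule(
               Duration.Zero(),
               Duration.create(100, TimeUnit.MILLISECONDS),
               getSelf(),
               Msg.TICK,
               getContext().dispatcher(),
               getSelf());
 }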
 private void stopHeartBeat() {
   if (heartbeatSchedule != null && !heartbeatSchedule.isCancelled()) {
     heartbeatSchedule.cancel();
   }
 }
 @Override
 public void postStop() {
   registerTask.cancel();
 }
 private void handleLostPosition() {
   if (cancelledPowerChange != null) {
     cancelledPowerChange.cancel();
   }
 }
Example #8
 @Override
 public void postStop() {
   tick.cancel();
 }
 @Override
 public void postStop() {
   checkTimer.cancel();
 }
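All of these postStop() snippets apply the same lifecycle rule: any Cancellable the actor scheduled must be cancelled when the actor stops, otherwise the scheduler keeps sending messages that end up in dead letters. A minimal self-contained sketch of the pattern (the actor name, message, and one-second interval are assumptions, not taken from the projects above):

import akka.actor.Cancellable;
import akka.actor.UntypedAbstractActor;
import scala.concurrent.duration.Duration;
import java.util.concurrent.TimeUnit;

public class TickActor extends UntypedAbstractActor {

  private static final String TICK = "tick";
  private Cancellable tick;

  @Override
  public void preStart() {
    // Schedule a periodic TICK to ourselves; keep the Cancellable so it can be stopped.
    tick =
        getContext()
            .system()
            .scheduler()
            .schedule(
                Duration.create(1, TimeUnit.SECONDS),
                Duration.create(1, TimeUnit.SECONDS),
                getSelf(),
                TICK,
                getContext().dispatcher(),
                getSelf());
  }

  @Override
  public void postStop() {
    // Cancel the periodic message so it does not keep firing after the actor stops.
    tick.cancel();
  }

  @Override
  public void onReceive(Object message) {
    if (TICK.equals(message)) {
      // periodic work goes here
    } else {
      unhandled(message);
    }
  }
}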