    @Override
    public void run() {
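      // Runs periodically: finds VMs stuck in the Starting or Stopping state
      // past the operation timeout and schedules the corrective action.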
      GlobalLock lock = GlobalLock.getInternLock("TransitionChecking");
      if (lock == null) {
        s_logger.debug("Couldn't get the global lock");
        return;
      }

      // lock(30) waits up to 30 seconds for the DB-backed global lock, so only
      // one management server runs the transition check at a time.
      if (!lock.lock(30)) {
        s_logger.debug("Couldn't acquire the TransitionChecking lock within 30 seconds");
        lock.releaseRef(); // drop the reference taken by getInternLock() before bailing out
        return;
      }
      try {
        // getInternLock() has already taken a reference to this lock; it is
        // released in the finally block below.
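        // Cutoff is "now" minus _operationTimeout (seconds, converted to ms);
        // anything still transitioning past that point is considered stuck.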
        List<VMInstanceVO> instances =
            _instanceDao.findVMInTransition(
                new Date(new Date().getTime() - (_operationTimeout * 1000)),
                State.Starting,
                State.Stopping);
        for (VMInstanceVO instance : instances) {
          State state = instance.getState();
          if (state == State.Stopping) {
            scheduleStop(instance, instance.getHostId(), true);
          } else if (state == State.Starting) {
            scheduleRestart(instance, true);
          }
        }
      } catch (Exception e) {
        s_logger.warn("Caught the following exception on transition checking", e);
      } finally {
        StackMaid.current().exitCleanup(); // release thread-local resources registered during this pass
        lock.unlock();
        lock.releaseRef(); // balance the reference taken by getInternLock()
      }
    }
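
    // run() of the HA cleanup task (a separate Runnable): purges old records
    // from the HA work table.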
    @Override
    public void run() {
      s_logger.info("HA Cleanup Thread Running");

      try {
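        // Remove HA work records older than _timeBetweenFailures milliseconds.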
        _haDao.cleanup(System.currentTimeMillis() - _timeBetweenFailures);
      } catch (Exception e) {
        s_logger.warn("Error while cleaning up", e);
      } finally {
        StackMaid.current().exitCleanup();
      }
    }
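
    // run() of the HA worker thread (a separate Runnable): consumes the HA
    // work queue until the manager is stopped.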
    @Override
    public void run() {
      s_logger.info("Starting work");
      while (!_stopped) {
        try {
          s_logger.trace("Checking the database");
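          // Claim the next pending work item for this management server.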
          final HaWorkVO work = _haDao.take(_serverId);
          if (work == null) {
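            // Nothing to do: sleep briefly, then poll the queue again.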
            try {
              synchronized (this) {
                wait(_timeToSleep);
              }
              continue;
            } catch (final InterruptedException e) {
              s_logger.info("Interrupted");
              continue;
            }
          }

          s_logger.info("Working on " + work.toString());

          try {
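            // Dispatch on work type; each handler returns the next retry time
            // (in seconds) or null when the work item is finished.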
            final WorkType wt = work.getWorkType();
            Long nextTime = null;
            if (wt == WorkType.Migration) {
              nextTime = migrate(work);
            } else if (wt == WorkType.HA) {
              nextTime = restart(work);
            } else if (wt == WorkType.Stop || wt == WorkType.CheckStop) {
              nextTime = stopVM(work);
            } else if (wt == WorkType.Destroy) {
              nextTime = destroyVM(work);
            } else {
              assert false : "How did we get here with " + wt.toString();
              continue;
            }

            if (nextTime == null) {
              if (s_logger.isDebugEnabled()) {
                s_logger.debug(work.toString() + " is complete");
              }
              work.setStep(Step.Done);
            } else {
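              // nextTime is in seconds; "<< 10" multiplies by 1024, a cheap
              // approximation of the x1000 seconds-to-millis conversion for display.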
              if (s_logger.isDebugEnabled()) {
                s_logger.debug(
                    "Rescheduling "
                        + work.toString()
                        + " for instance "
                        + work.getInstanceId()
                        + " to try again at "
                        + new Date(nextTime << 10));
              }
              work.setTimeToTry(nextTime);
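              // Clear ownership so any management server can pick the item up
              // again once the retry time arrives.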
              work.setServerId(null);
              work.setDateTaken(null);
            }
          } catch (Exception e) {
            s_logger.error("Caught this exception while processing the work queue.", e);
            work.setStep(Step.Error);
          }
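          // Persist the new step, retry time, and ownership.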
          _haDao.update(work.getId(), work);
        } catch (final Throwable th) {
          s_logger.error("Caught this throwable, ", th);
        } finally {
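          // Release any thread-local resources registered during this iteration.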
          StackMaid.current().exitCleanup();
        }
      }
      s_logger.info("Time to go home!");
    }
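
    // --- Illustration (not from the original source) ---
    // A minimal sketch of how Runnables like the three above are typically
    // wired together: the periodic checks go on a scheduled executor and the
    // queue consumer gets dedicated threads. The class names, field names,
    // and intervals below are assumptions, not the project's actual wiring.
    //
    //   ScheduledExecutorService executor = Executors.newScheduledThreadPool(2);
    //   executor.scheduleAtFixedRate(new TransitionTask(), _operationTimeout,
    //       _operationTimeout, TimeUnit.SECONDS);
    //   executor.scheduleAtFixedRate(new CleanupTask(), _timeBetweenFailures,
    //       _timeBetweenFailures, TimeUnit.MILLISECONDS);
    //   for (int i = 0; i < _workerCount; i++) {
    //       new Thread(new WorkerThread(), "HA-Worker-" + i).start();
    //   }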