@Override
  public void fullSync(long agentId, HashMap<String, Pair<Long, Long>> newGroupStates) {
    // Compare each vm's ruleset log sequence reported by the agent against
    // the sequence persisted in the database; vms whose numbers differ are
    // out of sync and get a ruleset update scheduled to their hosts.
    //
    // @param agentId        the agent (host) reporting its current group states
    // @param newGroupStates map of vm name -> (vmId, agent-side log sequence)
    ArrayList<Long> affectedVms = new ArrayList<Long>();
    // Iterate entrySet() instead of keySet()+get() — avoids two redundant
    // map lookups per vm that the original performed.
    for (Map.Entry<String, Pair<Long, Long>> entry : newGroupStates.entrySet()) {
      Long vmId = entry.getValue().first();
      Long seqno = entry.getValue().second();

      VmRulesetLogVO log = _rulesetLogDao.findByVmId(vmId);
      // NOTE(review): if getLogsequence() returns a boxed Long (not long),
      // "!=" here is a reference comparison — confirm the getter returns a
      // primitive, otherwise this should use !Objects.equals(...).
      if (log != null && log.getLogsequence() != seqno) {
        affectedVms.add(vmId);
      }
    }
    if (!affectedVms.isEmpty()) {
      s_logger.info(
          "Network Group full sync for agent "
              + agentId
              + " found "
              + affectedVms.size()
              + " vms out of sync");
      // Do not bump the log sequence (updateSeqno=false) — the DB already
      // holds the authoritative number; null delay means use the default.
      scheduleRulesetUpdateToHosts(affectedVms, false, null);
    }
  }
  @DB
  // Worker entry point: takes one queued security-group work item for this
  // management server, and if the vm is running, pushes its current ingress
  // ruleset to the vm's host agent. Runs inside a DB transaction; the vm row
  // is locked for the duration to serialize concurrent workers.
  public void work() {
    if (s_logger.isTraceEnabled()) {
      s_logger.trace("Checking the database");
    }
    // Atomically claim the next scheduled work item for this server; null
    // means the queue is empty.
    final SecurityGroupWorkVO work = _workDao.take(_serverId);
    if (work == null) {
      if (s_logger.isTraceEnabled()) {
        s_logger.trace("Security Group work: no work found");
      }
      return;
    }
    Long userVmId = work.getInstanceId();
    if (work.getStep() == Step.Done) {
      if (s_logger.isDebugEnabled()) {
        s_logger.debug(
            "Security Group work: found a job in done state, rescheduling for vm: " + userVmId);
      }
      // Already-done item: re-enqueue the vm after the cleanup interval
      // (seconds -> ms) rather than processing it now.
      ArrayList<Long> affectedVms = new ArrayList<Long>();
      affectedVms.add(userVmId);
      scheduleRulesetUpdateToHosts(affectedVms, false, _timeBetweenCleanups * 1000l);
      return;
    }
    UserVm vm = null;
    Long seqnum = null;
    s_logger.debug("Working on " + work);
    final Transaction txn = Transaction.currentTxn();
    txn.start();
    boolean locked = false;
    try {
      // Try to take a row lock on the vm; null means either the vm is gone
      // or another thread holds the lock.
      vm = _userVMDao.acquireInLockTable(work.getInstanceId());
      if (vm == null) {
        vm = _userVMDao.findById(work.getInstanceId());
        if (vm == null) {
          s_logger.info("VM " + work.getInstanceId() + " is removed");
          // NOTE(review): locked is set true here although acquireInLockTable
          // returned null — the finally block will release a lock that was
          // never acquired and mark the work Done. Marking Done is presumably
          // the intent for a removed vm; confirm releaseFromLockTable is a
          // safe no-op in that case.
          locked = true;
          return;
        }
        // VM exists but another thread holds the lock; leave the work item
        // for a later pass (locked stays false, so it is not marked Done).
        s_logger.warn("Unable to acquire lock on vm id=" + userVmId);
        return;
      }
      locked = true;
      Long agentId = null;
      VmRulesetLogVO log = _rulesetLogDao.findByVmId(userVmId);
      if (log == null) {
        s_logger.warn("Cannot find log record for vm id=" + userVmId);
        return;
      }
      seqnum = log.getLogsequence();

      // NOTE(review): vm != null is always true at this point (the null path
      // returned above) — the extra check is redundant but harmless.
      if (vm != null && vm.getState() == State.Running) {
        Map<PortAndProto, Set<String>> rules = generateRulesForVM(userVmId);
        agentId = vm.getHostId();
        if (agentId != null) {
          // Build the full ruleset command (identity + signature + seqnum)
          // and push it to the host agent asynchronously.
          SecurityIngressRulesCmd cmd =
              generateRulesetCmd(
                  vm.getInstanceName(),
                  vm.getPrivateIpAddress(),
                  vm.getPrivateMacAddress(),
                  vm.getId(),
                  generateRulesetSignature(rules),
                  seqnum,
                  rules);
          Commands cmds = new Commands(cmd);
          try {
            _agentMgr.send(agentId, cmds, _answerListener);
          } catch (AgentUnavailableException e) {
            s_logger.debug(
                "Unable to send updates for vm: " + userVmId + "(agentid=" + agentId + ")");
            // NOTE(review): this call passes the vm's instance id + seqnum,
            // while the finally block below passes work.getId() — presumably
            // two different updateStep overloads; confirm.
            _workDao.updateStep(work.getInstanceId(), seqnum, Step.Done);
          }
        }
      }
    } finally {
      if (locked) {
        _userVMDao.releaseFromLockTable(userVmId);
        _workDao.updateStep(work.getId(), Step.Done);
      }
      // Commit unconditionally — even early-return paths commit whatever
      // step updates were made.
      txn.commit();
    }
  }
  @DB
  public void scheduleRulesetUpdateToHosts(
      List<Long> affectedVms, boolean updateSeqno, Long delayMs) {
    // For each affected vm, ensure a ruleset log row exists (optionally
    // bumping its sequence number) and ensure a Scheduled work item exists,
    // then schedule worker threads to process the queue. All DB writes are
    // serialized under a global work lock and a single transaction.
    //
    // @param affectedVms vm ids needing a ruleset push (sorted in place)
    // @param updateSeqno whether to increment each vm's log sequence number
    // @param delayMs     delay before workers run; null means default 100ms
    if (affectedVms.isEmpty()) {
      return;
    }

    if (delayMs == null) {
      // Long.valueOf instead of the deprecated Long(long) constructor.
      delayMs = Long.valueOf(100L);
    }

    // Sort so concurrent schedulers touch vm rows in a consistent order,
    // avoiding lock-order issues.
    Collections.sort(affectedVms);
    if (s_logger.isTraceEnabled()) {
      s_logger.trace(
          "Security Group Mgr: scheduling ruleset updates for " + affectedVms.size() + " vms");
    }
    boolean locked = _workLock.lock(_globalWorkLockTimeout);
    if (!locked) {
      s_logger.warn("Security Group Mgr: failed to acquire global work lock");
      return;
    }

    if (s_logger.isTraceEnabled()) {
      s_logger.trace("Security Group Mgr: acquired global work lock");
    }
    Transaction txn = Transaction.currentTxn();
    try {
      txn.start();
      for (Long vmId : affectedVms) {
        if (s_logger.isTraceEnabled()) {
          s_logger.trace("Security Group Mgr: scheduling ruleset update for " + vmId);
        }
        VmRulesetLogVO log = _rulesetLogDao.findByVmId(vmId);
        if (log == null) {
          log = new VmRulesetLogVO(vmId);
          log = _rulesetLogDao.persist(log);
        }
        if (log == null) {
          // BUGFIX: the original fell through to log.getLogsequence() below
          // and would NPE when persist() returned null; skip this vm instead.
          s_logger.warn("Security Group Mgr: failed to create ruleset log for vm " + vmId);
          continue;
        }

        if (updateSeqno) {
          log.incrLogsequence();
          _rulesetLogDao.update(log.getId(), log);
        }
        SecurityGroupWorkVO work = _workDao.findByVmIdStep(vmId, Step.Scheduled);
        if (work == null) {
          work = new SecurityGroupWorkVO(vmId, null, null, SecurityGroupWork.Step.Scheduled, null);
          work = _workDao.persist(work);
          if (s_logger.isTraceEnabled()) {
            s_logger.trace(
                "Security Group Mgr: created new work item for " + vmId + "; id = " + work.getId());
          }
        }

        // Stamp the work item with the (possibly incremented) log sequence
        // so the worker can detect stale updates.
        work.setLogsequenceNumber(log.getLogsequence());
        _workDao.update(work.getId(), work);
      }
      txn.commit();
      // One worker per affected vm; workers pull items from the queue, so
      // the vmId itself is intentionally not passed to the thread.
      for (Long vmId : affectedVms) {
        _executorPool.schedule(new WorkerThread(), delayMs, TimeUnit.MILLISECONDS);
      }
    } finally {
      _workLock.unlock();
      if (s_logger.isTraceEnabled()) {
        s_logger.trace("Security Group Mgr: released global work lock");
      }
    }
  }