Example 1
  /** Returns the user's secure_hash_key. If one isn't found, creates one on the fly. */
  private SecureHashKeyResult getSecureHashKey(UserAccount user, Connection c) throws SQLException {

    PreparedStatement select = null;

    // Create lazily.
    PreparedStatement update = null;

    int userId = user.getUserID();

    boolean justCreated = false;

    byte[] key = null;
    try {
      // TODO:  consider having UserManager return secure_hash_key.
      // TODO:  We have similar logic in several places for creating secure_hash_key just-in-time.
      // D.R.Y. this out.  Sorry I couldn't resist using this cliche :)

      select = c.prepareStatement("SELECT secure_hash_key FROM dat_user_account WHERE object_id=?");

      select.setInt(1, userId);
      ResultSet rs = select.executeQuery();
      if (!rs.next()) {
        LogMessageGen lmg = new LogMessageGen();
        lmg.setSubject("dat_user_account row not found");
        lmg.param(LoggingConsts.USER_ID, userId);
        // possible that the user simply disappeared by the time we got here.
        m_logCategory.warn(lmg.toString());
      } else {
        key = rs.getBytes(1);
        if (key == null || key.length == 0) {
          // hash key not found; create one on the fly.
          update =
              c.prepareStatement("UPDATE dat_user_account SET secure_hash_key=? WHERE object_id=?");
          key = createNewRandomKey();
          update.setBytes(1, key);
          update.setInt(2, userId);
          int ct = update.executeUpdate();
          if (ct != 1) {
            LogMessageGen lmg = new LogMessageGen();
            lmg.setSubject("Unable to update dat_user_account.secure_hash_key");
            lmg.param(LoggingConsts.USER_ID, userId);
            m_logCategory.error(lmg.toString());
          } else {
            justCreated = true;
          }
        } // needed to set key.
      } // user found
    } finally {
      DbUtils.safeClose(select);
      DbUtils.safeClose(update);
    }
    return new SecureHashKeyResult(key, justCreated);
  }
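
These examples close JDBC resources through a DbUtils.safeClose helper whose source is not included. A minimal sketch of what such a helper presumably does (null-tolerant close that swallows failures on the cleanup path) is shown below; the class and method bodies are assumptions, not the actual implementation:

final class DbUtils {
  private DbUtils() {}

  /** Close a statement if non-null, ignoring failures on the cleanup path. */
  static void safeClose(java.sql.Statement stmt) {
    if (stmt == null) {
      return;
    }
    try {
      stmt.close();
    } catch (java.sql.SQLException ignored) {
      // Deliberately ignored: we are already in a finally block.
    }
  }

  /** Close a result set if non-null, ignoring failures on the cleanup path. */
  static void safeClose(java.sql.ResultSet rs) {
    if (rs == null) {
      return;
    }
    try {
      rs.close();
    } catch (java.sql.SQLException ignored) {
      // Deliberately ignored: we are already in a finally block.
    }
  }
}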
Example 2
  private static Map saveSites(ExchSite3[] sites, int siteType, int sourceID, Connection c)
      throws SQLException {
    PreparedStatement insert = null;
    PreparedStatement delete = null;
    try {
      insert =
          c.prepareStatement(
              "insert into dat_customer_sites (site_id, site_type, source_id, internal_name, display_name) "
                  + "values (?, ?, ?, ?, ?)");
      delete = c.prepareStatement("delete from dat_customer_sites where site_id = ?");

      Map siteIDs = new HashMap(sites.length * 2 + 1);
      for (int i = 0; i < sites.length; i++) {
        ExchSite3 site = sites[i];

        int siteID = queryLookupSiteId(sourceID, site.getInternalName(), siteType, c);

        if (siteID == 0) {
          // if we couldn't find an existing siteID, grab the next one from the sequence
          siteID = getNextFromSequence("seq_site_id", c);
        } else {
          // if there is an existing siteID, delete it so we can insert the changes
          delete.setInt(1, siteID);
          int deleted = delete.executeUpdate();
          if (deleted != 1) {
            throw new SQLException("Delete for siteID " + siteID + " returned " + deleted);
          }
        }

        siteIDs.put(site.getInternalName(), siteID);
        insert.setInt(1, siteID);
        insert.setInt(2, siteType);
        insert.setInt(3, sourceID);
        insert.setString(
            4, DirectoryUtils.truncateString(site.getInternalName(), DB_SITE_INTERNALNAME_LENGTH));
        insert.setString(
            5, DirectoryUtils.truncateString(site.getDisplayName(), DB_SITE_DISPLAYNAME_LENGTH));
        insert.executeUpdate();
      }
      return siteIDs;
    } finally {
      if (delete != null) {
        delete.close();
      }
      if (insert != null) {
        insert.close();
      }
    }
  }
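
Examples 2, 10, and 11 obtain new ids via getNextFromSequence, which is not shown. Given the PostgreSQL-style nextval()/currval() calls visible in Example 6, a plausible sketch is the following; the exact signature and error handling are assumptions:

  private static int getNextFromSequence(String sequenceName, Connection c) throws SQLException {
    // Assumes sequenceName is a trusted constant such as "seq_site_id".
    PreparedStatement ps = null;
    ResultSet rs = null;
    try {
      ps = c.prepareStatement("select nextval('" + sequenceName + "')");
      rs = ps.executeQuery();
      if (!rs.next()) {
        throw new SQLException("Sequence " + sequenceName + " returned no value");
      }
      return rs.getInt(1);
    } finally {
      if (rs != null) {
        rs.close();
      }
      if (ps != null) {
        ps.close();
      }
    }
  }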
Example 3
  /** Returns event_id of newly created logging entry. */
  private Integer logEvent(IClient client, String eventText, InputStream logFileStream, Connection c)
      throws SQLException {
    PreparedStatement stmt = null;
    int id = 0;
    try {
      long longId = getSequenceId("seq_client_log_event_ids", c);
      if (longId > Integer.MAX_VALUE) {
        // dat_client_log_events.event_id is a 32-bit integer numeric type.
        // This is a bad problem; ensure it's logged; don't depend on whoever's
        // catching the exception.
        m_logger.log("seq_client_log_event_ids overflowed").error();
        throw new IllegalStateException("seq_client_log_event_ids overflowed.");
      }
      id = (int) longId;
      stmt =
          c.prepareStatement(
              "insert into dat_client_log_events "
                  + "(event_id, customer_id, user_id, event_time, description, has_log_file) "
                  + "values "
                  + "(?, ?, ?, current_timestamp, ?, ?)");

      stmt.setInt(1, id);
      stmt.setInt(2, client.getCustomerId());
      stmt.setInt(3, client.getUserId());
      stmt.setString(4, eventText);
      stmt.setInt(5, logFileStream == null ? 0 : 1);
      if (stmt.executeUpdate() != 1) {
        throw new SQLException("Can't insert client event for " + client);
      }
    } finally {
      DbUtils.safeClose(stmt);
    }
    return Integer.valueOf(id);
  }
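
Example 3 uses getSequenceId, which reads like the long-valued counterpart of getNextFromSequence; a hedged sketch under that assumption:

  private long getSequenceId(String sequenceName, Connection c) throws SQLException {
    // Assumed to mirror getNextFromSequence but return the full 64-bit sequence value,
    // which is what makes the Integer.MAX_VALUE overflow check above meaningful.
    PreparedStatement ps = null;
    ResultSet rs = null;
    try {
      ps = c.prepareStatement("select nextval('" + sequenceName + "')");
      rs = ps.executeQuery();
      if (!rs.next()) {
        throw new SQLException("Sequence " + sequenceName + " returned no value");
      }
      return rs.getLong(1);
    } finally {
      DbUtils.safeClose(rs);
      DbUtils.safeClose(ps);
    }
  }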
Example 4
 /**
  * Returns the siteId for the site with the given source, name, and type. If no such site is
  * found, this method returns 0.
  */
 private static int queryLookupSiteId(
     int sourceID, String siteInternalName, int siteType, Connection c) throws SQLException {
   PreparedStatement ps = null;
   ResultSet rs = null;
   try {
     ps =
         c.prepareStatement(
             "select site_id from dat_customer_sites "
                 + "where source_id = ? and internal_name = ? and site_type = ?");
     int siteID = 0;
     ps.setInt(1, sourceID);
     ps.setString(2, siteInternalName);
     ps.setInt(3, siteType);
     rs = ps.executeQuery();
     if (rs.next()) {
       siteID = rs.getInt(1);
     }
     return siteID;
   } finally {
     if (rs != null) {
       rs.close();
     }
     if (ps != null) {
       ps.close();
     }
   }
 }
Example 5
 private static int queryLookupServerId(int sourceID, String serverInternalName, Connection c)
     throws SQLException {
   PreparedStatement idSelect = null;
   ResultSet rs = null;
   try {
     int serverId = 0;
     idSelect =
         c.prepareStatement(
             "select server_id from dat_customer_servers "
                 + "where source_id = ? and internal_name = ?");
     idSelect.setInt(1, sourceID);
     idSelect.setString(2, serverInternalName);
     rs = idSelect.executeQuery();
     if (rs.next()) {
       serverId = rs.getInt(1);
     }
     return serverId;
   } finally {
     if (rs != null) {
       rs.close();
     }
     if (idSelect != null) {
       idSelect.close();
     }
   }
 }
Example 6
  private static int getSourceID(Customer cust, String sourceName, Connection c)
      throws SQLException {
    PreparedStatement ps = null;
    ResultSet rs = null;
    try {
      ps =
          c.prepareStatement(
              "select source_id from dat_dirsync_sources where customer_id = ? and source = ?");
      ps.setInt(1, cust.getCustID());
      ps.setString(2, sourceName);
      rs = ps.executeQuery();
      if (rs.next()) {
        return rs.getInt(1);
      }
      rs.close();
      rs = null;
      ps.close();
      ps = null;

      // doesn't already exist, so create it with nulls for scheduling stuff
      ps =
          c.prepareStatement(
              "insert into dat_dirsync_sources (source_id, customer_id, source) values (nextval('seq_source_id'), ?, ?)");
      ps.setInt(1, cust.getCustID());
      ps.setString(2, sourceName);
      ps.executeUpdate();
      ps.close();
      ps = null;

      // and grab the sourceID
      ps = c.prepareStatement("select currval('seq_source_id')");
      rs = ps.executeQuery();
      rs.next();
      return rs.getInt(1);
    } finally {
      if (rs != null) {
        rs.close();
      }
      if (ps != null) {
        ps.close();
      }
    }
  }
Example 7
  public void purgeLogEvents(final List<SearchConstraint> constraints, final int daysToKeep) {
    blockHereIfBadNfs();

    StringBuffer sql =
        new StringBuffer(
            "select u.mail_directory || '/logs/' || e.event_id || '.log' "
                + "from dat_client_log_events e, dat_user_account u "
                + "where event_time < current_timestamp - (? || ' days')::interval "
                + "and e.has_log_file != 0 "
                + "and e.user_id = u.object_id");
    appendWhereConstraints(sql, constraints, s_propToColumnMap);
    m_logCategory.info("Purge event logs query is\n" + sql);

    StringBuffer sql2 =
        new StringBuffer(
            "delete from dat_client_log_events "
                + "where event_time < current_timestamp - (? || ' days')::interval");
    if (!constraints.isEmpty()) {
      sql2.append(" and event_id in (select e.event_id from dat_client_log_events e ");
      appendWhere(sql2, constraints, s_propToColumnMap);
      sql2.append(")");
    }
    m_logCategory.info("Purge event logs query is\n" + sql2);

    try {
      Connection c = m_txManager.getConnection();
      PreparedStatement stmt = null;
      ResultSet rs = null;
      boolean needsRollback = true;
      try {
        stmt = c.prepareStatement(sql.toString());
        stmt.setInt(1, daysToKeep);
        rs = stmt.executeQuery();
        while (rs.next()) {
          File logFile = new File(getNfsRoot(), rs.getString(1));
          // nfs usage, but DB transaction takes no locks.
          if (logFile.exists()) {
            boolean ok = logFile.delete();
            if (!ok) {
              m_logger
                  .log("Unable to delete")
                  .param(LoggingConsts.FILENAME, logFile.getAbsolutePath())
                  .warn();
            }
          } // file exists.
        } // each row

        // Done with the select; release its statement/result set before reusing
        // the variables for the delete below.
        DbUtils.safeClose(rs);
        rs = null;
        DbUtils.safeClose(stmt);

        // Below, no nfs usage occurs.  We can use the same connection.
        // The 'sql delete' may use DB locks.
        stmt = c.prepareStatement(sql2.toString());
        stmt.setInt(1, daysToKeep);
        stmt.executeUpdate();
        c.commit();
        needsRollback = false;
      } finally {
        DbUtils.safeClose(rs);
        DbUtils.safeClose(stmt);
        if (needsRollback) {
          c.rollback();
        }
        m_txManager.returnConnection(c);
      }
    } catch (Exception ex) {
      m_logger.log("Error in purgeLogEvents").warn(ex);
    }
  }
Example 8
  public void setKeyCreatingIfNeeded(
      List<? extends IClientInfo> clients,
      List<IClientInfo> clientsNeedingSignalUpdate,
      Connection c)
      throws SQLException {

    PreparedStatement select = null;

    // Create lazily.
    PreparedStatement update = null;

    try {

      select = c.prepareStatement("SELECT secure_hash_key FROM dat_user_account WHERE object_id=?");

      for (IClientInfo client : clients) {
        int userId = client.getUserId();
        // ensure dat_user_account.secure_hash_key is filled.
        select.setInt(1, userId);
        ResultSet rs = select.executeQuery();
        if (!rs.next()) {
          LogMessageGen lmg = new LogMessageGen();
          lmg.setSubject("dat_user_account row not found");
          lmg.param(LoggingConsts.USER_ID, userId);
          // possible that the user simply disappeared by the time we got here.
          m_logCategory.warn(lmg.toString());
          continue;
        }
        boolean firstTimeCreate = false;
        byte[] key = rs.getBytes(1);
        if (key == null || key.length == 0) {
          if (update == null) {
            update =
                c.prepareStatement(
                    "UPDATE dat_user_account SET secure_hash_key=? WHERE object_id=?");
          }
          key = createNewRandomKey();
          update.setBytes(1, key);
          update.setInt(2, userId);
          int ct = update.executeUpdate();
          if (ct != 1) {
            LogMessageGen lmg = new LogMessageGen();
            lmg.setSubject("Unable to update dat_user_account.secure_hash_key");
            lmg.param(LoggingConsts.USER_ID, userId);
            m_logCategory.error(lmg.toString());
            continue;
          } else {
            firstTimeCreate = true;
          }
        } // no existing key.
        client.getHashSupport().setHashKey(key);
        if (firstTimeCreate) {
          if (clientsNeedingSignalUpdate != null) {
            // EMSDEV-7854.  Don't actually do updateSignalFiles(client) right here;
            // we want to avoid nfs usage in the middle of a db transaction.
            clientsNeedingSignalUpdate.add(client);
          }
        }
      } // each client.
    } finally {
      DbUtils.safeClose(select);
      DbUtils.safeClose(update);
    }
  }
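
Examples 1 and 8 mint keys with createNewRandomKey, which is also not shown. A minimal sketch using the JDK's SecureRandom follows; the key length is an assumption, since the examples never reveal what dat_user_account.secure_hash_key expects:

  private static final java.security.SecureRandom s_random = new java.security.SecureRandom();

  private byte[] createNewRandomKey() {
    // Hypothetical: 20 random bytes; the real key length is not shown in these examples.
    byte[] key = new byte[20];
    s_random.nextBytes(key);
    return key;
  }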
Example 9
  /** Writes user state information to the shared filesystem */
  private void updateStateCache(
      Customer customer, int transitionId, CustomerState newState, Connection c)
      throws SQLException {
    PreparedStatement pst = null;
    ResultSet rs = null;

    try {
      pst =
          c.prepareStatement(
              "SELECT u.secure_hash_key, u.object_id, u.user_state, u.is_deleted, u.last_activation_id "
                  + "FROM dat_user_account u, dat_transition_users tr "
                  + "WHERE u.object_id = tr.user_id AND u.customer_id=? "
                  + "AND tr.transition_id=?;");

      // The state files are used by Outlook, BB, and maybe future clients.
      pst.setInt(1, customer.getCustID());
      pst.setInt(2, transitionId);
      rs = pst.executeQuery();
      while (rs.next()) {
        int i = 0;
        byte[] key = rs.getBytes(++i);
        int userId = rs.getInt(++i);
        CustomerState state = CustomerState.fromInt(rs.getInt(++i));
        int isDeletedInt = rs.getInt(++i);
        int lastActivationId = rs.getInt(++i);

        // If the user is marked deleted but has a key, we'll try cleaning
        // up state files.
        boolean isDeleted = isDeletedInt != IUserManager.USER_NOT_DELETED;
        if (key == null || key.length == 0) {
          LogMessageGen lmg = new LogMessageGen();
          lmg.setSubject("dat_user_account.secure_hash_key not set");
          lmg.param(LoggingConsts.USER_ID, userId);
          m_logCategory.info(lmg.toString());

          // Without a key, we can't determine the signal filenames
          // so no cleanup is possible.
          continue;
        }

        ClientHashSupport hash = new ClientHashSupport();
        hash.setCustomerId(customer.getCustID());
        hash.setUserId(userId);
        hash.setHashKey(key);
        hash.setLastActivationId(lastActivationId);

        if (m_logCategory.isInfoEnabled()) {
          LogMessageGen lmg = new LogMessageGen();
          lmg.setSubject("Updating signal files");
          lmg.param(LoggingConsts.USER_ID, userId);
          m_logCategory.info(lmg.toString());
        }
        // wrt EMSDEV-7854 nfs calls and database transactions.
        // Above is only doing a select; no locks are taken and this
        // method is private.
        updateSignalFiles(hash, state, isDeleted);
      } // each row.
    } finally {
      DbUtils.safeClose(rs);
      DbUtils.safeClose(pst);
    }
  }
Example 10
  private static Map saveServers(
      TopologyData3 td, int sourceID, Map adminGroupIDs, Map routingGroupIDs, Connection c)
      throws SQLException {
    PreparedStatement insert = null;
    PreparedStatement update = null;
    try {
      insert =
          c.prepareStatement(
              "insert into dat_customer_servers (server_id, source_id, admin_group_id, routing_group_id, internal_name, display_name, sink_capable, sink_enabled, server_role, server_version, cloud_service_id) "
                  + "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
      update =
          c.prepareStatement(
              "update dat_customer_servers "
                  + "set source_id = ?, admin_group_id = ?, routing_group_id = ?, internal_name = ?, "
                  + "    display_name = ?, sink_capable = ?, server_role = ?, server_version = ?, is_deleted = false, purge_time = NULL, cloud_service_id = ? "
                  + "where server_id = ?");

      Map serverIDs = new HashMap(adminGroupIDs.size() * 8 + 1);
      ExchSite3[] adminGroups = td.getAdminGroups();
      ExchSite3[] routingGroups = td.getRoutingGroups();
      for (int i = 0; i < adminGroups.length; i++) {
        ExchSite3 adminGroup = adminGroups[i];
        int adminGroupID = ((Number) adminGroupIDs.get(adminGroup.getInternalName())).intValue();
        ExchServer3[] servers = adminGroup.getServers();
        for (int j = 0; j < servers.length; j++) {
          ExchServer3 server = servers[j];

          int serverID = queryLookupServerId(sourceID, server.getInternalName(), c);
          int routingGroupID =
              findRoutingGroupID(server.getInternalName(), routingGroups, routingGroupIDs);

          // for non-sinkable servers, attempt to parse cloud service from version field
          CloudService cloudService = CloudService.NONE;
          if (!server.isSinkCapable()) {
            Matcher matcher = SERVER_VERSION_PATTERN.matcher(server.getServerVersion());

            if (matcher.matches()) {
              cloudService = CloudService.valueOfCaseInsensitive(matcher.group(1), cloudService);
            }
          }

          if (serverID == 0) {
            // if we couldn't find an existing serverID, grab the next one from the sequence
            serverID = getNextFromSequence("seq_server_id", c);
            serverIDs.put(server.getInternalName(), serverID);
            insert.setInt(1, serverID);
            insert.setInt(2, sourceID);
            insert.setInt(3, adminGroupID);
            insert.setInt(4, routingGroupID);
            insert.setString(
                5,
                DirectoryUtils.truncateString(
                    server.getInternalName(), DB_SERVER_INTERNALNAME_LENGTH));
            insert.setString(
                6,
                DirectoryUtils.truncateString(
                    server.getDisplayName(), DB_SERVER_DISPLAYNAME_LENGTH));
            insert.setInt(7, server.isSinkCapable() ? 1 : 0);
            // By default, all non-cloud servers are sink-enabled (so if they become
            // e2k servers, they're enabled).
            insert.setInt(8, cloudService.getId() <= CloudService.ONPREMISES.getId() ? 1 : 0);
            insert.setInt(9, server.getServerRole());
            insert.setString(
                10,
                DirectoryUtils.truncateString(server.getServerVersion(), DB_SERVER_VERSION_LENGTH));
            insert.setInt(11, cloudService.getId());
            insert.executeUpdate();
          } else {
            // if there is an existing serverID, update it (preserve value of sink_enabled)
            serverIDs.put(server.getInternalName(), serverID);
            update.setInt(1, sourceID);
            update.setInt(2, adminGroupID);
            update.setInt(3, routingGroupID);
            update.setString(
                4,
                DirectoryUtils.truncateString(
                    server.getInternalName(), DB_SERVER_INTERNALNAME_LENGTH));
            update.setString(
                5,
                DirectoryUtils.truncateString(
                    server.getDisplayName(), DB_SERVER_DISPLAYNAME_LENGTH));
            update.setInt(6, server.isSinkCapable() ? 1 : 0);
            update.setInt(7, server.getServerRole());
            update.setString(
                8,
                DirectoryUtils.truncateString(server.getServerVersion(), DB_SERVER_VERSION_LENGTH));
            update.setInt(9, cloudService.getId());
            update.setInt(10, serverID);
            update.executeUpdate();
          }
        }
      }
      return serverIDs;
    } finally {
      if (update != null) {
        update.close();
      }
      if (insert != null) {
        insert.close();
      }
    }
  }
Example 11
  private static Map saveStores(
      TopologyData3 td, int sourceID, ExchSite3[] sites, Map serverIDs, Connection c)
      throws SQLException {
    PreparedStatement insert = null;
    PreparedStatement delete = null;
    try {
      insert =
          c.prepareStatement(
              "insert into dat_customer_stores (store_id, source_id, server_id, internal_name, display_name) "
                  + "values (?, ?, ?, ?, ?)");
      delete = c.prepareStatement("delete from dat_customer_stores where store_id = ?");

      Map storeIDs = new HashMap(101);

      for (int i = 0; i < sites.length; i++) {
        ExchSite3 site = sites[i];
        ExchServer3[] servers = site.getServers();
        for (int j = 0; j < servers.length; j++) {
          ExchServer3 server = servers[j];
          int serverID = ((Number) serverIDs.get(server.getInternalName())).intValue();
          ExchStore2[] stores = server.getStores();
          for (int k = 0; k < stores.length; k++) {
            ExchStore2 store = stores[k];

            int storeId = queryLookupStoreId(sourceID, store.getInternalName(), c);

            if (storeId == 0) {
              // if we couldn't find an existing storeID, grab the next one from the sequence
              storeId = getNextFromSequence("seq_store_id", c);
            } else {
              // if there is an existing storeID, delete it so we can insert the changes
              delete.setInt(1, storeId);
              int deleted = delete.executeUpdate();
              if (deleted != 1) {
                throw new SQLException("Delete for store " + storeId + " returned " + deleted);
              }
            }

            storeIDs.put(store.getInternalName(), storeId);
            insert.setInt(1, storeId);
            insert.setInt(2, sourceID);
            insert.setInt(3, serverID);
            insert.setString(
                4,
                DirectoryUtils.truncateString(
                    store.getInternalName(), DB_STORE_INTERNALNAME_LENGTH));
            insert.setString(
                5,
                DirectoryUtils.truncateString(store.getDisplayName(), DB_STORE_DISPLAYNAME_LENGTH));
            insert.executeUpdate();
          }
        }
      }
      return storeIDs;
    } finally {
      if (delete != null) {
        delete.close();
      }
      if (insert != null) {
        insert.close();
      }
    }
  }
Example 12
  private static void deleteTopologyData(
      Connection c, int sourceID, Collection groupIDs, Collection serverIDs, Collection storeIDs)
      throws SQLException {
    PreparedStatement ps = null;
    int purgeInterval =
        ManagementContainer.getInstance()
            .getConfiguration()
            .getIntProperty(PURGE_DAY_INTERVAL_PROP, DEFAULT_PURGE_DAY_INTERVAL);
    List<Integer> deletedStores = null;
    List<Integer> deletedServers = null;
    List<Integer> deletedSites = null;

    try {
      // First determine what stores to delete
      String strQuery =
          "select store_id from dat_customer_stores where exists ( "
              + "  select * from dat_customer_servers svr "
              + "    where svr.server_id = dat_customer_stores.server_id and "
              + "      exists ( "
              + "        select * from dat_customer_sites s "
              + "        where s.site_id = svr.admin_group_id and source_id = ? "
              + "      ) "
              + "  ) and "
              + "  store_id not in "
              + QueryUtils.literal(storeIDs)
              + " and is_deleted = false";
      ps = c.prepareStatement(strQuery);

      ps.setInt(1, sourceID);
      ResultSet rs = ps.executeQuery();
      // Convert the result set to a list of store id's to be deleted
      while (rs.next()) {
        if (deletedStores == null) deletedStores = new ArrayList<Integer>();

        deletedStores.add(rs.getInt(1));
      }

      ps.close();

      if (deletedStores != null) // Check to see if we have anything to delete
      {
        strQuery =
            "update dat_customer_stores set is_deleted = true, purge_time = current_timestamp + '"
                + purgeInterval
                + " days'::interval"
                + "    where store_id in "
                + QueryUtils.literal(deletedStores);
        ps = c.prepareStatement(strQuery);
        ps.executeUpdate();
        ps.close();

        // Log what we marked for deletion
        logPoliciesUsingRemovedTopologyObjs(
            "Marking as deleted", "store", deletedStores, IUserManager.PROP_STORE_ID, c);
      }

      ps = null;

      // delete the servers
      // First determine what servers to delete
      strQuery =
          "select server_id from dat_customer_servers "
              + "where "
              + "  exists ( "
              + "    select * from dat_customer_sites s "
              + "    where s.site_id = dat_customer_servers.admin_group_id and source_id = ? "
              + "  ) and "
              + "  server_id not in "
              + QueryUtils.literal(serverIDs)
              + " and is_deleted = false";
      ps = c.prepareStatement(strQuery);

      ps.setInt(1, sourceID);
      rs = ps.executeQuery();
      // Convert the result set to a list of server id's to be deleted
      while (rs.next()) {
        if (deletedServers == null) deletedServers = new ArrayList<Integer>();

        deletedServers.add(rs.getInt(1));
      }

      ps.close();

      if (deletedServers != null) // Check to see if we have anything to delete
      {
        strQuery =
            "update dat_customer_servers set is_deleted = true, purge_time = current_timestamp + '"
                + purgeInterval
                + " days'::interval"
                + "    where server_id in "
                + QueryUtils.literal(deletedServers);
        ps = c.prepareStatement(strQuery);
        ps.executeUpdate();
        ps.close();

        // Log what we marked for deletion
        logPoliciesUsingRemovedTopologyObjs(
            "Marking as deleted", "server", deletedServers, IUserManager.PROP_SERVER_ID, c);
      }

      ps = null;

      // delete the sites
      // First determine what sites to delete
      strQuery =
          "select site_id from dat_customer_sites "
              + "where "
              + "  source_id = ? and is_deleted = false and "
              + "  site_id not in "
              + QueryUtils.literal(groupIDs);
      ps = c.prepareStatement(strQuery);

      ps.setInt(1, sourceID);
      rs = ps.executeQuery();
      // Convert the result set to a list of site id's to be deleted
      while (rs.next()) {
        if (deletedSites == null) deletedSites = new ArrayList<Integer>();

        deletedSites.add(rs.getInt(1));
      }

      ps.close();

      if (deletedSites != null) // Check to see if we have anything to delete
      {
        strQuery =
            "update dat_customer_sites set is_deleted = true, purge_time = current_timestamp + '"
                + purgeInterval
                + " days'::interval"
                + "    where site_id in "
                + QueryUtils.literal(deletedSites);
        ps = c.prepareStatement(strQuery);
        ps.executeUpdate();
        ps.close();

        // Log what we marked for deletion
        logPoliciesUsingRemovedTopologyObjs(
            "Marking as deleted", "site", deletedSites, IUserManager.PROP_ROUTING_GROUP_ID, c);
      }

      ps = null;
    } finally {
      if (ps != null) {
        ps.close();
      }
    }
  }
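
Example 12 builds its IN-lists with QueryUtils.literal, whose implementation is not included. Judging by how it is spliced into the SQL strings above, it presumably renders a collection of ids as a parenthesized, comma-separated literal; below is a sketch under that assumption (numeric values only, no quoting or escaping, and no handling of an empty collection):

  static String literal(java.util.Collection<?> values) {
    // Renders e.g. [1, 2, 3] as "(1, 2, 3)" for direct use after an IN keyword.
    StringBuilder sb = new StringBuilder("(");
    boolean first = true;
    for (Object value : values) {
      if (!first) {
        sb.append(", ");
      }
      sb.append(value);
      first = false;
    }
    return sb.append(")").toString();
  }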