// Logs every policy whose saved user set still references a topology object that is being removed.
protected static void logPoliciesUsingRemovedTopologyObjs(
    String operation,
    String topologyType,
    Collection<Integer> deletedNodes,
    final String constraintName,
    Connection c)
    throws SQLException {
  String query =
      "select sets.customer_id, p.policy_name, pc.policy_id, sets.user_set_id, sets.constraint_value "
          + " from policies p, policy_criteria pc, dat_saved_user_sets sets "
          + " where sets.constraint_name = " + QueryUtils.literal(constraintName)
          + " and " + QueryUtils.dbCast("sets.constraint_value", QueryUtils.CastType.INTEGER)
          + " in " + QueryUtils.literal(deletedNodes)
          + " and sets.user_set_id = pc.userset_id and pc.policy_id = p.policy_id";
  s_logger.debug(query);

  Statement s = null;
  ResultSet rs = null;
  try {
    s = c.createStatement();
    rs = s.executeQuery(query);
    while (rs.next()) {
      int custID = rs.getInt(1);
      String policyName = rs.getString(2);
      int policyID = rs.getInt(3);
      int userSetID = rs.getInt(4);
      int nodeID = rs.getInt(5);
      StringBuilder bld =
          new StringBuilder(operation)
              .append(" ")
              .append(topologyType)
              .append(" ")
              .append(nodeID)
              .append(" which is referred to by userset ")
              .append(userSetID)
              .append(" in policy ")
              .append(policyName)
              .append(" with id ")
              .append(policyID)
              .append(" for customer ")
              .append(custID);
      s_logger.warn(bld.toString());
    }
  } finally {
    // Close the ResultSet before its Statement so both are always released,
    // even if one of the close calls throws.
    if (rs != null) rs.close();
    if (s != null) s.close();
  }
}
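// Hedged sketch, not part of the original class: the same lookup written with
// try-with-resources (Java 7+), which releases the ResultSet and Statement in the
// correct order automatically. It assumes the same s_logger and QueryUtils helpers
// shown above; the method name is illustrative only.
protected static void logPoliciesUsingRemovedTopologyObjsSketch(
    String operation,
    String topologyType,
    Collection<Integer> deletedNodes,
    String constraintName,
    Connection c)
    throws SQLException {
  String query =
      "select sets.customer_id, p.policy_name, pc.policy_id, sets.user_set_id, sets.constraint_value "
          + " from policies p, policy_criteria pc, dat_saved_user_sets sets "
          + " where sets.constraint_name = " + QueryUtils.literal(constraintName)
          + " and " + QueryUtils.dbCast("sets.constraint_value", QueryUtils.CastType.INTEGER)
          + " in " + QueryUtils.literal(deletedNodes)
          + " and sets.user_set_id = pc.userset_id and pc.policy_id = p.policy_id";
  s_logger.debug(query);
  try (Statement s = c.createStatement();
      ResultSet rs = s.executeQuery(query)) {
    while (rs.next()) {
      s_logger.warn(
          operation + " " + topologyType + " " + rs.getInt(5)
              + " which is referred to by userset " + rs.getInt(4)
              + " in policy " + rs.getString(2)
              + " with id " + rs.getInt(3)
              + " for customer " + rs.getInt(1));
    }
  }
}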
private static Set<Integer> purgeDeletedSites(Connection c) throws SQLException {
  Statement stmt = null;
  ResultSet rs = null;
  try {
    // find site rows that need to be purged
    List<Integer> purgeSiteIds = new ArrayList<Integer>();
    Set<Integer> custIds = new HashSet<Integer>();
    String query =
        "select site.site_id, src.customer_id"
            + " from dat_dirsync_sources src, dat_customer_sites site"
            + " where site.is_deleted = true and current_timestamp > site.purge_time"
            + " and src.source_id = site.source_id";
    stmt = c.createStatement();
    s_logger.debug(query);
    rs = stmt.executeQuery(query);
    while (rs.next()) {
      purgeSiteIds.add(rs.getInt(1));
      custIds.add(rs.getInt(2));
    }

    if (CollectionsUtils.isNullOrEmpty(purgeSiteIds)) {
      return null;
    }

    int totalDeletes = 0;
    for (List<Integer> ids : ChunkedListIterator.iterable(purgeSiteIds, BATCH_SIZE)) {
      logPoliciesUsingRemovedTopologyObjs(
          "Purging ", "site", ids, IUserManager.PROP_ROUTING_GROUP_ID, c);

      // purge the sites themselves
      String idList = QueryUtils.literal(ids);
      query = "delete from dat_customer_sites where site_id in " + idList;
      s_logger.debug(query);
      totalDeletes += stmt.executeUpdate(query);

      // Don't purge constraints, because the scope of the remaining constraints would expand.
      // Worst case, when all constraints are deleted, a saved user set would have global scope,
      // which is definitely not what the customer wants.
    }
    s_logger.info("Purged " + totalDeletes + " sites for " + custIds.size() + " customers");
    return custIds;
  } finally {
    // Close the ResultSet before its Statement so both are always released.
    if (rs != null) {
      rs.close();
    }
    if (stmt != null) {
      stmt.close();
    }
  }
}
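// Hedged sketch (assumption): ChunkedListIterator is not shown above. A minimal
// equivalent of the iterable(list, batchSize) call used in purgeDeletedSites could
// look like the helper below, splitting the id list into batches so the generated
// IN-lists stay bounded; the real helper may differ.
static <T> Iterable<List<T>> chunked(List<T> list, int batchSize) {
  List<List<T>> chunks = new ArrayList<List<T>>();
  for (int i = 0; i < list.size(); i += batchSize) {
    chunks.add(list.subList(i, Math.min(i + batchSize, list.size())));
  }
  return chunks;
}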
private static void deleteTopologyData(
    Connection c,
    int sourceID,
    Collection<Integer> groupIDs,
    Collection<Integer> serverIDs,
    Collection<Integer> storeIDs)
    throws SQLException {
  PreparedStatement ps = null;
  int purgeInterval =
      ManagementContainer.getInstance()
          .getConfiguration()
          .getIntProperty(PURGE_DAY_INTERVAL_PROP, DEFAULT_PURGE_DAY_INTERVAL);
  List<Integer> deletedStores = null;
  List<Integer> deletedServers = null;
  List<Integer> deletedSites = null;

  try {
    // First determine which stores to delete
    String strQuery =
        "select store_id from dat_customer_stores where exists ( "
            + "   select * from dat_customer_servers svr "
            + "   where svr.server_id = dat_customer_stores.server_id and "
            + "   exists ( "
            + "     select * from dat_customer_sites s "
            + "     where s.site_id = svr.admin_group_id and source_id = ? "
            + "   ) "
            + " ) and "
            + " store_id not in " + QueryUtils.literal(storeIDs)
            + " and is_deleted = false";

    ps = c.prepareStatement(strQuery);
    ps.setInt(1, sourceID);
    ResultSet rs = ps.executeQuery();

    // Convert the result set to a list of store ids to be deleted
    while (rs.next()) {
      if (deletedStores == null) deletedStores = new ArrayList<Integer>();
      deletedStores.add(rs.getInt(1));
    }
    ps.close();

    if (deletedStores != null) // Check to see if we have anything to delete
    {
      strQuery =
          "update dat_customer_stores set is_deleted = true, purge_time = current_timestamp + '"
              + purgeInterval + " days'::interval"
              + " where store_id in " + QueryUtils.literal(deletedStores);
      ps = c.prepareStatement(strQuery);
      ps.executeUpdate();
      ps.close();

      // Log what we marked for deletion
      logPoliciesUsingRemovedTopologyObjs(
          "Marking as deleted", "store", deletedStores, IUserManager.PROP_STORE_ID, c);
    }
    ps = null;

    // Delete the servers.
    // First determine which servers to delete
    strQuery =
        "select server_id from dat_customer_servers "
            + "where "
            + " exists ( "
            + "   select * from dat_customer_sites s "
            + "   where s.site_id = dat_customer_servers.admin_group_id and source_id = ? "
            + " ) and "
            + " server_id not in " + QueryUtils.literal(serverIDs)
            + " and is_deleted = false";

    ps = c.prepareStatement(strQuery);
    ps.setInt(1, sourceID);
    rs = ps.executeQuery();

    // Convert the result set to a list of server ids to be deleted
    while (rs.next()) {
      if (deletedServers == null) deletedServers = new ArrayList<Integer>();
      deletedServers.add(rs.getInt(1));
    }
    ps.close();

    if (deletedServers != null) // Check to see if we have anything to delete
    {
      strQuery =
          "update dat_customer_servers set is_deleted = true, purge_time = current_timestamp + '"
              + purgeInterval + " days'::interval"
              + " where server_id in " + QueryUtils.literal(deletedServers);
      ps = c.prepareStatement(strQuery);
      ps.executeUpdate();
      ps.close();

      // Log what we marked for deletion
      logPoliciesUsingRemovedTopologyObjs(
          "Marking as deleted", "server", deletedServers, IUserManager.PROP_SERVER_ID, c);
    }
    ps = null;

    // Delete the sites.
    // First determine which sites to delete
    strQuery =
        "select site_id from dat_customer_sites "
            + "where "
            + " source_id = ? and is_deleted = false and "
            + " site_id not in " + QueryUtils.literal(groupIDs);

    ps = c.prepareStatement(strQuery);
    ps.setInt(1, sourceID);
    rs = ps.executeQuery();

    // Convert the result set to a list of site ids to be deleted
    while (rs.next()) {
      if (deletedSites == null) deletedSites = new ArrayList<Integer>();
      deletedSites.add(rs.getInt(1));
    }
    ps.close();

    if (deletedSites != null) // Check to see if we have anything to delete
    {
      strQuery =
          "update dat_customer_sites set is_deleted = true, purge_time = current_timestamp + '"
              + purgeInterval + " days'::interval"
              + " where site_id in " + QueryUtils.literal(deletedSites);
      ps = c.prepareStatement(strQuery);
      ps.executeUpdate();
      ps.close();

      // Log what we marked for deletion
      logPoliciesUsingRemovedTopologyObjs(
          "Marking as deleted", "site", deletedSites, IUserManager.PROP_ROUTING_GROUP_ID, c);
    }
    ps = null;
  } finally {
    if (ps != null) {
      ps.close();
    }
  }
}
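// Hedged sketch (hypothetical helper, not in the original source): the store, server,
// and site passes above all follow the same "select stale ids, mark them deleted, log the
// affected policies" shape. A helper along these lines could remove the duplication; the
// table and column names come from the queries above, but the method itself and its
// parameters are illustrative only.
private static void markStaleRowsDeleted(
    Connection c,
    String selectStaleIdsSql, // e.g. the store/server/site select with the source_id placeholder
    int sourceID,
    String updateSqlPrefix, // e.g. "update dat_customer_stores set ... where store_id in "
    String topologyType,
    String constraintName)
    throws SQLException {
  List<Integer> staleIds = new ArrayList<Integer>();
  PreparedStatement ps = c.prepareStatement(selectStaleIdsSql);
  try {
    ps.setInt(1, sourceID);
    ResultSet rs = ps.executeQuery();
    while (rs.next()) {
      staleIds.add(rs.getInt(1));
    }
  } finally {
    ps.close();
  }
  if (staleIds.isEmpty()) {
    return; // nothing stale for this source
  }
  Statement stmt = c.createStatement();
  try {
    stmt.executeUpdate(updateSqlPrefix + QueryUtils.literal(staleIds));
  } finally {
    stmt.close();
  }
  logPoliciesUsingRemovedTopologyObjs(
      "Marking as deleted", topologyType, staleIds, constraintName, c);
}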
private List getNextSequenceImpl(TypeDescriptor typeDescriptor, Session session) {
  Field[] pkFields = typeDescriptor.getPkFields();
  Assert.condition(
      1 == pkFields.length,
      "Automatic PK values are only supported for types with a single PK field.");

  String createPKStmt =
      dbDescriptor.getCreatePKStatement(
          sqlUtils.getSchemaName(), typeDescriptor.getPkSequence(), this.sequenceBatchSize);
  Field field = pkFields[0];

  if (session.isUsingPreparedStatements(typeDescriptor.getType())) {
    PreparedStatement pkStatement = null;
    try {
      Connection connection = session.getConnection();
      pkStatement = connection.prepareStatement(createPKStmt);
      ResultSet pkQuery = pkStatement.executeQuery();

      List newIds = new LinkedList();
      while (pkQuery.next()) {
        newIds.add(
            DmlManager.getJavaValue(
                field.getType(),
                typeDescriptor.getPersistentField(field).getLength(),
                pkQuery,
                1,
                true,
                false));
      }
      return newIds;
    } catch (SQLException e) {
      throw new InternalException(e);
    } finally {
      QueryUtils.closeStatement(pkStatement);
    }
  } else {
    Statement pkStmt = null;
    try {
      Connection connection = session.getConnection();
      pkStmt = connection.createStatement();
      ResultSet pkQuery = pkStmt.executeQuery(createPKStmt);

      List newIds = new LinkedList();
      while (pkQuery.next()) {
        newIds.add(
            DmlManager.getJavaValue(
                field.getType(),
                typeDescriptor.getPersistentField(field).getLength(),
                pkQuery,
                1,
                true,
                false));
      }
      return newIds;
    } catch (SQLException e) {
      throw new InternalException(e);
    } finally {
      QueryUtils.closeStatement(pkStmt);
    }
  }
}
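// Hedged sketch (hypothetical helper, not part of the original class): both branches of
// getNextSequenceImpl read the generated key values the same way, so the shared loop could
// be extracted as below. It reuses only calls that appear above (DmlManager.getJavaValue
// and the persistent field length lookup); the method name is illustrative only.
private List readGeneratedIds(ResultSet pkQuery, TypeDescriptor typeDescriptor, Field field)
    throws SQLException {
  List newIds = new LinkedList();
  while (pkQuery.next()) {
    newIds.add(
        DmlManager.getJavaValue(
            field.getType(),
            typeDescriptor.getPersistentField(field).getLength(),
            pkQuery,
            1,
            true,
            false));
  }
  return newIds;
}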