@Test
public void testReadDomainsAndGuids() throws Exception {
  CustomerDAO dao = new CustomerDAO();
  IConfiguration configuration = EasyMock.createStrictMock(IConfiguration.class);
  dao.setConfiguration(configuration);
  ResultSet resultSet = EasyMock.createStrictMock(ResultSet.class);
  Set<String> domains = new HashSet<String>();
  Set<GlobalIdentifier> guids = new HashSet<GlobalIdentifier>();
  int custId = 34;
  int cloudService = 2;
  int replicationZone = 453;
  int cloudService2 = 1;
  int replicationZone2 = 13;
  int cloudService3 = 3;

  // first iteration of the loop
  EasyMock.expect(resultSet.getString(10)).andReturn("domain123");
  EasyMock.expect(resultSet.getString(11)).andReturn("guid123");
  EasyMock.expect(resultSet.getInt(12)).andReturn(cloudService);
  EasyMock.expect(resultSet.getInt(13)).andReturn(replicationZone);
  EasyMock.expect(resultSet.next()).andReturn(true);
  EasyMock.expect(resultSet.getInt(1)).andReturn(custId);

  // second iteration of the loop
  EasyMock.expect(resultSet.getString(10)).andReturn("domain456");
  EasyMock.expect(resultSet.getString(11)).andReturn("guid456");
  EasyMock.expect(resultSet.getInt(12)).andReturn(cloudService2);
  EasyMock.expect(resultSet.getInt(13)).andReturn(replicationZone2);
  EasyMock.expect(resultSet.next()).andReturn(true);
  EasyMock.expect(resultSet.getInt(1)).andReturn(custId);

  // third iteration of the loop (guid not valid with no cloud service)
  EasyMock.expect(resultSet.getString(10)).andReturn("domain456");
  EasyMock.expect(resultSet.getString(11)).andReturn("guid789");
  EasyMock.expect(resultSet.getInt(12)).andReturn(cloudService3);
  EasyMock.expect(resultSet.next()).andReturn(true);
  EasyMock.expect(resultSet.getInt(1)).andReturn(custId + 1); // ends loop with mismatched custId

  EasyMock.replay(resultSet);
  assertTrue(
      "Should have another item.",
      dao.readDomainsAndGuids(resultSet, custId, domains, guids));
  EasyMock.verify(resultSet);

  assertEquals("Should have 2 domains.", 2, domains.size());
  assertTrue("Domain123 not found.", domains.contains("domain123"));
  assertTrue("Domain456 not found.", domains.contains("domain456"));
  assertEquals("Should have 2 guids.", 2, guids.size());
  for (GlobalIdentifier guid : guids) {
    if (guid.getGuid().equals("guid123")) {
      assertEquals("Wrong cloud service in guid123.", CloudService.GOOGLE, guid.getService());
      assertEquals("Wrong replication zone.", replicationZone, guid.getReplicationZone());
    } else {
      assertEquals("Wrong cloud service in guid456.", CloudService.OFFICE365, guid.getService());
      assertEquals("Wrong replication zone.", replicationZone2, guid.getReplicationZone());
    }
  }
}
public List<ClientEventEntry> findLogEvents(
    List<SearchConstraint> constraints, List<String> orderBy, int offset, int limit, Connection c)
    throws SQLException {
  StringBuffer sql =
      new StringBuffer(
          "select e.event_id, e.customer_id, e.user_id, e.event_time, e.description, "
              + "e.has_log_file from dat_client_log_events e ");
  appendWhere(sql, constraints, s_propToColumnMap);
  appendOrderBy(sql, orderBy, "e.event_id desc", s_propToColumnMap);
  appendLimits(sql, offset, limit);

  Statement stmt = null;
  ResultSet rs = null;
  try {
    stmt = c.createStatement();
    rs = stmt.executeQuery(sql.toString());
    List<ClientEventEntry> results = new ArrayList<ClientEventEntry>();
    while (rs.next()) {
      ClientEventEntry entry =
          new ClientEventEntry(
              rs.getInt(1),
              rs.getInt(2),
              rs.getInt(3),
              resolveDate(rs.getTimestamp(4)),
              rs.getString(5),
              rs.getInt(6) != 0);
      results.add(entry);
    }
    return results;
  } finally {
    DbUtils.safeClose(rs);
    DbUtils.safeClose(stmt);
  }
}
@SuppressWarnings("unchecked") @Test public void testFindCustomersWithConnection() throws Exception { CustomerDAO dao = EasyMock.createMockBuilder(CustomerDAO.class) .addMockedMethod("readNextCustomer") .addMockedMethod("getCustomerQuery") .createStrictMock(); ResultSet resultSet = EasyMock.createStrictMock(ResultSet.class); Connection connection = EasyMock.createStrictMock(Connection.class); Statement statement = EasyMock.createStrictMock(Statement.class); List<SearchConstraint> constraints = new LinkedList<SearchConstraint>(); EasyMock.expect(dao.getCustomerQuery(constraints)).andReturn("aQuery"); EasyMock.expect(connection.createStatement()).andReturn(statement); EasyMock.expect(statement.executeQuery("aQuery")).andReturn(resultSet); EasyMock.expect(resultSet.next()).andReturn(true); EasyMock.expect(dao.readNextCustomer(EasyMock.eq(resultSet), EasyMock.isA(List.class))) .andReturn(true); EasyMock.expect(dao.readNextCustomer(EasyMock.eq(resultSet), EasyMock.isA(List.class))) .andReturn(true); EasyMock.expect(dao.readNextCustomer(EasyMock.eq(resultSet), EasyMock.isA(List.class))) .andReturn(false); resultSet.close(); EasyMock.expectLastCall(); statement.close(); }
/**
 * Returns the siteId for the site with the given source, name, and type. If no such site is
 * found, this method returns 0.
 */
private static int queryLookupSiteId(
    int sourceID, String siteInternalName, int siteType, Connection c) throws SQLException {
  PreparedStatement ps = null;
  ResultSet rs = null;
  try {
    ps =
        c.prepareStatement(
            "select site_id from dat_customer_sites "
                + "where source_id = ? and internal_name = ? and site_type = ?");
    int siteID = 0;
    ps.setInt(1, sourceID);
    ps.setString(2, siteInternalName);
    ps.setInt(3, siteType);
    rs = ps.executeQuery();
    if (rs.next()) {
      siteID = rs.getInt(1);
    }
    return siteID;
  } finally {
    if (rs != null) {
      rs.close();
    }
    if (ps != null) {
      ps.close();
    }
  }
}
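// On Java 7+, the same lookup can be written with try-with-resources so the
// ResultSet and PreparedStatement are closed automatically even on error. A
// sketch of the equivalent method under that assumption (the method name here
// is illustrative, not part of the original code):
private static int queryLookupSiteIdTwr(
    int sourceID, String siteInternalName, int siteType, Connection c) throws SQLException {
  String sql =
      "select site_id from dat_customer_sites "
          + "where source_id = ? and internal_name = ? and site_type = ?";
  try (PreparedStatement ps = c.prepareStatement(sql)) {
    ps.setInt(1, sourceID);
    ps.setString(2, siteInternalName);
    ps.setInt(3, siteType);
    try (ResultSet rs = ps.executeQuery()) {
      return rs.next() ? rs.getInt(1) : 0; // 0 means "no such site", as above
    }
  }
}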
/**
 * Returns the serverId for the server with the given source and internal name, or 0 if no such
 * server exists.
 */
private static int queryLookupServerId(int sourceID, String serverInternalName, Connection c)
    throws SQLException {
  PreparedStatement idSelect = null;
  ResultSet rs = null;
  try {
    int serverId = 0;
    idSelect =
        c.prepareStatement(
            "select server_id from dat_customer_servers "
                + "where source_id = ? and internal_name = ?");
    idSelect.setInt(1, sourceID);
    idSelect.setString(2, serverInternalName);
    rs = idSelect.executeQuery();
    if (rs.next()) {
      serverId = rs.getInt(1);
    }
    return serverId;
  } finally {
    if (rs != null) {
      rs.close();
    }
    if (idSelect != null) {
      idSelect.close();
    }
  }
}
/**
 * Returns the source ID for the given customer and source name, creating the
 * dat_dirsync_sources row on the fly if it does not exist yet.
 */
private static int getSourceID(Customer cust, String sourceName, Connection c)
    throws SQLException {
  PreparedStatement ps = null;
  ResultSet rs = null;
  try {
    ps =
        c.prepareStatement(
            "select source_id from dat_dirsync_sources where customer_id = ? and source = ?");
    ps.setInt(1, cust.getCustID());
    ps.setString(2, sourceName);
    rs = ps.executeQuery();
    if (rs.next()) {
      return rs.getInt(1);
    }
    rs.close();
    rs = null;
    ps.close();
    ps = null;

    // The row doesn't already exist, so create it with nulls for the scheduling columns.
    ps =
        c.prepareStatement(
            "insert into dat_dirsync_sources (source_id, customer_id, source) "
                + "values (nextval('seq_source_id'), ?, ?)");
    ps.setInt(1, cust.getCustID());
    ps.setString(2, sourceName);
    ps.executeUpdate();
    ps.close();
    ps = null;

    // ... and grab the new sourceID.
    ps = c.prepareStatement("select currval('seq_source_id')");
    rs = ps.executeQuery();
    rs.next();
    return rs.getInt(1);
  } finally {
    if (rs != null) {
      rs.close();
    }
    if (ps != null) {
      ps.close();
    }
  }
}
/** Returns the user's secure_hash_key. If one wasn't found, creates one on the fly. */
private SecureHashKeyResult getSecureHashKey(UserAccount user, Connection c) throws SQLException {
  PreparedStatement select = null;
  PreparedStatement update = null; // created lazily
  int userId = user.getUserID();
  boolean justCreated = false;
  byte[] key = null;
  try {
    // TODO: consider having UserManager return secure_hash_key.
    // TODO: we have similar logic in several places for creating secure_hash_key
    // just-in-time. D.R.Y. this out. Sorry, I couldn't resist using this cliche :)
    select = c.prepareStatement("SELECT secure_hash_key FROM dat_user_account WHERE object_id=?");
    select.setInt(1, userId);
    ResultSet rs = select.executeQuery();
    if (!rs.next()) {
      // Possible that the user simply disappeared by the time we got here.
      LogMessageGen lmg = new LogMessageGen();
      lmg.setSubject("dat_user_account row not found");
      lmg.param(LoggingConsts.USER_ID, userId);
      m_logCategory.warn(lmg.toString());
    } else {
      key = rs.getBytes(1);
      if (key == null || key.length == 0) {
        // Hash key not found; create one on the fly.
        update =
            c.prepareStatement("UPDATE dat_user_account SET secure_hash_key=? WHERE object_id=?");
        key = createNewRandomKey();
        update.setBytes(1, key);
        update.setInt(2, userId);
        int ct = update.executeUpdate();
        if (ct != 1) {
          LogMessageGen lmg = new LogMessageGen();
          lmg.setSubject("Unable to update dat_user_account.secure_hash_key");
          lmg.param(LoggingConsts.USER_ID, userId);
          m_logCategory.error(lmg.toString());
        } else {
          justCreated = true;
        }
      } // no existing key
    } // user found
  } finally {
    DbUtils.safeClose(select);
    DbUtils.safeClose(update);
  }
  return new SecureHashKeyResult(key, justCreated);
}
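// SecureHashKeyResult is not defined in this section. A minimal sketch of a
// plausible shape, assuming it is a simple value holder for the key bytes and
// a flag saying whether the key was minted during the call (hypothetical):
public static final class SecureHashKeyResult {
  private final byte[] key; // null if the dat_user_account row was missing
  private final boolean justCreated; // true if the key was created on the fly

  public SecureHashKeyResult(byte[] key, boolean justCreated) {
    this.key = key;
    this.justCreated = justCreated;
  }

  public byte[] getKey() {
    return key;
  }

  public boolean isJustCreated() {
    return justCreated;
  }
}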
protected static void logPoliciesUsingRemovedTopologyObjs(
    String operation,
    String topologyType,
    Collection<Integer> deletedNodes,
    final String constraintName,
    Connection c)
    throws SQLException {
  String query =
      "select sets.customer_id, p.policy_name, pc.policy_id, sets.user_set_id, sets.constraint_value "
          + " from policies p, policy_criteria pc, dat_saved_user_sets sets "
          + " where sets.constraint_name = "
          + QueryUtils.literal(constraintName)
          + " and "
          + QueryUtils.dbCast("sets.constraint_value", QueryUtils.CastType.INTEGER)
          + " in "
          + QueryUtils.literal(deletedNodes)
          + " and sets.user_set_id = pc.userset_id and pc.policy_id = p.policy_id";
  s_logger.debug(query);
  Statement s = null;
  ResultSet rs = null;
  try {
    s = c.createStatement();
    rs = s.executeQuery(query);
    while (rs.next()) {
      int custID = rs.getInt(1);
      String policyName = rs.getString(2);
      int policyID = rs.getInt(3);
      int userSetID = rs.getInt(4);
      int nodeID = rs.getInt(5);
      StringBuilder bld =
          new StringBuilder(operation)
              .append(" ")
              .append(topologyType)
              .append(" ")
              .append(nodeID)
              .append(" which is referred to by userset ")
              .append(userSetID)
              .append(" in policy ")
              .append(policyName)
              .append(" with id ")
              .append(policyID)
              .append(" for customer ")
              .append(custID);
      s_logger.warn(bld.toString());
    }
  } finally {
    // Close the ResultSet before its Statement.
    if (rs != null) {
      rs.close();
    }
    if (s != null) {
      s.close();
    }
  }
}
private static Set<Integer> purgeDeletedSites(Connection c) throws SQLException {
  Statement stmt = null;
  ResultSet rs = null;
  try {
    // Find site objects that need to be purged.
    List<Integer> purgeSiteIds = new ArrayList<Integer>();
    Set<Integer> custIds = new HashSet<Integer>();
    String query =
        "select site.site_id, src.customer_id "
            + "from dat_dirsync_sources src, dat_customer_sites site "
            + "where site.is_deleted = true and current_timestamp > site.purge_time "
            + "and src.source_id = site.source_id";
    stmt = c.createStatement();
    s_logger.debug(query);
    rs = stmt.executeQuery(query);
    while (rs.next()) {
      purgeSiteIds.add(rs.getInt(1));
      custIds.add(rs.getInt(2));
    }
    if (CollectionsUtils.isNullOrEmpty(purgeSiteIds)) {
      return null;
    }
    int totalDeletes = 0;
    for (List<Integer> ids : ChunkedListIterator.iterable(purgeSiteIds, BATCH_SIZE)) {
      logPoliciesUsingRemovedTopologyObjs(
          "Purging ", "site", ids, IUserManager.PROP_ROUTING_GROUP_ID, c);
      // Purge the sites themselves.
      String idList = QueryUtils.literal(ids);
      query = "delete from dat_customer_sites where site_id in " + idList;
      s_logger.debug(query);
      totalDeletes += stmt.executeUpdate(query);
      // Don't purge constraints, because the scope of the constraints would expand. Worst
      // case, when all constraints are deleted, a saved user set would have global scope,
      // and this is definitely not what the customer wants.
    }
    s_logger.info("Purged " + totalDeletes + " sites for " + custIds.size() + " customers");
    return custIds;
  } finally {
    // Close the ResultSet before its Statement.
    if (rs != null) {
      rs.close();
    }
    if (stmt != null) {
      stmt.close();
    }
  }
}
/**
 * Returns the next value from the named sequence. Throws SQLException if the query fails or
 * doesn't return any rows. The query is <code>select nextval('<i>seqName</i>')</code>.
 */
private static int getNextFromSequence(String seqName, Connection c) throws SQLException {
  Statement st = null;
  ResultSet rs = null;
  try {
    st = c.createStatement();
    rs = st.executeQuery("select nextval('" + seqName + "')");
    if (rs.next()) {
      return rs.getInt(1);
    } else {
      throw new SQLException("Couldn't get next value from sequence '" + seqName + "'.");
    }
  } finally {
    if (rs != null) {
      rs.close();
    }
    if (st != null) {
      st.close();
    }
  }
}
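// Illustrative call site (the sequence name below is hypothetical): allocate
// an ID up front before inserting the corresponding row, mirroring how
// seq_source_id is used in getSourceID above.
//
//   int newSiteId = getNextFromSequence("seq_site_id", c);
//   // ... insert into dat_customer_sites using newSiteId ...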
public void purgeLogEvents(final List<SearchConstraint> constraints, final int daysToKeep) {
  blockHereIfBadNfs();
  StringBuffer sql =
      new StringBuffer(
          "select u.mail_directory || '/logs/' || e.event_id || '.log' "
              + "from dat_client_log_events e, dat_user_account u "
              + "where event_time < current_timestamp - (? || ' days')::interval "
              + "and e.has_log_file != 0 "
              + "and e.user_id = u.object_id");
  appendWhereConstraints(sql, constraints, s_propToColumnMap);
  m_logCategory.info("Purge event logs query is\n" + sql);

  StringBuffer sql2 =
      new StringBuffer(
          "delete from dat_client_log_events "
              + "where event_time < current_timestamp - (? || ' days')::interval");
  if (!constraints.isEmpty()) {
    sql2.append(" and event_id in (select e.event_id from dat_client_log_events e ");
    appendWhere(sql2, constraints, s_propToColumnMap);
    sql2.append(")");
  }
  m_logCategory.info("Purge event logs query is\n" + sql2);

  try {
    Connection c = m_txManager.getConnection();
    PreparedStatement stmt = null;
    ResultSet rs = null;
    boolean needsRollback = true;
    try {
      stmt = c.prepareStatement(sql.toString());
      stmt.setInt(1, daysToKeep);
      rs = stmt.executeQuery();
      while (rs.next()) {
        // nfs usage, but the DB transaction takes no locks.
        File logFile = new File(getNfsRoot(), rs.getString(1));
        if (logFile.exists()) {
          boolean ok = logFile.delete();
          if (!ok) {
            m_logger
                .log("Unable to delete")
                .param(LoggingConsts.FILENAME, logFile.getAbsolutePath())
                .warn();
          }
        } // file exists
      } // each row

      // Done with the select; release its ResultSet and PreparedStatement
      // before reusing the variables for the delete.
      DbUtils.safeClose(rs);
      rs = null;
      DbUtils.safeClose(stmt);

      // Below, no nfs usage occurs, so we can use the same connection.
      // The sql delete may take DB locks.
      stmt = c.prepareStatement(sql2.toString());
      stmt.setInt(1, daysToKeep);
      stmt.executeUpdate();
      c.commit();
      needsRollback = false;
    } finally {
      DbUtils.safeClose(rs);
      DbUtils.safeClose(stmt);
      if (needsRollback) {
        c.rollback();
      }
      m_txManager.returnConnection(c);
    }
  } catch (Exception ex) {
    m_logger.log("Error in purgeLogEvents").warn(ex);
  }
}
public void setKeyCreatingIfNeeded(
    List<? extends IClientInfo> clients,
    List<IClientInfo> clientsNeedingSignalUpdate,
    Connection c)
    throws SQLException {
  PreparedStatement select = null;
  PreparedStatement update = null; // created lazily
  try {
    select = c.prepareStatement("SELECT secure_hash_key FROM dat_user_account WHERE object_id=?");
    for (IClientInfo client : clients) {
      int userId = client.getUserId();
      // Ensure dat_user_account.secure_hash_key is filled.
      select.setInt(1, userId);
      ResultSet rs = select.executeQuery();
      if (!rs.next()) {
        // Possible that the user simply disappeared by the time we got here.
        LogMessageGen lmg = new LogMessageGen();
        lmg.setSubject("dat_user_account row not found");
        lmg.param(LoggingConsts.USER_ID, userId);
        m_logCategory.warn(lmg.toString());
        continue;
      }
      boolean firstTimeCreate = false;
      byte[] key = rs.getBytes(1);
      if (key == null || key.length == 0) {
        if (update == null) {
          update =
              c.prepareStatement(
                  "UPDATE dat_user_account SET secure_hash_key=? WHERE object_id=?");
        }
        key = createNewRandomKey();
        update.setBytes(1, key);
        update.setInt(2, userId);
        int ct = update.executeUpdate();
        if (ct != 1) {
          LogMessageGen lmg = new LogMessageGen();
          lmg.setSubject("Unable to update dat_user_account.secure_hash_key");
          lmg.param(LoggingConsts.USER_ID, userId);
          m_logCategory.error(lmg.toString());
          continue;
        } else {
          firstTimeCreate = true;
        }
      } // no existing key
      client.getHashSupport().setHashKey(key);
      if (firstTimeCreate && clientsNeedingSignalUpdate != null) {
        // EMSDEV-7854: don't actually do updateSignalFiles(client) right here;
        // we want to avoid nfs usage in the middle of a db transaction.
        clientsNeedingSignalUpdate.add(client);
      }
    } // each client
  } finally {
    DbUtils.safeClose(select);
    DbUtils.safeClose(update);
  }
}
/** Writes user state information to the shared filesystem. */
private void updateStateCache(
    Customer customer, int transitionId, CustomerState newState, Connection c)
    throws SQLException {
  PreparedStatement pst = null;
  ResultSet rs = null;
  try {
    // The state files are used by Outlook, BB, and maybe future clients.
    pst =
        c.prepareStatement(
            "SELECT u.secure_hash_key, u.object_id, u.user_state, u.is_deleted, u.last_activation_id "
                + "FROM dat_user_account u, dat_transition_users tr "
                + "WHERE u.object_id = tr.user_id AND u.customer_id=? "
                + "AND tr.transition_id=?");
    pst.setInt(1, customer.getCustID());
    pst.setInt(2, transitionId);
    rs = pst.executeQuery();
    while (rs.next()) {
      int i = 0;
      byte[] key = rs.getBytes(++i);
      int userId = rs.getInt(++i);
      CustomerState state = CustomerState.fromInt(rs.getInt(++i));
      int isDeletedInt = rs.getInt(++i);
      int lastActivationId = rs.getInt(++i);
      // If the user is marked deleted but has a key, we'll try cleaning up state files.
      boolean isDeleted = isDeletedInt != IUserManager.USER_NOT_DELETED;
      if (key == null || key.length == 0) {
        LogMessageGen lmg = new LogMessageGen();
        lmg.setSubject("dat_user_account.secure_hash_key not set");
        lmg.param(LoggingConsts.USER_ID, userId);
        m_logCategory.info(lmg.toString());
        // Without a key, we can't determine the signal filenames,
        // so no cleanup is possible.
        continue;
      }
      ClientHashSupport hash = new ClientHashSupport();
      hash.setCustomerId(customer.getCustID());
      hash.setUserId(userId);
      hash.setHashKey(key);
      hash.setLastActivationId(lastActivationId);
      if (m_logCategory.isInfoEnabled()) {
        LogMessageGen lmg = new LogMessageGen();
        lmg.setSubject("Updating signal files");
        lmg.param(LoggingConsts.USER_ID, userId);
        m_logCategory.info(lmg.toString());
      }
      // Wrt EMSDEV-7854 (nfs calls inside database transactions): the query above
      // only does a select, so no locks are taken, and this method is private.
      updateSignalFiles(hash, state, isDeleted);
    } // each row
  } finally {
    DbUtils.safeClose(rs);
    DbUtils.safeClose(pst);
  }
}
private static void deleteTopologyData(
    Connection c,
    int sourceID,
    Collection<Integer> groupIDs,
    Collection<Integer> serverIDs,
    Collection<Integer> storeIDs)
    throws SQLException {
  PreparedStatement ps = null;
  int purgeInterval =
      ManagementContainer.getInstance()
          .getConfiguration()
          .getIntProperty(PURGE_DAY_INTERVAL_PROP, DEFAULT_PURGE_DAY_INTERVAL);
  List<Integer> deletedStores = null;
  List<Integer> deletedServers = null;
  List<Integer> deletedSites = null;
  try {
    // First determine which stores to delete.
    String strQuery =
        "select store_id from dat_customer_stores where exists ( "
            + "   select * from dat_customer_servers svr "
            + "   where svr.server_id = dat_customer_stores.server_id and "
            + "   exists ( "
            + "     select * from dat_customer_sites s "
            + "     where s.site_id = svr.admin_group_id and source_id = ? "
            + "   ) "
            + " ) and "
            + " store_id not in "
            + QueryUtils.literal(storeIDs)
            + " and is_deleted = false";
    ps = c.prepareStatement(strQuery);
    ps.setInt(1, sourceID);
    ResultSet rs = ps.executeQuery();
    // Convert the result set to a list of store ids to be deleted.
    while (rs.next()) {
      if (deletedStores == null) deletedStores = new ArrayList<Integer>();
      deletedStores.add(rs.getInt(1));
    }
    ps.close();
    if (deletedStores != null) // check whether we have anything to delete
    {
      strQuery =
          "update dat_customer_stores set is_deleted = true, purge_time = current_timestamp + '"
              + purgeInterval
              + " days'::interval"
              + " where store_id in "
              + QueryUtils.literal(deletedStores);
      ps = c.prepareStatement(strQuery);
      ps.executeUpdate();
      ps.close();
      // Log what we marked for deletion.
      logPoliciesUsingRemovedTopologyObjs(
          "Marking as deleted", "store", deletedStores, IUserManager.PROP_STORE_ID, c);
    }
    ps = null;

    // Delete the servers. First determine which servers to delete.
    strQuery =
        "select server_id from dat_customer_servers "
            + "where "
            + " exists ( "
            + "   select * from dat_customer_sites s "
            + "   where s.site_id = dat_customer_servers.admin_group_id and source_id = ? "
            + " ) and "
            + " server_id not in "
            + QueryUtils.literal(serverIDs)
            + " and is_deleted = false";
    ps = c.prepareStatement(strQuery);
    ps.setInt(1, sourceID);
    rs = ps.executeQuery();
    // Convert the result set to a list of server ids to be deleted.
    while (rs.next()) {
      if (deletedServers == null) deletedServers = new ArrayList<Integer>();
      deletedServers.add(rs.getInt(1));
    }
    ps.close();
    if (deletedServers != null) // check whether we have anything to delete
    {
      strQuery =
          "update dat_customer_servers set is_deleted = true, purge_time = current_timestamp + '"
              + purgeInterval
              + " days'::interval"
              + " where server_id in "
              + QueryUtils.literal(deletedServers);
      ps = c.prepareStatement(strQuery);
      ps.executeUpdate();
      ps.close();
      // Log what we marked for deletion.
      logPoliciesUsingRemovedTopologyObjs(
          "Marking as deleted", "server", deletedServers, IUserManager.PROP_SERVER_ID, c);
    }
    ps = null;

    // Delete the sites. First determine which sites to delete.
    strQuery =
        "select site_id from dat_customer_sites "
            + "where "
            + " source_id = ? and is_deleted = false and "
            + " site_id not in "
            + QueryUtils.literal(groupIDs);
    ps = c.prepareStatement(strQuery);
    ps.setInt(1, sourceID);
    rs = ps.executeQuery();
    // Convert the result set to a list of site ids to be deleted.
    while (rs.next()) {
      if (deletedSites == null) deletedSites = new ArrayList<Integer>();
      deletedSites.add(rs.getInt(1));
    }
    ps.close();
    if (deletedSites != null) // check whether we have anything to delete
    {
      strQuery =
          "update dat_customer_sites set is_deleted = true, purge_time = current_timestamp + '"
              + purgeInterval
              + " days'::interval"
              + " where site_id in "
              + QueryUtils.literal(deletedSites);
      ps = c.prepareStatement(strQuery);
      ps.executeUpdate();
      ps.close();
      // Log what we marked for deletion.
      logPoliciesUsingRemovedTopologyObjs(
          "Marking as deleted", "site", deletedSites, IUserManager.PROP_ROUTING_GROUP_ID, c);
    }
    ps = null;
  } finally {
    if (ps != null) {
      ps.close();
    }
  }
}
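// The three blocks in deleteTopologyData (stores, servers, sites) repeat one
// select-then-mark-deleted pattern. A minimal sketch of how that pattern could
// be factored out; this helper is hypothetical (not part of the original code)
// and assumes the caller supplies a select with a single source_id parameter
// plus the table and id column to update:
private static List<Integer> markTopologyObjsDeleted(
    Connection c, String selectSql, int sourceID, String table, String idColumn, int purgeInterval)
    throws SQLException {
  List<Integer> ids = new ArrayList<Integer>();
  PreparedStatement ps = c.prepareStatement(selectSql);
  try {
    ps.setInt(1, sourceID);
    ResultSet rs = ps.executeQuery();
    while (rs.next()) {
      ids.add(rs.getInt(1)); // collect the ids of rows to mark deleted
    }
  } finally {
    ps.close();
  }
  if (!ids.isEmpty()) {
    String update =
        "update " + table + " set is_deleted = true, purge_time = current_timestamp + '"
            + purgeInterval + " days'::interval where " + idColumn + " in "
            + QueryUtils.literal(ids);
    Statement st = c.createStatement();
    try {
      st.executeUpdate(update);
    } finally {
      st.close();
    }
  }
  return ids;
}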