/**
 * Purges deleted stores, servers, and sites whose purge time has passed.
 *
 * @param c open database connection
 * @return customer ids affected by any of the purges, or null when nothing was purged
 *     (null-empty convention matches the sibling purge methods in this class)
 * @throws SQLException on database error
 */
private static Set<Integer> purgeDeletedToplogyObjects(Connection c) throws SQLException {
  Set<Integer> affectedCustomers = new HashSet<Integer>();

  // Each purge method may return null when it had nothing to do.
  Set<Integer> storeCustomers = purgeDeletedStores(c);
  if (!CollectionsUtils.isNullOrEmpty(storeCustomers)) {
    affectedCustomers.addAll(storeCustomers);
  }
  Set<Integer> serverCustomers = purgeDeletedServers(c);
  if (!CollectionsUtils.isNullOrEmpty(serverCustomers)) {
    affectedCustomers.addAll(serverCustomers);
  }
  Set<Integer> siteCustomers = purgeDeletedSites(c);
  if (!CollectionsUtils.isNullOrEmpty(siteCustomers)) {
    affectedCustomers.addAll(siteCustomers);
  }

  return CollectionsUtils.isNullOrEmpty(affectedCustomers) ? null : affectedCustomers;
}
/** Deletes any files created during the test; logs each file that cannot be removed. */
@AfterMethod
public void cleanUp() throws Exception {
  if (CollectionsUtils.isNullOrEmpty(m_testFiles)) {
    return;
  }
  for (File testFile : m_testFiles) {
    boolean deleted = testFile.delete();
    if (!deleted) {
      LOGGER.error("Failed to delete " + testFile.getAbsolutePath());
    }
  }
}
private static Set<Integer> purgeDeletedSites(Connection c) throws SQLException { Statement stmt = null; ResultSet rs = null; try { // find server objects that need to be purged List<Integer> purgeSiteIds = new ArrayList<Integer>(); Set<Integer> custIds = new HashSet<Integer>(); String query = "select site.site_id, src.customer_id from dat_dirsync_sources src, dat_customer_sites site where site.is_deleted = true and current_timestamp > site.purge_time and src.source_id = site.source_id"; stmt = c.createStatement(); s_logger.debug(query); rs = stmt.executeQuery(query); while (rs.next()) { purgeSiteIds.add(rs.getInt(1)); custIds.add(rs.getInt(2)); } if (CollectionsUtils.isNullOrEmpty(purgeSiteIds)) { return null; } int totalDeletes = 0; for (List<Integer> ids : ChunkedListIterator.iterable(purgeSiteIds, BATCH_SIZE)) { logPoliciesUsingRemovedTopologyObjs( "Purging ", "site", ids, IUserManager.PROP_ROUTING_GROUP_ID, c); // purge servers String idList = QueryUtils.literal(ids); query = "delete from dat_customer_sites where site_id in " + idList; s_logger.debug(query); totalDeletes += stmt.executeUpdate(query); // don't purge constraints because the scope of the constraints will expand. Worst case, // when all constraints are deleted, a saved user set will have global scope and this // is definitely not what the customer wants. } s_logger.info("Purged " + totalDeletes + " sites for " + custIds.size() + " customers"); return custIds; } finally { if (stmt != null) { stmt.close(); } if (rs != null) { rs.close(); } } }
@Override protected boolean processMessage(IMessageContext message) throws CapabilitiesLoadException { Capabilities caps = message.getCustomer().getCapabilities(); if (caps == null) throw new CapabilitiesLoadException(); Capabilities.ActiveMailboxSupport amSupportCap = caps.getEnumCapability( Capabilities.CAP_ACTIVEMAILBOX_SUPPORT, Capabilities.ActiveMailboxSupport.NONE); boolean useNFS = ManagementContainer.getInstance() .getConfiguration() .getBooleanProperty(CONFIG_USE_NFS, DEFAULT_USE_NFS); if (amSupportCap == Capabilities.ActiveMailboxSupport.AM_CUSTOMER_ENCRYPTION || message.getEnvelope().isCustomerEncrypted()) { if (!useNFS) { return writeMetaFiles(message); } else { // Direct nfs usage; doesn't occur in production (mmfs is used instead) boolean success = true; String metaContents = createMetaContents(message); String metaFileName = new File(message.getStoreFileSubPath()).getName(); if (!CollectionsUtils.isNullOrEmpty(message.getResolvedRecipients())) { for (Iterator<ReplicatedRecipient> it = message.getResolvedRecipients().iterator(); it.hasNext(); ) { ReplicatedRecipient rr = it.next(); if (!writeMetaFile(rr, message, false, metaFileName, metaContents)) { success = false; } } } if (message.getResolvedSender() != null) { if (!writeMetaFile( message.getResolvedSender(), message, true, metaFileName, metaContents)) { success = false; } } if (!success) { m_importer.getStatComponent().addToValue(STAT_META_STORE_FAILURES, 1); m_importer.consumerImportFailed(message, true); return false; } } } return true; }
/** Purge all topology objects that are marked as deleted and the purge time has passed */ public static void purgeDeletedToplogyObjects() { ManagementContainer mc = ManagementContainer.getInstance(); ITransactionManager pool; pool = mc.getPool(ManagementContainer.POOL_NAME); Set<Integer> custIds = pool.executeWithConnection( new ConnectionExecuteFunction<Set<Integer>>() { public Set<Integer> execute(Connection c) throws SQLException { return purgeDeletedToplogyObjects(c); } }); // must run the following refresh queries after the transaction above is committed if (!CollectionsUtils.isNullOrEmpty(custIds)) { for (Integer custID : custIds) { ManagementContainer.getInstance() .getPolicyMetaManager() .notifyOfExternalPolicyChanges(custID); } } }
/**
 * Updates the given documents in the named FAST collection.
 *
 * @param collectionName FAST collection to update
 * @param loc cluster location to route the update to
 * @param docs documents to update; may be null or hold an empty list
 * @return keys of the documents reported back by the update call
 * @throws IndexException wrapping any ContentManagerException from FAST
 */
public Set<String> updateIndexData(
    String collectionName, ClusterLocation loc, Reference<List<Document>> docs)
    throws IndexException {
  if (docs == null || CollectionsUtils.isNullOrEmpty(docs.getValue())) {
    return Collections.emptySet();
  }
  try {
    FASTExecuteFunction updater =
        new FASTExecuteFunction(collectionName, loc, "update", docs) {
          public String executeWithFAST(IContentManager fast) throws ContentManagerException {
            return fast.updateContents(getDocs());
          }
        };
    Set<String> resultKeys = updater.execute(getBatchTimeoutMillis()).keySet();
    return new HashSet<String>(resultKeys);
  } catch (ContentManagerException e) {
    throw new IndexException("ContentManagerException", e);
  }
}
/**
 * Adds the given documents to the named FAST collection.
 *
 * @param collectionName FAST collection to add to
 * @param loc cluster location to route the add to
 * @param docs documents to add; may be null or hold an empty list
 * @return per-document errors reported by FAST, converted to the public error type
 * @throws IndexException wrapping any ContentManagerException from FAST
 */
public Map<String, Collection<com.m1.ems.search.document.ContentError>> addIndexData(
    String collectionName, ClusterLocation loc, Reference<List<Document>> docs)
    throws IndexException {
  if (docs == null || CollectionsUtils.isNullOrEmpty(docs.getValue())) {
    return Collections.emptyMap();
  }
  try {
    FASTExecuteFunction adder =
        new FASTExecuteFunction(collectionName, loc, "add", docs) {
          public String executeWithFAST(IContentManager fast) throws ContentManagerException {
            return fast.addContents(getDocs());
          }
        };
    Map<String, Collection<DocumentError>> espErrors = adder.execute(getBatchTimeoutMillis());
    return ESPDocumentConverter.convertESPErrorMap(espErrors);
  } catch (ContentManagerException e) {
    throw new IndexException("ContentManagerException", e);
  }
}
/**
 * Method writeMetaFiles. This uses the MailStoreFileManger API to store files remotely.
 *
 * <p>Builds one meta-message descriptor per resolved recipient (and one for the sender,
 * flagged as such) and stores them in a single call.
 *
 * @param message the message whose meta files are being written
 * @return true when the store succeeded (and the write was audited); false when it failed
 *     (failure stats are bumped and the import is marked failed)
 */
private boolean writeMetaFiles(IMessageContext message) {
  String contents = createMetaContents(message);
  String fileName = new File(message.getStoreFileSubPath()).getName();
  Collection<MailStoreFileManager.MetaMessageDescriptor> descriptors =
      new ArrayList<MailStoreFileManager.MetaMessageDescriptor>();
  if (!CollectionsUtils.isNullOrEmpty(message.getResolvedRecipients())) {
    for (ReplicatedRecipient recipient : message.getResolvedRecipients()) {
      descriptors.add(
          new MailStoreFileManager.MetaMessageDescriptor(
              message.getInternalId(),
              recipient.getMaildir(),
              fileName,
              message.getEnvelope().getSentDate(),
              message.getEnvelope().getReceivedDate(),
              false,
              recipient.getState().toInt()));
    }
  }
  ReplicatedRecipient sender = message.getResolvedSender();
  if (sender != null) {
    descriptors.add(
        new MailStoreFileManager.MetaMessageDescriptor(
            message.getInternalId(),
            sender.getMaildir(),
            fileName,
            message.getEnvelope().getSentDate(),
            message.getEnvelope().getReceivedDate(),
            true, // marks the sender's copy
            sender.getState().toInt()));
  }
  IMailStoreFileManager fileManager =
      ManagementContainer.getInstance().getMailStoreFileManager();
  if (!fileManager.storeMetaMessage(descriptors, contents)) {
    m_importer.getStatComponent().addToValue(STAT_META_STORE_FAILURES, 1);
    m_importer.consumerImportFailed(message, true);
    return false;
  }
  m_importer.audit(message, getName(), fileName);
  return true;
}
/**
 * Looks up a single storage location by id.
 *
 * @param storageId storage location id (an int, so the concatenated SQL is not injectable)
 * @param c open database connection
 * @return the matching location, or null when none exists
 * @throws SQLException on database error
 */
private StorageLocation getStorageLocationInternal(int storageId, Connection c)
    throws SQLException {
  List<StorageLocation> matches = getStorageLocations(" where id = " + storageId, c);
  if (CollectionsUtils.isNullOrEmpty(matches)) {
    return null;
  }
  return matches.get(0);
}
/** * @param loc ClusterLocation * @param keysToRemove content ids to remove * @return ids that failed to purge * @throws IndexException if an error occurs */ public Set<String> purgeIndexData( final String collectionName, final ClusterLocation loc, final Set<String> keysToRemove) throws IndexException { if (s_logCategory.isDebugEnabled()) { s_logCategory.debug( "purgeIndexData(" + collectionName + "," + loc.getName() + ",...) # items = " + keysToRemove.size()); } // if not thing to do, just return empty list if (CollectionsUtils.isNullOrEmpty(keysToRemove)) { return Collections.emptySet(); } Cluster cluster = m_clusterManager.getCluster(loc.getClusterId()); if (!cluster.isPurgeEnabled() || !loc.isPurgeEnabled()) { s_logCategory.warn( "Not processing " + keysToRemove.size() + " items because purge is disabled on " + cluster + " and " + loc); return keysToRemove; // fail all } try { final RoutingAttributes ra = new RoutingAttributes(); ra.addElement("column", String.valueOf(loc.getColumnId())); final List<RoutingAttributes> routingAttributes = new ArrayList<RoutingAttributes>( Itertools.map( new UnaryFunction<String, RoutingAttributes>() { public RoutingAttributes execute(String key) { return ra; } }, keysToRemove)); Set<String> results = new HashSet<String>( (new FASTExecuteFunction(collectionName, loc, "purge", keysToRemove) { public String executeWithFAST(IContentManager fast) throws ContentManagerException { // s_logCategory.debug("Calling fast.removeContents() with keys, subsystems // and routing attributes (sz=" + routingAttributes.size() + ")"); return fast.removeContents( keysToRemove, Arrays.asList(DOC_PROC, INDEXING), routingAttributes); } }) .execute(getBatchTimeoutMillis()) .keySet()); if (!CollectionsUtils.isNullOrEmpty(results)) { s_logCategory.info("There were " + results.size() + " failed items at location " + loc); } if (s_logCategory.isDebugEnabled()) { s_logCategory.debug("Failed items at location " + loc.getName() + ": " + results); } return 
results; } catch (ContentManagerException e) { throw new IndexException("ContentManagerException", e); } }