private static Map saveSites(ExchSite3[] sites, int siteType, int sourceID, Connection c)
    throws SQLException {
  PreparedStatement insert = null;
  PreparedStatement delete = null;
  try {
    insert =
        c.prepareStatement(
            "insert into dat_customer_sites (site_id, site_type, source_id, internal_name, display_name) "
                + "values (?, ?, ?, ?, ?)");
    delete = c.prepareStatement("delete from dat_customer_sites where site_id = ?");
    Map siteIDs = new HashMap(sites.length * 2 + 1);
    for (int i = 0; i < sites.length; i++) {
      ExchSite3 site = sites[i];
      int siteID = queryLookupSiteId(sourceID, site.getInternalName(), siteType, c);
      if (siteID == 0) {
        // if we couldn't find an existing siteID, grab the next one from the sequence
        siteID = getNextFromSequence("seq_site_id", c);
      } else {
        // if there is an existing siteID, delete it so we can insert the changes
        delete.setInt(1, siteID);
        int deleted = delete.executeUpdate();
        if (deleted != 1) {
          throw new SQLException("Delete for siteID " + siteID + " returned " + deleted);
        }
      }
      siteIDs.put(site.getInternalName(), siteID);
      insert.setInt(1, siteID);
      insert.setInt(2, siteType);
      insert.setInt(3, sourceID);
      insert.setString(
          4, DirectoryUtils.truncateString(site.getInternalName(), DB_SITE_INTERNALNAME_LENGTH));
      insert.setString(
          5, DirectoryUtils.truncateString(site.getDisplayName(), DB_SITE_DISPLAYNAME_LENGTH));
      insert.executeUpdate();
    }
    return siteIDs;
  } finally {
    if (delete != null) {
      delete.close();
    }
    if (insert != null) {
      insert.close();
    }
  }
}
// escape commas in every internal name (sites, servers, stores) before persisting the topology
private static void normalizeTopologyData(ExchSite3[] groups) {
  for (int i = 0; i < groups.length; i++) {
    ExchSite3 site = groups[i];
    site.setInternalName(DirectoryUtils.escapeCommas(site.getInternalName()));
    ExchServer3[] servers = site.getServers();
    for (int j = 0; j < servers.length; j++) {
      ExchServer3 server = servers[j];
      server.setInternalName(DirectoryUtils.escapeCommas(server.getInternalName()));
      ExchStore2[] stores = server.getStores();
      for (int k = 0; k < stores.length; k++) {
        ExchStore2 store = stores[k];
        store.setInternalName(DirectoryUtils.escapeCommas(store.getInternalName()));
      }
    }
  }
}
public static List<DbUser> sync(List<DbUser> dbUsers) {
  List<DbUser> usersToUpdate = new ArrayList<>();
  Map<String, Map<String, Set<String>>> authzToNamespaceToUserIds = new HashMap<>();
  Map<String, List<DbUser>> dbUsersPerAuthz = new HashMap<>();

  // Initialize the entries based on authz in the map
  for (DbUser dbUser : dbUsers) {
    MultiValueMapUtils.addToMap(dbUser.getDomain(), dbUser, dbUsersPerAuthz);
    if (!authzToNamespaceToUserIds.containsKey(dbUser.getDomain())) {
      authzToNamespaceToUserIds.put(dbUser.getDomain(), new HashMap<String, Set<String>>());
    }
    MultiValueMapUtils.addToMapOfSets(
        dbUser.getNamespace(),
        dbUser.getExternalId(),
        authzToNamespaceToUserIds.get(dbUser.getDomain()));
  }

  for (Entry<String, Map<String, Set<String>>> entry : authzToNamespaceToUserIds.entrySet()) {
    Map<String, DbUser> activeUsers = new HashMap<>();
    String authz = entry.getKey();
    try {
      ExtensionProxy authzExtension =
          EngineExtensionsManager.getInstance().getExtensionByName(authz);
      for (Entry<String, Set<String>> userIdsPerNamespace : entry.getValue().entrySet()) {
        for (ExtMap principal :
            AuthzUtils.fetchPrincipalsByIdsRecursively(
                authzExtension, userIdsPerNamespace.getKey(), userIdsPerNamespace.getValue())) {
          DirectoryUtils.flatGroups(principal);
          DbUser dbUser = DirectoryUtils.mapPrincipalRecordToDbUser(authz, principal);
          dbUser.setGroupIds(DirectoryUtils.getGroupIdsFromPrincipal(authz, principal));
          activeUsers.put(dbUser.getExternalId(), dbUser);
        }
      }

      for (DbUser dbUser : dbUsersPerAuthz.get(authz)) {
        DbUser activeUser = activeUsers.get(dbUser.getExternalId());
        if (activeUser != null) {
          if (!activeUser.equals(dbUser)) {
            activeUser.setId(dbUser.getId());
            activeUser.setAdmin(dbUser.isAdmin());
            log.infoFormat(
                "Principal {0}::{1} synchronized",
                activeUser.getLoginName(), activeUser.getDomain());
            usersToUpdate.add(activeUser);
          }
        } else {
          log.infoFormat(
              "Deactivating non existing principal {0}::{1}",
              dbUser.getLoginName(), dbUser.getDomain());
          dbUser.setActive(false);
          usersToUpdate.add(dbUser);
        }
      }
    } catch (Exception ex) {
      log.errorFormat(
          "Error during user synchronization of extension {0}. Exception message is {1}",
          authz, ex.getMessage());
      log.debug("", ex);
    }
  }
  return usersToUpdate;
}
private static Map saveServers(
    TopologyData3 td, int sourceID, Map adminGroupIDs, Map routingGroupIDs, Connection c)
    throws SQLException {
  PreparedStatement insert = null;
  PreparedStatement update = null;
  try {
    insert =
        c.prepareStatement(
            "insert into dat_customer_servers (server_id, source_id, admin_group_id, routing_group_id, internal_name, display_name, sink_capable, sink_enabled, server_role, server_version, cloud_service_id) "
                + "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
    update =
        c.prepareStatement(
            "update dat_customer_servers "
                + "set source_id = ?, admin_group_id = ?, routing_group_id = ?, internal_name = ?, "
                + "    display_name = ?, sink_capable = ?, server_role = ?, server_version = ?, is_deleted = false, purge_time = NULL, cloud_service_id = ? "
                + "where server_id = ?");
    Map serverIDs = new HashMap(adminGroupIDs.size() * 8 + 1);
    ExchSite3[] adminGroups = td.getAdminGroups();
    ExchSite3[] routingGroups = td.getRoutingGroups();
    for (int i = 0; i < adminGroups.length; i++) {
      ExchSite3 adminGroup = adminGroups[i];
      int adminGroupID = ((Number) adminGroupIDs.get(adminGroup.getInternalName())).intValue();
      ExchServer3[] servers = adminGroup.getServers();
      for (int j = 0; j < servers.length; j++) {
        ExchServer3 server = servers[j];
        int serverID = queryLookupServerId(sourceID, server.getInternalName(), c);
        int routingGroupID =
            findRoutingGroupID(server.getInternalName(), routingGroups, routingGroupIDs);

        // for non-sinkable servers, attempt to parse cloud service from version field
        CloudService cloudService = CloudService.NONE;
        if (!server.isSinkCapable()) {
          Matcher matcher = SERVER_VERSION_PATTERN.matcher(server.getServerVersion());
          if (matcher.matches()) {
            cloudService = CloudService.valueOfCaseInsensitive(matcher.group(1), cloudService);
          }
        }

        if (serverID == 0) {
          // if we couldn't find an existing serverID, grab the next one from the sequence
          serverID = getNextFromSequence("seq_server_id", c);
          serverIDs.put(server.getInternalName(), serverID);
          insert.setInt(1, serverID);
          insert.setInt(2, sourceID);
          insert.setInt(3, adminGroupID);
          insert.setInt(4, routingGroupID);
          insert.setString(
              5,
              DirectoryUtils.truncateString(
                  server.getInternalName(), DB_SERVER_INTERNALNAME_LENGTH));
          insert.setString(
              6,
              DirectoryUtils.truncateString(
                  server.getDisplayName(), DB_SERVER_DISPLAYNAME_LENGTH));
          insert.setInt(7, server.isSinkCapable() ? 1 : 0);
          // by default, all non-cloud servers are sink-enabled (so if they become
          // e2k servers, they're enabled)
          insert.setInt(8, cloudService.getId() <= CloudService.ONPREMISES.getId() ? 1 : 0);
          insert.setInt(9, server.getServerRole());
          insert.setString(
              10,
              DirectoryUtils.truncateString(server.getServerVersion(), DB_SERVER_VERSION_LENGTH));
          insert.setInt(11, cloudService.getId());
          insert.executeUpdate();
        } else {
          // if there is an existing serverID, update it (preserve value of sink_enabled)
          serverIDs.put(server.getInternalName(), serverID);
          update.setInt(1, sourceID);
          update.setInt(2, adminGroupID);
          update.setInt(3, routingGroupID);
          update.setString(
              4,
              DirectoryUtils.truncateString(
                  server.getInternalName(), DB_SERVER_INTERNALNAME_LENGTH));
          update.setString(
              5,
              DirectoryUtils.truncateString(
                  server.getDisplayName(), DB_SERVER_DISPLAYNAME_LENGTH));
          update.setInt(6, server.isSinkCapable() ? 1 : 0);
          update.setInt(7, server.getServerRole());
          update.setString(
              8,
              DirectoryUtils.truncateString(server.getServerVersion(), DB_SERVER_VERSION_LENGTH));
          update.setInt(9, cloudService.getId());
          update.setInt(10, serverID);
          update.executeUpdate();
        }
      }
    }
    return serverIDs;
  } finally {
    if (update != null) {
      update.close();
    }
    if (insert != null) {
      insert.close();
    }
  }
}
private static Map saveStores(
    TopologyData3 td, int sourceID, ExchSite3[] sites, Map serverIDs, Connection c)
    throws SQLException {
  PreparedStatement insert = null;
  PreparedStatement delete = null;
  try {
    insert =
        c.prepareStatement(
            "insert into dat_customer_stores (store_id, source_id, server_id, internal_name, display_name) "
                + "values (?, ?, ?, ?, ?)");
    delete = c.prepareStatement("delete from dat_customer_stores where store_id = ?");
    Map storeIDs = new HashMap(101);
    for (int i = 0; i < sites.length; i++) {
      ExchSite3 site = sites[i];
      ExchServer3[] servers = site.getServers();
      for (int j = 0; j < servers.length; j++) {
        ExchServer3 server = servers[j];
        int serverID = ((Number) serverIDs.get(server.getInternalName())).intValue();
        ExchStore2[] stores = server.getStores();
        for (int k = 0; k < stores.length; k++) {
          ExchStore2 store = stores[k];
          int storeId = queryLookupStoreId(sourceID, store.getInternalName(), c);
          if (storeId == 0) {
            // if we couldn't find an existing storeID, grab the next one from the sequence
            storeId = getNextFromSequence("seq_store_id", c);
          } else {
            // if there is an existing storeID, delete it so we can insert the changes
            delete.setInt(1, storeId);
            int deleted = delete.executeUpdate();
            if (deleted != 1) {
              throw new SQLException("Delete for store " + storeId + " returned " + deleted);
            }
          }
          storeIDs.put(store.getInternalName(), storeId);
          insert.setInt(1, storeId);
          insert.setInt(2, sourceID);
          insert.setInt(3, serverID);
          insert.setString(
              4,
              DirectoryUtils.truncateString(
                  store.getInternalName(), DB_STORE_INTERNALNAME_LENGTH));
          insert.setString(
              5,
              DirectoryUtils.truncateString(store.getDisplayName(), DB_STORE_DISPLAYNAME_LENGTH));
          insert.executeUpdate();
        }
      }
    }
    return storeIDs;
  } finally {
    if (delete != null) {
      delete.close();
    }
    if (insert != null) {
      insert.close();
    }
  }
}
@Test
public void testRenameFile() throws IOException {
  final ShardId shardId = new ShardId(new Index("index"), 1);
  DirectoryService directoryService = new LuceneManagedDirectoryService(random(), false);
  Store store =
      new Store(
          shardId,
          ImmutableSettings.EMPTY,
          directoryService,
          randomDistributor(directoryService),
          new DummyShardLock(shardId));
  {
    // write an initial file with a codec footer so it carries a checksum
    IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
    int iters = scaledRandomIntBetween(10, 100);
    for (int i = 0; i < iters; i++) {
      BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
      output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
    }
    CodecUtil.writeFooter(output);
    output.close();
  }
  store.renameFile("foo.bar", "bar.foo");
  assertThat(store.directory().listAll().length, is(1));
  final long lastChecksum;
  try (IndexInput input = store.directory().openInput("bar.foo", IOContext.DEFAULT)) {
    lastChecksum = CodecUtil.checksumEntireFile(input);
  }
  try {
    store.directory().openInput("foo.bar", IOContext.DEFAULT);
    fail("file was renamed");
  } catch (FileNotFoundException | NoSuchFileException ex) {
    // expected
  }
  {
    // recreate the source file so the next rename targets an already existing destination
    IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
    int iters = scaledRandomIntBetween(10, 100);
    for (int i = 0; i < iters; i++) {
      BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
      output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
    }
    CodecUtil.writeFooter(output);
    output.close();
  }
  DistributorDirectory distributorDirectory =
      DirectoryUtils.getLeaf(store.directory(), DistributorDirectory.class);
  if (distributorDirectory != null
      && distributorDirectory.getDirectory("foo.bar")
          != distributorDirectory.getDirectory("bar.foo")) {
    try {
      store.renameFile("foo.bar", "bar.foo");
      fail("target file already exists in a different directory");
    } catch (IOException ex) {
      // expected
    }
    try (IndexInput input = store.directory().openInput("bar.foo", IOContext.DEFAULT)) {
      assertThat(lastChecksum, equalTo(CodecUtil.checksumEntireFile(input)));
    }
    assertThat(store.directory().listAll().length, is(2));
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
  } else {
    store.renameFile("foo.bar", "bar.foo");
    assertThat(store.directory().listAll().length, is(1));
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
  }
}
@Test
public void testGetLeave() throws IOException {
  Path file = createTempDir();
  final int iters = scaledRandomIntBetween(10, 100);
  for (int i = 0; i < iters; i++) {
    {
      BaseDirectoryWrapper dir = newFSDirectory(file);
      FSDirectory directory =
          DirectoryUtils.getLeaf(new FilterDirectory(dir) {}, FSDirectory.class, null);
      assertThat(directory, notNullValue());
      assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
      dir.close();
    }
    {
      BaseDirectoryWrapper dir = newFSDirectory(file);
      FSDirectory directory = DirectoryUtils.getLeaf(dir, FSDirectory.class, null);
      assertThat(directory, notNullValue());
      assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
      dir.close();
    }
    {
      Set<String> stringSet = Collections.emptySet();
      BaseDirectoryWrapper dir = newFSDirectory(file);
      FSDirectory directory =
          DirectoryUtils.getLeaf(
              new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean()),
              FSDirectory.class,
              null);
      assertThat(directory, notNullValue());
      assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
      dir.close();
    }
    {
      Set<String> stringSet = Collections.emptySet();
      BaseDirectoryWrapper dir = newFSDirectory(file);
      FSDirectory directory =
          DirectoryUtils.getLeaf(
              new FilterDirectory(
                  new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean())) {},
              FSDirectory.class,
              null);
      assertThat(directory, notNullValue());
      assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
      dir.close();
    }
    {
      Set<String> stringSet = Collections.emptySet();
      BaseDirectoryWrapper dir = newFSDirectory(file);
      RAMDirectory directory =
          DirectoryUtils.getLeaf(
              new FilterDirectory(
                  new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean())) {},
              RAMDirectory.class,
              null);
      assertThat(directory, nullValue());
      dir.close();
    }
  }
}