/** * Closes all opened connections. * * @param waitCompletion If {@code true} waits for all pending requests to be proceeded. */ @SuppressWarnings("TooBroadScope") @Override public void stop(boolean waitCompletion) { Collection<GridClientConnection> closeConns; if (closed) return; // Mark manager as closed. closed = true; // Remove all connections from cache. closeConns = new ArrayList<>(conns.values()); conns.clear(); nodeConns.clear(); // Close old connection outside the writer lock. for (GridClientConnection conn : closeConns) conn.close(CLIENT_CLOSED, waitCompletion); if (pingExecutor != null) GridClientUtils.shutdownNow(GridClientConnectionManager.class, pingExecutor, log); GridClientUtils.shutdownNow(GridClientConnectionManager.class, executor, log); if (srv != null) srv.stop(); }
/**
 * This method is public only so it can be invoked by unit testing, but should not otherwise be
 * used !
 */
@ManagedOperation(
    description = "Trashes all connections to other nodes. This is only used for testing")
public void removeAllConnections() {
    // Drop all sender-side state, then announce the now-stable state to peers.
    send_table.clear();
    sendStableMessages();

    // Reset every receiver entry before discarding the receive table itself.
    for (ReceiverEntry entry : recv_table.values()) {
        entry.reset();
    }
    recv_table.clear();
}
@Override
protected void closeListeners() {
    // Tear down each listener map in turn: deregister first (this needs the
    // live values), then drop the registrations themselves.
    deregisterAllCacheEntryListener(syncListenerRegistrations.values());
    syncListenerRegistrations.clear();

    deregisterAllCacheEntryListener(asyncListenerRegistrations.values());
    asyncListenerRegistrations.clear();

    // Finally release anyone blocked waiting on synchronous listener completion.
    notifyAndClearSyncListenerLatches();
}
/** Resets all collected statistics counters and breakdown maps, and restarts the measurement window. */
public void clear() {
    // Second-level cache counters.
    secondLevelCacheHitCount.set(0);
    secondLevelCacheMissCount.set(0);
    secondLevelCachePutCount.set(0);

    // Natural-id cache and natural-id query counters.
    naturalIdCacheHitCount.set(0);
    naturalIdCacheMissCount.set(0);
    naturalIdCachePutCount.set(0);
    naturalIdQueryExecutionCount.set(0);
    naturalIdQueryExecutionMaxTime.set(0);
    naturalIdQueryExecutionMaxTimeRegion = null;

    // Session and JDBC interaction counters.
    sessionCloseCount.set(0);
    sessionOpenCount.set(0);
    flushCount.set(0);
    connectCount.set(0);
    prepareStatementCount.set(0);
    closeStatementCount.set(0);

    // Entity lifecycle counters.
    entityDeleteCount.set(0);
    entityInsertCount.set(0);
    entityUpdateCount.set(0);
    entityLoadCount.set(0);
    entityFetchCount.set(0);

    // Collection lifecycle counters.
    collectionRemoveCount.set(0);
    collectionUpdateCount.set(0);
    collectionRecreateCount.set(0);
    collectionLoadCount.set(0);
    collectionFetchCount.set(0);

    // Query execution and query-cache counters (including the slowest-query record).
    queryExecutionCount.set(0);
    queryCacheHitCount.set(0);
    queryExecutionMaxTime.set(0);
    queryExecutionMaxTimeQueryString = null;
    queryCacheMissCount.set(0);
    queryCachePutCount.set(0);

    // Update-timestamps cache counters.
    updateTimestampsCacheMissCount.set(0);
    updateTimestampsCacheHitCount.set(0);
    updateTimestampsCachePutCount.set(0);

    // Transaction counters.
    transactionCount.set(0);
    committedTransactionCount.set(0);
    optimisticFailureCount.set(0);

    // Per-region / per-entity / per-collection / per-query breakdown maps.
    secondLevelCacheStatistics.clear();
    entityStatistics.clear();
    collectionStatistics.clear();
    queryStatistics.clear();
    naturalIdCacheStatistics.clear();

    // Restart the measurement window.
    startTime = System.currentTimeMillis();
}
/** Clears the registry to prepare for re-registration (e.g. as part of a reload). */
public void clear() {
    synchronized (extensions) { // we synchronize just to guard unnamedMerged
        profileRegistration = null;
        deploymentsRegistration = null;
        // Replace (not merely clear) the transformer registry so no stale transformers survive.
        transformerRegistry = TransformerRegistry.Factory.create(this);
        extensions.clear();
        reverseMap.clear();
        unnamedMerged = false;
    }
}
@Override
public void stop(final StopContext context) {
    // Drop all tracked state.
    directories.clear();
    locks.clear();

    // Unregister the callback if one was installed.
    if (callbackHandle != null) {
        callbackHandle.remove();
    }

    // Release remaining references so they can be garbage collected.
    configuration = null;
    factory = null;
}
@Override
public void deleteWorld() {
    // Make sure no save is still writing to disk before we start deleting.
    waitForCompletionOfPreviousSave();

    // Forget any chunk/player data still queued for saving — the world is going away.
    unloadedAndSavingChunkMap.clear();
    unloadedAndUnsavedChunkMap.clear();
    unloadedAndSavingPlayerMap.clear();
    unloadedAndUnsavedPlayerMap.clear();

    try {
        FilesUtil.recursiveDelete(getStoragePathProvider().getWorldPath());
    } catch (IOException e) {
        logger.error("Failed to purge chunks", e);
    }
}
public void destroy() {
    active = false;

    // Let every registered serializer release its own resources first.
    for (SerializerAdapter adapter : typeMap.values()) {
        adapter.destroy();
    }
    typeMap.clear();
    idMap.clear();
    constantTypesMap.clear();
    global.set(null);

    // Close and discard the pooled output buffers.
    for (BufferObjectDataOutput out : outputPool) {
        IOUtil.closeResource(out);
    }
    outputPool.clear();
}
@Override
public void destroy() {
    // May not be inited yet
    if (authFilter != null) {
        authFilter.destroy();
    }
    // Clear and null the caches so they are released and cannot be reused after destroy.
    if (nonUiAuthCache != null) {
        nonUiAuthCache.clear();
        nonUiAuthCache = null;
    }
    if (userChangedCache != null) {
        userChangedCache.clear();
        userChangedCache = null;
    }
}
public void modelRemoved(SModelDescriptor modelDescriptor) {
    // Detach this model's reference set (if any) and empty it so the contained
    // references are released promptly.
    final ConcurrentMap<SReferenceBase, Object> removed =
        myReferences.remove(modelDescriptor.getSModelReference());
    if (removed == null) {
        return;
    }
    removed.clear();
}
/**
 * Clears the backing record map, optionally retaining a set of records.
 *
 * @param excludeRecords records to keep (re-inserted for BINARY/OBJECT, skipped during removal
 *     for OFFHEAP); may be {@code null} or empty to clear everything
 */
private void clearRecordsMap(Map<Data, Record> excludeRecords) {
    InMemoryFormat inMemoryFormat = recordFactory.getStorageFormat();
    switch (inMemoryFormat) {
        case BINARY:
        case OBJECT:
            // On-heap formats: a plain clear suffices; re-add the records to keep.
            records.clear();
            if (excludeRecords != null && !excludeRecords.isEmpty()) {
                records.putAll(excludeRecords);
            }
            return;
        case OFFHEAP:
            // Off-heap records own native memory: each removed record must be
            // invalidated explicitly so its storage is released before removal.
            Iterator<Record> iter = records.values().iterator();
            while (iter.hasNext()) {
                Record record = iter.next();
                if (excludeRecords == null || !excludeRecords.containsKey(record.getKey())) {
                    record.invalidate();
                    iter.remove();
                }
            }
            return;
        default:
            throw new IllegalArgumentException("Unknown storage format: " + inMemoryFormat);
    }
}
/** Releases the channels, returning their packets to the pool first. */
private void freeChannels() {
    // Hand each channel's read/write packets back before forgetting the channel.
    for (ChannelInfo channel : channels.values()) {
        freePacket(channel.getReadPacket());
        freePacket(channel.getWritePacket());
    }
    channels.clear();
}
/**
 * Initialize log level overrides from debug options.
 *
 * <p>This may only be called during bootstrapping before any custom overrides are set. Your
 * mileage may vary if called while the application is running.
 *
 * @throws Exception if reading the debug options fails
 */
void initializeLogLevelOverrides() throws Exception {
    // reset current overrides
    overriddenLogLevels.clear();

    // add a note to the status manager
    final LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
    final StatusManager sm = lc.getStatusManager();
    if (sm != null) {
        sm.add(new InfoStatus("Initializing log level overrides.", this));
    }

    // apply new overrides
    try {
        final Map<String, String> options =
            BootActivator.getInstance().getService(DebugOptions.class).getOptions();
        for (final Entry<String, String> e : options.entrySet()) {
            // Only debug options that map to a logger name are considered.
            final String loggerName = getLoggerNameForDebugOption(e.getKey());
            if (loggerName != null) {
                // Any value other than null/"false" enables DEBUG for that logger.
                if ((null != e.getValue()) && !"false".equalsIgnoreCase(e.getValue())) {
                    setLogLevelOverride(loggerName, "DEBUG");
                }
            }
        }
    } catch (final ServiceNotAvailableException e) {
        // no debug options available (ignore)
    }
}
void shutdown() {
    // Snapshot first, then clear, so shutdown callbacks never observe the registry entries.
    final Collection<NodeEngineImpl> engines = new ArrayList<NodeEngineImpl>(nodes.values());
    nodes.clear();
    for (NodeEngineImpl engine : engines) {
        engine.getHazelcastInstance().getLifecycleService().shutdown();
    }
}
/** Adds loaded and unloaded player stores to the given save transaction. */
private void addPlayersToSaveTransaction(
    SaveTransactionBuilder saveTransactionBuilder, NetworkSystem networkSystem) {
    unloadedAndSavingPlayerMap.clear();
    /*
     * New entries might be added concurrently. By using putAll + clear to transfer entries we might
     * lose new ones added in between putAll and clear. By iterating we can make sure that all
     * entities removed from unloadedAndUnsavedPlayerMap get added to unloadedAndSavingPlayerMap.
     */
    Iterator<Map.Entry<String, EntityData.PlayerStore>> unsavedEntryIterator =
        unloadedAndUnsavedPlayerMap.entrySet().iterator();
    while (unsavedEntryIterator.hasNext()) {
        Map.Entry<String, EntityData.PlayerStore> entry = unsavedEntryIterator.next();
        unloadedAndSavingPlayerMap.put(entry.getKey(), entry.getValue());
        unsavedEntryIterator.remove();
    }

    for (Client client : networkSystem.getPlayers()) {
        // If there is a newer undisposed version of the player,we don't need to save the disposed
        // version:
        unloadedAndSavingPlayerMap.remove(client.getId());
        EntityRef character = client.getEntity().getComponent(ClientComponent.class).character;
        saveTransactionBuilder.addLoadedPlayer(client.getId(), createPlayerStore(client, character));
    }

    // Whatever remains was unloaded with no newer live copy — save it as-is.
    for (Map.Entry<String, EntityData.PlayerStore> entry : unloadedAndSavingPlayerMap.entrySet()) {
        saveTransactionBuilder.addUnloadedPlayer(entry.getKey(), entry.getValue());
    }
}
public void destroy() {
    // Cancel both ends of the messaging pair on the shared Gravity instance,
    // then drop all remembered subscriptions.
    final Gravity g = getGravity();
    g.cancel(publisher);
    g.cancel(receiver);
    subscriptions.clear();
}
/** Stops the acceptor: unregisters it, destroys all connections, and emits a stop notification. */
public synchronized void stop() {
    if (!started) {
        return;
    }
    // A paused acceptor has already been unregistered; only unregister when live.
    if (!paused) {
        InVMRegistry.instance.unregisterAcceptor(id);
    }
    // Tell the listener about every connection we are about to drop.
    for (Connection connection : connections.values()) {
        listener.connectionDestroyed(connection.getID());
    }
    connections.clear();
    if (notificationService != null) {
        TypedProperties props = new TypedProperties();
        props.putSimpleStringProperty(
            new SimpleString("factory"), new SimpleString(InVMAcceptorFactory.class.getName()));
        props.putIntProperty(new SimpleString("id"), id);
        Notification notification = new Notification(null, NotificationType.ACCEPTOR_STOPPED, props);
        try {
            notificationService.sendNotification(notification);
        } catch (Exception e) {
            // NOTE(review): notification failure is treated as best-effort, but this
            // should go through the module logger instead of printStackTrace().
            e.printStackTrace();
        }
    }
    started = false;
    paused = false;
}
/** Removes every registered path and resets the matcher to its empty state. */
public synchronized PathMatcher clearPaths() {
    exactPathMatches.clear();
    paths.clear();
    this.lengths = new int[0];
    defaultHandler = null;
    // Fluent API: allow chaining after a reset.
    return this;
}
private void postJoin() {
    blacklistedAddresses.clear();
    if (logger.isFinestEnabled()) {
        logger.finest(
            "PostJoin master: " + node.getMasterAddress() + ", isMaster: " + node.isMaster());
    }

    // Nothing to do unless the node is still active.
    if (node.getState() != NodeState.ACTIVE) {
        return;
    }

    if (tryCount.incrementAndGet() == JOIN_TRY_COUNT) {
        logger.warning("Join try count exceed limit, setting this node as master!");
        node.setAsMaster();
    }

    // Guard clause instead of nesting: the remaining work only applies once joined.
    if (!node.joined()) {
        return;
    }
    if (!node.isMaster()) {
        ensureConnectionToAllMembers();
    }
    if (clusterService.getSize() == 1) {
        logger.info('\n' + node.clusterService.membersString());
    }
}
@Override
public void clear() {
    // Close every managed EntityManager before dropping the references.
    for (EntityManager em : instances.values()) {
        em.close();
    }
    instances.clear();
}
/**
 * Handles a new cluster view: recovers cluster-wide cache status when this node (re)gains
 * coordinator duties or a merge occurred, then publishes the new view id.
 */
@Override
public void handleClusterView(boolean mergeView, int newViewId) {
    synchronized (viewHandlingLock) {
        // check to ensure this is not an older view
        if (newViewId <= viewId) {
            log.tracef("Ignoring old cluster view notification: %s", newViewId);
            return;
        }
        boolean becameCoordinator = !isCoordinator && transport.isCoordinator();
        isCoordinator = transport.isCoordinator();
        if (trace) {
            log.tracef(
                "Received new cluster view: %d, isCoordinator = %s, becameCoordinator = %s",
                (Object) newViewId, isCoordinator, becameCoordinator);
        }
        // A merge or a coordinator change means our cached cluster state may be stale.
        mustRecoverClusterStatus |= mergeView || becameCoordinator;
        if (!isCoordinator) return;
        if (mustRecoverClusterStatus) {
            // Clean up leftover cache status information from the last time we were coordinator.
            // E.g. if the local node was coordinator, started a rebalance, and then lost coordinator
            // status because of a merge, the existing cache statuses may have a rebalance in progress.
            cacheStatusMap.clear();
            try {
                recoverClusterStatus(newViewId, mergeView, transport.getMembers());
                mustRecoverClusterStatus = false;
            } catch (InterruptedException e) {
                log.tracef("Cluster state recovery interrupted because the coordinator is shutting down");
                // the CTMI has already stopped, no need to update the view id or notify waiters
                return;
            } catch (SuspectException e) {
                // We will retry when we receive the new view and then we'll reset the
                // mustRecoverClusterStatus flag
                return;
            } catch (Exception e) {
                if (!isShuttingDown) {
                    log.failedToRecoverClusterState(e);
                } else {
                    log.tracef("Cluster state recovery failed because the coordinator is shutting down");
                }
            }
        }
        // update the view id last, so join requests from other nodes wait until we recovered existing
        // members' info
        synchronized (viewUpdateLock) {
            viewId = newViewId;
            viewUpdateLock.notifyAll();
        }
    }
    if (!mustRecoverClusterStatus) {
        try {
            updateCacheMembers(transport.getMembers());
        } catch (Exception e) {
            log.errorUpdatingMembersList(e);
        }
    }
}
/** Closes the reference database, releasing the cached references. */
@Override
public void close() {
    // Already closed — nothing to release.
    if (refs == null) {
        return;
    }
    refs.clear();
    refs = null;
}
/** Adds loaded and unloaded chunks to the given save transaction. */
private void addChunksToSaveTransaction(
    SaveTransactionBuilder saveTransactionBuilder, ChunkProvider chunkProvider) {
    unloadedAndSavingChunkMap.clear();
    /*
     * New entries might be added concurrently. By using putAll + clear to transfer entries we might
     * lose new ones added in between putAll and clear. By iterating we can make sure that all
     * entries removed from unloadedAndUnsavedChunkMap get added to unloadedAndSavingChunkMap.
     */
    Iterator<Map.Entry<Vector3i, CompressedChunkBuilder>> unsavedEntryIterator =
        unloadedAndUnsavedChunkMap.entrySet().iterator();
    while (unsavedEntryIterator.hasNext()) {
        Map.Entry<Vector3i, CompressedChunkBuilder> entry = unsavedEntryIterator.next();
        unloadedAndSavingChunkMap.put(entry.getKey(), entry.getValue());
        unsavedEntryIterator.remove();
    }

    chunkProvider
        .getAllChunks()
        .stream()
        .filter(ManagedChunk::isReady)
        .forEach(
            chunk -> {
                // If there is a newer undisposed version of the chunk,we don't need to save the
                // disposed version:
                unloadedAndSavingChunkMap.remove(chunk.getPosition());
                ChunkImpl chunkImpl = (ChunkImpl) chunk; // this storage manager can only work with ChunkImpls
                saveTransactionBuilder.addLoadedChunk(chunk.getPosition(), chunkImpl);
            });

    // Whatever remains was unloaded with no newer live copy — save the compressed version.
    for (Map.Entry<Vector3i, CompressedChunkBuilder> entry : unloadedAndSavingChunkMap.entrySet()) {
        saveTransactionBuilder.addUnloadedChunk(entry.getKey(), entry.getValue());
    }
}
/**
 * Tests statement cache clear: close() failures on individual statements must not stop the
 * cache from being emptied.
 *
 * @throws SecurityException on reflection failure
 * @throws NoSuchFieldException on reflection failure
 * @throws IllegalArgumentException on reflection failure
 * @throws IllegalAccessException on reflection failure
 * @throws SQLException on statement failure
 */
@SuppressWarnings({"unchecked", "rawtypes"})
@Test
public void testStatementCacheClear()
    throws SecurityException, NoSuchFieldException, IllegalArgumentException,
        IllegalAccessException, SQLException {
    ConcurrentMap mockCache = createNiceMock(ConcurrentMap.class);
    List<StatementHandle> mockStatementCollections = createNiceMock(List.class);
    StatementCache testClass = new StatementCache(1, false, null);
    // Inject the mock cache into the private "cache" field via reflection.
    Field field = testClass.getClass().getDeclaredField("cache");
    field.setAccessible(true);
    field.set(testClass, mockCache);
    Iterator<StatementHandle> mockIterator = createNiceMock(Iterator.class);
    StatementHandle mockStatement = createNiceMock(StatementHandle.class);
    // The iterator yields the same statement twice, then ends.
    expect(mockCache.values()).andReturn(mockStatementCollections).anyTimes();
    expect(mockStatementCollections.iterator()).andReturn(mockIterator).anyTimes();
    expect(mockIterator.hasNext()).andReturn(true).times(2).andReturn(false).once();
    expect(mockIterator.next()).andReturn(mockStatement).anyTimes();
    // First close() succeeds, second throws — clear() is expected to swallow the
    // SQLException and still clear the underlying cache exactly once.
    mockStatement.close();
    expectLastCall().once().andThrow(new SQLException()).once();
    mockCache.clear();
    expectLastCall().once();
    replay(mockCache, mockStatementCollections, mockIterator, mockStatement);
    testClass.clear();
    verify(mockCache, mockStatement);
}
/** Update the recorded test results to remove all tests. Reports any changes. */
public void empty() {
    // Snapshot before clearing so each removal can still be reported.
    final Set<Test> removed = new HashSet<Test>(results.keySet());
    results.clear();
    for (Test test : removed) {
        reportRemove(test);
    }
}
/**
 * Stops the replication endpoint: closes the channel, releases large messages and reserved sync
 * files, stops wrapper journals, syncs/closes pages, drains the executor, and finally stops storage.
 */
@Override
public synchronized void stop() throws Exception {
    if (!started) {
        return;
    }

    // Channel may be null if there isn't a connection to a live server
    if (channel != null) {
        channel.close();
    }

    // Release partially-replicated large messages.
    for (ReplicatedLargeMessage largeMessage : largeMessages.values()) {
        largeMessage.releaseResources();
    }
    largeMessages.clear();

    // Close every file that was reserved for journal synchronization.
    for (Entry<JournalContent, Map<Long, JournalSyncFile>> entry : filesReservedForSync.entrySet()) {
        for (JournalSyncFile filesReserved : entry.getValue().values()) {
            filesReserved.close();
        }
    }
    filesReservedForSync.clear();

    if (journals != null) {
        for (Journal j : journals) {
            // Only the wrapper journals created for replication are stopped here.
            if (j instanceof FileWrapperJournal) j.stop();
        }
    }

    // Sync and close all pages before stopping the page manager; failures are
    // logged per page so the shutdown can continue.
    for (ConcurrentMap<Integer, Page> map : pageIndex.values()) {
        for (Page page : map.values()) {
            try {
                page.sync();
                page.close();
            } catch (Exception e) {
                ActiveMQServerLogger.LOGGER.errorClosingPageOnReplication(e);
            }
        }
    }
    pageManager.stop();
    pageIndex.clear();

    // Drain the executor: wait (bounded) until previously submitted tasks finish.
    final CountDownLatch latch = new CountDownLatch(1);
    executor.execute(
        new Runnable() {
            @Override
            public void run() {
                latch.countDown();
            }
        });
    latch.await(30, TimeUnit.SECONDS);

    // Storage needs to be the last to stop
    storageManager.stop();
    started = false;
}
@Override
public void close() throws SQLException {
    try {
        // Close every cached query-services delegate.
        SQLCloseables.closeAll(connectionQueryServicesMap.values());
    } finally {
        // Always drop the cache, even if closing a delegate threw.
        connectionQueryServicesMap.clear();
    }
}
@Override
protected void doStop() throws Exception {
    // Shared state is only released once the last component instance stops.
    if (START_COUNTER.decrementAndGet() <= 0) {
        // clear queues when no more direct-vm components in use
        CONSUMERS.clear();
    }
    super.doStop();
}
/** Disconnect the whole mongo instances. */
public static void dispose() {
    disconnect();
    // Close every cached Mongo client, then forget them all.
    for (Mongo mongo : mongosCache.values()) {
        mongo.close();
    }
    mongosCache.clear();
}
/** Clears the node registry; performed under the write lock so readers never see partial state. */
void cleanup() {
    nodesWriteLock.lock();
    try {
        nodes.clear();
    } finally {
        nodesWriteLock.unlock();
    }
}