/** Shuts the AirReceiver down gracefully */
public static void onShutdown() {
    /* Close channels */
    final ChannelGroupFuture allChannelsClosed = s_allChannels.close();

    /* Stop all mDNS responders */
    synchronized (s_jmDNSInstances) {
        for (final JmDNS jmDNS : s_jmDNSInstances) {
            try {
                jmDNS.unregisterAllServices();
                s_logger.info("Unregistered all services on " + jmDNS.getInterface());
            } catch (final IOException e) {
                s_logger.info("Failed to unregister some services");
            }
        }
    }

    /* Wait for all channels to finish closing */
    allChannelsClosed.awaitUninterruptibly();

    /* Stop the ExecutorService */
    ExecutorService.shutdown();

    /* Release the OrderedMemoryAwareThreadPoolExecutor */
    ChannelExecutionHandler.releaseExternalResources();
}
/** Stops this server. */
public void shutdown() {
    // This is so we don't run this twice (/stop and actual shutdown)
    if (isShuttingDown) return;
    isShuttingDown = true;
    logger.info("The server is shutting down...");

    monitor.interrupt();

    // Stop scheduler and disable plugins
    scheduler.stop();
    pluginManager.clearPlugins();

    // Kick (and save) all players
    for (Player player : getOnlinePlayers()) {
        player.kickPlayer("Server shutting down.");
    }

    // Save worlds
    for (World world : getWorlds()) {
        unloadWorld(world, true);
    }
    storeQueue.end();

    // Gracefully stop Netty
    group.close();
    bootstrap.getFactory().releaseExternalResources();

    // And finally kill the console
    consoleManager.stop();
}
private void actualClose(final Context closeContext, final Handler<Void> done) {
    if (id != null) {
        vertx.sharedNetServers().remove(id);
    }

    for (DefaultNetSocket sock : socketMap.values()) {
        sock.internalClose();
    }

    // We need to reset it since sock.internalClose() above can call into the close handlers of
    // sockets on the same thread, which can cause the context id for the thread to change!
    Context.setContext(closeContext);

    ChannelGroupFuture fut = serverChannelGroup.close();
    if (done != null) {
        fut.addListener(new ChannelGroupFutureListener() {
            public void operationComplete(ChannelGroupFuture channelGroupFuture) throws Exception {
                executeCloseDone(closeContext, done);
            }
        });
    }
}
public void stop() { log.info("Shutting down proxy"); final ChannelGroupFuture future = allChannels.close(); future.awaitUninterruptibly(6 * 1000); serverBootstrap.releaseExternalResources(); log.info("Done shutting down proxy"); }
/** Stop the RPC service */
@LogMessageDocs({
    @LogMessageDoc(level = "WARN",
                   message = "Failed to cleanly shut down RPC server",
                   explanation = "Could not close all open sockets cleanly"),
    @LogMessageDoc(level = "WARN",
                   message = "Interrupted while shutting down RPC server",
                   explanation = "Could not close all open sockets cleanly")
})
public void shutdown() {
    shutDown = true;
    try {
        if (!cg.close().await(5, TimeUnit.SECONDS)) {
            logger.warn("Failed to cleanly shut down RPC server");
            return;
        }
        if (clientBootstrap != null) clientBootstrap.releaseExternalResources();
        clientBootstrap = null;
        if (serverBootstrap != null) serverBootstrap.releaseExternalResources();
        serverBootstrap = null;
        if (pipelineFactory != null) pipelineFactory.releaseExternalResources();
        pipelineFactory = null;
        if (bossExecutor != null) bossExecutor.shutdown();
        bossExecutor = null;
        if (workerExecutor != null) workerExecutor.shutdown();
        workerExecutor = null;
    } catch (InterruptedException e) {
        logger.warn("Interrupted while shutting down RPC server");
    }
    logger.debug("Internal floodlight RPC shut down");
}
public void stop() { log.info("Shutting down proxy"); if (stopped.get()) { log.info("Already stopped"); return; } stopped.set(true); log.info("Closing all channels..."); // See http://static.netty.io/3.5/guide/#start.12 final ChannelGroupFuture future = allChannels.close(); future.awaitUninterruptibly(10 * 1000); if (!future.isCompleteSuccess()) { final Iterator<ChannelFuture> iter = future.iterator(); while (iter.hasNext()) { final ChannelFuture cf = iter.next(); if (!cf.isSuccess()) { log.warn("Cause of failure for {} is {}", cf.getChannel(), cf.getCause()); } } } log.info("Stopping timer"); timer.stop(); serverChannelFactory.releaseExternalResources(); clientChannelFactory.releaseExternalResources(); log.info("Done shutting down proxy"); }
/** Close all channels and release resources. */
public synchronized void close() {
    if (allChannels != null) {
        allChannels.close().awaitUninterruptibly();
        factory.releaseExternalResources();
        allChannels = null;
    }
}
/** Stop the network manager, closing all connections and cleaning up all resources. */
public void stop() {
    started.set(false);
    logger.info("Network manager stopping!");
    ChannelGroupFuture future = allChannels.close();
    future.awaitUninterruptibly();
    channelFactory.releaseExternalResources();
}
@Override
public void dispose() {
    try {
        openChannels.close().awaitUninterruptibly();
    } finally {
        channelFactory.releaseExternalResources();
    }
    LOG.info("web server stopped");
}
/**
 * Shutdown this client and close all open connections. The client should be discarded after
 * calling shutdown.
 */
public void shutdown() {
    for (Channel c : channels) {
        ChannelPipeline pipeline = c.getPipeline();
        RedisAsyncConnection<?, ?> connection = pipeline.get(RedisAsyncConnection.class);
        connection.close();
    }
    ChannelGroupFuture future = channels.close();
    future.awaitUninterruptibly();
    bootstrap.releaseExternalResources();
}
public void stop() {
    try {
        myScheduler.shutdown();
        myBuildsExecutor.shutdown();
        final ChannelGroupFuture closeFuture = myAllOpenChannels.close();
        closeFuture.awaitUninterruptibly();
    } finally {
        myChannelFactory.releaseExternalResources();
    }
}
/** Stop the Server */
public void stop() {
    try {
        handler.destroy();
        final ChannelGroupFuture future = ALL_CHANNELS.close();
        future.awaitUninterruptibly();
        bootstrap.getFactory().releaseExternalResources();
        ALL_CHANNELS.clear();
    } finally {
        started.set(false);
    }
}
public void close() throws IOException {
    ChannelGroupFuture f = group.close().awaitUninterruptibly();
    if (!f.isCompleteSuccess()) {
        for (ChannelFuture future : f) {
            if (!future.isSuccess()) {
                throw new IOException(future.getCause());
            }
        }
    }
    bootstrap.releaseExternalResources();
}
public static void closeChannels(ChannelGroup allChannels) {
    if (allChannels.size() > 0) {
        // TODO : allow an option here to control if we need to drain connections and wait
        // instead of killing them all
        try {
            log.info("Closing %s open client connections", allChannels.size());
            if (!allChannels.close().await(5, TimeUnit.SECONDS)) {
                log.warn("Failed to close all open client connections");
            }
        } catch (InterruptedException e) {
            log.warn("Interrupted while closing client connections");
            Thread.currentThread().interrupt();
        }
    }
}
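/*
 * A hedged usage sketch for the closeChannels(...) helper above: a stop() method that delegates
 * channel closing to the helper and then releases the bootstrap's thread pools. The allChannels
 * and bootstrap fields are hypothetical and only stand in for whatever the owning server holds.
 */
public void stop() {
    // Close accepted client connections first (the bounded wait happens inside the helper)...
    closeChannels(allChannels);
    // ...then release the boss/worker executors owned by the bootstrap.
    bootstrap.releaseExternalResources();
}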
@Override
public synchronized void stop() {
    try {
        accepted.close().awaitUninterruptibly(10, TimeUnit.SECONDS);
        ServerBootstrap bootstrap = new ServerBootstrap(selector);
        bootstrap.releaseExternalResources();
        pipelineFact.destroy();
        localFS.close();
    } catch (Throwable t) {
        LOG.error(t);
    } finally {
        super.stop();
    }
}
@Override
public void stop() throws Throwable {
    // Close all open connections
    shuttingDown = true;

    targetCallExecutor.shutdown();
    targetCallExecutor.awaitTermination(10, TimeUnit.SECONDS);
    unfinishedTransactionExecutor.shutdown();
    unfinishedTransactionExecutor.awaitTermination(10, TimeUnit.SECONDS);
    silentChannelExecutor.shutdown();
    silentChannelExecutor.awaitTermination(10, TimeUnit.SECONDS);

    channelGroup.close().awaitUninterruptibly();
    bootstrap.releaseExternalResources();
}
public void stop() {
    try {
        for (ChannelFutureListener listener : closingListeners) {
            try {
                listener.operationComplete(null);
            } catch (Exception e) {
                LOG.error(e);
            }
        }
    } finally {
        try {
            openChannels.close().awaitUninterruptibly();
        } finally {
            channelFactory.releaseExternalResources();
        }
    }
}
// IDEA-91436 idea <121 binds to 127.0.0.1, but >=121 must be available not only from localhost // but if we bind only to any local port (0.0.0.0), instance of idea <121 can bind to our ports // and any request to us will be intercepted // so, we bind to 127.0.0.1 and 0.0.0.0 private int bind(int firstPort, int portsCount, boolean tryAnyPort, ServerBootstrap bootstrap) { InetAddress localAddress; try { localAddress = InetAddress.getByName("127.0.0.1"); } catch (UnknownHostException e) { LOG.error(e); return -1; } for (int i = 0; i < portsCount; i++) { int port = firstPort + i; try { openChannels.add(bootstrap.bind(new InetSocketAddress(localAddress, port))); return port; } catch (ChannelException e) { if (!openChannels.isEmpty()) { openChannels.close(); openChannels.clear(); } if (portsCount == 1) { throw e; } else if (!tryAnyPort && i == (portsCount - 1)) { LOG.error(e); } } } if (tryAnyPort) { LOG.info("We cannot bind to our default range, so, try to bind to any free port"); try { Channel channel = bootstrap.bind(new InetSocketAddress(localAddress, 0)); openChannels.add(channel); return ((InetSocketAddress) channel.getLocalAddress()).getPort(); } catch (ChannelException e) { LOG.error(e); } } return -1; }
@Override
public void run() {
    Configuration conf = OmidConfiguration.create();

    // *** Start the Netty configuration ***
    // Start server with number of active threads = 2 * #CPUs + 1 as maximum.
    // int maxSocketThreads = conf.getInt("tso.maxsocketthread",
    //     (Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
    // More worker threads have an inverse impact on performance, unless that one is saturated.
    int maxSocketThreads = conf.getInt("tso.maxsocketthread", 1);
    ChannelFactory factory = new NioServerSocketChannelFactory(
        Executors.newCachedThreadPool(), Executors.newCachedThreadPool(), maxSocketThreads);
    ServerBootstrap bootstrap = new ServerBootstrap(factory);

    // Create the global ChannelGroup
    ChannelGroup channelGroup = new DefaultChannelGroup(TSOServer.class.getName());

    // Maximum number of worker threads
    // int maxThreads = Runtime.getRuntime().availableProcessors() * 2 + 1;
    // More concurrency gives lower performance due to synchronization
    int maxThreads = conf.getInt("tso.maxthread", 4);
    System.out.println("maxThreads: " + maxThreads);
    // int maxThreads = 5;

    // Memory limitation: 1MB per channel, 1GB global, 100 ms of timeout
    ThreadPoolExecutor pipelineExecutor = new OrderedMemoryAwareThreadPoolExecutor(
        maxThreads, 1048576, 1073741824, 100, TimeUnit.MILLISECONDS,
        Executors.defaultThreadFactory());

    // This is the only object of timestamp oracle
    // TODO: make it singleton
    // TimestampOracle timestampOracle = new TimestampOracle();

    // The wrapper for the shared state of TSO
    state = BookKeeperStateBuilder.getState(this.config);
    if (state == null) {
        LOG.error("Couldn't build state");
        return;
    }

    TSOState.BATCH_SIZE = config.getBatchSize();
    System.out.println("PARAM MAX_ITEMS: " + TSOState.MAX_ITEMS);
    System.out.println("PARAM BATCH_SIZE: " + TSOState.BATCH_SIZE);
    System.out.println("PARAM LOAD_FACTOR: " + TSOState.LOAD_FACTOR);
    System.out.println("PARAM MAX_THREADS: " + maxThreads);

    final TSOHandler handler = new TSOHandler(channelGroup, state);

    bootstrap.setPipelineFactory(new TSOPipelineFactory(pipelineExecutor, handler));
    bootstrap.setOption("tcpNoDelay", false);
    bootstrap.setOption("child.tcpNoDelay", false);
    bootstrap.setOption("child.keepAlive", true);
    bootstrap.setOption("child.reuseAddress", true);
    bootstrap.setOption("child.connectTimeoutMillis", 60000);
    bootstrap.setOption("readWriteFair", true);

    // *** Start the Netty running ***

    // Create the monitor
    ThroughputMonitor monitor = new ThroughputMonitor(state);
    // Add the parent channel to the group
    Channel channel = bootstrap.bind(new InetSocketAddress(config.getPort()));
    channelGroup.add(channel);

    // Compacter handler
    ChannelFactory comFactory = new NioServerSocketChannelFactory(
        Executors.newCachedThreadPool(), Executors.newCachedThreadPool(),
        (Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
    ServerBootstrap comBootstrap = new ServerBootstrap(comFactory);
    ChannelGroup comGroup = new DefaultChannelGroup("compacter");
    final CompacterHandler comHandler = new CompacterHandler(comGroup, state);
    comBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
        @Override
        public ChannelPipeline getPipeline() throws Exception {
            ChannelPipeline pipeline = Channels.pipeline();
            pipeline.addLast("decoder", new ObjectDecoder());
            pipeline.addLast("encoder", new ObjectEncoder());
            pipeline.addLast("handler", comHandler);
            return pipeline;
        }
    });
    comBootstrap.setOption("tcpNoDelay", false);
    comBootstrap.setOption("child.tcpNoDelay", false);
    comBootstrap.setOption("child.keepAlive", true);
    comBootstrap.setOption("child.reuseAddress", true);
    comBootstrap.setOption("child.connectTimeoutMillis", 100);
    comBootstrap.setOption("readWriteFair", true);
    channel = comBootstrap.bind(new InetSocketAddress(config.getPort() + 1));

    // Start the monitor
    monitor.start();

    synchronized (lock) {
        while (!finish) {
            try {
                lock.wait();
            } catch (InterruptedException e) {
                break;
            }
        }
    }

    // timestampOracle.stop();
    handler.stop();
    comHandler.stop();
    state.stop();

    // *** Start the Netty shutdown ***
    // End the monitor
    System.out.println("End of monitor");
    monitor.interrupt();
    // Now close all channels
    System.out.println("End of channel group");
    channelGroup.close().awaitUninterruptibly();
    comGroup.close().awaitUninterruptibly();
    // Close the executor for the pipeline
    System.out.println("End of pipeline executor");
    pipelineExecutor.shutdownNow();
    // Now release resources
    System.out.println("End of resources");
    factory.releaseExternalResources();
    comFactory.releaseExternalResources();
}
// IDEA-91436 idea <121 binds to 127.0.0.1, but >=121 must be available not only from localhost // but if we bind only to any local port (0.0.0.0), instance of idea <121 can bind to our ports // and any request to us will be intercepted // so, we bind to 127.0.0.1 and 0.0.0.0 private int bind(int firstPort, int portsCount, boolean tryAnyPort, ServerBootstrap bootstrap) { String property = System.getProperty(PROPERTY_ONLY_ANY_HOST); boolean onlyAnyHost = property == null ? (SystemInfo.isLinux || SystemInfo.isWindows && !SystemInfo.isWinVistaOrNewer) : (property.isEmpty() || Boolean.valueOf(property)); boolean portChecked = false; for (int i = 0; i < portsCount; i++) { int port = firstPort + i; ChannelException channelException = null; try { openChannels.add(bootstrap.bind(new InetSocketAddress(port))); if (!onlyAnyHost) { InetSocketAddress localAddress = null; try { localAddress = new InetSocketAddress(InetAddress.getByName("127.0.0.1"), port); openChannels.add(bootstrap.bind(localAddress)); } catch (UnknownHostException ignored) { return port; } catch (ChannelException e) { channelException = e; if (!portChecked) { portChecked = true; assert localAddress != null; if (checkPortSafe(localAddress)) { return port; } } } } } catch (ChannelException e) { channelException = e; } if (channelException == null) { return port; } else { if (!openChannels.isEmpty()) { openChannels.close(); openChannels.clear(); } if (portsCount == 1) { throw channelException; } else if (!tryAnyPort && i == (portsCount - 1)) { LOG.error(channelException); } } } if (tryAnyPort) { LOG.info("We cannot bind to our default range, so, try to bind to any free port"); try { Channel channel = bootstrap.bind(new InetSocketAddress(0)); openChannels.add(channel); return ((InetSocketAddress) channel.getLocalAddress()).getPort(); } catch (ChannelException e) { LOG.error(e); } } return -1; }
/**
 * Releases all resources associated with this server so the JVM can shut down cleanly. Call this
 * method to finish using the server. To utilize the default shutdown hook in main() provided by
 * RestExpress, call awaitShutdown() instead.
 */
public void shutdown() {
    ChannelGroupFuture future = allChannels.close();
    future.awaitUninterruptibly();
    shutdownPlugins();
    bootstrap.getFactory().releaseExternalResources();
}
public void shutdown() {
    channelGroup.close().awaitUninterruptibly();
    bootstrap.releaseExternalResources();
}
public void stopListening() {
    final ChannelGroupFuture closeFuture = myAllOpenChannels.close();
    closeFuture.awaitUninterruptibly();
}