public void connection(String host, int port) throws Exception {
    // Define the main thread group that handles I/O polling
    NioEventLoopGroup bossGroup = new NioEventLoopGroup();
    try {
        // Client-side bootstrap class
        Bootstrap bs = new Bootstrap();
        // Set the thread group
        bs.group(bossGroup)
                // Set channel options
                .option(ChannelOption.TCP_NODELAY, true)
                // Set the client SocketChannel type
                .channel(NioSocketChannel.class)
                // Set the client handler
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(new TimeClientHandler());
                    }
                });
        // Connect to the server
        ChannelFuture future = bs.connect(host, port).sync();
        future.channel().closeFuture().sync();
    } finally {
        // Graceful shutdown
        bossGroup.shutdownGracefully();
    }
}
public void run() {
    // Configure the client.
    final ThreadFactory connectFactory = new UtilThreadFactory("connect");
    final NioEventLoopGroup connectGroup =
            new NioEventLoopGroup(1, connectFactory, NioUdtProvider.BYTE_PROVIDER);
    try {
        final Bootstrap boot = new Bootstrap();
        boot.group(connectGroup)
                .channelFactory(NioUdtProvider.BYTE_CONNECTOR)
                .handler(new ChannelInitializer<Channel>() {
                    @Override
                    protected void initChannel(Channel ch) throws Exception {
                        ch.pipeline().addLast(
                                new LoggingHandler(LogLevel.INFO),
                                new ModemClientHandler());
                    }
                });
        // Start the client.
        final ChannelFuture f = boot.connect(host, port).sync();
        f.channel().closeFuture().sync();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        // Shut down the event loop to terminate all threads.
        connectGroup.shutdownGracefully();
    }
}
@Override
public void start() throws Exception {
    NioEventLoopGroup bossGroup = new NioEventLoopGroup();
    NioEventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                // Handler for the parent (server) channel itself.
                .handler(new ChannelInitializer<ServerSocketChannel>() {
                    @Override
                    protected void initChannel(ServerSocketChannel ch) throws Exception {
                        ch.pipeline().addLast("log", new LoggingHandler(LogLevel.INFO));
                        ch.pipeline().addLast(
                                "redis",
                                Tangerine.getInstance().getBean(NotifyServiceHandler.class));
                    }
                })
                // Handler for each accepted child connection.
                .childHandler(new ServerChannelInitializer())
                .option(ChannelOption.SO_BACKLOG, 1024);
        channel = bootstrap.bind(host, port).sync().channel();
        channel.closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
public static void main(String[] args) throws Exception {
    // Address to bind on / connect to.
    // final LocalAddress addr = new LocalAddress(PORT);
    final InetAddress HOST = InetAddress.getByName("192.168.220.128");
    // final InetSocketAddress addr =
    //         InetSocketAddress.createUnresolved("192.168.220.128", Integer.parseInt(PORT));
    NioEventLoopGroup clientGroup = new NioEventLoopGroup(); // NIO event loops are also OK
    final SslContext sslCtx = null;
    try {
        Bootstrap b = new Bootstrap();
        b.group(clientGroup)
                .channel(NioSocketChannel.class)
                .option(ChannelOption.TCP_NODELAY, true)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ChannelPipeline p = ch.pipeline();
                        // p.addLast(new LoggingHandler(LogLevel.INFO));
                        p.addLast(new LocalEchoClientHandler());
                    }
                });
        // Start the client. Reuse the channel from the first connect instead of
        // connecting a second time, as the original code did.
        ChannelFuture f = b.connect(HOST, PORT).sync();
        Channel ch = f.channel();
        ByteBuf subsequentMessage;
        byte[] bytes;
        String line;
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        System.out.println("Feel free to chat here");
        while (true) {
            line = in.readLine();
            if (line == null || line.equalsIgnoreCase("quit") || !ch.isActive()) {
                System.out.println("Prepare to close the connection.");
                break;
            }
            subsequentMessage = Unpooled.buffer();
            bytes = line.getBytes();
            subsequentMessage.writeBytes(bytes);
            ch.writeAndFlush(subsequentMessage);
        }
        // Close the channel (a no-op if it is already inactive) and wait
        // until the connection is fully closed.
        ch.close();
        f.channel().closeFuture().sync();
    } finally {
        clientGroup.shutdownGracefully();
        System.out.println("Client closed");
    }
}
@Override
public EventLoopGroup create() {
    // Use the executor based constructor so we can work with both Netty4 & Netty5.
    ThreadFactory threadFactory =
            new ThreadFactoryBuilder().setNameFormat(name + "-%d").build();
    int parallelism = numEventLoops == 0
            ? Runtime.getRuntime().availableProcessors() * 2
            : numEventLoops;
    final ExecutorService executor = Executors.newFixedThreadPool(parallelism, threadFactory);
    NioEventLoopGroup nioEventLoopGroup = new NioEventLoopGroup(parallelism, executor);
    // Shut down the backing executor once the event loop group itself has terminated.
    nioEventLoopGroup.terminationFuture().addListener(
            new GenericFutureListener<Future<Object>>() {
                @Override
                public void operationComplete(Future<Object> future) throws Exception {
                    executor.shutdown();
                }
            });
    return nioEventLoopGroup;
}
private void finishShutdown() {
    if (!_finishingShutdown.compareAndSet(false, true)) {
        return;
    }
    if (_shutdownTimeoutTask != null) {
        _shutdownTimeoutTask.cancel(false);
    }
    if (_shutdownFactory) {
        LOG.info("Shutdown Netty Event Loop");
        _eventLoopGroup.shutdownGracefully(0, 0, TimeUnit.SECONDS);
    }
    if (_shutdownExecutor) {
        // Due to a bug in ScheduledThreadPoolExecutor, shutdownNow() returns cancelled
        // tasks as though they were still pending execution. If the executor has a large
        // number of cancelled tasks, shutdownNow() could take a long time to copy the array
        // of tasks. Calling shutdown() first will purge the cancelled tasks. Bug filed with
        // Oracle; will provide bug number when available. May be fixed in JDK7 already.
        _executor.shutdown();
        _executor.shutdownNow();
        LOG.info("Scheduler shutdown complete");
    }
    if (_shutdownCallbackExecutor) {
        LOG.info("Shutdown callback executor");
        _callbackExecutorGroup.shutdown();
        _callbackExecutorGroup.shutdownNow();
    }
    final Callback<None> callback;
    synchronized (_mutex) {
        callback = _factoryShutdownCallback;
    }
    LOG.info("Shutdown complete");
    callback.onSuccess(None.none());
}