protected void setProtocol(final Channel channel, final ByteBuf input, ProtocolVersion version) throws Exception {
    if (protocolSet) {
        return;
    }
    protocolSet = true;

    ProtocolStorage.setProtocolVersion(Utils.getNetworkManagerSocketAddress(channel), version);
    channel.pipeline().remove(ChannelHandlers.INITIAL_DECODER);
    pipelineBuilders.get(version).buildPipeLine(channel, version);

    // rewind and replay the buffered bytes through the rebuilt pipeline
    input.readerIndex(0);
    channel.pipeline().firstContext().fireChannelRead(input);
}
private PacketInterceptor injectChannelInternal(Channel channel) {
    try {
        PacketInterceptor interceptor = (PacketInterceptor) channel.pipeline().get(handlerName);
        if (interceptor == null) {
            interceptor = new PacketInterceptor();
            channel.pipeline().addBefore("packet_handler", handlerName, interceptor);
            uninjectedChannels.remove(channel);
        }
        return interceptor;
    } catch (IllegalArgumentException e) {
        // a handler with this name was registered concurrently; return the one that won the race
        return (PacketInterceptor) channel.pipeline().get(handlerName);
    }
}
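// For context, a minimal sketch (hypothetical handler names, Netty 4.1's EmbeddedChannel) of why
// the IllegalArgumentException fallback above matters: addBefore() rejects duplicate handler names,
// so a racing second injection lands in the catch block and reuses the existing interceptor.
EmbeddedChannel ch = new EmbeddedChannel(new ChannelInboundHandlerAdapter());
ch.pipeline().addFirst("packet_handler", new ChannelDuplexHandler());
ch.pipeline().addBefore("packet_handler", "interceptor", new ChannelDuplexHandler());
try {
    ch.pipeline().addBefore("packet_handler", "interceptor", new ChannelDuplexHandler());
} catch (IllegalArgumentException expected) {
    // "Duplicate handler name: interceptor" -- fall back to pipeline().get("interceptor")
}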
public void close() {
    if (closed) {
        return;
    }

    final SslHandler sslHandler = (SslHandler) channel.pipeline().get("ssl");
    EventLoop eventLoop = channel.eventLoop();
    boolean inEventLoop = eventLoop.inEventLoop();
    // if we are in an event loop we need to close the channel after the writes have finished
    if (!inEventLoop) {
        closeSSLAndChannel(sslHandler, channel);
    } else {
        eventLoop.execute(new Runnable() {
            @Override
            public void run() {
                closeSSLAndChannel(sslHandler, channel);
            }
        });
    }

    closed = true;
    listener.connectionDestroyed(getID());
}
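// closeSSLAndChannel(...) is referenced above but not shown. A minimal sketch of what such a
// helper typically does, assuming Netty 4's SslHandler.close() (deprecated but present), which
// sends a TLS close_notify -- an illustration, not the source's actual implementation:
private void closeSSLAndChannel(SslHandler sslHandler, final Channel channel) {
    if (sslHandler != null) {
        // close the TLS session first, then the channel once the SSL close completes
        sslHandler.close().addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                channel.close();
            }
        });
    } else {
        channel.close();
    }
}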
public void start(ServerTransportListener listener) {
    Preconditions.checkState(this.listener == null, "Handler already registered");
    this.listener = listener;

    // Create the Netty handler for the pipeline.
    final NettyServerHandler grpcHandler = createHandler(listener);

    // Notify when the channel closes.
    channel.closeFuture().addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            notifyTerminated(grpcHandler.connectionError());
        }
    });

    ChannelHandler handler = grpcHandler;
    if (sslContext != null) {
        SSLEngine sslEngine = sslContext.newEngine(channel.alloc());
        handler = ProtocolNegotiators.serverTls(sslEngine, grpcHandler);
    }
    channel.pipeline().addLast(handler);
}
@Test
public void testPerformOpeningHandshake() {
    Channel channelMock = EasyMock.createMock(Channel.class);

    DefaultChannelPipeline pipeline = createPipeline(channelMock);
    EasyMock.expect(channelMock.pipeline()).andReturn(pipeline);

    // capture the http response in order to verify the headers
    Capture<HttpResponse> res = new Capture<HttpResponse>();
    EasyMock.expect(channelMock.write(capture(res)))
            .andReturn(new DefaultChannelFuture(channelMock, true));
    replay(channelMock);

    HttpRequest req = new DefaultHttpRequest(HTTP_1_1, HttpMethod.GET, "/chat");
    req.setHeader(Names.HOST, "server.example.com");
    req.setHeader(Names.UPGRADE, WEBSOCKET.toLowerCase());
    req.setHeader(Names.CONNECTION, "Upgrade");
    req.setHeader(Names.SEC_WEBSOCKET_KEY, "dGhlIHNhbXBsZSBub25jZQ==");
    req.setHeader(Names.SEC_WEBSOCKET_ORIGIN, "http://example.com");
    req.setHeader(Names.SEC_WEBSOCKET_PROTOCOL, "chat, superchat");
    req.setHeader(Names.SEC_WEBSOCKET_VERSION, "13");

    WebSocketServerHandshaker13 handshaker =
            new WebSocketServerHandshaker13("ws://example.com/chat", "chat", false, Integer.MAX_VALUE);
    handshaker.handshake(channelMock, req);

    Assert.assertEquals("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=",
            res.getValue().getHeader(Names.SEC_WEBSOCKET_ACCEPT));
    Assert.assertEquals("chat", res.getValue().getHeader(Names.SEC_WEBSOCKET_PROTOCOL));
}
@Override
public void enableChannel(NetConnectionType connectionType, Channel ch) throws Exception {
    if (null == connectionType) return;
    if (null == ch) return;

    ChannelPipeline pipeline = ch.pipeline();
    switch (connectionType) {
        case NODE_IN_AGENT_CHAT:
        case NODE_IN_AGENT_LOGINSERVER:
        case NODE_IN_AGENT_NPC:
        case NODE_IN_AGENT_SCENE:
            logger.info("{} add agent codec", ch);
            // pipeline.addBefore(DEFAULT_DECODER_LENGTH, AGENT_PACKET_DECODER_LENGTH,
            //         new LengthFieldBasedFrameDecoder(2048, 0, 4/** ,0,4*/));
            pipeline.addBefore(DEFAULT_DECODER_LENGTH, AGENT_PACKET_DECODER,
                    new AgentNettyDecoder(netServiceHandler));
            pipeline.addBefore(DEFAULT_ENCODER_LENGTH_APPENDER, AGENT_PACKET_ENCODER,
                    new AgentNettyEncoder());
            // pipeline.addBefore(DEFAULT_ENCODER_LENGTH_APPENDER, AGENT_PACKET_ENCODER_LENGTH_APPENDER,
            //         new LengthFieldPrepender(4));
            break;
        default:
            break;
    }
}
private boolean handleConnectOKAndExit(int statusCode, Realm realm, final Request request,
        HttpRequest httpRequest, HttpResponse response, final NettyResponseFuture<?> future,
        ProxyServer proxyServer, final Channel channel) throws IOException {
    if (statusCode == OK.code() && httpRequest.getMethod() == HttpMethod.CONNECT) {
        LOGGER.debug("Connected to {}:{}", proxyServer.getHost(), proxyServer.getPort());

        if (future.isKeepAlive()) {
            future.attachChannel(channel, true);
        }

        try {
            LOGGER.debug("Connecting to proxy {} for scheme {}", proxyServer, request.getURI().getScheme());
            channels.upgradeProtocol(channel.pipeline(), request.getURI().getScheme());
        } catch (Throwable ex) {
            channels.abort(future, ex);
        }
        future.setReuseChannel(true);
        future.setConnectAllowed(false);
        requestSender.sendNextRequest(new RequestBuilder(future.getRequest()).build(), future);
        return true;
    }
    return false;
}
/**
 * Configures the channel pipeline. Registers the handler that processes user traffic,
 * using the beans registered in Netty.Server.Configuration.NettyServerConfiguration.
 * Actual user requests are handled by Netty.Server.Handler.JsonHandler.
 *
 * @param channel
 * @throws Exception
 */
@Override
protected void initChannel(Channel channel) throws Exception {
    ChannelPipeline channelPipeline = channel.pipeline();
    switch (transferType) {
        case "websocket":
            channelPipeline
                    .addLast(new HttpServerCodec())
                    .addLast(new HttpObjectAggregator(65536))
                    .addLast(new WebSocketServerCompressionHandler())
                    .addLast(new WebSocketServerProtocolHandler(
                            transferWebsocketPath, transferWebsocketSubProtocol, transferWebsocketAllowExtensions))
                    .addLast(new LoggingHandler(LogLevel.valueOf(logLevelPipeline)))
                    .addLast(websocketHandler);
            break; // without this break, the TCP handlers below would also be installed
        case "tcp":
        default:
            channelPipeline
                    .addLast(new LineBasedFrameDecoder(Integer.MAX_VALUE))
                    .addLast(STRING_DECODER)
                    .addLast(STRING_ENCODER)
                    .addLast(new LoggingHandler(LogLevel.valueOf(logLevelPipeline)))
                    .addLast(jsonHandler);
    }
}
// Synchronous HTTP action
@Override
public String httpActionSync(String uri, String method, List<HttpParam> parametersQuery,
        List<HttpParam> parametersForm, List<HttpResponse> errors) throws RestException {
    Channel ch;
    try {
        HttpRequest request = buildRequest(uri, method, parametersQuery, parametersForm);
        // handler.reset();
        ch = bootStrap.connect(baseUri.getHost(), baseUri.getPort()).sync().channel();
        NettyHttpClientHandler handler = (NettyHttpClientHandler) ch.pipeline().get("http-handler");
        ch.writeAndFlush(request);
        ch.closeFuture().sync();
        if (httpResponseOkay(handler.getResponseStatus())) {
            return handler.getResponseText();
        } else {
            throw makeException(handler.getResponseStatus(), handler.getResponseText(), errors);
        }
    } catch (InterruptedException e) {
        throw new RestException(e);
    }
}
public OutboundConnectionQueue(Channel channel, RemoteReceiver receiver,
        NetworkConnectionManager connectionManager, int closeAfterIdleForMs) {
    this.channel = channel;
    this.receiver = receiver;
    this.connectionManager = connectionManager;

    channel.pipeline().addFirst("Outbound Connection Queue", this);
    channel.pipeline().addFirst("Idle State Handler",
            new IdleStateHandler(0, 0, closeAfterIdleForMs, TimeUnit.MILLISECONDS));
}
/**
 * Adds channel handlers that perform encryption / decryption of data using SASL.
 *
 * @param channel The channel.
 * @param backend The SASL backend.
 * @param maxOutboundBlockSize Max size in bytes of outgoing encrypted blocks, to control memory usage.
 */
static void addToChannel(Channel channel, SaslEncryptionBackend backend, int maxOutboundBlockSize) {
    channel.pipeline()
            .addFirst(ENCRYPTION_HANDLER_NAME, new EncryptionHandler(backend, maxOutboundBlockSize))
            .addFirst("saslDecryption", new DecryptionHandler(backend))
            .addFirst("saslFrameDecoder", NettyUtils.createFrameDecoder());
}
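// Note on ordering: each addFirst() pushes its handler closer to the head, so the chain above
// yields [saslFrameDecoder, saslDecryption, <encryption handler>, ...]. Inbound bytes are framed
// and decrypted before reaching the application; outbound data passes the encryption handler last
// before hitting the wire. A minimal, hypothetical demonstration with Netty 4.1's EmbeddedChannel:
EmbeddedChannel ch = new EmbeddedChannel(new ChannelInboundHandlerAdapter());
ch.pipeline()
        .addFirst("saslEncryption", new ChannelOutboundHandlerAdapter())
        .addFirst("saslDecryption", new ChannelInboundHandlerAdapter())
        .addFirst("saslFrameDecoder", new ChannelInboundHandlerAdapter());
System.out.println(ch.pipeline().names()); // [saslFrameDecoder, saslDecryption, saslEncryption, ...]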
static SessionProtocol protocol(Channel ch) {
    final HttpSessionHandler sessionHandler = ch.pipeline().get(HttpSessionHandler.class);
    if (sessionHandler == null || !sessionHandler.active) {
        return null;
    } else {
        return sessionHandler.sessionProtocol;
    }
}
static void deactivate(Channel ch) {
    final HttpSessionHandler sessionHandler = ch.pipeline().get(HttpSessionHandler.class);
    if (sessionHandler == null) {
        // Protocol has not been determined yet.
    } else {
        sessionHandler.deactivateSession();
    }
}
public static ChannelPipeline getPipeLineOfConnection(NettyTCPMessageSender messageSender) {
    if (null != messageSender) {
        return messageSender.getChannel().pipeline();
    }
    return null;
}
public static void uninject(Player p) {
    try {
        Object craftOnline = ReflectionUtil.getBukkitClass("entity.CraftPlayer").cast(p);
        Object craftHandle = ReflectionUtil.invokeMethod(craftOnline.getClass(), craftOnline, "getHandle");
        Object playerCon = ReflectionUtil.getField(craftHandle.getClass(), "playerConnection").get(craftHandle);
        Object manager = ReflectionUtil.getField(playerCon.getClass(), "networkManager").get(playerCon);
        Channel channel = (Channel) ReflectionUtil.getField(manager.getClass(), "channel").get(manager);

        if (channel.pipeline().context("PacketListener") != null) {
            channel.pipeline().remove("PacketListener");
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
/**
 * Creates an instance of {@link NettyPacketReader}. If this is used to read a block remotely, it
 * requires the block to be locked beforehand and the lock ID is passed to this class.
 *
 * @param context the file system context
 * @param address the netty data server network address
 * @param id the block ID or UFS file ID
 * @param offset the offset
 * @param len the length to read
 * @param lockId the lock ID
 * @param sessionId the session ID
 * @param type the request type (block or UFS file)
 * @throws IOException if it fails to acquire a netty channel
 */
private NettyPacketReader(FileSystemContext context, InetSocketAddress address, long id, long offset,
        long len, long lockId, long sessionId, Protocol.RequestType type) throws IOException {
    Preconditions.checkArgument(offset >= 0 && len > 0);

    mContext = context;
    mAddress = address;
    mId = id;
    mStart = offset;
    mPosToRead = offset;
    mBytesToRead = len;
    mRequestType = type;

    mChannel = context.acquireNettyChannel(address);

    ChannelPipeline pipeline = mChannel.pipeline();
    if (!(pipeline.last() instanceof RPCMessageDecoder)) {
        throw new RuntimeException(String.format("Channel pipeline has unexpected handlers %s.",
                pipeline.last().getClass().getCanonicalName()));
    }
    mChannel.pipeline().addLast(new PacketReadHandler());

    Protocol.ReadRequest readRequest = Protocol.ReadRequest.newBuilder()
            .setId(id)
            .setOffset(offset)
            .setLength(len)
            .setLockId(lockId)
            .setSessionId(sessionId)
            .setType(type)
            .build();
    mChannel.writeAndFlush(new RPCProtoMessage(readRequest))
            .addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
}
@Override
protected void initChannel(Channel ch) throws Exception {
    ChannelPipeline pipeline = ch.pipeline();
    pipeline.addLast(new ClientStateHandler(m_descriptor.getName()));
    for (Map.Entry<String, ChannelHandler> e : m_descriptor.getHandlers().entrySet()) {
        pipeline.addLast(e.getKey(), e.getValue());
    }
}
static boolean isActive(Channel ch) {
    if (!ch.isActive()) {
        return false;
    }
    final HttpSessionHandler sessionHandler = ch.pipeline().get(HttpSessionHandler.class);
    return sessionHandler != null && sessionHandler.active;
}
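// The three HttpSessionHandler helpers above (protocol, deactivate, isActive) compose naturally
// in caller code. A hypothetical sketch, assuming they live in the same class and that
// SessionProtocol follows Armeria's naming; this is not taken from the source:
static boolean canReuseForCleartextHttp2(Channel ch) {
    // only a live, fully negotiated session reports a protocol;
    // protocol(ch) returns null until the handshake has completed
    return isActive(ch) && protocol(ch) == SessionProtocol.H2C;
}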
@Override
protected void initChannel(Channel channel) throws Exception {
    ChannelPipeline pipeline = channel.pipeline();

    int maxChunkSize = conf.getIntVar(TajoConf.ConfVars.SHUFFLE_FETCHER_CHUNK_MAX_SIZE);
    int readTimeout = conf.getIntVar(TajoConf.ConfVars.SHUFFLE_FETCHER_READ_TIMEOUT);

    pipeline.addLast("codec", new HttpClientCodec(4096, 8192, maxChunkSize));
    pipeline.addLast("inflater", new HttpContentDecompressor());
    pipeline.addLast("timeout", new ReadTimeoutHandler(readTimeout, TimeUnit.SECONDS));
    pipeline.addLast("handler", new HttpClientHandler(file));
}
public void connect() throws Exception {
    /*
     * mySocket = new Socket(this.host, this.port);
     * os = mySocket.getOutputStream();
     */
    group = new NioEventLoopGroup();
    b = new Bootstrap();
    b.group(group).channel(NioSocketChannel.class).handler(new ClientInitializer());
    ch = b.connect(host, port).sync().channel();
    handler = ch.pipeline().get(ClientHandler.class);
    isServerAvaialable = true;
}
public void a(int var1) {
    if (var1 >= 0) {
        if (k.pipeline().get("decompress") instanceof class_ei) {
            ((class_ei) k.pipeline().get("decompress")).a(var1);
        } else {
            k.pipeline().addBefore("decoder", "decompress", new class_ei(var1));
        }
        if (k.pipeline().get("compress") instanceof class_ej) {
            // fixed: the original looked up "decompress" here, which would throw a
            // ClassCastException whenever the compressor was already installed
            ((class_ej) k.pipeline().get("compress")).a(var1);
        } else {
            k.pipeline().addBefore("encoder", "compress", new class_ej(var1));
        }
    } else {
        if (k.pipeline().get("decompress") instanceof class_ei) {
            k.pipeline().remove("decompress");
        }
        if (k.pipeline().get("compress") instanceof class_ej) {
            k.pipeline().remove("compress");
        }
    }
}
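// A readable rendering of the obfuscated method above. All names are hypothetical (k ~ the
// connection's Channel; class_ei / class_ej ~ inbound-decompression and outbound-compression
// handlers with a setThreshold(int) method). The pipeline choreography is the same: install or
// retune both handlers when the threshold is non-negative, remove both when compression is off.
public void setCompressionThreshold(int threshold) {
    ChannelPipeline p = channel.pipeline();
    if (threshold >= 0) {
        if (p.get("decompress") instanceof PacketDecompressor) {
            ((PacketDecompressor) p.get("decompress")).setThreshold(threshold);
        } else {
            p.addBefore("decoder", "decompress", new PacketDecompressor(threshold));
        }
        if (p.get("compress") instanceof PacketCompressor) {
            ((PacketCompressor) p.get("compress")).setThreshold(threshold);
        } else {
            p.addBefore("encoder", "compress", new PacketCompressor(threshold));
        }
    } else {
        if (p.get("decompress") instanceof PacketDecompressor) {
            p.remove("decompress");
        }
        if (p.get("compress") instanceof PacketCompressor) {
            p.remove("compress");
        }
    }
}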
@Test
public void testLocalAddressReuse() throws Exception {
    for (int i = 0; i < 2; i++) {
        LocalAddress addr = new LocalAddress(LOCAL_ADDR_ID);
        Bootstrap cb = new Bootstrap();
        ServerBootstrap sb = new ServerBootstrap();

        cb.eventLoop(new LocalEventLoop())
                .channel(new LocalChannel())
                .remoteAddress(addr)
                .handler(new TestHandler());

        sb.eventLoop(new LocalEventLoop(), new LocalEventLoop())
                .channel(new LocalServerChannel())
                .localAddress(addr)
                .childHandler(new ChannelInitializer<LocalChannel>() {
                    @Override
                    public void initChannel(LocalChannel ch) throws Exception {
                        ch.pipeline().addLast(new TestHandler());
                    }
                });

        // Start server
        Channel sc = sb.bind().sync().channel();

        // Connect to the server
        Channel cc = cb.connect().sync().channel();

        // Send a message event up the pipeline.
        cc.pipeline().inboundMessageBuffer().add("Hello, World");
        cc.pipeline().fireInboundBufferUpdated();

        // Close the channel
        cc.close().sync();
        sb.shutdown();
        cb.shutdown();
        sc.closeFuture().sync();

        Assert.assertTrue(
                String.format("Expected null, got channel '%s' for local address '%s'",
                        LocalChannelRegistry.get(addr), addr),
                LocalChannelRegistry.get(addr) == null);
    }
}
public synchronized void addChannel(Channel ch) {
    // mark the connection as established
    ch.attr(GlobalConstance.attributeKey).set(SessionState.Connect);
    getChannels().add(ch);
    int cnt = incrementConn();

    // if this endpoint speaks CMPP
    if (getEndpointEntity() instanceof CMPPEndpointEntity) {
        // create a persistent map that stores the messages that have been sent
        Map<Long, Message> storedMap = BDBStoredMapFactoryImpl.INS.buildMap(
                getEndpointEntity().getId(), "Session_" + getEndpointEntity().getId());
        Map<Long, Message> preSendMap = new HashMap<Long, Message>();

        logger.debug("Channel added To Endpoint {} .totalCnt:{} ,Channel.ID: {}", endpoint, cnt, ch.id());
        if (cnt == 1) {
            // this is the first connection: pull out the messages that failed to send
            // last time so they can be sent again
            if (storedMap != null && storedMap.size() > 0) {
                for (Map.Entry<Long, Message> entry : storedMap.entrySet()) {
                    preSendMap.put(entry.getKey(), entry.getValue());
                }
            }
        }

        CMPPEndpointEntity cmppentity = (CMPPEndpointEntity) getEndpointEntity();
        // place the SessionStateManager after messageHeaderCodec, because it has to handle
        // the long-SMS splitting of Submit and Deliver messages
        ch.pipeline().addBefore(CMPPCodecChannelInitializer.codecName, "sessionStateManager",
                new SessionStateManager(cmppentity, storedMap, preSendMap));
        // install the business handlers
        bindHandler(ch.pipeline(), cmppentity);
    }
}
@Test
public void nonHttp2ExceptionInPipelineShouldNotCloseConnection() throws Exception {
    bootstrapEnv(1, 1, 1, 1);

    // Create a latch to track when the close occurs.
    final CountDownLatch closeLatch = new CountDownLatch(1);
    clientChannel.closeFuture().addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            closeLatch.countDown();
        }
    });

    // Create a single stream by sending a HEADERS frame to the server.
    final Http2Headers headers = dummyHeaders();
    runInChannel(clientChannel, new Http2Runnable() {
        @Override
        public void run() {
            http2Client.encoder().writeHeaders(ctx(), 3, headers, 0, (short) 16, false, 0, false, newPromise());
        }
    });

    // Wait for the server to create the stream.
    assertTrue(serverSettingsAckLatch.await(5, SECONDS));
    assertTrue(requestLatch.await(5, SECONDS));

    // Add a handler that will immediately throw an exception.
    clientChannel.pipeline().addFirst(new ChannelHandlerAdapter() {
        @Override
        public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
            throw new RuntimeException("Fake Exception");
        }
    });

    // The close should NOT occur.
    assertFalse(closeLatch.await(5, SECONDS));
    assertTrue(clientChannel.isOpen());
}
@Before
public void before() throws Exception {
    when(context.channel()).thenReturn(channel);
    when(channel.pipeline()).thenReturn(pipeline);
    when(channel.eventLoop()).thenReturn(eventLoop);
    when(eventLoop.submit(any(Runnable.class))).thenAnswer(new Answer<Future<?>>() {
        @Override
        public Future<?> answer(InvocationOnMock invocation) throws Throwable {
            Runnable r = (Runnable) invocation.getArguments()[0];
            r.run();
            return null;
        }
    });
}
private boolean exitAfterHandlingReactiveStreams(Channel channel, NettyResponseFuture<?> future,
        HttpResponse response, AsyncHandler<?> handler, HttpRequest httpRequest) throws IOException {
    if (handler instanceof StreamedAsyncHandler) {
        StreamedAsyncHandler<?> streamedAsyncHandler = (StreamedAsyncHandler<?>) handler;
        StreamedResponsePublisher publisher =
                new StreamedResponsePublisher(channel.eventLoop(), channelManager, future, channel);
        // FIXME do we really need to pass the event loop?
        // FIXME move this to ChannelManager
        channel.pipeline().addLast(channel.eventLoop(), "streamedAsyncHandler", publisher);
        Channels.setAttribute(channel, publisher);
        return streamedAsyncHandler.onStream(publisher) != State.CONTINUE;
    }
    return false;
}
private void bootstrapEnv(int dataCountDown, int settingsAckCount, int requestCountDown, int trailersCountDown)
        throws Exception {
    requestLatch = new CountDownLatch(requestCountDown);
    serverSettingsAckLatch = new CountDownLatch(settingsAckCount);
    dataLatch = new CountDownLatch(dataCountDown);
    trailersLatch = new CountDownLatch(trailersCountDown);

    sb = new ServerBootstrap();
    cb = new Bootstrap();

    sb.group(new NioEventLoopGroup(), new NioEventLoopGroup());
    sb.channel(NioServerSocketChannel.class);
    sb.childHandler(new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(Channel ch) throws Exception {
            ChannelPipeline p = ch.pipeline();
            serverFrameCountDown = new FrameCountDown(serverListener, serverSettingsAckLatch,
                    requestLatch, dataLatch, trailersLatch);
            p.addLast(new Http2ConnectionHandler(true, serverFrameCountDown));
        }
    });

    cb.group(new NioEventLoopGroup());
    cb.channel(NioSocketChannel.class);
    cb.handler(new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(Channel ch) throws Exception {
            ChannelPipeline p = ch.pipeline();
            p.addLast(new Http2ConnectionHandler(false, clientListener));
        }
    });

    serverChannel = sb.bind(new InetSocketAddress(0)).sync().channel();
    int port = ((InetSocketAddress) serverChannel.localAddress()).getPort();

    ChannelFuture ccf = cb.connect(new InetSocketAddress(NetUtil.LOCALHOST, port));
    assertTrue(ccf.awaitUninterruptibly().isSuccess());
    clientChannel = ccf.channel();
    http2Client = clientChannel.pipeline().get(Http2ConnectionHandler.class);
}
private void unregisterChannelHandler() {
    if (serverChannelHandler == null) {
        return;
    }
    for (Channel serverChannel : serverChannels) {
        final ChannelPipeline pipeline = serverChannel.pipeline();
        serverChannel.eventLoop().execute(new Runnable() {
            @Override
            public void run() {
                try {
                    pipeline.remove(serverChannelHandler);
                } catch (NoSuchElementException e) {
                    // handler was already removed; nothing to do
                }
            }
        });
    }
}
private boolean exitAfterHandlingConnect(final Channel channel, final NettyResponseFuture<?> future,
        final Request request, ProxyServer proxyServer, int statusCode, HttpRequest httpRequest)
        throws IOException {
    if (future.isKeepAlive()) {
        future.attachChannel(channel, true);
    }

    Uri requestUri = request.getUri();
    logger.debug("Connecting to proxy {} for scheme {}", proxyServer, requestUri.getScheme());

    channelManager.upgradeProtocol(channel.pipeline(), requestUri);
    future.setReuseChannel(true);
    future.setConnectAllowed(false);
    requestSender.drainChannelAndExecuteNextRequest(channel, future,
            new RequestBuilder(future.getTargetRequest()).build());

    return true;
}
/**
 * Reconnect to the remote address that the closed channel was connected to. This creates a new
 * {@link ChannelPipeline} with the same handler instances contained in the old channel's pipeline.
 *
 * @param timeout Timer task handle.
 * @throws Exception when reconnection fails.
 */
@Override
public void run(Timeout timeout) throws Exception {
    ChannelPipeline old = channel.pipeline();
    final CommandHandler<?, ?> handler = old.get(CommandHandler.class);
    final RedisAsyncConnection<?, ?> connection = old.get(RedisAsyncConnection.class);

    ChannelFuture connect = null;
    // TODO use better concurrent workaround
    synchronized (bootstrap) {
        connect = bootstrap.handler(new ChannelInitializer<Channel>() {
            @Override
            protected void initChannel(Channel ch) throws Exception {
                ch.pipeline().addLast(this, handler, connection);
            }
        }).connect();
    }
    connect.sync();
}
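// Recycling handler instances into a fresh pipeline, as run() does above, only works when Netty
// considers the handlers sharable: an instance may be added to another pipeline (or re-added
// after removal) only if its class carries the @Sharable annotation; otherwise addLast throws
// ChannelPipelineException. A minimal illustration with a hypothetical handler:
@ChannelHandler.Sharable
class ReusableHandler extends ChannelInboundHandlerAdapter {
    // stateless (or internally synchronized) handlers are safe to mark @Sharable
}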