private void fireException(Throwable cause) {
    // Only fire the exception if the channel is open and registered;
    // otherwise the pipeline is not set up yet and the event would just
    // hit the tail of the pipeline.
    // See https://github.com/netty/netty/issues/1517
    if (fireException && channel.isRegistered()) {
        channel.pipeline().fireExceptionCaught(cause);
    }
}
@Override
protected void initChannel(Channel channel) throws Exception {
    ChannelPipeline pipeline = channel.pipeline();

    int maxChunkSize = conf.getIntVar(TajoConf.ConfVars.SHUFFLE_FETCHER_CHUNK_MAX_SIZE);
    int readTimeout = conf.getIntVar(TajoConf.ConfVars.SHUFFLE_FETCHER_READ_TIMEOUT);

    pipeline.addLast("codec", new HttpClientCodec(4096, 8192, maxChunkSize));
    pipeline.addLast("inflater", new HttpContentDecompressor());
    pipeline.addLast("timeout", new ReadTimeoutHandler(readTimeout, TimeUnit.SECONDS));
    pipeline.addLast("handler", new HttpClientHandler(file));
}
/**
 * Reconnect to the remote address that the closed channel was connected to. This creates a new
 * {@link ChannelPipeline} with the same handler instances contained in the old channel's
 * pipeline.
 *
 * @param timeout Timer task handle.
 * @throws Exception when reconnection fails.
 */
@Override
public void run(Timeout timeout) throws Exception {
    ChannelPipeline old = channel.pipeline();
    final CommandHandler<?, ?> handler = old.get(CommandHandler.class);
    final RedisAsyncConnection<?, ?> connection = old.get(RedisAsyncConnection.class);

    ChannelFuture connect = null;
    // TODO use better concurrent workaround
    synchronized (bootstrap) {
        connect = bootstrap.handler(new ChannelInitializer<Channel>() {
            @Override
            protected void initChannel(Channel ch) throws Exception {
                ch.pipeline().addLast(this, handler, connection);
            }
        }).connect();
    }
    connect.sync();
}
protected void createSession(Long sessionId, Channel channel, SmppSessionConfiguration config,
        BaseBindResp preparedBindResponse) throws SmppProcessingException {
    // NOTE: exactly one PDU (the bind request) has been read from the channel; we now need to
    // delegate permitting this bind request by calling a method further upstream. Only after the
    // server side is completely ready to start processing requests from this session do we want
    // to return the bind response and start reading further requests -- so we initially block
    // reading from the channel; reading is turned back on via the "serverReady()" method call on
    // the session object.

    // make sure the channel is not being read/processed (until we flag we're ready later on)
    channel.config().setAutoRead(false);

    // auto-negotiate the interface version in use based on the requested interface version
    byte interfaceVersion = this.autoNegotiateInterfaceVersion(config.getInterfaceVersion());

    // create a new server session associated with this server
    DefaultSmppSession session = new DefaultSmppSession(SmppSession.Type.SERVER, config, channel,
            this, sessionId, preparedBindResponse, interfaceVersion, monitorExecutor);

    // update the thread renamer already in the pipeline with this session's name
    SmppSessionThreadRenamer threadRenamer = (SmppSessionThreadRenamer) channel.pipeline()
            .get(SmppChannelConstants.PIPELINE_SESSION_THREAD_RENAMER_NAME);
    threadRenamer.setThreadName(config.getName());

    // add a logging handler after the thread renamer
    SmppSessionLogger loggingHandler = new SmppSessionLogger(
            DefaultSmppSession.class.getCanonicalName(), config.getLoggingOptions());
    channel.pipeline().addAfter(SmppChannelConstants.PIPELINE_SESSION_THREAD_RENAMER_NAME,
            SmppChannelConstants.PIPELINE_SESSION_LOGGER_NAME, loggingHandler);

    // the decoder in the pipeline is ok (keep it)

    // replace the session wrapper with a new one that passes PDUs up the chain for this session
    channel.pipeline().remove(SmppChannelConstants.PIPELINE_SESSION_WRAPPER_NAME);
    channel.pipeline().addLast(SmppChannelConstants.PIPELINE_SESSION_WRAPPER_NAME,
            new SmppSessionWrapper(session));

    // check if the number of channels exceeds the configured max connection size
    if (this.channels.size() > this.configuration.getMaxConnectionSize()) {
        logger.warn("The current connection size [{}] exceeds the configured max connection size [{}]",
                this.channels.size(), this.configuration.getMaxConnectionSize());
    }

    // session created, now pass it upstream
    counters.incrementSessionCreatedAndGet();
    incrementSessionSizeCounters(session);
    this.serverHandler.sessionCreated(sessionId, session, preparedBindResponse);

    // register this session as an MBean
    if (configuration.isJmxEnabled()) {
        session.registerMBean(configuration.getJmxDomain() + ":type="
                + configuration.getName() + "Sessions,name=" + sessionId);
    }
}
@Override
protected void initChannel(Channel ch) throws Exception {
    ChannelPipeline pipeline = ch.pipeline();
    pipeline.addLast(new HttpClientCodec());
    // aggregate the full HTTP response into a single FullHttpResponse (no practical size limit)
    pipeline.addLast(new HttpObjectAggregator(Integer.MAX_VALUE));
}
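// A minimal sketch of how an initializer like the one above is typically wired into a client
// Bootstrap. The event loop group, channel type, host, and port below are illustrative
// assumptions, not taken from the original source.
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.http.HttpClientCodec;
import io.netty.handler.codec.http.HttpObjectAggregator;

public final class HttpClientBootstrapExample {
    public static void main(String[] args) throws Exception {
        NioEventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap bootstrap = new Bootstrap()
                    .group(group)
                    .channel(NioSocketChannel.class)
                    .handler(new ChannelInitializer<Channel>() {
                        @Override
                        protected void initChannel(Channel ch) {
                            ChannelPipeline pipeline = ch.pipeline();
                            pipeline.addLast(new HttpClientCodec());
                            // later handlers then see one aggregated FullHttpResponse
                            pipeline.addLast(new HttpObjectAggregator(Integer.MAX_VALUE));
                        }
                    });
            Channel channel = bootstrap.connect("example.com", 80).sync().channel();
            channel.close().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}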
@Override
protected void initChannel(Channel ch) throws Exception {
    ChannelPipeline pipeline = ch.pipeline();
    // split the inbound byte stream into line-delimited frames of at most 65 KiB
    pipeline.addLast(new LineBasedFrameDecoder(65 * 1024));
    pipeline.addLast(new FrameHandler());
}
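// A minimal sketch of what a handler sitting behind LineBasedFrameDecoder might look like. The
// real FrameHandler from the source project is not shown above, so this ExampleLineHandler is a
// hypothetical stand-in for illustration only, not the actual implementation.
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.util.CharsetUtil;

final class ExampleLineHandler extends SimpleChannelInboundHandler<ByteBuf> {
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, ByteBuf frame) {
        // each inbound message is one line (delimiter stripped) produced by the decoder
        String line = frame.toString(CharsetUtil.UTF_8);
        System.out.println("received line: " + line);
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        cause.printStackTrace();
        ctx.close();
    }
}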
public static ChannelFuture handshake(final ChannelHandlerContext ctx, final HttpRequest request,
        final String websocketPath, final ChannelHandler handler) {
    final String connHead = request.getHttpHeaders().getHeaderString(HttpHeaders.Names.CONNECTION);
    final String upHead = request.getHttpHeaders().getHeaderString(HttpHeaders.Names.UPGRADE);
    final String sockHead =
            request.getHttpHeaders().getHeaderString(HttpHeaders.Names.SEC_WEBSOCKET_VERSION);
    final String keyHead =
            request.getHttpHeaders().getHeaderString(HttpHeaders.Names.SEC_WEBSOCKET_KEY);

    try {
        DefaultHttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET,
                request.getUri().getAbsolutePath().toString());
        req.setHeader(HttpHeaders.Names.SEC_WEBSOCKET_VERSION, sockHead);
        req.setHeader(HttpHeaders.Names.SEC_WEBSOCKET_KEY, keyHead);

        final Channel channel = ctx.channel();
        final String location = getWebSocketLocation(channel.pipeline(), request, websocketPath);
        final WebSocketServerHandshakerFactory wsFactory =
                new WebSocketServerHandshakerFactory(location, null, false);
        final WebSocketServerHandshaker handshaker = wsFactory.newHandshaker(req);

        if (handshaker == null) {
            WebSocketServerHandshakerFactory.sendUnsupportedWebSocketVersionResponse(channel);
            return null;
        } else if (!connHead.toLowerCase().contains(HttpHeaders.Values.UPGRADE.toLowerCase())
                || !upHead.toLowerCase().contains(HttpHeaders.Values.WEBSOCKET.toLowerCase())) {
            // not a valid websocket upgrade request
            logger.info("Invalid request: " + request.getUri());
            return null;
        } else {
            // We need to remove the RESTEasy handlers, otherwise the handshake response that
            // Netty writes to the channel will never make it back to the client.
            channel.pipeline().remove("resteasyEncoder");
            channel.pipeline().remove("resteasyDecoder");

            final ChannelFuture handshakeFuture = handshaker.handshake(channel, req);
            handshakeFuture.addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (!future.isSuccess()) {
                        ctx.fireExceptionCaught(future.cause());
                    } else {
                        final ChannelPipeline pipeline = future.channel().pipeline();
                        pipeline.replace(pipeline.last(), "customSocketHandler", handler);
                        pipeline.addBefore("customSocketHandler", "socketHandler",
                                new WebSocketProtocolHandler());
                    }
                }
            });
            WebSocketProtocolHandler.setHandshaker(ctx, handshaker);
            return handshakeFuture;
            // channel.pipeline().addBefore("timeout", "WS403Responder",
            //     WebSocketProtocolHandler.forbiddenHttpRequestResponder());
        }
    } catch (Exception e) {
        logger.error("Error trying to upgrade the channel to a socket", e);
    }
    return null;
}