@Override protected void encode(ChannelHandlerContext chc, SubscribeMessage message, ByteBuf out) { if (message.subscriptions().isEmpty()) { throw new IllegalArgumentException("Found a subscribe message with empty topics"); } if (message.getQos() != AbstractMessage.QOSType.LEAST_ONE) { throw new IllegalArgumentException( "Expected a message with QOS 1, found " + message.getQos()); } ByteBuf variableHeaderBuff = chc.alloc().buffer(4); ByteBuf buff = null; try { variableHeaderBuff.writeShort(message.getMessageID()); for (SubscribeMessage.Couple c : message.subscriptions()) { variableHeaderBuff.writeBytes(Utils.encodeString(c.topicFilter)); variableHeaderBuff.writeByte(c.qos); } int variableHeaderSize = variableHeaderBuff.readableBytes(); byte flags = Utils.encodeFlags(message); buff = chc.alloc().buffer(2 + variableHeaderSize); buff.writeByte(AbstractMessage.SUBSCRIBE << 4 | flags); buff.writeBytes(Utils.encodeRemainingLength(variableHeaderSize)); buff.writeBytes(variableHeaderBuff); out.writeBytes(buff); } finally { variableHeaderBuff.release(); if (buff != null) { buff.release(); } } }
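A minimal standalone sketch (not the project's code; values are placeholders) of the cleanup pattern fixed above: the second scratch buffer may never have been allocated if encoding failed early, so it is released only when non-null.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public final class EncodeScratchSketch {
    public static void main(String[] args) {
        ByteBuf variableHeader = Unpooled.buffer(4);
        ByteBuf frame = null; // stays null if an exception fires before allocation
        try {
            variableHeader.writeShort(42); // placeholder message id
            frame = Unpooled.buffer(2 + variableHeader.readableBytes());
            frame.writeBytes(variableHeader); // a real encoder would write its fixed header first
        } finally {
            variableHeader.release();
            if (frame != null) { // guard: frame.release() on a null reference would mask the original exception with an NPE
                frame.release();
            }
        }
    }
}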
void responseReceived(int id, @Nullable ByteBuf buffer) { Channel channel = requests.remove(id); if (channel == null || !channel.isActive()) { if (buffer != null) { buffer.release(); } return; } if (buffer == null) { Responses.sendStatus(HttpResponseStatus.BAD_GATEWAY, channel); return; } HttpResponse httpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, buffer); try { parseHeaders(httpResponse, buffer); Responses.addServer(httpResponse); if (!HttpHeaderUtil.isContentLengthSet(httpResponse)) { HttpHeaderUtil.setContentLength(httpResponse, buffer.readableBytes()); } } catch (Throwable e) { buffer.release(); try { LOG.error(e); } finally { Responses.sendStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR, channel); } return; } channel.writeAndFlush(httpResponse); }
@Test public void testLargeFileRegionChunked() throws Exception { EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseEncoder()); HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); response.headers().set(HttpHeaders.Names.TRANSFER_ENCODING, HttpHeaders.Values.CHUNKED); assertTrue(channel.writeOutbound(response)); ByteBuf buffer = channel.readOutbound(); assertEquals( "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n", buffer.toString(CharsetUtil.US_ASCII)); buffer.release(); assertTrue(channel.writeOutbound(FILE_REGION)); buffer = channel.readOutbound(); assertEquals("80000000\r\n", buffer.toString(CharsetUtil.US_ASCII)); buffer.release(); FileRegion region = channel.readOutbound(); assertSame(FILE_REGION, region); region.release(); buffer = channel.readOutbound(); assertEquals("\r\n", buffer.toString(CharsetUtil.US_ASCII)); buffer.release(); assertTrue(channel.writeOutbound(LastHttpContent.EMPTY_LAST_CONTENT)); buffer = channel.readOutbound(); assertEquals("0\r\n\r\n", buffer.toString(CharsetUtil.US_ASCII)); buffer.release(); assertFalse(channel.finish()); }
@Test public void testRemoveItselfWithReplayError() { EmbeddedChannel channel = new EmbeddedChannel( new ReplayingDecoder() { private boolean removed; @Override protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception { assertFalse(removed); ctx.pipeline().remove(this); in.readBytes(1000); removed = true; } }); ByteBuf buf = Unpooled.wrappedBuffer(new byte[] {'a', 'b', 'c'}); channel.writeInbound(buf.copy()); ByteBuf b = channel.readInbound(); assertEquals("Expected to still have all bytes in the buffer", b, buf); b.release(); buf.release(); }
@Test public void testRemoveItselfWriteBuffer() { final ByteBuf buf = Unpooled.buffer().writeBytes(new byte[] {'a', 'b', 'c'}); EmbeddedChannel channel = new EmbeddedChannel( new ReplayingDecoder() { private boolean removed; @Override protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception { assertFalse(removed); in.readByte(); ctx.pipeline().remove(this); // Writing more data after removal should not cause decode() to be called again buf.writeByte('d'); removed = true; } }); channel.writeInbound(buf.copy()); ByteBuf b = channel.readInbound(); assertEquals(b, Unpooled.wrappedBuffer(new byte[] {'b', 'c'})); b.release(); buf.release(); }
/** * Reads the content of the entry into a new buffer. Use {@link #readContent(ByteBufAllocator, * InputStream, int)} when the length of the stream is known. */ protected ByteBuf readContent(ByteBufAllocator alloc, InputStream in) throws IOException { ByteBuf buf = null; boolean success = false; try { buf = alloc.directBuffer(); for (; ; ) { if (buf.writeBytes(in, 8192) < 0) { break; } } success = true; if (buf.isReadable()) { return buf; } else { buf.release(); return Unpooled.EMPTY_BUFFER; } } finally { if (!success && buf != null) { buf.release(); } } }
public <SEND extends MessageLite, RECEIVE extends MessageLite> void send( RpcOutcomeListener<RECEIVE> listener, C connection, T rpcType, SEND protobufBody, Class<RECEIVE> clazz, boolean allowInEventLoop, ByteBuf... dataBodies) { Preconditions.checkArgument( allowInEventLoop || !connection.inEventLoop(), "You attempted to send while inside the rpc event thread. This isn't allowed because sending will block if the channel is backed up."); ByteBuf pBuffer = null; boolean completed = false; try { if (!allowInEventLoop && !connection.blockOnNotWritable(listener)) { // if we're not in the event loop and we're interrupted while blocking, skip sending this // message. return; } assert !Arrays.asList(dataBodies).contains(null); assert rpcConfig.checkSend(rpcType, protobufBody.getClass(), clazz); Preconditions.checkNotNull(protobufBody); ChannelListenerWithCoordinationId futureListener = queue.get(listener, clazz, connection); OutboundRpcMessage m = new OutboundRpcMessage( RpcMode.REQUEST, rpcType, futureListener.getCoordinationId(), protobufBody, dataBodies); ChannelFuture channelFuture = connection.getChannel().writeAndFlush(m); channelFuture.addListener(futureListener); channelFuture.addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); completed = true; } catch (Exception | AssertionError e) { listener.failed(new RpcException("Failure sending message.", e)); } finally { if (!completed) { if (pBuffer != null) { pBuffer.release(); } if (dataBodies != null) { for (ByteBuf b : dataBodies) { b.release(); } } } } }
@Override protected void deallocate() { if (currentHeader != null) { currentHeader.release(); } if (buf != null) { buf.release(); } if (region != null) { region.release(); } }
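A hedged sketch, assuming Netty 4.1, of the same null-guarded cleanup written with ReferenceCountUtil inside a composite reference-counted object; OutboundFrame and its fields are hypothetical, not the class the snippet above comes from.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.AbstractReferenceCounted;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.ReferenceCounted;

final class OutboundFrame extends AbstractReferenceCounted {
    private final ByteBuf header; // optional, may be null
    private final ByteBuf body;   // optional, may be null

    OutboundFrame(ByteBuf header, ByteBuf body) {
        this.header = header;
        this.body = body;
    }

    @Override
    protected void deallocate() {
        // ReferenceCountUtil.release(null) is a no-op, so no explicit null checks are needed here.
        ReferenceCountUtil.release(header);
        ReferenceCountUtil.release(body);
    }

    @Override
    public ReferenceCounted touch(Object hint) {
        return this;
    }

    public static void main(String[] args) {
        OutboundFrame frame = new OutboundFrame(Unpooled.copiedBuffer(new byte[] {1, 2}), null);
        frame.release(); // refCnt 1 -> 0 triggers deallocate(), which frees only the parts that exist
    }
}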
@Override public ByteBuf readPacket() throws IOException { Preconditions.checkState(!mClosed, "PacketReader is closed while reading packets."); ByteBuf buf = null; mLock.lock(); try { while (true) { if (mDone) { return null; } if (mPacketReaderException != null) { throw new IOException(mPacketReaderException); } buf = mPackets.poll(); // TODO(peis): Have a better criteria to resume so that we can have fewer state changes. if (!tooManyPacketsPending()) { resume(); } // Queue is empty. if (buf == null) { try { if (!mNotEmptyOrFailed.await(READ_TIMEOUT_MS, TimeUnit.MILLISECONDS)) { throw new IOException( String.format("Timeout while reading packet from block %d @ %s.", mId, mAddress)); } } catch (InterruptedException e) { throw Throwables.propagate(e); } } else { if (buf.readableBytes() == 0) { buf.release(); mDone = true; return null; } mPosToRead += buf.readableBytes(); Preconditions.checkState(mPosToRead - mStart <= mBytesToRead); return buf; } } } catch (Throwable e) { if (buf != null) { buf.release(); } throw e; } finally { mLock.unlock(); } }
@Test public void testGZIP2() throws Exception { ByteBuf data = Unpooled.wrappedBuffer("message".getBytes(CharsetUtil.UTF_8)); ByteBuf deflatedData = Unpooled.wrappedBuffer(gzip("message")); EmbeddedChannel chDecoderGZip = new EmbeddedChannel(createDecoder(ZlibWrapper.GZIP)); chDecoderGZip.writeInbound(deflatedData.copy()); assertTrue(chDecoderGZip.finish()); ByteBuf buf = chDecoderGZip.readInbound(); assertEquals(buf, data); assertNull(chDecoderGZip.readInbound()); data.release(); deflatedData.release(); buf.release(); }
private void testCompress0(ZlibWrapper encoderWrapper, ZlibWrapper decoderWrapper, ByteBuf data) throws Exception { EmbeddedChannel chEncoder = new EmbeddedChannel(createEncoder(encoderWrapper)); chEncoder.writeOutbound(data.copy()); chEncoder.flush(); EmbeddedChannel chDecoderZlib = new EmbeddedChannel(createDecoder(decoderWrapper)); for (; ; ) { ByteBuf deflatedData = chEncoder.readOutbound(); if (deflatedData == null) { break; } chDecoderZlib.writeInbound(deflatedData); } byte[] decompressed = new byte[data.readableBytes()]; int offset = 0; for (; ; ) { ByteBuf buf = chDecoderZlib.readInbound(); if (buf == null) { break; } int length = buf.readableBytes(); buf.readBytes(decompressed, offset, length); offset += length; buf.release(); if (offset == decompressed.length) { break; } } assertEquals(data, Unpooled.wrappedBuffer(decompressed)); assertNull(chDecoderZlib.readInbound()); // Closing an encoder channel will generate a footer. assertTrue(chEncoder.finish()); for (; ; ) { Object msg = chEncoder.readOutbound(); if (msg == null) { break; } ReferenceCountUtil.release(msg); } // But, the footer will be decoded into nothing. It's only for validation. assertFalse(chDecoderZlib.finish()); data.release(); }
public void encodeJsonP( Integer jsonpIndex, Queue<Packet> packets, ByteBuf out, ByteBufAllocator allocator, int limit) throws IOException { boolean jsonpMode = jsonpIndex != null; ByteBuf buf = allocateBuffer(allocator); int i = 0; while (true) { Packet packet = packets.poll(); if (packet == null || i == limit) { break; } ByteBuf packetBuf = allocateBuffer(allocator); encodePacket(packet, packetBuf, allocator, true); int packetSize = packetBuf.writerIndex(); buf.writeBytes(toChars(packetSize)); buf.writeBytes(B64_DELIMITER); buf.writeBytes(packetBuf); packetBuf.release(); i++; for (ByteBuf attachment : packet.getAttachments()) { ByteBuf encodedBuf = Base64.encode(attachment, Base64Dialect.URL_SAFE); buf.writeBytes(toChars(encodedBuf.readableBytes() + 2)); buf.writeBytes(B64_DELIMITER); buf.writeBytes(BINARY_HEADER); buf.writeBytes(encodedBuf); encodedBuf.release(); } } if (jsonpMode) { out.writeBytes(JSONP_HEAD); out.writeBytes(toChars(jsonpIndex)); out.writeBytes(JSONP_START); } processUtf8(buf, out, jsonpMode); buf.release(); if (jsonpMode) { out.writeBytes(JSONP_END); } }
/** * Reads the content of the entry into a new buffer. Use {@link #readContent(ByteBufAllocator, * InputStream)} when the length of the stream is unknown. */ protected ByteBuf readContent(ByteBufAllocator alloc, InputStream in, int length) throws IOException { if (length == 0) { return Unpooled.EMPTY_BUFFER; } ByteBuf buf = null; boolean success = false; try { buf = alloc.directBuffer(length); int remaining = length; for (; ; ) { final int readBytes = buf.writeBytes(in, remaining); if (readBytes < 0) { break; } remaining -= readBytes; if (remaining <= 0) { break; } } success = true; return buf; } finally { if (!success && buf != null) { buf.release(); } } }
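A hedged caller-side sketch for the two readContent(...) overloads above; readEntryContent is a simplified, hypothetical stand-in included only so the example compiles on its own. The point is the ownership contract: the caller must release the returned buffer, and releasing Unpooled.EMPTY_BUFFER is a harmless no-op.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public final class EntryContentCaller {
    // Hypothetical stand-in for readContent(alloc, in, length): sized up front, released on failure.
    static ByteBuf readEntryContent(ByteBufAllocator alloc, InputStream in, int length) throws IOException {
        if (length == 0) {
            return Unpooled.EMPTY_BUFFER;
        }
        ByteBuf buf = alloc.buffer(length);
        boolean success = false;
        try {
            int remaining = length;
            while (remaining > 0) {
                int read = buf.writeBytes(in, remaining);
                if (read < 0) {
                    break; // stream ended before 'length' bytes
                }
                remaining -= read;
            }
            success = true;
            return buf;
        } finally {
            if (!success) {
                buf.release(); // never leak the partially filled buffer on an exception
            }
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] payload = {10, 20, 30};
        ByteBuf content = readEntryContent(
                ByteBufAllocator.DEFAULT, new ByteArrayInputStream(payload), payload.length);
        try {
            System.out.println("read " + content.readableBytes() + " bytes");
        } finally {
            content.release(); // the caller owns the buffer; safe even if EMPTY_BUFFER was returned
        }
    }
}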
@Override public MessageBuffer<ByteBuf> writeStrings(String... messages) { ByteBuf strMultiBuf = NettyUtils.writeStrings(messages); buffer.writeBytes(strMultiBuf); strMultiBuf.release(); return this; }
@Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { logger.debug("================ entering client InBoundHandler channelRead ============"); // if the message is a response if (msg instanceof HttpResponse) { HttpResponse response = (HttpResponse) msg; logger.debug( "CONTENT_TYPE of the response received by the client: " + response.headers().get(HttpHeaders.Names.CONTENT_TYPE)); if (HttpHeaders.isContentLengthSet(response)) { reader = new ByteBufToBytes((int) HttpHeaders.getContentLength(response)); } } if (msg instanceof HttpContent) { HttpContent httpContent = (HttpContent) msg; ByteBuf content = httpContent.content(); reader.reading(content); content.release(); if (reader.isEnd()) { String resultStr = new String(reader.readFull()); logger.debug("Message received from the server: " + resultStr); ctx.close(); } } logger.debug("================ leaving client InBoundHandler channelRead ============"); }
@Override protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception { Protocol.DirectionData prot = this.server ? this.protocol.TO_SERVER : this.protocol.TO_CLIENT; ByteBuf copy = in.copy(); try { int packetId = DefinedPacket.readVarInt(in); DefinedPacket packet = null; if (prot.hasPacket(packetId)) { packet = prot.createPacket(packetId); packet.read(in, prot.getDirection(), this.protocolVersion); if (in.readableBytes() != 0) { throw new BadPacketException( "Did not read all bytes from packet " + packet.getClass() + " " + packetId + " Protocol " + this.protocol + " Direction " + prot); } } else { in.skipBytes(in.readableBytes()); } out.add(new PacketWrapper(packet, copy)); copy = null; } finally { if (copy != null) { copy.release(); } } }
@Override public HttpContent readChunk(ChannelHandlerContext ctx) throws Exception { long offset = this.offset; if (offset >= endOffset) { if (sentLastChunk) { return null; } else { // Send last chunk for this file sentLastChunk = true; return new DefaultLastHttpContent(); } } int chunkSize = (int) Math.min(this.chunkSize, endOffset - offset); // Allocate a heap buffer: it is backed by a byte array, so we can fill it directly and save a copy ByteBuf buf = ctx.alloc().heapBuffer(chunkSize); boolean release = true; try { file.readFully(buf.array(), buf.arrayOffset(), chunkSize); buf.writerIndex(chunkSize); this.offset = offset + chunkSize; release = false; return new DefaultHttpContent(buf); } finally { if (release) { buf.release(); } } }
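A standalone sketch of the array-backed access the comment refers to. A heap buffer from ctx.alloc().heapBuffer(...) is always backed by a byte[]; for arbitrary buffers the fast path must be guarded with hasArray(), as below (the class and helper method are illustrative).

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.buffer.UnpooledByteBufAllocator;

public final class ArrayBackedFill {
    static void fill(ByteBuf buf, byte[] source, int len) {
        if (buf.hasArray()) {
            // Copy straight into the backing array, then advance the writer index manually.
            System.arraycopy(source, 0, buf.array(), buf.arrayOffset() + buf.writerIndex(), len);
            buf.writerIndex(buf.writerIndex() + len);
        } else {
            // Direct buffers expose no array; fall back to a normal write (one extra copy).
            buf.writeBytes(source, 0, len);
        }
    }

    public static void main(String[] args) {
        byte[] data = {1, 2, 3, 4};
        ByteBuf heap = Unpooled.buffer(8);                                 // array-backed
        ByteBuf direct = UnpooledByteBufAllocator.DEFAULT.directBuffer(8); // not array-backed
        try {
            fill(heap, data, data.length);
            fill(direct, data, data.length);
            System.out.println(heap.readableBytes() + " / " + direct.readableBytes()); // 4 / 4
        } finally {
            heap.release();
            direct.release();
        }
    }
}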
/** * Decodes the client connection preface string from the input buffer. * * @return {@code true} if processing of the client preface string is complete. Since client * preface strings can only be received by servers, returns true immediately for client * endpoints. */ private boolean readClientPrefaceString(ByteBuf in) throws Http2Exception { if (clientPrefaceString == null) { return true; } int prefaceRemaining = clientPrefaceString.readableBytes(); int bytesRead = min(in.readableBytes(), prefaceRemaining); // If the input so far doesn't match the preface, break the connection. if (bytesRead == 0 || !ByteBufUtil.equals( in, in.readerIndex(), clientPrefaceString, clientPrefaceString.readerIndex(), bytesRead)) { String receivedBytes = hexDump( in, in.readerIndex(), min(in.readableBytes(), clientPrefaceString.readableBytes())); throw connectionError( PROTOCOL_ERROR, "HTTP/2 client preface string missing or corrupt. " + "Hex dump for received bytes: %s", receivedBytes); } in.skipBytes(bytesRead); clientPrefaceString.skipBytes(bytesRead); if (!clientPrefaceString.isReadable()) { // Entire preface has been read. clientPrefaceString.release(); clientPrefaceString = null; return true; } return false; }
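A hedged, self-contained sketch of the prefix comparison used above: only the bytes received so far are compared against the well-known HTTP/2 connection preface, so a short first read is not mistaken for corruption.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public final class PrefacePrefixCheck {
    private static final String PREFACE = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";

    // True if every byte received so far matches the expected preface prefix.
    static boolean matchesPrefacePrefix(ByteBuf in, ByteBuf expected) {
        int compare = Math.min(in.readableBytes(), expected.readableBytes());
        return compare > 0
                && ByteBufUtil.equals(in, in.readerIndex(), expected, expected.readerIndex(), compare);
    }

    public static void main(String[] args) {
        ByteBuf expected = Unpooled.copiedBuffer(PREFACE, CharsetUtil.US_ASCII);
        ByteBuf partial = Unpooled.copiedBuffer("PRI * HT", CharsetUtil.US_ASCII); // first TCP read
        try {
            System.out.println(matchesPrefacePrefix(partial, expected)); // true: wait for more bytes
        } finally {
            partial.release();
            expected.release();
        }
    }
}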
private static void processGoAwayWriteResult( final ChannelHandlerContext ctx, final int lastStreamId, final long errorCode, final ByteBuf debugData, ChannelFuture future) { try { if (future.isSuccess()) { if (errorCode != NO_ERROR.code()) { if (logger.isDebugEnabled()) { logger.debug( format( "Sent GOAWAY: lastStreamId '%d', errorCode '%d', " + "debugData '%s'. Forcing shutdown of the connection.", lastStreamId, errorCode, debugData.toString(UTF_8)), future.cause()); } ctx.close(); } } else { if (logger.isErrorEnabled()) { logger.error( format( "Sending GOAWAY failed: lastStreamId '%d', errorCode '%d', " + "debugData '%s'. Forcing shutdown of the connection.", lastStreamId, errorCode, debugData.toString(UTF_8)), future.cause()); } ctx.close(); } } finally { // We're done with the debug data now. debugData.release(); } }
private static void sendListing(ChannelHandlerContext ctx, File dir) { FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1, OK); response.headers().set(CONTENT_TYPE, "text/html; charset=UTF-8"); StringBuilder buf = new StringBuilder(); String dirPath = dir.getPath(); buf.append("<!DOCTYPE html>\r\n"); buf.append("<html><head><title>"); buf.append(dirPath); buf.append(" directory:"); buf.append("</title></head><body>\r\n"); buf.append("<h3>"); buf.append(dirPath).append(" directory:"); buf.append("</h3>\r\n"); buf.append("<ul>"); buf.append("<li>Link: <a href=\"../\">..</a></li>\r\n"); for (File f : dir.listFiles()) { if (f.isHidden() || !f.canRead()) { continue; } String name = f.getName(); if (!ALLOWED_FILE_NAME.matcher(name).matches()) { continue; } buf.append("<li>Link: <a href=\""); buf.append(name); buf.append("\">"); buf.append(name); buf.append("</a></li>\r\n"); } buf.append("</ul></body></html>\r\n"); ByteBuf buffer = Unpooled.copiedBuffer(buf, CharsetUtil.UTF_8); response.content().writeBytes(buffer); buffer.release(); ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE); }
@Override protected int doReadMessages(List<Object> buf) throws Exception { SctpChannel ch = javaChannel(); RecvByteBufAllocator.Handle allocHandle = unsafe().recvBufAllocHandle(); ByteBuf buffer = allocHandle.allocate(config().getAllocator()); boolean free = true; try { ByteBuffer data = buffer.internalNioBuffer(buffer.writerIndex(), buffer.writableBytes()); int pos = data.position(); MessageInfo messageInfo = ch.receive(data, null, notificationHandler); if (messageInfo == null) { return 0; } buf.add( new SctpMessage( messageInfo, buffer.writerIndex(buffer.writerIndex() + data.position() - pos))); free = false; return 1; } catch (Throwable cause) { PlatformDependent.throwException(cause); return -1; } finally { int bytesRead = buffer.readableBytes(); allocHandle.record(bytesRead); if (free) { buffer.release(); } } }
private void testCompressNone(ZlibWrapper encoderWrapper, ZlibWrapper decoderWrapper) throws Exception { EmbeddedChannel chEncoder = new EmbeddedChannel(createEncoder(encoderWrapper)); // Closing an encoder channel without writing anything should generate both header and footer. assertTrue(chEncoder.finish()); EmbeddedChannel chDecoderZlib = new EmbeddedChannel(createDecoder(decoderWrapper)); for (; ; ) { ByteBuf deflatedData = chEncoder.readOutbound(); if (deflatedData == null) { break; } chDecoderZlib.writeInbound(deflatedData); } // Decoder should not generate anything at all. for (; ; ) { ByteBuf buf = chDecoderZlib.readInbound(); if (buf == null) { break; } buf.release(); fail("should decode nothing"); } assertFalse(chDecoderZlib.finish()); }
@Override public void appendMessages(Tpp tpp, Collection<MessageBatchWithRawData> batches) throws Exception { ByteBuf bodyBuf = Unpooled.buffer(); KafkaMessageBrokerSender sender = getSender(tpp.getTopic()); try { for (MessageBatchWithRawData batch : batches) { List<PartialDecodedMessage> pdmsgs = batch.getMessages(); for (PartialDecodedMessage pdmsg : pdmsgs) { m_messageCodec.encodePartial(pdmsg, bodyBuf); byte[] bytes = new byte[bodyBuf.readableBytes()]; bodyBuf.readBytes(bytes); bodyBuf.clear(); ByteBuf propertiesBuf = pdmsg.getDurableProperties(); HermesPrimitiveCodec codec = new HermesPrimitiveCodec(propertiesBuf); Map<String, String> propertiesMap = codec.readStringStringMap(); sender.send(tpp.getTopic(), propertiesMap.get("pK"), bytes); BrokerStatusMonitor.INSTANCE.kafkaSend(tpp.getTopic()); } } } finally { bodyBuf.release(); } }
@Override public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { if (channelBuffer.refCnt() >= 1) { channelBuffer.release(); } super.handlerRemoved(ctx); }
@Override protected int doWrite(MessageList<Object> msgs, int index) throws Exception { int size = msgs.size(); int writeIndex = index; for (; ; ) { if (writeIndex >= size) { break; } Object msg = msgs.get(writeIndex); if (msg instanceof ByteBuf) { ByteBuf buf = (ByteBuf) msg; while (buf.isReadable()) { doWriteBytes(buf); } buf.release(); writeIndex++; } else if (msg instanceof FileRegion) { FileRegion region = (FileRegion) msg; doWriteFileRegion(region); region.release(); writeIndex++; } else { throw new UnsupportedOperationException( "unsupported message type: " + StringUtil.simpleClassName(msg)); } } return writeIndex - index; }
public void clear() { if (buffer != null && buffer != DeadBuf.DEAD_BUFFER) { buffer.release(); buffer = DeadBuf.DEAD_BUFFER; recordCount = 0; } }
@Override protected int doReadMessages(List<Object> buf) throws Exception { DatagramChannel ch = javaChannel(); DatagramChannelConfig config = config(); RecvByteBufAllocator.Handle allocHandle = this.allocHandle; if (allocHandle == null) { this.allocHandle = allocHandle = config.getRecvByteBufAllocator().newHandle(); } ByteBuf data = allocHandle.allocate(config.getAllocator()); boolean free = true; try { ByteBuffer nioData = data.internalNioBuffer(data.writerIndex(), data.writableBytes()); int pos = nioData.position(); InetSocketAddress remoteAddress = (InetSocketAddress) ch.receive(nioData); if (remoteAddress == null) { return 0; } int readBytes = nioData.position() - pos; data.writerIndex(data.writerIndex() + readBytes); allocHandle.record(readBytes); buf.add(new DatagramPacket(data, localAddress(), remoteAddress)); free = false; return 1; } catch (Throwable cause) { PlatformDependent.throwException(cause); return -1; } finally { if (free) { data.release(); } } }
static ByteBuf[] readCertificates(File file) throws CertificateException { String content; try { content = readContent(file); } catch (IOException e) { throw new CertificateException("failed to read a file: " + file, e); } List<ByteBuf> certs = new ArrayList<ByteBuf>(); Matcher m = CERT_PATTERN.matcher(content); int start = 0; for (; ; ) { if (!m.find(start)) { break; } ByteBuf base64 = Unpooled.copiedBuffer(m.group(1), CharsetUtil.US_ASCII); ByteBuf der = Base64.decode(base64); base64.release(); certs.add(der); start = m.end(); } if (certs.isEmpty()) { throw new CertificateException("found no certificates: " + file); } return certs.toArray(new ByteBuf[certs.size()]); }
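A small hedged sketch of the intermediate-buffer handling above: Base64.decode allocates a fresh buffer, so the temporary base64 input can be released as soon as decoding returns (the sample string is arbitrary, not a real certificate).

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.base64.Base64;
import io.netty.util.CharsetUtil;

public final class DecodeBase64Block {
    public static void main(String[] args) {
        ByteBuf base64 = Unpooled.copiedBuffer("aGVsbG8=", CharsetUtil.US_ASCII);
        ByteBuf decoded = Base64.decode(base64); // new, independent buffer
        base64.release();                        // safe: 'decoded' does not share the input's memory
        try {
            System.out.println(decoded.toString(CharsetUtil.US_ASCII)); // "hello"
        } finally {
            decoded.release();
        }
    }
}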
public ByteBuf readChunk(ChannelHandlerContext ctx) throws Exception { long offset = this.offset; if (offset >= this.endOffset) { return null; } int chunkSize = (int) Math.min((long) this.chunkSize, this.endOffset - offset); ByteBuf buffer = ctx.alloc().buffer(chunkSize); boolean release = true; try { int readBytes = 0; for (; ; ) { int localReadBytes = buffer.writeBytes((ScatteringByteChannel) this.in, chunkSize - readBytes); if (localReadBytes >= 0) { readBytes += localReadBytes; if (readBytes != chunkSize) { continue; } } this.offset += readBytes; release = false; return buffer; } } finally { if (release) { buffer.release(); } } }
/** * Adds a fragment to the block. * * @param fragment the fragment of the headers block to be added. * @param alloc allocator for new blocks if needed. * @param endOfHeaders flag indicating whether the current frame is the end of the headers. This * is used for an optimization for when the first fragment is the full block. In that case, * the buffer is used directly without copying. */ final void addFragment(ByteBuf fragment, ByteBufAllocator alloc, boolean endOfHeaders) throws Http2Exception { if (headerBlock == null) { if (fragment.readableBytes() > headersDecoder.configuration().maxHeaderSize()) { headerSizeExceeded(); } if (endOfHeaders) { // Optimization - don't bother copying, just use the buffer as-is. Need // to retain since we release when the header block is built. headerBlock = fragment.retain(); } else { headerBlock = alloc.buffer(fragment.readableBytes()); headerBlock.writeBytes(fragment); } return; } if (headersDecoder.configuration().maxHeaderSize() - fragment.readableBytes() < headerBlock.readableBytes()) { headerSizeExceeded(); } if (headerBlock.isWritable(fragment.readableBytes())) { // The buffer can hold the requested bytes, just write it directly. headerBlock.writeBytes(fragment); } else { // Allocate a new buffer that is big enough to hold the entire header block so far. ByteBuf buf = alloc.buffer(headerBlock.readableBytes() + fragment.readableBytes()); buf.writeBytes(headerBlock); buf.writeBytes(fragment); headerBlock.release(); headerBlock = buf; } }
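A hedged, standalone sketch of the retain-versus-copy decision described in the comment above; FragmentAccumulator is illustrative, not the decoder's actual class, and it omits the size-limit checks.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public final class FragmentAccumulator {
    private ByteBuf block;

    void add(ByteBuf fragment, ByteBufAllocator alloc, boolean last) {
        if (block == null && last) {
            block = fragment.retain(); // whole block arrived in one fragment: share it, no copy
            return;
        }
        if (block == null) {
            block = alloc.buffer(fragment.readableBytes());
        } else if (!block.isWritable(fragment.readableBytes())) {
            // Grow: copy what we have into a buffer large enough for both parts.
            ByteBuf bigger = alloc.buffer(block.readableBytes() + fragment.readableBytes());
            bigger.writeBytes(block);
            block.release();
            block = bigger;
        }
        block.writeBytes(fragment);
    }

    public static void main(String[] args) {
        FragmentAccumulator acc = new FragmentAccumulator();
        ByteBuf first = Unpooled.copiedBuffer("head", CharsetUtil.US_ASCII);
        ByteBuf second = Unpooled.copiedBuffer("ers", CharsetUtil.US_ASCII);
        acc.add(first, ByteBufAllocator.DEFAULT, false);
        acc.add(second, ByteBufAllocator.DEFAULT, true);
        first.release();  // the accumulator copied the data, so the caller still releases its fragments
        second.release();
        System.out.println(acc.block.toString(CharsetUtil.US_ASCII)); // "headers"
        acc.block.release();
    }
}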