@Override public HornetQBuffer encode(final RemotingConnection connection) { HornetQBuffer buffer = message.getEncodedBuffer(); // Sanity check if (buffer.writerIndex() != message.getEndOfMessagePosition()) { throw new IllegalStateException("Wrong encode position"); } buffer.writeBoolean(requiresResponse); size = buffer.writerIndex(); // Write standard headers int len = size - DataConstants.SIZE_INT; buffer.setInt(0, len); buffer.setByte(DataConstants.SIZE_INT, getType()); buffer.setLong(DataConstants.SIZE_INT + DataConstants.SIZE_BYTE, channelID); // Position reader for reading by Netty buffer.readerIndex(0); message.resetCopied(); return buffer; }
public synchronized void write(final PagedMessage message) throws Exception { if (!file.isOpen()) { return; } ByteBuffer buffer = fileFactory.newBuffer(message.getEncodeSize() + Page.SIZE_RECORD); HornetQBuffer wrap = HornetQBuffers.wrappedBuffer(buffer); wrap.clear(); wrap.writeByte(Page.START_BYTE); wrap.writeInt(0); int startIndex = wrap.writerIndex(); message.encode(wrap); int endIndex = wrap.writerIndex(); wrap.setInt(1, endIndex - startIndex); // The encoded length wrap.writeByte(Page.END_BYTE); buffer.rewind(); file.writeDirect(buffer, false); if (pageCache != null) { pageCache.addLiveMessage(message); } numberOfMessages.incrementAndGet(); size.addAndGet(buffer.limit()); storageManager.pageWrite(message, pageId); }
/**
 * Transfers {@code length} bytes from this buffer into {@code dst}, starting at
 * {@code dst}'s current writer index, and advances {@code dst}'s writer index.
 *
 * @param dst the destination buffer
 * @param length how many bytes to transfer
 * @throws IndexOutOfBoundsException if {@code dst} cannot accept {@code length} bytes
 */
public void readBytes(final HornetQBuffer dst, final int length) {
  if (dst.writableBytes() < length) {
    throw new IndexOutOfBoundsException();
  }
  int writePos = dst.writerIndex();
  readBytes(dst, writePos, length);
  dst.writerIndex(writePos + length);
}
/**
 * Debug Helper!!!!
 *
 * <p>I'm leaving this message here without any callers for a reason: During debugs it's important
 * eventually to identify what's on the bodies, and this method will give you a good idea about
 * them. Add the message.bodyToString() to the Watch variables on the debugger view and this will
 * show up like a charm!!!
 *
 * <p>Reader indexes of both buffers are saved and restored, so calling this has no
 * observable effect on the message.
 *
 * @return a hex dump of the internal buffer (and body buffer, if present)
 */
public String bodyToString() {
  getEndOfBodyPosition();

  // Snapshot the main buffer from position 0 without disturbing the reader index.
  int readerIndex1 = this.buffer.readerIndex();
  buffer.readerIndex(0);
  byte[] buffer1 = new byte[buffer.writerIndex()];
  buffer.readBytes(buffer1);
  buffer.readerIndex(readerIndex1);

  // Same for the (optional) body buffer.
  byte[] buffer2 = null;
  if (bodyBuffer != null) {
    int readerIndex2 = this.bodyBuffer.readerIndex();
    bodyBuffer.readerIndex(0);
    buffer2 = new byte[bodyBuffer.writerIndex() - bodyBuffer.readerIndex()];
    bodyBuffer.readBytes(buffer2);
    bodyBuffer.readerIndex(readerIndex2);
  }

  // Fixed: the original string had a stray comma right after '[' and never closed the bracket.
  return "ServerMessage@"
      + Integer.toHexString(System.identityHashCode(this))
      + "[bodyStart="
      + getEndOfBodyPosition()
      + ", buffer="
      + ByteUtil.bytesToHex(buffer1, 1)
      + ", bodyBuffer="
      + ByteUtil.bytesToHex(buffer2, 1)
      + "]";
}
/** Write pending output into file */ public void flush() throws Exception { if (writingChannel != null) { sequentialFile.position(0); // To Fix the size of the file writingChannel.writerIndex(writingChannel.capacity()); sequentialFile.writeInternal(writingChannel.toByteBuffer()); sequentialFile.close(); newDataFiles.add(currentFile); } writingChannel = null; }
public HornetQBuffer encode(final RemotingConnection connection) { HornetQBuffer buffer = connection.createBuffer(PacketImpl.INITIAL_PACKET_SIZE); // The standard header fields buffer.writeInt(0); // The length gets filled in at the end buffer.writeByte(type); buffer.writeLong(channelID); encodeRest(buffer); size = buffer.writerIndex(); // The length doesn't include the actual length byte int len = size - DataConstants.SIZE_INT; buffer.setInt(0, len); return buffer; }
// This must be synchronized as it can be called concurrently id the message is being delivered // concurrently to // many queues - the first caller in this case will actually encode it private synchronized HornetQBuffer encodeToBuffer() { if (!bufferValid) { if (bufferUsed) { // Cannot use same buffer - must copy forceCopy(); } int bodySize = getEndOfBodyPosition(); // Clebert: I've started sending this on encoding due to conversions between protocols // and making sure we are not losing the buffer start position between protocols this.endOfBodyPosition = bodySize; // write it buffer.setInt(BUFFER_HEADER_SPACE, bodySize); // Position at end of body and skip past the message end position int. // check for enough room in the buffer even though it is dynamic if ((bodySize + 4) > buffer.capacity()) { buffer.setIndex(0, bodySize); buffer.writeInt(0); } else { buffer.setIndex(0, bodySize + DataConstants.SIZE_INT); } encodeHeadersAndProperties(buffer); // Write end of message position endOfMessagePosition = buffer.writerIndex(); buffer.setInt(bodySize, endOfMessagePosition); bufferValid = true; } return buffer; }
public HornetQBuffer toHornetQBuffer() throws Exception { if (buffer == null) { if (bytesBody != null) { buffer = HornetQBuffers.dynamicBuffer(bytesBody.length + 512); } else { buffer = HornetQBuffers.dynamicBuffer(512); } if (isPing()) { buffer.writeByte((byte) 10); return buffer; } StringBuffer head = new StringBuffer(); head.append(command); head.append(Stomp.NEWLINE); // Output the headers. for (Map.Entry<String, String> header : headers.entrySet()) { head.append(header.getKey()); head.append(Stomp.Headers.SEPARATOR); head.append(header.getValue()); head.append(Stomp.NEWLINE); } // Add a newline to separate the headers from the content. head.append(Stomp.NEWLINE); buffer.writeBytes(head.toString().getBytes("UTF-8")); if (bytesBody != null) { buffer.writeBytes(bytesBody); } buffer.writeBytes(END_OF_FRAME); size = buffer.writerIndex(); } return buffer; }
// Reads every message record out of this page file. Each record must be framed as
// START_BYTE | length (int) | body | END_BYTE; on the first malformed record the file
// is marked suspect and scanning stops, returning the messages decoded so far.
public synchronized List<PagedMessage> read(StorageManager storage) throws Exception {
  if (isDebug) {
    HornetQServerLogger.LOGGER.debug(
        "reading page " + this.pageId + " on address = " + storeName);
  }

  if (!file.isOpen()) {
    throw HornetQMessageBundle.BUNDLE.invalidPageIO();
  }

  ArrayList<PagedMessage> messages = new ArrayList<PagedMessage>();

  size.set((int) file.size());

  // Using direct buffer, as described on https://jira.jboss.org/browse/HORNETQ-467
  ByteBuffer directBuffer = storage.allocateDirectBuffer((int) file.size());

  try {

    // Slurp the whole file, then scan it through a wrapped HornetQBuffer.
    file.position(0);
    file.read(directBuffer);

    directBuffer.rewind();

    HornetQBuffer fileBuffer = HornetQBuffers.wrappedBuffer(directBuffer);
    // Mark the whole capacity as readable — the wrap starts with writerIndex 0.
    fileBuffer.writerIndex(fileBuffer.capacity());

    while (fileBuffer.readable()) {
      final int position = fileBuffer.readerIndex();

      byte byteRead = fileBuffer.readByte();

      if (byteRead == Page.START_BYTE) {
        // A candidate record: check the length field fits, then verify the trailing
        // END_BYTE sits exactly where the length says, before committing to decode.
        if (fileBuffer.readerIndex() + DataConstants.SIZE_INT < fileBuffer.capacity()) {
          int messageSize = fileBuffer.readInt();
          int oldPos = fileBuffer.readerIndex();
          if (fileBuffer.readerIndex() + messageSize < fileBuffer.capacity()
              && fileBuffer.getByte(oldPos + messageSize) == Page.END_BYTE) {
            PagedMessage msg = new PagedMessageImpl();
            msg.decode(fileBuffer);
            byte b = fileBuffer.readByte();
            if (b != Page.END_BYTE) {
              // Sanity Check: This would only happen if there is a bug on decode or any internal
              // code, as
              // this
              // constraint was already checked
              throw new IllegalStateException(
                  "Internal error, it wasn't possible to locate END_BYTE " + b);
            }
            msg.initMessage(storage);
            if (isTrace) {
              HornetQServerLogger.LOGGER.trace(
                  "Reading message "
                      + msg
                      + " on pageId="
                      + this.pageId
                      + " for address="
                      + storeName);
            }
            messages.add(msg);
          } else {
            // Truncated body or END_BYTE missing: stop and flag the file.
            markFileAsSuspect(file.getFileName(), position, messages.size());
            break;
          }
        }
      } else {
        // Byte at record boundary is not START_BYTE: corrupt framing, stop scanning.
        markFileAsSuspect(file.getFileName(), position, messages.size());
        break;
      }
    }
  } finally {
    storage.freeDirectBuffer(directBuffer);
  }

  numberOfMessages.set(messages.size());

  return messages;
}
public static SequentialFile writeControlFile( final SequentialFileFactory fileFactory, final List<JournalFile> files, final List<JournalFile> newFiles, final List<Pair<String, String>> renames) throws Exception { SequentialFile controlFile = fileFactory.createSequentialFile(AbstractJournalUpdateTask.FILE_COMPACT_CONTROL, 1); try { controlFile.open(1, false); JournalImpl.initFileHeader(fileFactory, controlFile, 0, 0); HornetQBuffer filesToRename = HornetQBuffers.dynamicBuffer(1); // DataFiles first if (files == null) { filesToRename.writeInt(0); } else { filesToRename.writeInt(files.size()); for (JournalFile file : files) { filesToRename.writeUTF(file.getFile().getFileName()); } } // New Files second if (newFiles == null) { filesToRename.writeInt(0); } else { filesToRename.writeInt(newFiles.size()); for (JournalFile file : newFiles) { filesToRename.writeUTF(file.getFile().getFileName()); } } // Renames from clean up third if (renames == null) { filesToRename.writeInt(0); } else { filesToRename.writeInt(renames.size()); for (Pair<String, String> rename : renames) { filesToRename.writeUTF(rename.getA()); filesToRename.writeUTF(rename.getB()); } } JournalInternalRecord controlRecord = new JournalAddRecord( true, 1, (byte) 0, new ByteArrayEncoding(filesToRename.toByteBuffer().array())); HornetQBuffer renameBuffer = HornetQBuffers.dynamicBuffer(filesToRename.writerIndex()); controlRecord.setFileID(0); controlRecord.encode(renameBuffer); ByteBuffer writeBuffer = fileFactory.newBuffer(renameBuffer.writerIndex()); writeBuffer.put(renameBuffer.toByteBuffer().array(), 0, renameBuffer.writerIndex()); writeBuffer.rewind(); controlFile.writeDirect(writeBuffer, true); return controlFile; } finally { controlFile.close(); } }
/**
 * Returns the cached end-of-body position, lazily initializing it from the buffer's
 * current writer index the first time it is requested (a negative value means unset).
 *
 * @return the end-of-body position within the message buffer
 */
public int getEndOfBodyPosition() {
  if (endOfBodyPosition >= 0) {
    return endOfBodyPosition;
  }
  endOfBodyPosition = buffer.writerIndex();
  return endOfBodyPosition;
}
// Delivers a server message to a STOMP subscriber: inlines large-message bodies,
// decompresses compressed bodies, builds the STOMP frame, and either sends it
// immediately (AUTO ack, with ack+commit on success) or records it for client ack
// before sending. Returns the frame's encoded size, or 0 if anything failed.
public int sendMessage(ServerMessage serverMessage, long consumerID, int deliveryCount) {
  LargeServerMessageImpl largeMessage = null;
  ServerMessage newServerMessage = serverMessage;
  try {
    StompSubscription subscription = subscriptions.get(consumerID);
    StompFrame frame = null;
    if (serverMessage.isLargeMessage()) {
      // Large messages store their body externally: copy the message and stream the
      // body into a regular in-memory body so the STOMP frame can carry it.
      newServerMessage = serverMessage.copy();

      largeMessage = (LargeServerMessageImpl) serverMessage;
      BodyEncoder encoder = largeMessage.getBodyEncoder();
      encoder.open();
      int bodySize = (int) encoder.getLargeBodySize();

      // large message doesn't have a body.
      ((ServerMessageImpl) newServerMessage).createBody(bodySize);
      encoder.encode(newServerMessage.getBodyBuffer(), bodySize);
      encoder.close();
    }

    if (serverMessage.getBooleanProperty(Message.HDR_LARGE_COMPRESSED)) {
      // decompress
      HornetQBuffer qbuff = newServerMessage.getBodyBuffer();
      int bytesToRead = qbuff.writerIndex() - MessageImpl.BODY_OFFSET;
      Inflater inflater = new Inflater();
      // NOTE(review): assumes the reader index is positioned at the start of the
      // compressed payload here — confirm against the body-buffer contract.
      inflater.setInput(qbuff.readBytes(bytesToRead).toByteBuffer().array());

      // get the real size of large message
      long sizeBody = newServerMessage.getLongProperty(Message.HDR_LARGE_BODY_SIZE);

      byte[] data = new byte[(int) sizeBody];
      inflater.inflate(data);
      inflater.end();
      // Replace the compressed body with the inflated bytes in place.
      qbuff.resetReaderIndex();
      qbuff.resetWriterIndex();
      qbuff.writeBytes(data);
    }

    frame = connection.createStompMessage(newServerMessage, subscription, deliveryCount);

    int length = frame.getEncodedSize();

    if (subscription.getAck().equals(Stomp.Headers.Subscribe.AckModeValues.AUTO)) {
      if (manager.send(connection, frame)) {
        // we ack and commit only if the send is successful
        session.acknowledge(consumerID, newServerMessage.getMessageID());
        session.commit();
      }
    } else {
      messagesToAck.put(
          newServerMessage.getMessageID(), new Pair<Long, Integer>(consumerID, length));
      // Must send AFTER adding to messagesToAck - or could get acked from client BEFORE it's been
      // added!
      manager.send(connection, frame);
    }

    return length;
  } catch (Exception e) {
    // NOTE(review): all failures are swallowed and reported as 0 bytes sent —
    // deliberate best-effort, but consider logging the exception for diagnosis.
    return 0;
  } finally {
    if (largeMessage != null) {
      // Release external large-message resources regardless of outcome.
      largeMessage.releaseResources();
      largeMessage = null;
    }
  }
}