/*
 * Copy constructor
 */
protected MessageImpl(final MessageImpl other, TypedProperties properties)
{
   messageID = other.getMessageID();
   userID = other.getUserID();
   address = other.getAddress();
   type = other.getType();
   durable = other.isDurable();
   expiration = other.getExpiration();
   timestamp = other.getTimestamp();
   priority = other.getPriority();
   this.properties = new TypedProperties(properties);

   // This MUST be synchronized using the monitor on the other message to prevent it running
   // concurrently with getEncodedBuffer(), otherwise it can introduce a race condition when
   // delivering concurrently to many subscriptions and bridging to other nodes in a cluster
   synchronized (other)
   {
      bufferValid = other.bufferValid;
      endOfBodyPosition = other.endOfBodyPosition;
      endOfMessagePosition = other.endOfMessagePosition;
      copied = other.copied;

      if (other.buffer != null)
      {
         other.bufferUsed = true;

         // We need to copy the underlying buffer too, since the different messages might
         // thereafter have different properties set on them, making their encodings different
         buffer = other.buffer.copy(0, other.buffer.writerIndex());

         buffer.setIndex(other.buffer.readerIndex(), buffer.capacity());
      }
   }
}
public synchronized HornetQBuffer getBodyBufferCopy()
{
   // Must copy buffer before sending it
   HornetQBuffer newBuffer = buffer.copy(0, buffer.capacity());

   newBuffer.setIndex(0, getEndOfBodyPosition());

   return new ResetLimitWrappedHornetQBuffer(BODY_OFFSET, newBuffer, null);
}
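A hedged usage sketch: because getBodyBufferCopy() wraps a fresh copy of the underlying buffer, a caller can consume it without disturbing the original message's reader/writer indices. The variable name message is hypothetical, and the sketch assumes the standard HornetQBuffer accessors readableBytes() and readBytes(byte[]).

// Hypothetical caller: drains the copied body without touching the original message's buffer.
HornetQBuffer bodyCopy = message.getBodyBufferCopy();
byte[] payload = new byte[bodyCopy.readableBytes()];
bodyCopy.readBytes(payload);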
private void forceCopy()
{
   // Must copy buffer before sending it
   buffer = buffer.copy(0, buffer.capacity());

   buffer.setIndex(0, getEndOfBodyPosition());

   if (bodyBuffer != null)
   {
      bodyBuffer.setBuffer(buffer);
   }

   bufferUsed = false;
}
/**
 * Write pending output into file
 */
public void flush() throws Exception
{
   if (writingChannel != null)
   {
      sequentialFile.position(0);

      // To fix the size of the file
      writingChannel.writerIndex(writingChannel.capacity());

      sequentialFile.writeInternal(writingChannel.toByteBuffer());

      sequentialFile.close();

      newDataFiles.add(currentFile);
   }

   writingChannel = null;
}
public synchronized HornetQBuffer getEncodedBuffer()
{
   HornetQBuffer buff = encodeToBuffer();

   if (bufferUsed)
   {
      HornetQBuffer copied = buff.copy(0, buff.capacity());

      copied.setIndex(0, endOfMessagePosition);

      return copied;
   }
   else
   {
      buffer.setIndex(0, endOfMessagePosition);

      bufferUsed = true;

      return buffer;
   }
}
// This must be synchronized as it can be called concurrently if the message is being delivered
// concurrently to many queues - the first caller in this case will actually encode it
private synchronized HornetQBuffer encodeToBuffer()
{
   if (!bufferValid)
   {
      if (bufferUsed)
      {
         // Cannot use same buffer - must copy
         forceCopy();
      }

      int bodySize = getEndOfBodyPosition();

      // Clebert: I've started sending this on encoding due to conversions between protocols
      // and making sure we are not losing the buffer start position between protocols
      this.endOfBodyPosition = bodySize;

      // write it
      buffer.setInt(BUFFER_HEADER_SPACE, bodySize);

      // Position at end of body and skip past the message end position int.
      // Check for enough room in the buffer even though it is dynamic
      if ((bodySize + 4) > buffer.capacity())
      {
         buffer.setIndex(0, bodySize);
         buffer.writeInt(0);
      }
      else
      {
         buffer.setIndex(0, bodySize + DataConstants.SIZE_INT);
      }

      encodeHeadersAndProperties(buffer);

      // Write end of message position
      endOfMessagePosition = buffer.writerIndex();

      buffer.setInt(bodySize, endOfMessagePosition);

      bufferValid = true;
   }

   return buffer;
}
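A minimal sketch, not part of the original class, illustrating the layout encodeToBuffer() produces: the body size is written as an int at BUFFER_HEADER_SPACE, and a second int stored at the end of the body records where the encoded headers and properties end. The method name describeEncoding is hypothetical, it uses only buffer accessors already seen above, and it assumes it lives somewhere BUFFER_HEADER_SPACE is visible.

// Hypothetical helper: reads back the two framing ints written by encodeToBuffer().
private static void describeEncoding(HornetQBuffer encoded)
{
   // body size is written as an int at BUFFER_HEADER_SPACE...
   int bodySize = encoded.getInt(BUFFER_HEADER_SPACE);

   // ...and the int stored at the end of the body records the end-of-message position
   int endOfMessage = encoded.getInt(bodySize);

   System.out.println("body ends at " + bodySize + ", headers/properties end at " + endOfMessage);
}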
public synchronized List<PagedMessage> read(StorageManager storage) throws Exception
{
   if (isDebug)
   {
      HornetQServerLogger.LOGGER.debug("reading page " + this.pageId + " on address = " + storeName);
   }

   if (!file.isOpen())
   {
      throw HornetQMessageBundle.BUNDLE.invalidPageIO();
   }

   ArrayList<PagedMessage> messages = new ArrayList<PagedMessage>();

   size.set((int)file.size());

   // Using a direct buffer, as described on https://jira.jboss.org/browse/HORNETQ-467
   ByteBuffer directBuffer = storage.allocateDirectBuffer((int)file.size());

   try
   {
      file.position(0);
      file.read(directBuffer);

      directBuffer.rewind();

      HornetQBuffer fileBuffer = HornetQBuffers.wrappedBuffer(directBuffer);
      fileBuffer.writerIndex(fileBuffer.capacity());

      while (fileBuffer.readable())
      {
         final int position = fileBuffer.readerIndex();

         byte byteRead = fileBuffer.readByte();

         if (byteRead == Page.START_BYTE)
         {
            if (fileBuffer.readerIndex() + DataConstants.SIZE_INT < fileBuffer.capacity())
            {
               int messageSize = fileBuffer.readInt();

               int oldPos = fileBuffer.readerIndex();

               if (fileBuffer.readerIndex() + messageSize < fileBuffer.capacity() &&
                   fileBuffer.getByte(oldPos + messageSize) == Page.END_BYTE)
               {
                  PagedMessage msg = new PagedMessageImpl();
                  msg.decode(fileBuffer);

                  byte b = fileBuffer.readByte();
                  if (b != Page.END_BYTE)
                  {
                     // Sanity check: this would only happen if there is a bug on decode or any
                     // internal code, as this constraint was already checked
                     throw new IllegalStateException("Internal error, it wasn't possible to locate END_BYTE " + b);
                  }

                  msg.initMessage(storage);

                  if (isTrace)
                  {
                     HornetQServerLogger.LOGGER.trace("Reading message " + msg + " on pageId=" + this.pageId +
                                                      " for address=" + storeName);
                  }

                  messages.add(msg);
               }
               else
               {
                  markFileAsSuspect(file.getFileName(), position, messages.size());
                  break;
               }
            }
         }
         else
         {
            markFileAsSuspect(file.getFileName(), position, messages.size());
            break;
         }
      }
   }
   finally
   {
      storage.freeDirectBuffer(directBuffer);
   }

   numberOfMessages.set(messages.size());

   return messages;
}
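For reference, read() expects each page record to be framed as START_BYTE, an int message size, the encoded message, then END_BYTE; anything else marks the file as suspect and stops the scan. The sketch below is a hypothetical helper (countRecords is not part of the original class) that walks the same framing without decoding the messages, using only the buffer methods already shown above.

// Hypothetical helper: counts records framed as [START_BYTE][int size][size bytes][END_BYTE],
// the same framing read() validates before decoding. Stops at the first malformed record.
private static int countRecords(HornetQBuffer fileBuffer)
{
   int count = 0;

   while (fileBuffer.readable())
   {
      if (fileBuffer.readByte() != Page.START_BYTE)
      {
         break;
      }

      if (fileBuffer.readerIndex() + DataConstants.SIZE_INT >= fileBuffer.capacity())
      {
         break;
      }

      int messageSize = fileBuffer.readInt();
      int bodyStart = fileBuffer.readerIndex();

      if (bodyStart + messageSize >= fileBuffer.capacity() ||
          fileBuffer.getByte(bodyStart + messageSize) != Page.END_BYTE)
      {
         break;
      }

      // Skip the encoded message and its trailing END_BYTE
      fileBuffer.setIndex(bodyStart + messageSize + 1, fileBuffer.writerIndex());
      count++;
   }

   return count;
}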