public synchronized void write(final PagedMessage message) throws Exception {
    if (!file.isOpen()) {
        return;
    }

    ByteBuffer buffer = fileFactory.newBuffer(message.getEncodeSize() + Page.SIZE_RECORD);

    HornetQBuffer wrap = HornetQBuffers.wrappedBuffer(buffer);
    wrap.clear();

    wrap.writeByte(Page.START_BYTE);
    wrap.writeInt(0); // length placeholder, patched below once the message is encoded
    int startIndex = wrap.writerIndex();
    message.encode(wrap);
    int endIndex = wrap.writerIndex();
    wrap.setInt(1, endIndex - startIndex); // the encoded length, written at offset 1 (just after START_BYTE)
    wrap.writeByte(Page.END_BYTE);

    buffer.rewind();

    file.writeDirect(buffer, false);

    if (pageCache != null) {
        pageCache.addLiveMessage(message);
    }

    numberOfMessages.incrementAndGet();
    size.addAndGet(buffer.limit());

    storageManager.pageWrite(message, pageId);
}
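// On-disk record framing produced by write(...) above (a reading of the
// snippet, not an official format description):
//
//   [Page.START_BYTE (1 byte)] [encoded length (int, 4 bytes)] [message bytes] [Page.END_BYTE (1 byte)]
//
// The read(...) snippet further down validates exactly this framing before
// trusting a record.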
public void createBody(final int initialMessageBufferSize) {
    buffer = HornetQBuffers.dynamicBuffer(initialMessageBufferSize);

    // There's a bug in Netty which means a dynamic buffer won't resize until you write a byte
    buffer.writeByte((byte) 0);

    buffer.setIndex(BODY_OFFSET, BODY_OFFSET);
}
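// A minimal sketch of the same workaround in isolation. BODY_OFFSET stands in
// for the enclosing class's constant; the value here is illustrative only.
// Writing one throwaway byte forces the dynamic buffer to allocate its backing
// storage, and setIndex(reader, writer) then moves both indices past the
// header region so the placeholder byte is never visible to callers.
final int BODY_OFFSET = 4; // hypothetical stand-in for the real constant
HornetQBuffer body = HornetQBuffers.dynamicBuffer(512);
body.writeByte((byte) 0);                // force allocation of the backing array
body.setIndex(BODY_OFFSET, BODY_OFFSET); // subsequent reads/writes start at the body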
@Before
public void init() throws Exception {
    this.factory = new EventFactory(mockPrincipalProvider);
    this.principal = TestUtil.createOwnerPrincipal();
    eventFilter = new EventFilter(new CandlepinCommonTestConfig());

    when(mockPrincipalProvider.get()).thenReturn(this.principal);
    when(mockSessionFactory.createTransactedSession()).thenReturn(mockClientSession);
    when(mockClientSession.createProducer(anyString())).thenReturn(mockClientProducer);
    when(mockClientSession.createMessage(anyBoolean())).thenReturn(mockClientMessage);
    when(mockClientMessage.getBodyBuffer()).thenReturn(HornetQBuffers.fixedBuffer(2000));
    when(mockSessionFactory.getServerLocator()).thenReturn(mockLocator);

    this.mapper = spy(new ObjectMapper());
    this.eventSinkImpl = createEventSink(mockSessionFactory);
    o = new Owner("test owner");
}
public HornetQBuffer toHornetQBuffer() throws Exception {
    if (buffer == null) {
        if (bytesBody != null) {
            buffer = HornetQBuffers.dynamicBuffer(bytesBody.length + 512);
        } else {
            buffer = HornetQBuffers.dynamicBuffer(512);
        }

        if (isPing()) {
            buffer.writeByte((byte) 10); // a lone newline (0x0A) is the STOMP heart-beat frame
            return buffer;
        }

        StringBuffer head = new StringBuffer();
        head.append(command);
        head.append(Stomp.NEWLINE);
        // Output the headers.
        for (Map.Entry<String, String> header : headers.entrySet()) {
            head.append(header.getKey());
            head.append(Stomp.Headers.SEPARATOR);
            head.append(header.getValue());
            head.append(Stomp.NEWLINE);
        }
        // Add a newline to separate the headers from the content.
        head.append(Stomp.NEWLINE);

        buffer.writeBytes(head.toString().getBytes("UTF-8"));
        if (bytesBody != null) {
            buffer.writeBytes(bytesBody);
        }
        buffer.writeBytes(END_OF_FRAME);

        size = buffer.writerIndex();
    }

    return buffer;
}
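// Wire form assembled above, following the standard STOMP frame layout:
//
//   COMMAND\n
//   header1:value1\n
//   ...\n
//   \n
//   <optional body bytes>  END_OF_FRAME
//
// Headers are UTF-8 encoded; END_OF_FRAME is the frame terminator (the STOMP
// spec uses a NUL byte for this).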
/** @throws Exception */
protected void openFile() throws Exception {
    flush();

    ByteBuffer bufferWrite = fileFactory.newBuffer(journal.getFileSize());
    writingChannel = HornetQBuffers.wrappedBuffer(bufferWrite);

    currentFile = filesRepository.takeFile(false, false, false, true);
    sequentialFile = currentFile.getFile();

    sequentialFile.open(1, false);

    currentFile = new JournalFileImpl(sequentialFile, nextOrderingID++, JournalImpl.FORMAT_VERSION);

    JournalImpl.writeHeader(writingChannel, journal.getUserVersion(), currentFile.getFileID());
}
@Test
public void testSendPackets() throws Exception {
    setupServer(true);

    StorageManager storage = getStorage();
    manager = liveServer.getReplicationManager();
    waitForComponent(manager);

    Journal replicatedJournal = new ReplicatedJournal((byte) 1, new FakeJournal(), manager);

    replicatedJournal.appendPrepareRecord(1, new FakeData(), false);
    replicatedJournal.appendAddRecord(1, (byte) 1, new FakeData(), false);
    replicatedJournal.appendUpdateRecord(1, (byte) 2, new FakeData(), false);
    replicatedJournal.appendDeleteRecord(1, false);
    replicatedJournal.appendAddRecordTransactional(2, 2, (byte) 1, new FakeData());
    replicatedJournal.appendUpdateRecordTransactional(2, 2, (byte) 2, new FakeData());
    replicatedJournal.appendCommitRecord(2, false);
    replicatedJournal.appendDeleteRecordTransactional(3, 4, new FakeData());
    replicatedJournal.appendPrepareRecord(3, new FakeData(), false);
    replicatedJournal.appendRollbackRecord(3, false);

    blockOnReplication(storage, manager);

    Assert.assertTrue("Expecting no active tokens:" + manager.getActiveTokens(),
                      manager.getActiveTokens().isEmpty());

    ServerMessage msg = new ServerMessageImpl(1, 1024);
    SimpleString dummy = new SimpleString("dummy");
    msg.setAddress(dummy);

    replicatedJournal.appendAddRecordTransactional(23, 24, (byte) 1, new FakeData());

    PagedMessage pgmsg = new PagedMessageImpl(msg, new long[0]);
    manager.pageWrite(pgmsg, 1);
    manager.pageWrite(pgmsg, 2);
    manager.pageWrite(pgmsg, 3);
    manager.pageWrite(pgmsg, 4);

    blockOnReplication(storage, manager);

    PagingManager pagingManager = createPageManager(backupServer.getStorageManager(),
                                                    backupServer.getConfiguration(),
                                                    backupServer.getExecutorFactory(),
                                                    backupServer.getAddressSettingsRepository());

    PagingStore store = pagingManager.getPageStore(dummy);
    store.start();
    Assert.assertEquals(4, store.getNumberOfPages());
    store.stop();

    manager.pageDeleted(dummy, 1);
    manager.pageDeleted(dummy, 2);
    manager.pageDeleted(dummy, 3);
    manager.pageDeleted(dummy, 4);
    manager.pageDeleted(dummy, 5);
    manager.pageDeleted(dummy, 6);

    blockOnReplication(storage, manager);

    ServerMessageImpl serverMsg = new ServerMessageImpl();
    serverMsg.setMessageID(500);
    serverMsg.setAddress(new SimpleString("tttt"));

    HornetQBuffer buffer = HornetQBuffers.dynamicBuffer(100);
    serverMsg.encodeHeadersAndProperties(buffer);

    manager.largeMessageBegin(500);
    manager.largeMessageWrite(500, new byte[1024]);
    manager.largeMessageDelete(Long.valueOf(500));

    blockOnReplication(storage, manager);

    store.start();
    Assert.assertEquals(0, store.getNumberOfPages());
}
public synchronized List<PagedMessage> read(StorageManager storage) throws Exception {
    if (isDebug) {
        HornetQServerLogger.LOGGER.debug("reading page " + this.pageId + " on address = " + storeName);
    }

    if (!file.isOpen()) {
        throw HornetQMessageBundle.BUNDLE.invalidPageIO();
    }

    ArrayList<PagedMessage> messages = new ArrayList<PagedMessage>();

    size.set((int) file.size());

    // Using direct buffer, as described on https://jira.jboss.org/browse/HORNETQ-467
    ByteBuffer directBuffer = storage.allocateDirectBuffer((int) file.size());

    try {
        file.position(0);
        file.read(directBuffer);

        directBuffer.rewind();

        HornetQBuffer fileBuffer = HornetQBuffers.wrappedBuffer(directBuffer);
        fileBuffer.writerIndex(fileBuffer.capacity());

        while (fileBuffer.readable()) {
            final int position = fileBuffer.readerIndex();

            byte byteRead = fileBuffer.readByte();

            if (byteRead == Page.START_BYTE) {
                if (fileBuffer.readerIndex() + DataConstants.SIZE_INT < fileBuffer.capacity()) {
                    int messageSize = fileBuffer.readInt();
                    int oldPos = fileBuffer.readerIndex();
                    if (fileBuffer.readerIndex() + messageSize < fileBuffer.capacity() &&
                        fileBuffer.getByte(oldPos + messageSize) == Page.END_BYTE) {
                        PagedMessage msg = new PagedMessageImpl();
                        msg.decode(fileBuffer);
                        byte b = fileBuffer.readByte();
                        if (b != Page.END_BYTE) {
                            // Sanity check: this could only happen through a bug in decode or other
                            // internal code, since the END_BYTE position was already verified above.
                            throw new IllegalStateException("Internal error, it wasn't possible to locate END_BYTE " + b);
                        }
                        msg.initMessage(storage);
                        if (isTrace) {
                            HornetQServerLogger.LOGGER.trace("Reading message " + msg + " on pageId=" + this.pageId +
                                                             " for address=" + storeName);
                        }
                        messages.add(msg);
                    } else {
                        markFileAsSuspect(file.getFileName(), position, messages.size());
                        break;
                    }
                }
            } else {
                markFileAsSuspect(file.getFileName(), position, messages.size());
                break;
            }
        }
    } finally {
        storage.freeDirectBuffer(directBuffer);
    }

    numberOfMessages.set(messages.size());

    return messages;
}
public static SequentialFile writeControlFile(final SequentialFileFactory fileFactory,
                                              final List<JournalFile> files,
                                              final List<JournalFile> newFiles,
                                              final List<Pair<String, String>> renames) throws Exception {
    SequentialFile controlFile =
        fileFactory.createSequentialFile(AbstractJournalUpdateTask.FILE_COMPACT_CONTROL, 1);

    try {
        controlFile.open(1, false);

        JournalImpl.initFileHeader(fileFactory, controlFile, 0, 0);

        HornetQBuffer filesToRename = HornetQBuffers.dynamicBuffer(1);

        // DataFiles first
        if (files == null) {
            filesToRename.writeInt(0);
        } else {
            filesToRename.writeInt(files.size());
            for (JournalFile file : files) {
                filesToRename.writeUTF(file.getFile().getFileName());
            }
        }

        // New files second
        if (newFiles == null) {
            filesToRename.writeInt(0);
        } else {
            filesToRename.writeInt(newFiles.size());
            for (JournalFile file : newFiles) {
                filesToRename.writeUTF(file.getFile().getFileName());
            }
        }

        // Renames from cleanup third
        if (renames == null) {
            filesToRename.writeInt(0);
        } else {
            filesToRename.writeInt(renames.size());
            for (Pair<String, String> rename : renames) {
                filesToRename.writeUTF(rename.getA());
                filesToRename.writeUTF(rename.getB());
            }
        }

        JournalInternalRecord controlRecord =
            new JournalAddRecord(true, 1, (byte) 0, new ByteArrayEncoding(filesToRename.toByteBuffer().array()));

        HornetQBuffer renameBuffer = HornetQBuffers.dynamicBuffer(filesToRename.writerIndex());

        controlRecord.setFileID(0);
        controlRecord.encode(renameBuffer);

        ByteBuffer writeBuffer = fileFactory.newBuffer(renameBuffer.writerIndex());
        writeBuffer.put(renameBuffer.toByteBuffer().array(), 0, renameBuffer.writerIndex());
        writeBuffer.rewind();

        controlFile.writeDirect(writeBuffer, true);

        return controlFile;
    } finally {
        controlFile.close();
    }
}
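// Payload layout of the control record built above (as read from the code,
// not an official format description); each section is a count followed by
// that many UTF entries:
//
//   [int dataFileCount]  [UTF fileName] * dataFileCount
//   [int newFileCount]   [UTF fileName] * newFileCount
//   [int renameCount]    [UTF oldName][UTF newName] * renameCount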
public String parseBytes(long id, byte[] data) {
    ServerMessageImpl serverMessage = new ServerMessageImpl(id, 50);
    serverMessage.decode(HornetQBuffers.wrappedBuffer(data));
    return serverMessage.getBodyBuffer().readString();
}
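// Hypothetical usage sketch (not from the source): parseBytes expects data in
// the message's own encode format, so a typical caller would feed it a record
// loaded from the journal, as in the loadCursorACKs snippet below, where
// RecordInfo exposes public id and data fields:
//
//   String body = parseBytes(record.id, record.data);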
/**
 * @param journalLocation the directory holding the journal files
 * @return the page cursor ACKs, completed pages, and page transactions loaded from the journal
 * @throws Exception
 */
protected static PageCursorsInfo loadCursorACKs(final String journalLocation) throws Exception {
    SequentialFileFactory messagesFF = new NIOSequentialFileFactory(journalLocation, null);

    // Uses only the default values; the load function should adapt to anything different
    ConfigurationImpl defaultValues = new ConfigurationImpl();

    JournalImpl messagesJournal = new JournalImpl(defaultValues.getJournalFileSize(),
                                                  defaultValues.getJournalMinFiles(),
                                                  0,
                                                  0,
                                                  messagesFF,
                                                  "hornetq-data",
                                                  "hq",
                                                  1);

    messagesJournal.start();

    ArrayList<RecordInfo> records = new ArrayList<RecordInfo>();
    ArrayList<PreparedTransactionInfo> txs = new ArrayList<PreparedTransactionInfo>();

    messagesJournal.load(records, txs, null, false);

    PageCursorsInfo cursorInfo = new PageCursorsInfo();

    for (RecordInfo record : records) {
        byte[] data = record.data;

        HornetQBuffer buff = HornetQBuffers.wrappedBuffer(data);

        if (record.userRecordType == JournalRecordIds.ACKNOWLEDGE_CURSOR) {
            CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
            encoding.decode(buff);

            Set<PagePosition> set = cursorInfo.getCursorRecords().get(encoding.queueID);
            if (set == null) {
                set = new HashSet<PagePosition>();
                cursorInfo.getCursorRecords().put(encoding.queueID, set);
            }

            set.add(encoding.position);
        } else if (record.userRecordType == JournalRecordIds.PAGE_CURSOR_COMPLETE) {
            CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
            encoding.decode(buff);

            Long queueID = Long.valueOf(encoding.queueID);
            Long pageNR = Long.valueOf(encoding.position.getPageNr());

            if (!cursorInfo.getCompletePages(queueID).add(pageNR)) {
                System.err.println("Page " + pageNR + " has already been set as complete on queue " + queueID);
            }
        } else if (record.userRecordType == JournalRecordIds.PAGE_TRANSACTION) {
            if (record.isUpdate) {
                PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();
                pageUpdate.decode(buff);
                cursorInfo.getPgTXs().add(pageUpdate.pageTX);
            } else {
                PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();
                pageTransactionInfo.decode(buff);
                pageTransactionInfo.setRecordID(record.id);
                cursorInfo.getPgTXs().add(pageTransactionInfo.getTransactionID());
            }
        }
    }

    return cursorInfo;
}
public int encode(final ByteBuffer bufferRead) throws HornetQException {
    HornetQBuffer buffer = HornetQBuffers.wrappedBuffer(bufferRead);
    return encode(buffer, bufferRead.capacity());
}
public boolean deliver() throws Exception {
    lockDelivery.readLock().lock();
    try {
        if (largeMessage == null) {
            return true;
        }

        if (availableCredits != null && availableCredits.get() <= 0) {
            if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
                HornetQServerLogger.LOGGER.trace(this +
                    "::FlowControl::delivery largeMessage interrupting as there are no more credits, available=" +
                    availableCredits);
            }
            return false;
        }

        if (!sentInitialPacket) {
            context = largeMessage.getBodyEncoder();

            sizePendingLargeMessage = context.getLargeBodySize();

            context.open();

            sentInitialPacket = true;

            int packetSize =
                callback.sendLargeMessage(largeMessage, id, context.getLargeBodySize(), ref.getDeliveryCount());

            if (availableCredits != null) {
                availableCredits.addAndGet(-packetSize);

                if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
                    HornetQServerLogger.LOGGER.trace(this + "::FlowControl::" + " deliver initialpackage with " +
                        packetSize + " delivered, available now = " + availableCredits);
                }
            }

            // Execute the rest of the large message on a different thread so as not to tie up
            // the delivery thread for too long
            resumeLargeMessage();

            return false;
        } else {
            if (availableCredits != null && availableCredits.get() <= 0) {
                if (ServerConsumerImpl.isTrace) {
                    HornetQServerLogger.LOGGER.trace(this +
                        "::FlowControl::deliverLargeMessage Leaving loop of send LargeMessage because of credits, available=" +
                        availableCredits);
                }
                return false;
            }

            int localChunkLen =
                (int) Math.min(sizePendingLargeMessage - positionPendingLargeMessage, minLargeMessageSize);

            HornetQBuffer bodyBuffer = HornetQBuffers.fixedBuffer(localChunkLen);

            context.encode(bodyBuffer, localChunkLen);

            byte[] body = bodyBuffer.toByteBuffer().array();

            int packetSize = callback.sendLargeMessageContinuation(id,
                body,
                positionPendingLargeMessage + localChunkLen < sizePendingLargeMessage,
                false);

            int chunkLen = body.length;

            if (availableCredits != null) {
                availableCredits.addAndGet(-packetSize);

                if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
                    HornetQServerLogger.LOGGER.trace(this + "::FlowControl::largeMessage deliver continuation, packetSize=" +
                        packetSize + " available now=" + availableCredits);
                }
            }

            positionPendingLargeMessage += chunkLen;

            if (positionPendingLargeMessage < sizePendingLargeMessage) {
                resumeLargeMessage();
                return false;
            }
        }

        if (ServerConsumerImpl.isTrace) {
            HornetQServerLogger.LOGGER.trace("Finished deliverLargeMessage");
        }

        finish();

        return true;
    } finally {
        lockDelivery.readLock().unlock();
    }
}
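// Chunking arithmetic used above (derived from the code): each pass sends
//
//   localChunkLen = min(sizePendingLargeMessage - positionPendingLargeMessage, minLargeMessageSize)
//
// bytes, and the boolean passed to sendLargeMessageContinuation,
// positionPendingLargeMessage + localChunkLen < sizePendingLargeMessage,
// is true exactly while more chunks remain, so the final continuation packet
// is automatically flagged as the last one.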
public HornetQBuffer readBytes(final int length) {
    byte[] bytesToGet = new byte[length];
    readBytes(bytesToGet);
    return HornetQBuffers.wrappedBuffer(bytesToGet);
}
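// A minimal standalone sketch of the wrappedBuffer(byte[]) behavior these
// snippets rely on (inferred from usage, not from a documented API contract):
// wrapping a byte[] leaves the writerIndex at the end of the array, so the
// buffer is immediately readable -- parseBytes and loadCursorACKs decode from
// it without touching the indices. The ByteBuffer overload evidently behaves
// differently: the Page.read snippet has to set writerIndex(capacity()) by
// hand before consuming the data.
byte[] raw = new byte[] {1, 2, 3, 4};
HornetQBuffer view = HornetQBuffers.wrappedBuffer(raw);
assert view.readableBytes() == raw.length; // readable with no index setup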