/** This method is synchronized because we want it to be atomic with the cursors being used */
private long checkMinPage(Collection<PageSubscription> cursorList)
{
   long minPage = Long.MAX_VALUE;

   for (PageSubscription cursor : cursorList)
   {
      long firstPage = cursor.getFirstPage();
      if (HornetQServerLogger.LOGGER.isDebugEnabled())
      {
         HornetQServerLogger.LOGGER.debug(this.pagingStore.getAddress() + " has a cursor " + cursor +
                                          " with first page=" + firstPage);
      }

      // the cursor will return -1 if the cursor is empty
      if (firstPage >= 0 && firstPage < minPage)
      {
         minPage = firstPage;
      }
   }

   if (HornetQServerLogger.LOGGER.isDebugEnabled())
   {
      HornetQServerLogger.LOGGER.debug(this.pagingStore.getAddress() + " has minPage=" + minPage);
   }

   return minPage;
}
public void processReload() throws Exception
{
   for (PageSubscription cursor : this.activeCursors.values())
   {
      cursor.processReload();
   }

   cleanup();
}
public void stop()
{
   for (PageSubscription cursor : activeCursors.values())
   {
      cursor.stop();
   }

   waitForFuture();
}
public void flushExecutors()
{
   for (PageSubscription cursor : activeCursors.values())
   {
      cursor.flushExecutors();
   }

   Future future = new Future();

   executor.execute(future);

   while (!future.await(10000))
   {
      log.warn("Waiting cursor provider " + this + " to finish executors " + executor);
   }
}
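/*
 * Illustrative sketch, not part of the original provider: flushExecutors() above drains the
 * provider's ordered executor by queuing a marker task (the Future) behind all pending work and
 * waiting for it to run. Assuming a single-threaded (ordered) executor, the same idea can be
 * expressed with plain java.util.concurrent; every name below is hypothetical, not HornetQ API.
 */
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

class ExecutorDrainSketch
{
   static void drain(ExecutorService executor) throws InterruptedException
   {
      final CountDownLatch marker = new CountDownLatch(1);

      // The marker only runs after every task submitted before it has completed.
      executor.execute(new Runnable()
      {
         public void run()
         {
            marker.countDown();
         }
      });

      // Equivalent of the while (!future.await(10000)) { warn(...); } loop above.
      while (!marker.await(10, TimeUnit.SECONDS))
      {
         System.err.println("still waiting for the executor to drain");
      }
   }
}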
/**
 * @param cursorList
 * @param currentPage
 * @throws Exception
 */
private void storePositions(ArrayList<PageSubscription> cursorList, Page currentPage) throws Exception
{
   try
   {
      // First step: Move every cursor to the next bookmarked page (that was just created)
      for (PageSubscription cursor : cursorList)
      {
         cursor.confirmPosition(new PagePositionImpl(currentPage.getPageId(), -1));
      }

      while (!storageManager.waitOnOperations(5000))
      {
         log.warn("Couldn't complete operations on IO context " + storageManager.getContext());
      }
   }
   finally
   {
      for (PageSubscription cursor : cursorList)
      {
         cursor.enableAutoCleanup();
      }
   }
}
/**
 * @param cursorList
 * @param currentPage
 * @throws Exception
 */
protected void storeBookmark(ArrayList<PageSubscription> cursorList, Page currentPage) throws Exception
{
   try
   {
      // First step: Move every cursor to the next bookmarked page (that was just created)
      for (PageSubscription cursor : cursorList)
      {
         cursor.confirmPosition(new PagePositionImpl(currentPage.getPageId(), -1));
      }

      while (!storageManager.waitOnOperations(5000))
      {
         HornetQServerLogger.LOGGER.problemCompletingOperations(storageManager.getContext());
      }
   }
   finally
   {
      for (PageSubscription cursor : cursorList)
      {
         cursor.enableAutoCleanup();
      }
   }
}
/**
 * Delete everything associated with any queue on this address.
 * This is to be called when the address is about to be released from paging.
 * Hence the PagingStore will be holding a write lock, meaning no messages are going to be paged
 * at this time. So, we shouldn't lock anything after this method, to avoid deadlocks between the
 * writeLock and any synchronization with the CursorProvider.
 */
public void onPageModeCleared()
{
   ArrayList<PageSubscription> subscriptions = cloneSubscriptions();

   Transaction tx = new TransactionImpl(storageManager);

   for (PageSubscription sub : subscriptions)
   {
      try
      {
         sub.onPageModeCleared(tx);
      }
      catch (Exception e)
      {
         HornetQServerLogger.LOGGER.warn("Error while cleaning paging on queue " + sub.getQueue().getName(), e);
      }
   }

   try
   {
      tx.commit();
   }
   catch (Exception e)
   {
      HornetQServerLogger.LOGGER.warn("Error while cleaning page, during the commit", e);
   }
}
/** This method is synchronized because we want it to be atomic with the cursors being used */
private long checkMinPage(List<PageSubscription> cursorList)
{
   long minPage = Long.MAX_VALUE;

   for (PageSubscription cursor : cursorList)
   {
      long firstPage = cursor.getFirstPage();
      if (log.isDebugEnabled())
      {
         log.debug(this.pagingStore.getAddress() + " has a cursor " + cursor + " with first page=" + firstPage);
      }

      if (firstPage < minPage)
      {
         minPage = firstPage;
      }
   }

   if (log.isDebugEnabled())
   {
      log.debug(this.pagingStore.getAddress() + " has minPage=" + minPage);
   }

   return minPage;
}
public void processReload() throws Exception
{
   Collection<PageSubscription> cursorList = this.activeCursors.values();
   for (PageSubscription cursor : cursorList)
   {
      cursor.processReload();
   }

   if (!cursorList.isEmpty())
   {
      // https://issues.jboss.org/browse/JBPAPP-10338 if you ack out of order,
      // the min page could be beyond the first page.
      // we have to reload any previously acked message
      long cursorsMinPage = checkMinPage(cursorList);

      // checkMinPage will return MaxValue if there aren't any pages or any cursors
      if (cursorsMinPage != Long.MAX_VALUE)
      {
         for (long startPage = pagingStore.getFirstPage(); startPage < cursorsMinPage; startPage++)
         {
            for (PageSubscription cursor : cursorList)
            {
               cursor.reloadPageInfo(startPage);
            }
         }
      }
   }

   cleanup();
}
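/*
 * Illustrative sketch with hypothetical numbers (not HornetQ API): the JBPAPP-10338 case above.
 * With out-of-order acks every subscription's first page can sit beyond the store's first page,
 * e.g. the store still holds pages 5..12 while checkMinPage() reports 8. processReload() then
 * walks pages 5, 6 and 7 and calls reloadPageInfo on each subscription so previously acked
 * messages on those earlier pages are accounted for again.
 */
class MinPageReloadSketch
{
   public static void main(String[] args)
   {
      long storeFirstPage = 5;  // stand-in for pagingStore.getFirstPage()
      long cursorsMinPage = 8;  // stand-in for checkMinPage(cursorList)

      for (long page = storeFirstPage; page < cursorsMinPage; page++)
      {
         // cursor.reloadPageInfo(page) would run here for every subscription
         System.out.println("reload page info for page " + page);
      }
   }
}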
public void cleanup()
{
   ArrayList<Page> depagedPages = new ArrayList<Page>();

   while (true)
   {
      if (pagingStore.lock(100))
      {
         break;
      }
      if (!pagingStore.isStarted())
         return;
   }

   synchronized (this)
   {
      try
      {
         if (!pagingStore.isStarted())
         {
            return;
         }

         if (pagingStore.getNumberOfPages() == 0)
         {
            return;
         }

         if (log.isDebugEnabled())
         {
            log.debug("Asserting cleanup for address " + this.pagingStore.getAddress());
         }

         ArrayList<PageSubscription> cursorList = new ArrayList<PageSubscription>();
         cursorList.addAll(activeCursors.values());

         long minPage = checkMinPage(cursorList);

         if (minPage == pagingStore.getCurrentWritingPage() && pagingStore.getCurrentPage().getNumberOfMessages() > 0)
         {
            boolean complete = true;

            for (PageSubscription cursor : cursorList)
            {
               if (!cursor.isComplete(minPage))
               {
                  if (log.isDebugEnabled())
                  {
                     log.debug("Cursor " + cursor + " was considered incomplete at page " + minPage);
                  }
                  complete = false;
                  break;
               }
               else
               {
                  if (log.isDebugEnabled())
                  {
                     log.debug("Cursor " + cursor + " was considered **complete** at page " + minPage);
                  }
               }
            }

            if (!pagingStore.isStarted())
            {
               return;
            }

            if (complete)
            {
               if (log.isDebugEnabled())
               {
                  log.debug("Address " + pagingStore.getAddress() +
                            " is leaving page mode as all messages are consumed and acknowledged from the page store");
               }

               pagingStore.forceAnotherPage();

               Page currentPage = pagingStore.getCurrentPage();

               storePositions(cursorList, currentPage);

               pagingStore.stopPaging();

               // This has to be called after we stopped paging
               for (PageSubscription cursor : cursorList)
               {
                  cursor.scheduleCleanupCheck();
               }
            }
         }

         for (long i = pagingStore.getFirstPage(); i < minPage; i++)
         {
            Page page = pagingStore.depage();
            if (page == null)
            {
               break;
            }
            depagedPages.add(page);
         }

         if (pagingStore.getNumberOfPages() == 0 ||
             pagingStore.getNumberOfPages() == 1 && pagingStore.getCurrentPage().getNumberOfMessages() == 0)
         {
            pagingStore.stopPaging();
         }
         else
         {
            if (log.isTraceEnabled())
            {
               log.trace("Couldn't cleanup page on address " + this.pagingStore.getAddress() +
                         " as numberOfPages == " + pagingStore.getNumberOfPages() +
                         " and currentPage.numberOfMessages = " + pagingStore.getCurrentPage().getNumberOfMessages());
            }
         }
      }
      catch (Exception ex)
      {
         log.warn("Couldn't complete cleanup on paging", ex);
         return;
      }
      finally
      {
         pagingStore.unlock();
      }
   }

   try
   {
      for (Page depagedPage : depagedPages)
      {
         PageCache cache;
         PagedMessage[] pgdMessages;

         synchronized (softCache)
         {
            cache = softCache.get((long) depagedPage.getPageId());
         }

         if (isTrace)
         {
            log.trace("Removing page " + depagedPage.getPageId() + " from page-cache");
         }

         if (cache == null)
         {
            // The page is not on cache any more
            // We need to read the page-file before deleting it
            // to make sure we remove any large-messages pending
            storageManager.beforePageRead();
            List<PagedMessage> pgdMessagesList = null;
            try
            {
               depagedPage.open();
               pgdMessagesList = depagedPage.read(storageManager);
            }
            finally
            {
               try
               {
                  depagedPage.close();
               }
               catch (Exception e)
               {
                  // ignored: best-effort close before deleting the page
               }

               storageManager.afterPageRead();
            }

            depagedPage.close();
            pgdMessages = pgdMessagesList.toArray(new PagedMessage[pgdMessagesList.size()]);
         }
         else
         {
            pgdMessages = cache.getMessages();
         }

         depagedPage.delete(pgdMessages);

         synchronized (softCache)
         {
            softCache.remove((long) depagedPage.getPageId());
         }
      }
   }
   catch (Exception ex)
   {
      log.warn("Couldn't complete cleanup on paging", ex);
      return;
   }
}
public void close(PageSubscription cursor)
{
   activeCursors.remove(cursor.getId());

   scheduleCleanup();
}
protected void onDeletePage(Page deletedPage) throws Exception
{
   List<PageSubscription> subscriptions = cloneSubscriptions();
   for (PageSubscription subs : subscriptions)
   {
      subs.onDeletePage(deletedPage);
   }
}
public void cleanup()
{
   ArrayList<Page> depagedPages = new ArrayList<Page>();

   while (true)
   {
      if (pagingStore.lock(100))
      {
         break;
      }
      if (!pagingStore.isStarted())
         return;
   }

   synchronized (this)
   {
      try
      {
         if (!pagingStore.isStarted())
         {
            return;
         }

         if (pagingStore.getNumberOfPages() == 0)
         {
            return;
         }

         if (HornetQServerLogger.LOGGER.isDebugEnabled())
         {
            HornetQServerLogger.LOGGER.debug("Asserting cleanup for address " + this.pagingStore.getAddress());
         }

         ArrayList<PageSubscription> cursorList = cloneSubscriptions();

         long minPage = checkMinPage(cursorList);

         // if the current page is being written...
         // on that case we need to move to verify it in a different way
         if (minPage == pagingStore.getCurrentWritingPage() && pagingStore.getCurrentPage().getNumberOfMessages() > 0)
         {
            boolean complete = true;

            for (PageSubscription cursor : cursorList)
            {
               if (!cursor.isComplete(minPage))
               {
                  if (HornetQServerLogger.LOGGER.isDebugEnabled())
                  {
                     HornetQServerLogger.LOGGER.debug("Cursor " + cursor + " was considered incomplete at page " + minPage);
                  }
                  complete = false;
                  break;
               }
               else
               {
                  if (HornetQServerLogger.LOGGER.isDebugEnabled())
                  {
                     HornetQServerLogger.LOGGER.debug("Cursor " + cursor + " was considered **complete** at page " + minPage);
                  }
               }
            }

            if (!pagingStore.isStarted())
            {
               return;
            }

            // All the pages on the cursor are complete... so we will cleanup everything and store a bookmark
            if (complete)
            {
               if (HornetQServerLogger.LOGGER.isDebugEnabled())
               {
                  HornetQServerLogger.LOGGER.debug("Address " + pagingStore.getAddress() +
                                                   " is leaving page mode as all messages are consumed and acknowledged from the page store");
               }

               pagingStore.forceAnotherPage();

               Page currentPage = pagingStore.getCurrentPage();

               storeBookmark(cursorList, currentPage);

               pagingStore.stopPaging();
            }
         }

         for (long i = pagingStore.getFirstPage(); i < minPage; i++)
         {
            Page page = pagingStore.depage();
            if (page == null)
            {
               break;
            }
            depagedPages.add(page);
         }

         if (pagingStore.getNumberOfPages() == 0 ||
             pagingStore.getNumberOfPages() == 1 && pagingStore.getCurrentPage().getNumberOfMessages() == 0)
         {
            pagingStore.stopPaging();
         }
         else
         {
            if (HornetQServerLogger.LOGGER.isTraceEnabled())
            {
               HornetQServerLogger.LOGGER.trace("Couldn't cleanup page on address " + this.pagingStore.getAddress() +
                                                " as numberOfPages == " + pagingStore.getNumberOfPages() +
                                                " and currentPage.numberOfMessages = " +
                                                pagingStore.getCurrentPage().getNumberOfMessages());
            }
         }
      }
      catch (Exception ex)
      {
         HornetQServerLogger.LOGGER.problemCleaningPageAddress(ex, pagingStore.getAddress());
         return;
      }
      finally
      {
         pagingStore.unlock();
      }
   }

   try
   {
      for (Page depagedPage : depagedPages)
      {
         PageCache cache;
         PagedMessage[] pgdMessages;

         synchronized (softCache)
         {
            cache = softCache.get((long) depagedPage.getPageId());
         }

         if (isTrace)
         {
            HornetQServerLogger.LOGGER.trace("Removing page " + depagedPage.getPageId() + " from page-cache");
         }

         if (cache == null)
         {
            // The page is not on cache any more
            // We need to read the page-file before deleting it
            // to make sure we remove any large-messages pending
            storageManager.beforePageRead();
            List<PagedMessage> pgdMessagesList = null;
            try
            {
               depagedPage.open();
               pgdMessagesList = depagedPage.read(storageManager);
            }
            finally
            {
               try
               {
                  depagedPage.close();
               }
               catch (Exception e)
               {
                  // ignored: best-effort close before deleting the page
               }

               storageManager.afterPageRead();
            }

            depagedPage.close();
            pgdMessages = pgdMessagesList.toArray(new PagedMessage[pgdMessagesList.size()]);
         }
         else
         {
            pgdMessages = cache.getMessages();
         }

         depagedPage.delete(pgdMessages);

         onDeletePage(depagedPage);

         synchronized (softCache)
         {
            softCache.remove((long) depagedPage.getPageId());
         }
      }
   }
   catch (Exception ex)
   {
      HornetQServerLogger.LOGGER.problemCleaningPageAddress(ex, pagingStore.getAddress());
      return;
   }
}
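/*
 * Illustrative sketch, not HornetQ API: the prologue of cleanup() above retries
 * pagingStore.lock(100) in short slices and bails out if the store was stopped meanwhile.
 * With a plain java.util.concurrent lock and a hypothetical started flag the pattern reads
 * roughly like this.
 */
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

class TimedLockSketch
{
   private final ReentrantLock storeLock = new ReentrantLock();
   private volatile boolean started = true;

   boolean lockOrBailOut() throws InterruptedException
   {
      while (true)
      {
         // Retry in 100ms slices so a concurrent stop is noticed quickly.
         if (storeLock.tryLock(100, TimeUnit.MILLISECONDS))
         {
            return true; // caller must eventually call storeLock.unlock()
         }
         if (!started)
         {
            return false; // store is stopping, abandon this cleanup attempt
         }
      }
   }
}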
public void flushExecutors()
{
   for (PageSubscription cursor : activeCursors.values())
   {
      cursor.flushExecutors();
   }

   waitForFuture();
}