Example #1
  @Test
  public void testNoActions() throws Exception {

    setupServer(true);
    StorageManager storage = getStorage();
    manager = liveServer.getReplicationManager();
    waitForComponent(manager);

    Journal replicatedJournal = new ReplicatedJournal((byte) 1, new FakeJournal(), manager);

    replicatedJournal.appendPrepareRecord(1, new FakeData(), false);

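    // A descriptive note (not in the original source): the completion callback registered
    // below should still fire, and no active replication tokens should remain afterwards
    // (asserted at the end of the test).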
    final CountDownLatch latch = new CountDownLatch(1);
    storage.afterCompleteOperations(
        new IOAsyncTask() {

          public void onError(final int errorCode, final String errorMessage) {}

          public void done() {
            latch.countDown();
          }
        });

    Assert.assertTrue(latch.await(1, TimeUnit.SECONDS));

    Assert.assertEquals(
        "should be empty " + manager.getActiveTokens(), 0, manager.getActiveTokens().size());
  }
Example #2
  public PageCache getPageCache(final long pageId) {
    try {
      boolean needToRead = false;
      PageCache cache = null;
      synchronized (softCache) {
        if (pageId > pagingStore.getCurrentWritingPage()) {
          return null;
        }

        cache = softCache.get(pageId);
        if (cache == null) {
          if (!pagingStore.checkPageFileExists((int) pageId)) {
            return null;
          }

          cache = createPageCache(pageId);
          needToRead = true;
          // anyone reading from this cache will have to wait for the read to finish first
          // we also want only one thread reading this cache
          cache.lock();
          if (isTrace) {
            HornetQServerLogger.LOGGER.trace(
                "adding " + pageId + " into cursor = " + this.pagingStore.getAddress());
          }
          softCache.put(pageId, cache);
        }
      }

      // Reading is done outside of the synchronized block, however
      // the page stays locked until the entire reading is finished
      if (needToRead) {
        Page page = null;
        try {
          page = pagingStore.createPage((int) pageId);

          storageManager.beforePageRead();
          page.open();

          List<PagedMessage> pgdMessages = page.read(storageManager);
          cache.setMessages(pgdMessages.toArray(new PagedMessage[pgdMessages.size()]));
        } finally {
          try {
            if (page != null) {
              page.close();
            }
          } catch (Throwable ignored) {
          }
          storageManager.afterPageRead();
          cache.unlock();
        }
      }

      return cache;
    } catch (Exception e) {
      throw new RuntimeException(
          "Couldn't complete paging due to an IO Exception on Paging - " + e.getMessage(), e);
    }
  }
Example #3
  public void waitContextCompletion() {
    OperationContext formerCtx = storageManager.getContext();

    try {
      try {
        if (!storageManager.waitOnOperations(10000)) {
          HornetQLogger.LOGGER.errorCompletingContext(new Exception("warning"));
        }
      } catch (Exception e) {
        HornetQLogger.LOGGER.warn(e.getMessage(), e);
      }
    } finally {
      storageManager.setContext(formerCtx);
    }
  }
Example #4
  public void prepare() throws Exception {
    storageManager.readLock();
    try {
      synchronized (timeoutLock) {
        if (state == State.ROLLBACK_ONLY) {
          if (exception != null) {
            // this TX will never be rolled back,
            // so we reset it now
            beforeRollback();
            afterRollback();
            operations.clear();
            throw exception;
          } else {
            // Do nothing
            return;
          }
        } else if (state != State.ACTIVE) {
          throw new IllegalStateException("Transaction is in invalid state " + state);
        }

        if (xid == null) {
          throw new IllegalStateException("Cannot prepare non XA transaction");
        }

        beforePrepare();

        storageManager.prepare(id, xid);

        state = State.PREPARED;
        // We use the Callback even for non persistence
        // If we are using non-persistence with replication, the replication manager will have
        // to execute this runnable in the correct order
        storageManager.afterCompleteOperations(
            new IOAsyncTask() {

              public void onError(final int errorCode, final String errorMessage) {
                HornetQServerLogger.LOGGER.ioErrorOnTX(errorCode, errorMessage);
              }

              public void done() {
                afterPrepare();
              }
            });
      }
    } finally {
      storageManager.readUnLock();
    }
  }
Example #5
  public void route(final ServerMessage message, final RoutingContext context) throws Exception {
    // We must make a copy of the message, otherwise things like returning credits
    // to the page won't work properly on ack, since the original address will be overwritten.

    // TODO we can optimise this so it doesn't copy if it's not routed anywhere else

    if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
      HornetQServerLogger.LOGGER.trace("Diverting message " + message + " into " + this);
    }

    long id = storageManager.generateUniqueID();

    ServerMessage copy = message.copy(id);
    copy.finishCopy();

    // This will set the original MessageId, and the original address
    copy.setOriginalHeaders(message, null, false);

    copy.setAddress(forwardAddress);

    if (transformer != null) {
      copy = transformer.transform(copy);
    }

    postOffice.route(copy, context.getTransaction(), false);
  }
Example #6
  public synchronized void write(final PagedMessage message) throws Exception {
    if (!file.isOpen()) {

      return;
    }

    ByteBuffer buffer = fileFactory.newBuffer(message.getEncodeSize() + Page.SIZE_RECORD);

    HornetQBuffer wrap = HornetQBuffers.wrappedBuffer(buffer);
    wrap.clear();

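    // Descriptive note (added): record framing is START_BYTE, a 4-byte length field
    // (written as 0 here and backfilled at offset 1 once the message is encoded),
    // the encoded message, then END_BYTE.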
    wrap.writeByte(Page.START_BYTE);
    wrap.writeInt(0);
    int startIndex = wrap.writerIndex();
    message.encode(wrap);
    int endIndex = wrap.writerIndex();
    wrap.setInt(1, endIndex - startIndex); // The encoded length
    wrap.writeByte(Page.END_BYTE);

    buffer.rewind();

    file.writeDirect(buffer, false);

    if (pageCache != null) {
      pageCache.addLiveMessage(message);
    }

    numberOfMessages.incrementAndGet();
    size.addAndGet(buffer.limit());

    storageManager.pageWrite(message, pageId);
  }
Example #7
  public void sendLarge(final MessageInternal message) throws Exception {
    // we need to create the LargeMessage before continuing
    long id = storageManager.generateUniqueID();

    LargeServerMessage largeMsg = storageManager.createLargeMessage(id, message);

    if (HornetQLogger.LOGGER.isTraceEnabled()) {
      HornetQLogger.LOGGER.trace("sendLarge::" + largeMsg);
    }

    if (currentLargeMessage != null) {
      HornetQLogger.LOGGER.replacingIncompleteLargeMessage(currentLargeMessage.getMessageID());
    }

    currentLargeMessage = largeMsg;
  }
Example #8
  public void rollback() throws Exception {
    synchronized (timeoutLock) {
      if (xid != null) {
        if (state != State.PREPARED && state != State.ACTIVE && state != State.ROLLBACK_ONLY) {
          throw new IllegalStateException("Transaction is in invalid state " + state);
        }
      } else {
        if (state != State.ACTIVE && state != State.ROLLBACK_ONLY) {
          throw new IllegalStateException("Transaction is in invalid state " + state);
        }
      }

      beforeRollback();

      doRollback();

      // We use the Callback even for non persistence
      // If we are using non-persistence with replication, the replication manager will have
      // to execute this runnable in the correct order
      storageManager.afterCompleteOperations(
          new IOAsyncTask() {

            public void onError(final int errorCode, final String errorMessage) {
              HornetQServerLogger.LOGGER.ioErrorOnTX(errorCode, errorMessage);
            }

            public void done() {
              afterRollback();
              state = State.ROLLEDBACK;
            }
          });
    }
  }
Example #9
  /**
   * @param cursorList
   * @param currentPage
   * @throws Exception
   */
  protected void storeBookmark(ArrayList<PageSubscription> cursorList, Page currentPage)
      throws Exception {
    try {
      // First step: Move every cursor to the next bookmarked page (that was just created)
      for (PageSubscription cursor : cursorList) {
        cursor.confirmPosition(new PagePositionImpl(currentPage.getPageId(), -1));
      }

      while (!storageManager.waitOnOperations(5000)) {
        HornetQServerLogger.LOGGER.problemCompletingOperations(storageManager.getContext());
      }
    } finally {
      for (PageSubscription cursor : cursorList) {
        cursor.enableAutoCleanup();
      }
    }
  }
Example #10
  /** @throws Exception */
  protected void doCommit() throws Exception {
    if (containsPersistent || xid != null && state == State.PREPARED) {

      storageManager.commit(id);

      state = State.COMMITTED;
    }
  }
Example #11
  /**
   * @param cursorList
   * @param currentPage
   * @throws Exception
   */
  private void storePositions(ArrayList<PageSubscription> cursorList, Page currentPage)
      throws Exception {
    try {
      // First step: Move every cursor to the next bookmarked page (that was just created)
      for (PageSubscription cursor : cursorList) {
        cursor.confirmPosition(new PagePositionImpl(currentPage.getPageId(), -1));
      }

      while (!storageManager.waitOnOperations(5000)) {
        log.warn("Couldn't complete operations on IO context " + storageManager.getContext());
      }
    } finally {
      for (PageSubscription cursor : cursorList) {
        cursor.enableAutoCleanup();
      }
    }
  }
Example #12
  public TransactionImpl(final StorageManager storageManager) {
    this.storageManager = storageManager;

    xid = null;

    id = storageManager.generateID();

    createTime = System.currentTimeMillis();
  }
Example #13
  @Test
  public void testOrderOnNonPersistency() throws Exception {

    setupServer(true);

    final ArrayList<Integer> executions = new ArrayList<Integer>();

    StorageManager storage = getStorage();
    manager = liveServer.getReplicationManager();
    Journal replicatedJournal = new ReplicatedJournal((byte) 1, new FakeJournal(), manager);

    int numberOfAdds = 200;

    final CountDownLatch latch = new CountDownLatch(numberOfAdds);

    OperationContext ctx = storage.getContext();

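    // Descriptive note (added): only the even iterations append a replicated journal
    // record, yet every completion callback registered on the context must run in
    // registration order (asserted below).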
    for (int i = 0; i < numberOfAdds; i++) {
      final int nAdd = i;

      if (i % 2 == 0) {
        replicatedJournal.appendPrepareRecord(i, new FakeData(), false);
      }

      ctx.executeOnCompletion(
          new IOAsyncTask() {

            public void onError(final int errorCode, final String errorMessage) {}

            public void done() {
              executions.add(nAdd);
              latch.countDown();
            }
          });
    }

    Assert.assertTrue(latch.await(10, TimeUnit.SECONDS));

    for (int i = 0; i < numberOfAdds; i++) {
      Assert.assertEquals(i, executions.get(i).intValue());
    }

    Assert.assertEquals(0, manager.getActiveTokens().size());
  }
Example #14
 public synchronized void close() throws Exception {
   if (storageManager != null) {
     storageManager.pageClosed(storeName, pageId);
   }
   if (pageCache != null) {
     pageCache.close();
     // leave it to the soft cache to decide when to release it now
     pageCache = null;
   }
   file.close();
 }
Example #15
  public TransactionImpl(final StorageManager storageManager, final int timeoutSeconds) {
    this.storageManager = storageManager;

    xid = null;

    id = storageManager.generateID();

    createTime = System.currentTimeMillis();

    this.timeoutSeconds = timeoutSeconds;
  }
Example #16
  public void close(final boolean failed) {
    OperationContext formerCtx = storageManager.getContext();

    try {
      storageManager.setContext(sessionContext);

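      // Descriptive note (added): doClose() is deferred until the operations pending on
      // the session's context have completed; the previous context is restored in finally.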
      storageManager.afterCompleteOperations(
          new IOAsyncTask() {
            public void onError(int errorCode, String errorMessage) {}

            public void done() {
              try {
                doClose(failed);
              } catch (Exception e) {
                HornetQLogger.LOGGER.errorClosingSession(e);
              }
            }
          });
    } finally {
      storageManager.setContext(formerCtx);
    }
  }
Example #17
  public void sendInternalLarge(ServerMessageImpl message, boolean direct) throws Exception {
    int headerSize = message.getHeadersAndPropertiesEncodeSize();
    if (headerSize >= connection.getMinLargeMessageSize()) {
      throw BUNDLE.headerTooBig();
    }

    StorageManager storageManager = ((ServerSessionImpl) session).getStorageManager();
    long id = storageManager.generateUniqueID();
    LargeServerMessage largeMessage = storageManager.createLargeMessage(id, message);

    byte[] bytes = new byte[message.getBodyBuffer().writerIndex() - MessageImpl.BODY_OFFSET];
    message.getBodyBuffer().readBytes(bytes);

    largeMessage.addBytes(bytes);

    largeMessage.releaseResources();

    largeMessage.putLongProperty(Message.HDR_LARGE_BODY_SIZE, bytes.length);

    session.send(largeMessage, direct);

    largeMessage = null;
  }
Example #18
  /** @param manager1 */
  private void blockOnReplication(final StorageManager storage, final ReplicationManager manager1)
      throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    storage.afterCompleteOperations(
        new IOAsyncTask() {

          public void onError(final int errorCode, final String errorMessage) {}

          public void done() {
            latch.countDown();
          }
        });

    Assert.assertTrue(latch.await(30, TimeUnit.SECONDS));
  }
Example #19
  public synchronized void xaForget(final Xid xid) throws Exception {
    long id = resourceManager.removeHeuristicCompletion(xid);

    if (id != -1) {
      try {
        storageManager.deleteHeuristicCompletion(id);
      } catch (Exception e) {
        e.printStackTrace();

        throw new HornetQXAException(XAException.XAER_RMERR);
      }
    } else {
      throw new HornetQXAException(XAException.XAER_NOTA);
    }
  }
Example #20
  public void commit(final boolean onePhase) throws Exception {
    synchronized (timeoutLock) {
      if (state == State.ROLLBACK_ONLY) {
        rollback();

        if (exception != null) {
          throw exception;
        } else {
          // Do nothing
          return;
        }
      }

      if (xid != null) {
        if (onePhase && state != State.ACTIVE || !onePhase && state != State.PREPARED) {
          throw new IllegalStateException("Transaction is in invalid state " + state);
        }
      } else {
        if (state != State.ACTIVE) {
          throw new IllegalStateException("Transaction is in invalid state " + state);
        }
      }

      beforeCommit();

      doCommit();

      // We use the Callback even for non persistence
      // If we are using non-persistence with replication, the replication manager will have
      // to execute this runnable in the correct order
      // This also will only use a different thread if there are any IO pending.
      // If the IO finished early by the time we got here, we won't need an executor
      storageManager.afterCompleteOperations(
          new IOAsyncTask() {

            public void onError(final int errorCode, final String errorMessage) {
              HornetQServerLogger.LOGGER.ioErrorOnTX(errorCode, errorMessage);
            }

            public void done() {
              afterCommit();
            }
          });
    }
  }
Example #21
  public void send(final ServerMessage message, final boolean direct) throws Exception {
    // a large message may come from StompSession directly, in which
    // case the id header has already been generated.
    if (!message.isLargeMessage()) {
      long id = storageManager.generateUniqueID();

      message.setMessageID(id);
      message.encodeMessageIDToBuffer();
    }

    SimpleString address = message.getAddress();

    if (defaultAddress == null && address != null) {
      defaultAddress = address;
    }

    if (address == null) {
      if (message.isDurable()) {
        // We need to force a re-encode when the message gets persisted or when it gets reloaded
        // it will have no address
        message.setAddress(defaultAddress);
      } else {
        // We don't want to force a re-encode when the message gets sent to the consumer
        message.setAddressTransient(defaultAddress);
      }
    }

    if (isTrace) {
      HornetQServerLogger.LOGGER.trace(
          "send(message=" + message + ", direct=" + direct + ") being called");
    }

    if (message.getAddress() == null) {
      // This could happen with some tests that are ignoring messages
      throw HornetQMessageBundle.BUNDLE.noAddress();
    }

    if (message.getAddress().equals(managementAddress)) {
      // It's a management message

      handleManagementMessage(message, direct);
    } else {
      doSend(message, direct);
    }
  }
Example #22
  public boolean delete(final PagedMessage[] messages) throws Exception {
    if (storageManager != null) {
      storageManager.pageDeleted(storeName, pageId);
    }

    if (isDebug) {
      HornetQServerLogger.LOGGER.debug("Deleting pageId=" + pageId + " on store " + storeName);
    }

    if (messages != null) {
      for (PagedMessage msg : messages) {
        if (msg.getMessage().isLargeMessage()) {
          LargeServerMessage lmsg = (LargeServerMessage) msg.getMessage();

          // Remember, we cannot call delete directly here
          // because the large-message may be linked to another message
          // or it may still be being delivered even though it has already been acked
          lmsg.decrementDelayDeletionCount();
        }
      }
    }

    try {
      if (suspiciousRecords) {
        HornetQServerLogger.LOGGER.pageInvalid(file.getFileName(), file.getFileName());
        file.renameTo(file.getFileName() + ".invalidPage");
      } else {
        file.delete();
      }

      return true;
    } catch (Exception e) {
      HornetQServerLogger.LOGGER.pageDeleteError(e);
      return false;
    }
  }
Example #23
  public HandleStatus handle(final MessageReference ref) throws Exception {
    if (availableCredits != null && availableCredits.get() <= 0) {
      if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
        HornetQServerLogger.LOGGER.debug(
            this
                + " is busy for the lack of credits. Current credits = "
                + availableCredits
                + " Can't receive reference "
                + ref);
      }

      return HandleStatus.BUSY;
    }

    // TODO - https://jira.jboss.org/browse/HORNETQ-533
    // if (!writeReady.get())
    // {
    // return HandleStatus.BUSY;
    // }

    synchronized (lock) {
      // If the consumer is stopped then we don't accept the message;
      // it should go back into the queue for delivery later.
      if (!started || transferring) {
        return HandleStatus.BUSY;
      }

      // If there is a pendingLargeMessage we can't take another message
      // This has to be checked inside the lock as the set to null is done inside the lock
      if (largeMessageDeliverer != null) {
        if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
          HornetQServerLogger.LOGGER.debug(
              this
                  + " is busy delivering large message "
                  + largeMessageDeliverer
                  + ", can't deliver reference "
                  + ref);
        }
        return HandleStatus.BUSY;
      }
      final ServerMessage message = ref.getMessage();

      if (filter != null && !filter.match(message)) {
        if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
          HornetQServerLogger.LOGGER.trace(
              "Reference " + ref + " is a noMatch on consumer " + this);
        }
        return HandleStatus.NO_MATCH;
      }

      if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
        HornetQServerLogger.LOGGER.trace("Handling reference " + ref);
      }

      if (!browseOnly) {
        if (!preAcknowledge) {
          deliveringRefs.add(ref);
        }

        ref.handled();

        ref.incrementDeliveryCount();

        // If updateDeliveries = false (set by strict-update),
        // the updateDeliveryCount would still be updated after cancellation
        if (strictUpdateDeliveryCount && !ref.isPaged()) {
          if (ref.getMessage().isDurable()
              && ref.getQueue().isDurable()
              && !ref.getQueue().isInternalQueue()
              && !ref.isPaged()) {
            storageManager.updateDeliveryCount(ref);
          }
        }

        if (preAcknowledge) {
          if (message.isLargeMessage()) {
            // we must hold one reference, or the file will be deleted before it could be delivered
            ((LargeServerMessage) message).incrementDelayDeletionCount();
          }

          // With pre-ack, we ack *before* sending to the client
          ref.getQueue().acknowledge(ref);
        }
      }

      if (message.isLargeMessage()) {
        largeMessageDeliverer = new LargeMessageDeliverer((LargeServerMessage) message, ref);
      }

      lockDelivery.readLock().lock();

      return HandleStatus.HANDLED;
    }
  }
Example #24
 private void doRollback() throws Exception {
   if (containsPersistent || xid != null && state == State.PREPARED) {
     storageManager.rollback(id);
   }
 }
Example #25
  public void cleanup() {
    ArrayList<Page> depagedPages = new ArrayList<Page>();

    while (true) {
      if (pagingStore.lock(100)) {
        break;
      }
      if (!pagingStore.isStarted()) return;
    }

    synchronized (this) {
      try {
        if (!pagingStore.isStarted()) {
          return;
        }

        if (pagingStore.getNumberOfPages() == 0) {
          return;
        }

        if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
          HornetQServerLogger.LOGGER.debug(
              "Asserting cleanup for address " + this.pagingStore.getAddress());
        }

        ArrayList<PageSubscription> cursorList = cloneSubscriptions();

        long minPage = checkMinPage(cursorList);

        // if the current page is the one still being written to,
        // we need to verify it in a different way
        if (minPage == pagingStore.getCurrentWritingPage()
            && pagingStore.getCurrentPage().getNumberOfMessages() > 0) {
          boolean complete = true;

          for (PageSubscription cursor : cursorList) {
            if (!cursor.isComplete(minPage)) {
              if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
                HornetQServerLogger.LOGGER.debug(
                    "Cursor " + cursor + " was considered incomplete at page " + minPage);
              }

              complete = false;
              break;
            } else {
              if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
                HornetQServerLogger.LOGGER.debug(
                    "Cursor " + cursor + " was considered **complete** at page " + minPage);
              }
            }
          }

          if (!pagingStore.isStarted()) {
            return;
          }

          // All the pages on the cursor are complete, so we will clean up everything
          // and store a bookmark
          if (complete) {

            if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
              HornetQServerLogger.LOGGER.debug(
                  "Address "
                      + pagingStore.getAddress()
                      + " is leaving page mode as all messages are consumed and acknowledged from the page store");
            }

            pagingStore.forceAnotherPage();

            Page currentPage = pagingStore.getCurrentPage();

            storeBookmark(cursorList, currentPage);

            pagingStore.stopPaging();
          }
        }

        for (long i = pagingStore.getFirstPage(); i < minPage; i++) {
          Page page = pagingStore.depage();
          if (page == null) {
            break;
          }
          depagedPages.add(page);
        }

        if (pagingStore.getNumberOfPages() == 0
            || pagingStore.getNumberOfPages() == 1
                && pagingStore.getCurrentPage().getNumberOfMessages() == 0) {
          pagingStore.stopPaging();
        } else {
          if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
            HornetQServerLogger.LOGGER.trace(
                "Couldn't cleanup page on address "
                    + this.pagingStore.getAddress()
                    + " as numberOfPages == "
                    + pagingStore.getNumberOfPages()
                    + " and currentPage.numberOfMessages = "
                    + pagingStore.getCurrentPage().getNumberOfMessages());
          }
        }
      } catch (Exception ex) {
        HornetQServerLogger.LOGGER.problemCleaningPageAddress(ex, pagingStore.getAddress());
        return;
      } finally {
        pagingStore.unlock();
      }
    }

    try {
      for (Page depagedPage : depagedPages) {
        PageCache cache;
        PagedMessage[] pgdMessages;
        synchronized (softCache) {
          cache = softCache.get((long) depagedPage.getPageId());
        }

        if (isTrace) {
          HornetQServerLogger.LOGGER.trace(
              "Removing page " + depagedPage.getPageId() + " from page-cache");
        }

        if (cache == null) {
          // The page is not on cache any more
          // We need to read the page-file before deleting it
          // to make sure we remove any large-messages pending
          storageManager.beforePageRead();

          List<PagedMessage> pgdMessagesList = null;
          try {
            depagedPage.open();
            pgdMessagesList = depagedPage.read(storageManager);
          } finally {
            try {
              depagedPage.close();
            } catch (Exception e) {
            }

            storageManager.afterPageRead();
          }
          depagedPage.close();
          pgdMessages = pgdMessagesList.toArray(new PagedMessage[pgdMessagesList.size()]);
        } else {
          pgdMessages = cache.getMessages();
        }

        depagedPage.delete(pgdMessages);
        onDeletePage(depagedPage);

        synchronized (softCache) {
          softCache.remove((long) depagedPage.getPageId());
        }
      }
    } catch (Exception ex) {
      HornetQServerLogger.LOGGER.problemCleaningPageAddress(ex, pagingStore.getAddress());
      return;
    }
  }
Example #26
  @Test
  public void testSendTopic() throws Exception {
    Topic topic = createTopic("topic");
    Connection conn = cf.createConnection();

    try {
      conn.setClientID("someID");

      Session sess = conn.createSession(false, Session.AUTO_ACKNOWLEDGE);

      MessageConsumer cons = sess.createDurableSubscriber(topic, "someSub");

      conn.start();

      MessageProducer prod = sess.createProducer(topic);

      TextMessage msg1 = sess.createTextMessage("text");

      prod.send(msg1);

      assertNotNull(cons.receive(5000));

      conn.close();

      StorageManager storage = server.getStorageManager();

      for (int i = 0; i < 100; i++) {
        long txid = storage.generateUniqueID();

        final Queue queue =
            new QueueImpl(
                storage.generateUniqueID(),
                SimpleString.toSimpleString("jms.topic.topic"),
                SimpleString.toSimpleString("jms.topic.topic"),
                FilterImpl.createFilter(HornetQServerImpl.GENERIC_IGNORED_FILTER),
                true,
                false,
                server.getScheduledPool(),
                server.getPostOffice(),
                storage,
                server.getAddressSettingsRepository(),
                server.getExecutorFactory().getExecutor());

        LocalQueueBinding binding =
            new LocalQueueBinding(queue.getAddress(), queue, server.getNodeID());

        storage.addQueueBinding(txid, binding);

        storage.commitBindings(txid);
      }

      jmsServer.stop();

      jmsServer.start();

    } finally {
      try {
        conn.close();
      } catch (Throwable ignored) {
      }
    }
  }
Example #27
  public void cleanup() {
    ArrayList<Page> depagedPages = new ArrayList<Page>();

    while (true) {
      if (pagingStore.lock(100)) {
        break;
      }
      if (!pagingStore.isStarted()) return;
    }

    synchronized (this) {
      try {
        if (!pagingStore.isStarted()) {
          return;
        }

        if (pagingStore.getNumberOfPages() == 0) {
          return;
        }

        if (log.isDebugEnabled()) {
          log.debug("Asserting cleanup for address " + this.pagingStore.getAddress());
        }

        ArrayList<PageSubscription> cursorList = new ArrayList<PageSubscription>();
        cursorList.addAll(activeCursors.values());

        long minPage = checkMinPage(cursorList);

        if (minPage == pagingStore.getCurrentWritingPage()
            && pagingStore.getCurrentPage().getNumberOfMessages() > 0) {
          boolean complete = true;

          for (PageSubscription cursor : cursorList) {
            if (!cursor.isComplete(minPage)) {
              if (log.isDebugEnabled()) {
                log.debug("Cursor " + cursor + " was considered incomplete at page " + minPage);
              }

              complete = false;
              break;
            } else {
              if (log.isDebugEnabled()) {
                log.debug("Cursor " + cursor + " was considered **complete** at page " + minPage);
              }
            }
          }

          if (!pagingStore.isStarted()) {
            return;
          }

          if (complete) {

            if (log.isDebugEnabled()) {
              log.debug(
                  "Address "
                      + pagingStore.getAddress()
                      + " is leaving page mode as all messages are consumed and acknowledged from the page store");
            }

            pagingStore.forceAnotherPage();

            Page currentPage = pagingStore.getCurrentPage();

            storePositions(cursorList, currentPage);

            pagingStore.stopPaging();

            // This has to be called after we stopped paging
            for (PageSubscription cursor : cursorList) {
              cursor.scheduleCleanupCheck();
            }
          }
        }

        for (long i = pagingStore.getFirstPage(); i < minPage; i++) {
          Page page = pagingStore.depage();
          if (page == null) {
            break;
          }
          depagedPages.add(page);
        }

        if (pagingStore.getNumberOfPages() == 0
            || pagingStore.getNumberOfPages() == 1
                && pagingStore.getCurrentPage().getNumberOfMessages() == 0) {
          pagingStore.stopPaging();
        } else {
          if (log.isTraceEnabled()) {
            log.trace(
                "Couldn't cleanup page on address "
                    + this.pagingStore.getAddress()
                    + " as numberOfPages == "
                    + pagingStore.getNumberOfPages()
                    + " and currentPage.numberOfMessages = "
                    + pagingStore.getCurrentPage().getNumberOfMessages());
          }
        }
      } catch (Exception ex) {
        log.warn("Couldn't complete cleanup on paging", ex);
        return;
      } finally {
        pagingStore.unlock();
      }
    }

    try {
      for (Page depagedPage : depagedPages) {
        PageCache cache;
        PagedMessage[] pgdMessages;
        synchronized (softCache) {
          cache = softCache.get((long) depagedPage.getPageId());
        }

        if (isTrace) {
          log.trace("Removing page " + depagedPage.getPageId() + " from page-cache");
        }

        if (cache == null) {
          // The page is not on cache any more
          // We need to read the page-file before deleting it
          // to make sure we remove any large-messages pending
          storageManager.beforePageRead();

          List<PagedMessage> pgdMessagesList = null;
          try {
            depagedPage.open();
            pgdMessagesList = depagedPage.read(storageManager);
          } finally {
            try {
              depagedPage.close();
            } catch (Exception e) {
            }

            storageManager.afterPageRead();
          }
          depagedPage.close();
          pgdMessages = pgdMessagesList.toArray(new PagedMessage[pgdMessagesList.size()]);
        } else {
          pgdMessages = cache.getMessages();
        }

        depagedPage.delete(pgdMessages);

        synchronized (softCache) {
          softCache.remove((long) depagedPage.getPageId());
        }
      }
    } catch (Exception ex) {
      log.warn("Couldn't complete cleanup on paging", ex);
      return;
    }
  }
Example #28
  public synchronized List<PagedMessage> read(StorageManager storage) throws Exception {
    if (isDebug) {
      HornetQServerLogger.LOGGER.debug(
          "reading page " + this.pageId + " on address = " + storeName);
    }

    if (!file.isOpen()) {
      throw HornetQMessageBundle.BUNDLE.invalidPageIO();
    }

    ArrayList<PagedMessage> messages = new ArrayList<PagedMessage>();

    size.set((int) file.size());
    // Using direct buffer, as described on https://jira.jboss.org/browse/HORNETQ-467
    ByteBuffer directBuffer = storage.allocateDirectBuffer((int) file.size());

    try {

      file.position(0);
      file.read(directBuffer);

      directBuffer.rewind();

      HornetQBuffer fileBuffer = HornetQBuffers.wrappedBuffer(directBuffer);
      fileBuffer.writerIndex(fileBuffer.capacity());

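      // Descriptive note (added): scan for framed records (START_BYTE, 4-byte length,
      // payload, END_BYTE); a frame that fails validation marks the file as suspect
      // and stops the scan.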
      while (fileBuffer.readable()) {
        final int position = fileBuffer.readerIndex();

        byte byteRead = fileBuffer.readByte();

        if (byteRead == Page.START_BYTE) {
          if (fileBuffer.readerIndex() + DataConstants.SIZE_INT < fileBuffer.capacity()) {
            int messageSize = fileBuffer.readInt();
            int oldPos = fileBuffer.readerIndex();
            if (fileBuffer.readerIndex() + messageSize < fileBuffer.capacity()
                && fileBuffer.getByte(oldPos + messageSize) == Page.END_BYTE) {
              PagedMessage msg = new PagedMessageImpl();
              msg.decode(fileBuffer);
              byte b = fileBuffer.readByte();
              if (b != Page.END_BYTE) {
                // Sanity Check: This would only happen if there is a bug on decode or any
                // internal code, as this constraint was already checked
                throw new IllegalStateException(
                    "Internal error, it wasn't possible to locate END_BYTE " + b);
              }
              msg.initMessage(storage);
              if (isTrace) {
                HornetQServerLogger.LOGGER.trace(
                    "Reading message "
                        + msg
                        + " on pageId="
                        + this.pageId
                        + " for address="
                        + storeName);
              }
              messages.add(msg);
            } else {
              markFileAsSuspect(file.getFileName(), position, messages.size());
              break;
            }
          }
        } else {
          markFileAsSuspect(file.getFileName(), position, messages.size());
          break;
        }
      }
    } finally {
      storage.freeDirectBuffer(directBuffer);
    }

    numberOfMessages.set(messages.size());

    return messages;
  }