  /**
   * Replays a paged-message write replicated from the live server.
   *
   * @param packet the page-write packet to apply
   */
  private void handlePageWrite(final ReplicationPageWriteMessage packet) throws Exception {
    PagedMessage pgdMessage = packet.getPagedMessage();
    pgdMessage.initMessage(storageManager);
    ServerMessage msg = pgdMessage.getMessage();
    // Replay the write against the same page number the live server used
    Page page = getPage(msg.getAddress(), packet.getPageNumber());
    page.write(pgdMessage);
  }
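
  // getPage isn't shown in this excerpt. Given getPageMap (used by handlePageEvent below)
  // and newPage, it plausibly just composes the two; this is a sketch under that
  // assumption, with a hypothetical name, not the verified original:
  private Page getPageSketch(final SimpleString storeName, final int pageId) throws Exception {
    // Resolve the per-address page cache, then return (or create and open) the page
    return newPage(pageId, storeName, getPageMap(storeName));
  }
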
  @Override
  public synchronized void stop() throws Exception {
    if (!started) {
      return;
    }

    // Channel may be null if there isn't a connection to a live server
    if (channel != null) {
      channel.close();
    }

    for (ReplicatedLargeMessage largeMessage : largeMessages.values()) {
      largeMessage.releaseResources();
    }
    largeMessages.clear();

    for (Entry<JournalContent, Map<Long, JournalSyncFile>> entry :
        filesReservedForSync.entrySet()) {
      for (JournalSyncFile filesReserved : entry.getValue().values()) {
        filesReserved.close();
      }
    }

    filesReservedForSync.clear();
    if (journals != null) {
      for (Journal j : journals) {
        if (j instanceof FileWrapperJournal) {
          j.stop();
        }
      }
    }

    for (ConcurrentMap<Integer, Page> map : pageIndex.values()) {
      for (Page page : map.values()) {
        try {
          page.sync();
          page.close();
        } catch (Exception e) {
          ActiveMQServerLogger.LOGGER.errorClosingPageOnReplication(e);
        }
      }
    }
    pageManager.stop();

    pageIndex.clear();

    // Flush the executor: run a marker task and wait for it, so anything queued before
    // stop() has completed before storage shuts down below
    final CountDownLatch latch = new CountDownLatch(1);
    executor.execute(latch::countDown);
    latch.await(30, TimeUnit.SECONDS);

    // Storage needs to be the last to stop
    storageManager.stop();

    started = false;
  }
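
  // The marker-task-plus-latch idiom in stop() is a standard way to drain an executor:
  // submit a no-op and wait for it, so every task queued beforehand has run (assuming an
  // ordered, single-consumer executor). A reusable sketch, with a hypothetical helper name
  // and assuming java.util.concurrent.Executor is imported:
  private static boolean flushExecutor(Executor executor, long timeout, TimeUnit unit)
      throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    // When the marker runs, everything submitted before it has already completed
    executor.execute(latch::countDown);
    return latch.await(timeout, unit);
  }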

  /**
   * Returns the page with the given id from the given cache map, creating and opening it
   * in the store's page store if it is not cached yet.
   *
   * @param pageId the page number within the store
   * @param storeName the address whose page store owns the page
   * @param map the per-store cache of open pages
   * @return the cached or newly created page
   */
  private synchronized Page newPage(
      final int pageId, final SimpleString storeName, final ConcurrentMap<Integer, Page> map)
      throws Exception {
    Page page = map.get(pageId);

    if (page == null) {
      page = pageManager.getPageStore(storeName).createPage(pageId);
      page.open();
      map.put(pageId, page);
    }

    return page;
  }
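
  // Design note: newPage holds the class monitor because get-then-put on a ConcurrentMap
  // is not atomic by itself. computeIfAbsent would make it atomic without the lock, but
  // createPage/open throw checked exceptions, which the lambda cannot propagate. A sketch
  // of that alternative, wrapping the checked exception (hypothetical, not the original):
  private Page newPageUnchecked(
      final int pageId, final SimpleString storeName, final ConcurrentMap<Integer, Page> map) {
    return map.computeIfAbsent(
        pageId,
        id -> {
          try {
            Page page = pageManager.getPageStore(storeName).createPage(id);
            page.open();
            return page;
          } catch (Exception e) {
            // Rethrow unchecked; computeIfAbsent propagates it to the caller
            throw new IllegalStateException("Unable to create page " + id, e);
          }
        });
  }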

  /**
   * Receives 'raw' journal, page, or large-message data from the live server for
   * synchronization of logs.
   *
   * @param msg the file chunk to apply; a null data payload signals the end of that file
   * @throws Exception
   */
  private synchronized void handleReplicationSynchronization(ReplicationSyncFileMessage msg)
      throws Exception {
    Long id = Long.valueOf(msg.getId());
    byte[] data = msg.getData();
    SequentialFile syncFile;
    switch (msg.getFileType()) {
      case LARGE_MESSAGE:
        {
          ReplicatedLargeMessage largeMessage = lookupLargeMessage(id, false);
          if (!(largeMessage instanceof LargeServerMessageInSync)) {
            ActiveMQServerLogger.LOGGER.largeMessageIncompatible();
            return;
          }
          LargeServerMessageInSync largeMessageInSync = (LargeServerMessageInSync) largeMessage;
          syncFile = largeMessageInSync.getSyncFile();
          break;
        }
      case PAGE:
        {
          Page page = getPage(msg.getPageStore(), (int) msg.getId());
          syncFile = page.getFile();
          break;
        }
      case JOURNAL:
        {
          JournalSyncFile journalSyncFile =
              filesReservedForSync.get(msg.getJournalContent()).get(id);
          FileChannel journalChannel = journalSyncFile.getChannel();
          // A null payload signals the end of this journal file
          if (data == null) {
            journalChannel.close();
            return;
          }
          journalChannel.write(ByteBuffer.wrap(data));
          return;
        }
      default:
        throw ActiveMQMessageBundle.BUNDLE.replicationUnhandledFileType(msg.getFileType());
    }

    // Large-message and page chunks fall through to here; null data again means end of file
    if (data == null) {
      syncFile.close();
      return;
    }

    if (!syncFile.isOpen()) {
      syncFile.open();
    }
    syncFile.writeDirect(ByteBuffer.wrap(data), true);
  }
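
  // One caveat in the JOURNAL branch above: FileChannel.write may write fewer bytes than
  // the buffer holds. The single call relies on the chunk being accepted at once; a
  // defensive variant loops until the buffer drains (hypothetical helper, assuming
  // java.io.IOException is imported):
  private static void writeFully(FileChannel channel, ByteBuffer buffer) throws IOException {
    while (buffer.hasRemaining()) {
      channel.write(buffer);
    }
  }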

  /**
   * Closes or deletes a page in response to a page event replicated from the live server.
   *
   * @param packet the page-event packet
   */
  private void handlePageEvent(final ReplicationPageEventMessage packet) throws Exception {
    ConcurrentMap<Integer, Page> pages = getPageMap(packet.getStoreName());

    Page page = pages.remove(packet.getPageNumber());

    // If the page isn't cached, look it up (creating it if needed) so the event can
    // still be applied
    if (page == null) {
      page = getPage(packet.getStoreName(), packet.getPageNumber());
    }

    if (page != null) {
      if (packet.isDelete()) {
        if (deletePages) {
          page.delete(null);
        }
      } else {
        page.close();
      }
    }
  }

  /**
   * Recovers the page counters after a failure, making sure the page counter doesn't get
   * out of sync with what the page files actually contain.
   *
   * @param pendingNonTXPageCounter the pending non-transactional counter records to reconcile
   * @throws Exception
   */
  @Override
  public void recoverPendingPageCounters(List<PageCountPending> pendingNonTXPageCounter)
      throws Exception {
    // We need the records organized in the following structure:
    // Address -> PageID -> QueueID -> List<PageCountPending>
    // generateMapsOnPendingCount below sorts the records into that hierarchy

    Transaction txRecoverCounter = new TransactionImpl(storageManager);

    Map<SimpleString, Map<Long, Map<Long, List<PageCountPending>>>> perAddressMap =
        generateMapsOnPendingCount(queues, pendingNonTXPageCounter, txRecoverCounter);

    for (SimpleString address : perAddressMap.keySet()) {
      PagingStore store = pagingManager.getPageStore(address);
      Map<Long, Map<Long, List<PageCountPending>>> perPageMap = perAddressMap.get(address);

      // We generated this map above, so it can't be null
      assert perPageMap != null;

      for (Long pageId : perPageMap.keySet()) {
        Map<Long, List<PageCountPending>> perQueue = perPageMap.get(pageId);

        // Likewise generated above, so it can't be null
        assert perQueue != null;

        if (store.checkPageFileExists(pageId.intValue())) {
          // The page file still exists, so recalculate the counts from its records
          Page pg = store.createPage(pageId.intValue());
          pg.open();

          List<PagedMessage> pgMessages = pg.read(storageManager);
          // Done with the file once its records are in memory
          pg.close();
          Map<Long, AtomicInteger> countsPerQueueOnPage = new HashMap<>();

          for (PagedMessage pgd : pgMessages) {
            // Only non-transactional records are tallied here
            if (pgd.getTransactionID() <= 0) {
              for (long q : pgd.getQueueIDs()) {
                AtomicInteger countQ = countsPerQueueOnPage.get(q);
                if (countQ == null) {
                  countQ = new AtomicInteger(0);
                  countsPerQueueOnPage.put(q, countQ);
                }
                countQ.incrementAndGet();
              }
            }
          }

          for (Map.Entry<Long, List<PageCountPending>> entry : perQueue.entrySet()) {
            for (PageCountPending record : entry.getValue()) {
              logger.debug("Deleting pg tempCount " + record.getID());
              storageManager.deletePendingPageCounter(txRecoverCounter.getID(), record.getID());
            }

            PageSubscriptionCounter counter =
                store.getCursorProvider().getSubscription(entry.getKey()).getCounter();

            AtomicInteger value = countsPerQueueOnPage.get(entry.getKey());

            if (value == null) {
              logger.debug(
                  "Queue " + entry.getKey() + " had no records on this page, so we will just ignore it");
            } else {
              logger.debug("Replacing the counter with a recount of " + value.get());
              counter.increment(txRecoverCounter, value.get());
            }
          }
        } else {
          // The page file no longer exists, so just remove all the records since the page
          // is already gone
          logger.debug(
              "Page "
                  + pageId
                  + " didn't exist on address "
                  + address
                  + ", so we are just removing records");
          for (List<PageCountPending> records : perQueue.values()) {
            for (PageCountPending record : records) {
              logger.debug("Removing pending page counter " + record.getID());
              storageManager.deletePendingPageCounter(txRecoverCounter.getID(), record.getID());
              txRecoverCounter.setContainsPersistent();
            }
          }
        }
      }
    }

    txRecoverCounter.commit();
  }
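
  // The tally in recoverPendingPageCounters builds countsPerQueueOnPage with an explicit
  // get/put; on Java 8+ the same count can be written with computeIfAbsent. A sketch that
  // is equivalent to the inline loop (hypothetical helper, not part of the original):
  private static void countNonTxMessage(PagedMessage pgd, Map<Long, AtomicInteger> counts) {
    for (long q : pgd.getQueueIDs()) {
      counts.computeIfAbsent(q, k -> new AtomicInteger(0)).incrementAndGet();
    }
  }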