  /*
   * (non-Javadoc)
   *
   * @see org.apache.bookkeeper.mledger.ManagedLedger#openCursor(java.lang.String)
   */
  @Override
  public synchronized ManagedCursor openCursor(String cursorName)
      throws InterruptedException, ManagedLedgerException {
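    // A fenced managed ledger is no longer usable; checkFenced() is
    // expected to throw a ManagedLedgerException in that case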
    checkFenced();

    ManagedCursor cursor = cursors.get(cursorName);

    if (cursor == null) {
      // Create a new one and persist it
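      // Position the cursor at the end of the current ledger, so that it
      // only tracks entries added from now on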
      Position position = new Position(currentLedger.getId(), currentLedger.getLastAddConfirmed());

      cursor = new ManagedCursorImpl(this, cursorName, position);
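      // Persist the initial position before exposing the cursor in the container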
      store.updateConsumer(name, cursorName, position);
      cursors.add(cursor);
    }

    log.debug("[{}] Opened new cursor: {}", this.name, cursor);
    return cursor;
  }
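
  /**
   * Persists the new acknowledged position for the given cursor, updates the
   * in-memory state and, when the cursor moves on to a different ledger,
   * triggers a background trimming of the fully consumed ledgers.
   */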
  protected synchronized void updateCursor(ManagedCursorImpl cursor, Position newPosition)
      throws InterruptedException, ManagedLedgerException {
    checkFenced();
    // First update the metadata store, so that if we don't succeed we have
    // not changed any other state
    store.updateConsumer(name, cursor.getName(), newPosition);
    Position oldPosition = cursor.setAcknowledgedPosition(newPosition);
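    // Notify the container so that the slowest-reader position used for
    // trimming stays accurate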
    cursors.cursorUpdated(cursor);

    if (oldPosition.getLedgerId() != newPosition.getLedgerId()) {
      // Only trigger a trimming when switching to the next ledger
      trimConsumedLedgersInBackground();
    }
  }

  /**
   * Checks whether there are ledgers that have been fully consumed and deletes them.
   */
  protected void internalTrimConsumedLedgers() {
    // Ensure only one trimming operation is active
    List<LedgerStat> ledgersToDelete = Lists.newArrayList();

    synchronized (this) {
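      // Determine, while holding the lock, which ledgers are fully
      // consumed; the actual deletion happens below without the lock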
      long slowestReaderLedgerId = -1;
      if (cursors.isEmpty() && currentLedger != null) {
        // At this point currentLedger is pointing to the ledger that has
        // just been closed, therefore the +1 is needed to include it in
        // the trimming (the headMap() bound below is exclusive).
        slowestReaderLedgerId = currentLedger.getId() + 1;
      } else {
        slowestReaderLedgerId = cursors.getSlowestReaderPosition().getLedgerId();
      }

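      // headMap() with an exclusive bound keeps the ledger the slowest
      // reader is still positioned on, and everything after it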
      for (LedgerStat ls : ledgers.headMap(slowestReaderLedgerId, false).values()) {
        ledgersToDelete.add(ls);
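        // Drop any cached handle for this ledger so it is not reused after
        // deletion (ledgerCache is assumed to hold open ledger handles)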
        ledgerCache.invalidate(ls.getLedgerId());
      }

      if (ledgersToDelete.isEmpty()) {
        return;
      }
    }

    // Delete the ledgers _without_ holding the lock on 'this'
    long removedCount = 0;
    long removedSize = 0;

    for (LedgerStat ls : ledgersToDelete) {
      log.info("[{}] Removing ledger {}", name, ls.getLedgerId());
      try {
        bookKeeper.deleteLedger(ls.getLedgerId());
        // Accumulate removed entries and bytes so the stats can be adjusted
        // below (assumes LedgerStat exposes the per-ledger entry count)
        removedCount += ls.getEntriesCount();
        removedSize += ls.getSize();
      } catch (BKNoSuchLedgerExistsException e) {
        log.warn("[{}] Ledger was already deleted {}", name, ls.getLedgerId());
      } catch (Exception e) {
        log.error("[{}] Error deleting ledger {}", name, ls.getLedgerId());
        return;
      }
    }

    // Update metadata
    try {
      synchronized (this) {
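        // Adjust the in-memory stats to reflect the deleted ledgers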
        numberOfEntries.addAndGet(-removedCount);
        totalSize.addAndGet(-removedSize);
        for (LedgerStat ls : ledgersToDelete) {
          ledgers.remove(ls.getLedgerId());
        }

        if (state == State.CreatingLedger) {
          // The list of ledgers is being modified asynchronously, so we
          // cannot update it now. In case of a client crash, this will
          // just result in some ledgers being deleted twice, without any
          // other side effects.
          log.info("[{}] Skipped updating ledger list for concurrent modification", name);
          return;
        }

        ledgersVersion = store.updateLedgersIds(name, ledgers.values(), ledgersVersion);
      }
    } catch (MetaStoreException e) {
      log.error("[{}] Failed to update the list of ledgers after trimming", name, e);
    }
  }