/** - (void) beginReplicating in CBL_Replicator.m */
  @Override
  @InterfaceAudience.Private
  public void beginReplicating() {
    Log.d(Log.TAG_SYNC, "%s: beginReplicating() called", this);

    // If we're still waiting to create the remote db, do nothing now. (This method will be
    // re-invoked after that request finishes; see maybeCreateRemoteDB() above.)
    if (creatingTarget) {
      Log.d(Log.TAG_SYNC, "%s: creatingTarget == true, doing nothing", this);
      return;
    }

    pendingSequences = Collections.synchronizedSortedSet(new TreeSet<Long>());
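    // pendingSequences tracks the local sequence numbers that have been queued for push but not
    // yet acknowledged by the remote; the checkpoint (lastSequence) can only advance past
    // sequences that are no longer pending.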
    try {
      maxPendingSequence = Long.parseLong(lastSequence);
    } catch (NumberFormatException e) {
      Log.w(Log.TAG_SYNC, "Error converting lastSequence: %s to long.  Using 0", lastSequence);
      maxPendingSequence = 0L;
    }

    filter = compilePushReplicationFilter();
    if (filterName != null && filter == null) {
      Log.w(
          Log.TAG_SYNC,
          "%s: No ReplicationFilter registered for filter '%s'; ignoring",
          this,
          filterName);
    }

    // Process existing changes since the last push. lastSequence was already parsed above (with a
    // fallback of 0) into maxPendingSequence, so reuse that value rather than re-parsing it here
    // without a guard:
    long lastSequenceLong = maxPendingSequence;
    ChangesOptions options = new ChangesOptions();
    options.setIncludeConflicts(true);
    Log.d(Log.TAG_SYNC, "%s: Getting changes since %s", this, lastSequence);
    RevisionList changes = db.changesSince(lastSequenceLong, options, filter, filterParams);
    if (changes.size() > 0) {
      Log.d(Log.TAG_SYNC, "%s: Queuing %d changes since %s", this, changes.size(), lastSequence);
      int remaining = changes.size();
      int size = batcher.getCapacity();
      int start = 0;
      while (remaining > 0) {
        if (size > remaining) size = remaining;
        RevisionList subChanges = new RevisionList(changes.subList(start, start + size));
        batcher.queueObjects(subChanges);
        start += size;
        remaining -= size;
        pauseOrResume();
        waitIfPaused();
      }
    } else {
      Log.d(Log.TAG_SYNC, "%s: No changes since %s", this, lastSequence);
    }
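    // Illustrative arithmetic for the loop above: with a batcher capacity of 100 and 250 queued
    // changes, the revisions are handed to the batcher in slices of 100, 100, and 50.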

    // Now listen for future changes (in continuous mode):
    if (isContinuous()) {
      observing = true;
      db.addChangeListener(this);
    }
  }
  /** - (void) processInbox: (CBL_RevisionList*)changes in CBLRestPusher.m */
  @Override
  @InterfaceAudience.Private
  protected void processInbox(final RevisionList changes) {

    Log.v(Log.TAG_SYNC, "processInbox() changes=" + changes.size());

    // Generate a set of doc/rev IDs in the JSON format that _revs_diff wants:
    // <http://wiki.apache.org/couchdb/HttpPostRevsDiff>
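    // Illustrative request body (shape only; doc and rev IDs here are made up):
    //   { "docA": ["2-7f3cfa...", "3-0a1b2c..."], "docB": ["1-9d8e7f..."] }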
    Map<String, List<String>> diffs = new HashMap<String, List<String>>();
    for (RevisionInternal rev : changes) {
      String docID = rev.getDocID();
      List<String> revs = diffs.get(docID);
      if (revs == null) {
        revs = new ArrayList<String>();
        diffs.put(docID, revs);
      }
      revs.add(rev.getRevID());
      addPending(rev);
    }

    // Call _revs_diff on the target db:
    Log.v(Log.TAG_SYNC, "%s: posting to /_revs_diff", this);

    CustomFuture future =
        sendAsyncRequest(
            "POST",
            "/_revs_diff",
            diffs,
            new RemoteRequestCompletionBlock() {

              @Override
              public void onCompletion(HttpResponse httpResponse, Object response, Throwable e) {

                Log.v(Log.TAG_SYNC, "%s: got /_revs_diff response", PusherInternal.this);
                Map<String, Object> results = (Map<String, Object>) response;
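                // Per the _revs_diff API, each entry maps a doc ID to its missing revs and,
                // optionally, possible ancestors the server already has, e.g. (shape only):
                //   { "docA": { "missing": ["3-0a1b2c..."], "possible_ancestors": ["1-9d8e7f..."] } }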
                if (e != null) {
                  setError(e);
                } else {
                  if (results.size() != 0) {
                    // Go through the list of local changes again, selecting the ones the
                    // destination server said were missing, and mapping them to JSON dictionaries
                    // in the form _bulk_docs wants:
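                    // Each entry added to docsToSend carries the revision's full property map:
                    // "_id"/"_rev", the "_revisions" history built below, and any "_attachments"
                    // that survive stubbing.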
                    List<Object> docsToSend = new ArrayList<Object>();
                    RevisionList revsToSend = new RevisionList();
                    long bufferedSize = 0;
                    for (RevisionInternal rev : changes) {
                      // Is this revision in the server's 'missing' list?
                      Map<String, Object> properties = null;
                      Map<String, Object> revResults =
                          (Map<String, Object>) results.get(rev.getDocID());
                      if (revResults == null) {
                        removePending(rev);
                        continue;
                      }
                      List<String> revs = (List<String>) revResults.get("missing");
                      if (revs == null || !revs.contains(rev.getRevID())) {
                        removePending(rev);
                        continue;
                      }

                      // NOTE: Clear the sequence and body so Database.loadRevisionBody() is forced
                      // to reload from storage; SQLiteStore.loadRevisionBody() skips the database
                      // read when sequence != 0 && body != null.
                      rev.setSequence(0);
                      rev.setBody(null);

                      RevisionInternal loadedRev;
                      try {
                        loadedRev = db.loadRevisionBody(rev);
                      } catch (CouchbaseLiteException e1) {
                        Log.w(
                            Log.TAG_SYNC,
                            "%s: Couldn't get local contents of %s",
                            PusherInternal.this,
                            rev);
                        continue;
                      }

                      RevisionInternal populatedRev = transformRevision(loadedRev);
                      loadedRev = null;

                      List<String> possibleAncestors =
                          (List<String>) revResults.get("possible_ancestors");

                      properties = new HashMap<String, Object>(populatedRev.getProperties());
                      Map<String, Object> revisions =
                          db.getRevisionHistoryDictStartingFromAnyAncestor(
                              populatedRev, possibleAncestors);
                      properties.put("_revisions", revisions);
                      populatedRev.setProperties(properties);

                      // Strip any attachments already known to the target db:
                      if (properties.containsKey("_attachments")) {
                        // Look for the latest common ancestor and stub out older attachments:
                        int minRevPos = findCommonAncestor(populatedRev, possibleAncestors);
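                        // Presumably any attachment whose revpos is <= minRevPos already exists
                        // on the target, so expandAttachments() (called with minRevPos + 1) can
                        // leave those as stubs and only inline the newer ones.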

                        Status status = new Status(Status.OK);
                        if (!db.expandAttachments(
                            populatedRev, minRevPos + 1, !dontSendMultipart, false, status)) {
                          Log.w(
                              Log.TAG_SYNC,
                              "%s: Couldn't expand attachments of %s",
                              this,
                              populatedRev);
                          continue;
                        }

                        properties = populatedRev.getProperties();
                        if (!dontSendMultipart && uploadMultipartRevision(populatedRev)) {
                          continue;
                        }
                      }

                      if (properties == null || !properties.containsKey("_id")) {
                        throw new IllegalStateException("properties must contain a document _id");
                      }

                      revsToSend.add(rev);
                      docsToSend.add(properties);

                      bufferedSize += JSONUtils.estimate(properties);
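                      // Flush the accumulated batch once its estimated JSON size passes
                      // kMaxBulkDocsObjectSize, so a single _bulk_docs request stays bounded: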
                      if (bufferedSize > kMaxBulkDocsObjectSize) {
                        uploadBulkDocs(docsToSend, revsToSend);
                        docsToSend = new ArrayList<Object>();
                        revsToSend = new RevisionList();
                        bufferedSize = 0;
                      }
                    }

                    // Post the revisions to the destination:
                    uploadBulkDocs(docsToSend, revsToSend);

                  } else {
                    // None of the revisions are new to the remote
                    for (RevisionInternal revisionInternal : changes) {
                      removePending(revisionInternal);
                    }
                  }
                }
              }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);

    pauseOrResume();
  }
  /** Process a bunch of remote revisions from the _changes feed at once */
  @Override
  @InterfaceAudience.Private
  protected void processInbox(RevisionList inbox) {
    Log.d(Log.TAG_SYNC, "processInbox called");

    if (canBulkGet == null) {
      canBulkGet = serverIsSyncGatewayVersion("0.81");
    }
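    // canBulkGet is true only when the remote reports itself as Sync Gateway 0.81 or later; it
    // decides below whether revisions can be batched through _bulk_get or must go through the
    // fallback pull path.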

    // Ask the local database which of the revs are not known to it:
    String lastInboxSequence = ((PulledRevision) inbox.get(inbox.size() - 1)).getRemoteSequenceID();

    int numRevisionsRemoved = 0;
    try {
      // findMissingRevisions() is the local equivalent of _revs_diff: it looks at the revisions
      // in 'inbox' and removes the ones that already exist locally, so whatever is left in
      // 'inbox' afterwards is the set of revisions that need to be downloaded.
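      // e.g. if 'inbox' arrived with 10 revisions and 4 of them already exist locally,
      // numRevisionsRemoved ends up as 4 and 6 revisions remain in 'inbox' to download.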
      numRevisionsRemoved = db.findMissingRevisions(inbox);
    } catch (SQLException e) {
      Log.e(Log.TAG_SYNC, String.format("%s failed to look up local revs", this), e);
      inbox = null;
    }

    // Introduced in the Java port: inbox may be null from here on, so capture its size defensively.
    int inboxCount = 0;
    if (inbox != null) {
      inboxCount = inbox.size();
    }

    if (numRevisionsRemoved > 0) {
      Log.v(
          Log.TAG_SYNC,
          "%s: processInbox() setting changesCount to: %s",
          this,
          getChangesCount().get() - numRevisionsRemoved);
      // Decrease changesCount to account for the revisions we just found out we don't need
      // to fetch:
      addToChangesCount(-1 * numRevisionsRemoved);
    }

    if (inboxCount == 0) {
      // Nothing to do. Just bump the lastSequence.
      Log.w(
          Log.TAG_SYNC,
          "%s: no new remote revisions to fetch; adding lastInboxSequence (%s) to pendingSequences (%s)",
          this,
          lastInboxSequence,
          pendingSequences);
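      // Adding lastInboxSequence and then immediately removing it marks that remote sequence as
      // processed, letting the checkpointed value advance past it.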
      long seq = pendingSequences.addValue(lastInboxSequence);
      pendingSequences.removeSequence(seq);
      setLastSequence(pendingSequences.getCheckpointedValue());
      pauseOrResume();
      return;
    }

    Log.v(Log.TAG_SYNC, "%s: fetching %s remote revisions...", this, inboxCount);

    // Dump the revs into the queue of revs to pull from the remote db:
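    // Revisions eligible for bulk download go to bulkRevsToPull; the rest are fetched one at a
    // time. Generation-1, non-deleted, non-conflicted revisions have no revision history to
    // fetch, which is presumably why they qualify for the bulk path even when _bulk_get isn't
    // available.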
    for (int i = 0; i < inbox.size(); i++) {
      PulledRevision rev = (PulledRevision) inbox.get(i);
      if (canBulkGet || (rev.getGeneration() == 1 && !rev.isDeleted() && !rev.isConflicted())) {
        bulkRevsToPull.add(rev);
      } else {
        queueRemoteRevision(rev);
      }
      rev.setSequence(pendingSequences.addValue(rev.getRemoteSequenceID()));
    }
    pullRemoteRevisions();
    pauseOrResume();
  }