protected void startChangeTracker() {

    ChangeTracker.ChangeTrackerMode changeTrackerMode;

    // it always starts out as OneShot, but if it's a continuous replication
    // it will switch to longpoll later.
    changeTrackerMode = ChangeTracker.ChangeTrackerMode.OneShot;

    Log.d(
        Log.TAG_SYNC,
        "%s: starting ChangeTracker with since=%s mode=%s",
        this,
        lastSequence,
        changeTrackerMode);
    changeTracker = new ChangeTracker(remote, changeTrackerMode, true, lastSequence, this);
    changeTracker.setAuthenticator(getAuthenticator());
    Log.d(Log.TAG_SYNC, "%s: started ChangeTracker %s", this, changeTracker);

    if (filterName != null) {
      changeTracker.setFilterName(filterName);
      if (filterParams != null) {
        changeTracker.setFilterParams(filterParams);
      }
    }
    changeTracker.setDocIDs(documentIDs);
    changeTracker.setRequestHeaders(requestHeaders);
    changeTracker.setContinuous(lifecycle == Replication.Lifecycle.CONTINUOUS);

    changeTracker.setUsePOST(serverIsSyncGatewayVersion("0.93"));
    changeTracker.start();
  }
  private void waitForAllTasksCompleted() {
    // NOTE: wait until all queues become empty
    while ((batcher != null && batcher.count() > 0)
        || (pendingFutures != null && pendingFutures.size() > 0)
        || (downloadsToInsert != null && downloadsToInsert.count() > 0)) {

      // Wait for the batcher to complete
      if (batcher != null) {
        // if the batcher delays task execution, wait the same amount of time (0.5 sec or 0 sec)
        try {
          Thread.sleep(batcher.getDelay());
        } catch (Exception e) {
          // ignored; proceed to wait on the batcher's pending futures
        }
        Log.d(Log.TAG_SYNC, "batcher.waitForPendingFutures()");
        batcher.waitForPendingFutures();
      }

      // wait for pending futures to complete
      Log.d(Log.TAG_SYNC, "waitPendingFuturesCompleted()");
      waitPendingFuturesCompleted();

      // wait for the downloadsToInsert batcher to complete
      if (downloadsToInsert != null) {
        // if the batcher delays task execution, wait the same amount of time (1.0 sec or 0 sec)
        try {
          Thread.sleep(downloadsToInsert.getDelay());
        } catch (Exception e) {
          // ignored; proceed to wait on the batcher's pending futures
        }
        Log.d(Log.TAG_SYNC, "downloadsToInsert.waitForPendingFutures()");
        downloadsToInsert.waitForPendingFutures();
      }
    }
  }
  public void testPullReplicateOneShot() throws Exception {

    // create a mock CouchDB-style remote that will serve as a pull target, preloaded with
    // numMockDocsToServe docs
    int numMockDocsToServe = 0;
    MockDispatcher dispatcher = new MockDispatcher();
    MockWebServer server =
        MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, numMockDocsToServe, 1);
    dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
    server.setDispatcher(dispatcher);
    server.play();

    // kick off replication via REST api
    Map<String, Object> replicateJsonMap = getPullReplicationParsedJson(server.getUrl("/db"));
    Log.e(TAG, "map: " + replicateJsonMap);

    Map<String, Object> result =
        (Map<String, Object>) sendBody("POST", "/_replicate", replicateJsonMap, Status.OK, null);
    Log.e(TAG, "result: " + result);
    assertNotNull(result.get("session_id"));

    ArrayList<Object> activeTasks =
        (ArrayList<Object>) send("GET", "/_active_tasks", Status.OK, null);
    Log.e(TAG, "activeTasks.size(): " + activeTasks.size());
    for (Object obj : activeTasks) {
      Map<String, Object> resp = (Map<String, Object>) obj;
      assertEquals("Stopped", resp.get("status"));
    }

    // cleanup
    server.shutdown();
  }
  public void testPushReplicate() throws Exception {

    // create mock sync gateway that will serve as a push target
    MockDispatcher dispatcher = new MockDispatcher();
    MockWebServer server = MockHelper.getMockWebServer(dispatcher);
    dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);

    // fake checkpoint response 404
    MockCheckpointGet mockCheckpointGet = new MockCheckpointGet();
    dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointGet);

    server.play();

    Map<String, Object> replicateJsonMap = getPushReplicationParsedJson(server.getUrl("/db"));

    Log.v(TAG, "map: " + replicateJsonMap);
    Map<String, Object> result =
        (Map<String, Object>) sendBody("POST", "/_replicate", replicateJsonMap, Status.OK, null);
    Log.v(TAG, "result: " + result);
    assertNotNull(result.get("session_id"));

    boolean success = waitForReplicationToFinish();
    assertTrue(success);

    server.shutdown();
  }
  public void testPullReplicate() throws Exception {

    // create a mock CouchDB-style remote that will serve as a pull target, preloaded with
    // numMockDocsToServe docs
    int numMockDocsToServe = 0;
    MockDispatcher dispatcher = new MockDispatcher();
    MockWebServer server =
        MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, numMockDocsToServe, 1);
    dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
    server.setDispatcher(dispatcher);
    server.play();

    // kick off replication via REST api
    Map<String, Object> replicateJsonMap = getPullReplicationParsedJson(server.getUrl("/db"));
    Log.v(TAG, "map: " + replicateJsonMap);
    Map<String, Object> result =
        (Map<String, Object>) sendBody("POST", "/_replicate", replicateJsonMap, Status.OK, null);
    Log.v(TAG, "result: " + result);
    assertNotNull(result.get("session_id"));

    // wait for replication to finish
    boolean success = waitForReplicationToFinish();
    assertTrue(success);

    // cleanup
    server.shutdown();
  }
  public void waitForPendingFutures() {
    synchronized (lockWaitForPendingFutures) {
      if (waitingForPendingFutures) {
        return;
      }
      waitingForPendingFutures = true;
    }

    Log.d(
        Log.TAG_SYNC,
        "[PullerInternal.waitForPendingFutures()] STARTED - thread id: "
            + Thread.currentThread().getId());

    try {
      waitForAllTasksCompleted();
    } catch (Exception e) {
      Log.e(Log.TAG_SYNC, "Exception waiting for pending futures: %s", e);
    }

    fireTrigger(ReplicationTrigger.WAITING_FOR_CHANGES);

    Log.d(
        Log.TAG_SYNC,
        "[waitForPendingFutures()] END - thread id: " + Thread.currentThread().getId());

    synchronized (lockWaitForPendingFutures) {
      waitingForPendingFutures = false;
    }
  }
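
  // --- Illustrative sketch (not part of the original sources) ---
  // waitForPendingFutures() above uses a "single-flight" guard: the flag is checked and set
  // under a lock so that only one caller drains pending work at a time, and re-entrant calls
  // return immediately. A minimal standalone version of the same idiom; the names
  // (SingleFlightGuard, runExclusively) are hypothetical:
  static class SingleFlightGuard {
    private final Object lock = new Object();
    private boolean running = false;

    void runExclusively(Runnable work) {
      synchronized (lock) {
        if (running) return; // another caller is already draining
        running = true;
      }
      try {
        work.run(); // do the blocking work outside the lock
      } finally {
        synchronized (lock) {
          running = false;
        }
      }
    }
  }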
 /** This method is called when a part's headers have been parsed, before its data is parsed. */
 public void startedPart(Map headers) {
   if (_docReader != null) {
     throw new IllegalStateException("_docReader is already defined");
   }
   Log.v(Log.TAG_SYNC, "%s: Starting new document; headers =%s", this, headers);
   Log.v(Log.TAG_SYNC, "%s: Starting new document; ID=%s", this, headers.get("X-Doc-Id"));
   _docReader = new MultipartDocumentReader(_db);
   _docReader.setHeaders(headers);
   _docReader.startedPart(headers);
 }
 @Override
 public void changeTrackerReceivedChange(final Map<String, Object> change) {
   try {
     Log.d(Log.TAG_SYNC, "changeTrackerReceivedChange: %s", change);
     processChangeTrackerChange(change);
   } catch (Exception e) {
     Log.e(Log.TAG_SYNC, "Error processChangeTrackerChange(): %s", e);
     throw new RuntimeException(e);
   }
 }
  @Override
  protected void stop() {
    if (stateMachine.isInState(ReplicationState.STOPPED)) return;

    Log.d(Log.TAG_SYNC, "%s STOPPING...", toString());

    stopObserving();

    super.stop();

    // this has to run on a different thread than the replicator thread, or else it deadlocks:
    // it may be waiting for jobs that have been scheduled but not yet executed, and those
    // jobs will never execute because this call would block the processing thread.
    new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  // wait for all tasks to complete
                  waitForPendingFutures();
                } catch (Exception e) {
                  Log.e(Log.TAG_SYNC, "stop.run() had exception: %s", e);
                } finally {
                  triggerStopImmediate();
                  Log.d(Log.TAG_SYNC, "PusherInternal stop.run() finished");
                }
              }
            })
        .start();
  }
  protected void stopGraceful() {
    super.stopGraceful();

    Log.d(Log.TAG_SYNC, "PullerInternal.stopGraceful() started");

    // this has to run on a different thread than the replicator thread, or else it deadlocks:
    // it may be waiting for jobs that have been scheduled but not yet executed, and those
    // jobs will never execute because this call would block the processing thread.
    new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  // wait for all tasks to complete
                  waitForAllTasksCompleted();

                  // stop change tracker
                  if (changeTracker != null) {
                    Log.d(Log.TAG_SYNC, "stopping change tracker");
                    changeTracker.stop();
                    Log.d(Log.TAG_SYNC, "stopped change tracker");
                  }
                } catch (Exception e) {
                  Log.e(Log.TAG_SYNC, "stopGraceful.run() had exception: %s", e);
                  e.printStackTrace();
                } finally {
                  // stop replicator immediate
                  triggerStopImmediate();
                }
                Log.d(Log.TAG_SYNC, "PullerInternal stopGraceful.run() finished");
              }
            })
        .start();
  }
  // depends on testRunPushReplication
  private void testRunPullReplication() throws Throwable {
    URL remoteDbURL = new URL(syncGatewayUrl + pushThenPullDBName);
    Database db = startDatabase();

    Log.i(TAG, "Pulling...");
    Replication repl = db.getPullReplication(remoteDbURL);
    runReplication(repl);
    assertNull(repl.getLastError());

    Log.i(TAG, "Verifying documents...");
    for (int i = 1; i <= kNDocuments; i++) {
      Document doc = db.getDocument("doc-" + i);
      assertEquals(i, doc.getProperty("index"));
      assertEquals(false, doc.getProperty("bar"));
    }
  }
  /** - (void) maybeCreateRemoteDB in CBL_Replicator.m */
  @Override
  @InterfaceAudience.Private
  protected void maybeCreateRemoteDB() {
    if (!createTarget) {
      return;
    }
    creatingTarget = true;
    Log.v(Log.TAG_SYNC, "Remote db might not exist; creating it...");

    Future future =
        sendAsyncRequest(
            "PUT",
            "",
            null,
            new RemoteRequestCompletionBlock() {

              @Override
              public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                creatingTarget = false;
                if (e != null
                    && e instanceof HttpResponseException
                    && ((HttpResponseException) e).getStatusCode() != 412) {
                  Log.e(Log.TAG_SYNC, this + ": Failed to create remote db", e);
                  setError(e);
                  triggerStopGraceful(); // this is fatal: no db to push to!
                } else {
                  Log.v(Log.TAG_SYNC, "%s: Created remote db", this);
                  createTarget = false;
                  beginReplicating();
                }
              }
            });
    pendingFutures.add(future);
  }
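
  // --- Illustrative sketch (not part of the original class) ---
  // The completion handler above treats HTTP 412 (Precondition Failed) from the PUT as
  // "the remote database already exists", which is not fatal. A hypothetical predicate
  // expressing the same decision (isFatalCreateDbError is not part of the original API):
  private static boolean isFatalCreateDbError(Throwable e) {
    if (e == null) return false; // no error: the PUT created the database
    if (e instanceof HttpResponseException) {
      // 412 means the database was already there, which is fine for our purposes
      return ((HttpResponseException) e).getStatusCode() != 412;
    }
    return false; // mirrors the original handler, which only inspects HttpResponseException
  }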
 /** - (void) dbChanged: (NSNotification*)n in CBLRestPusher.m */
 @Override
 @InterfaceAudience.Private
 public void changed(Database.ChangeEvent event) {
   List<DocumentChange> changes = event.getChanges();
   try {
     java.net.URI remoteUri = remote.toURI();
     for (DocumentChange change : changes) {
       // Skip revisions that originally came from the database I'm syncing to:
       URL source = change.getSource();
       if (source != null && source.toURI().equals(remoteUri)) return;
       RevisionInternal rev = change.getAddedRevision();
       if (getLocalDatabase().runFilter(filter, filterParams, rev)) {
         pauseOrResume();
         waitIfPaused();
         RevisionInternal nuRev = rev.copy();
         nuRev.setBody(null); // save memory
         addToInbox(nuRev);
       }
     }
   } catch (java.net.URISyntaxException uriException) {
     // Not possible since it would not be an active replicator.
     // However, until we refactor everything to use java.net,
     // I'm not sure we have a choice but to swallow this.
     Log.e(Log.TAG_SYNC, "Active replicator found with invalid URI", uriException);
   }
 }
  /** https://github.com/couchbase/couchbase-lite-java-core/issues/106 */
  public void testResolveConflict() throws Exception {

    Map<String, Object> result;

    // Create a conflict on purpose
    Document doc = database.createDocument();
    SavedRevision rev1 = doc.createRevision().save();
    SavedRevision rev2a = createRevisionWithRandomProps(rev1, false);
    SavedRevision rev2b = createRevisionWithRandomProps(rev1, true);

    SavedRevision winningRev = null;
    SavedRevision losingRev = null;
    if (doc.getCurrentRevisionId().equals(rev2a.getId())) {
      winningRev = rev2a;
      losingRev = rev2b;
    } else {
      winningRev = rev2b;
      losingRev = rev2a;
    }

    assertEquals(2, doc.getConflictingRevisions().size());
    assertEquals(2, doc.getLeafRevisions().size());

    result =
        (Map<String, Object>)
            send(
                "GET",
                String.format("/%s/%s?conflicts=true", DEFAULT_TEST_DB, doc.getId()),
                Status.OK,
                null);
    List<String> conflicts = (List) result.get("_conflicts");
    assertEquals(1, conflicts.size());
    String conflictingRevId = conflicts.get(0);
    assertEquals(losingRev.getId(), conflictingRevId);

    long docNumericID = database.getDocNumericID(doc.getId());
    assertTrue(docNumericID != 0);
    assertNotNull(database.getDocument(doc.getId()));

    Log.d(TAG, "docNumericID for " + doc.getId() + " is: " + docNumericID);

    result =
        (Map<String, Object>)
            send(
                "DELETE",
                String.format("/%s/%s?rev=%s", DEFAULT_TEST_DB, doc.getId(), conflictingRevId),
                Status.OK,
                null);

    result =
        (Map<String, Object>)
            send(
                "GET",
                String.format("/%s/%s?conflicts=true", DEFAULT_TEST_DB, doc.getId()),
                Status.OK,
                null);

    conflicts = (List) result.get("_conflicts");
    assertEquals(0, conflicts.size());
  }
 private void revisionFailed(RevisionInternal rev, Throwable throwable) {
   if (!Utils.isTransientError(throwable)) {
     Log.v(Log.TAG_SYNC, "%s: giving up on %s: %s", this, rev, throwable);
     pendingSequences.removeSequence(rev.getSequence());
     pauseOrResume();
   }
   completedChangesCount.getAndIncrement();
 }
 private void waitPendingFuturesCompleted() {
   try {
     while (!pendingFutures.isEmpty()) {
       Future future = pendingFutures.take();
       try {
         Log.d(Log.TAG_SYNC, "calling future.get() on %s", future);
         future.get();
         Log.d(Log.TAG_SYNC, "done calling future.get() on %s", future);
       } catch (InterruptedException e) {
         e.printStackTrace();
       } catch (ExecutionException e) {
         e.printStackTrace();
       }
     }
   } catch (Exception e) {
     Log.e(Log.TAG_SYNC, "Exception waiting for pending futures: %s", e);
   }
 }
 private void setPaused(boolean paused) {
   Log.v(Log.TAG, "setPaused: " + paused);
   synchronized (pausedObj) {
     if (this.paused != paused) {
       this.paused = paused;
       pausedObj.notifyAll();
     }
   }
 }
  /** This method is called when a part is complete. */
  public void finishedPart() {
    Log.v(Log.TAG_SYNC, "%s: Finished document", this);
    if (_docReader == null) {
      throw new IllegalStateException("_docReader is not defined");
    }

    _docReader.finish();
    _onDocument.onDocument(_docReader.getDocumentProperties());
    _docReader = null;
  }
  public void failingRunPushReplication() throws Throwable {
    URL remoteDbURL = new URL(syncGatewayUrl + pushThenPullDBName);
    // java.net.ConnectException: failed to connect to /127.0.0.1 (port 4985): connect failed:
    // ECONNREFUSED (Connection refused)
    // RemoteRequest remoteRequest = new RemoteRequest(null,
    // CouchbaseLiteHttpClientFactory.INSTANCE, "DELETE", remoteDbURL, null, null);
    // remoteRequest.run();
    Database db = startDatabase();

    Log.i(TAG, "Creating " + kNDocuments + " documents...");
    createDocumentsAsync(db, kNDocuments);

    Log.i(TAG, "Pushing...");
    Replication repl = db.getPushReplication(remoteDbURL);
    repl.setCreateTarget(true);
    runReplication(repl);
    assertNull(repl.getLastError());

    testRunPullReplication();
  }
  /** Actual work of starting the replication process. */
  protected void beginReplicating() {
    Log.d(Log.TAG_SYNC, "startReplicating()");

    initPendingSequences();

    initDownloadsToInsert();

    startChangeTracker();

    // start replicator ..
  }
 private void waitIfPaused() {
   synchronized (pausedObj) {
     while (paused) {
       Log.v(Log.TAG, "Waiting: " + paused);
       try {
         pausedObj.wait();
        } catch (InterruptedException e) {
          // ignored; the loop re-checks the paused flag
        }
     }
   }
 }
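
  // --- Illustrative sketch (not part of the original sources) ---
  // setPaused()/waitIfPaused() above form a wait/notify gate: a worker calls waitIfPaused()
  // before each unit of work and blocks while the flag is set; the controller flips the flag
  // with setPaused(), which wakes waiters via notifyAll(). A self-contained version of the
  // same idiom; the names (PauseGate, awaitResume) are hypothetical:
  static class PauseGate {
    private final Object lock = new Object();
    private boolean paused;

    void setPaused(boolean value) {
      synchronized (lock) {
        if (paused != value) {
          paused = value;
          lock.notifyAll(); // wake any threads blocked in awaitResume()
        }
      }
    }

    void awaitResume() throws InterruptedException {
      synchronized (lock) {
        while (paused) {
          lock.wait(); // lock released while waiting; flag re-checked on wake-up
        }
      }
    }
  }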
  /** https://github.com/couchbase/couchbase-lite-java-core/issues/291 */
  public void testCallReplicateTwice() throws Exception {

    // create a mock CouchDB-style remote that will serve as a pull target, preloaded with
    // numMockDocsToServe docs
    int numMockDocsToServe = 0;
    MockDispatcher dispatcher = new MockDispatcher();
    MockWebServer server =
        MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, numMockDocsToServe, 1);
    dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
    server.setDispatcher(dispatcher);
    server.play();

    // kick off 1st replication via REST api
    Map<String, Object> replicateJsonMap = getPullReplicationParsedJson(server.getUrl("/db"));
    Log.i(TAG, "map: " + replicateJsonMap);

    Log.i(TAG, "Call 1st /_replicate");
    Map<String, Object> result =
        (Map<String, Object>) sendBody("POST", "/_replicate", replicateJsonMap, Status.OK, null);
    Log.i(TAG, "result: " + result);
    assertNotNull(result.get("session_id"));
    String sessionId1 = (String) result.get("session_id");

    // NOTE: the one-shot replication should be blocked; sendBody() waits until the response is
    // ready.
    //      https://github.com/couchbase/couchbase-lite-android/issues/204

    // kick off 2nd replication via REST api
    Log.i(TAG, "Call 2nd /_replicate");
    Map<String, Object> result2 =
        (Map<String, Object>) sendBody("POST", "/_replicate", replicateJsonMap, Status.OK, null);
    Log.i(TAG, "result2: " + result2);
    assertNotNull(result2.get("session_id"));
    String sessionId2 = (String) result2.get("session_id");

    // wait for replication to finish
    boolean success = waitForReplicationToFinish();
    assertTrue(success);

    // kick off 3rd replication via REST api
    Log.i(TAG, "Call 3rd /_replicate");
    Map<String, Object> result3 =
        (Map<String, Object>) sendBody("POST", "/_replicate", replicateJsonMap, Status.OK, null);
    Log.i(TAG, "result3: " + result3);
    assertNotNull(result3.get("session_id"));
    String sessionId3 = (String) result3.get("session_id");

    // wait for replication to finish
    boolean success3 = waitForReplicationToFinish();
    assertTrue(success3);

    assertFalse(sessionId1.equals(sessionId2));
    assertFalse(sessionId1.equals(sessionId3));
    assertFalse(sessionId2.equals(sessionId3));

    // cleanup
    server.shutdown();
  }
 static void runReplication(Replication repl) throws InterruptedException {
   Log.i(TAG, "Waiting for " + repl + " to finish...");
   boolean started = false, done = false;
   repl.start();
    long lastTime = System.currentTimeMillis();
   while (!done) {
     if (repl.isRunning()) {
       started = true;
     }
      // TODO getMode() always throws UnsupportedOperationException (see iOS test)
      if (started && repl.getMode() == Replication.ReplicationMode.REPLICATION_ACTIVE) {
        done = true;
      }
     // Replication runs on a background thread, so the main runloop should not be blocked.
     // Make sure it's spinning in a timely manner:
     long now = System.currentTimeMillis();
      if (lastTime > 0 && now - lastTime > 25)
        Log.w(TAG, "Runloop was blocked for " + (now - lastTime) + " ms");
     lastTime = now;
      Thread.sleep(100);
      // NOTE: exits after a single pass; getMode() is not yet usable (see the TODO above)
      break;
   }
   if (repl.getLastError() == null) {
     Log.i(
         TAG,
         String.format(
             "...replicator finished. progress %d/%d without error",
             repl.getCompletedChangesCount(), repl.getChangesCount()));
   } else {
     Log.i(
         TAG,
         String.format(
             "...replicator finished. progress %d/%d, error=%s",
             repl.getCompletedChangesCount(),
             repl.getChangesCount(),
             repl.getLastError().toString()));
   }
 }
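
  // --- Illustrative usage sketch (not an original test) ---
  // How the helper above is typically driven from a test. startDatabase(), syncGatewayUrl,
  // TAG and the assertions come from the surrounding test class; "example-db" is a
  // hypothetical remote database name.
  private void exampleRunPullViaHelper() throws Exception {
    Database db = startDatabase();
    URL remoteDbURL = new URL(syncGatewayUrl + "example-db");
    Replication pull = db.getPullReplication(remoteDbURL);
    runReplication(pull); // blocks briefly, then logs progress and any error
    assertNull(pull.getLastError());
  }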
 @Override
 public void changeTrackerCaughtUp() {
   Log.d(Log.TAG_SYNC, "changeTrackerCaughtUp");
   // for continuous replications, once the change tracker is caught up, we
   // should try to go into the idle state.
   if (isContinuous()) {
      // this has to run on a different thread than the replicator thread, or else it deadlocks:
      // it may be waiting for jobs that have been scheduled but not yet executed, and those
      // jobs will never execute because this call would block the processing thread.
     waitForPendingFuturesWithNewThread();
   }
 }
  public void testFacebookToken() {
    send("PUT", "/db", Status.CREATED, null);

    Map<String, Object> doc1 = new HashMap<String, Object>();
    doc1.put("email", "*****@*****.**");
    doc1.put("remote_url", getReplicationURL().toExternalForm());
    doc1.put("access_token", "fake_access_token");

    Map<String, Object> result =
        (Map<String, Object>) sendBody("POST", "/_facebook_token", doc1, Status.OK, null);
    Log.v(TAG, String.format("result %s", result));
  }
 private void processChangeTrackerStopped(ChangeTracker tracker) {
   Log.d(Log.TAG_SYNC, "changeTrackerStopped.  lifecycle: %s", lifecycle);
   switch (lifecycle) {
     case ONESHOT:
        // TODO: This is too early to fire STOP_GRACEFUL; this needs to change.
       Log.d(Log.TAG_SYNC, "fire STOP_GRACEFUL");
       if (tracker.getLastError() != null) {
         setError(tracker.getLastError());
       }
       stateMachine.fire(ReplicationTrigger.STOP_GRACEFUL);
       break;
     case CONTINUOUS:
       if (stateMachine.isInState(ReplicationState.OFFLINE)) {
         // in this case, we don't want to do anything here, since
         // we told the change tracker to go offline ..
         Log.d(Log.TAG_SYNC, "Change tracker stopped because we are going offline");
       } else if (stateMachine.isInState(ReplicationState.STOPPING)
           || stateMachine.isInState(ReplicationState.STOPPED)) {
         Log.d(Log.TAG_SYNC, "Change tracker stopped because replicator is stopping or stopped.");
       } else {
         // otherwise, try to restart the change tracker, since it should
         // always be running in continuous replications
          String msg = "Change tracker stopped during continuous replication";
         Log.e(Log.TAG_SYNC, msg);
         parentReplication.setLastError(new Exception(msg));
         fireTrigger(ReplicationTrigger.WAITING_FOR_CHANGES);
         Log.d(
             Log.TAG_SYNC,
             "Scheduling change tracker restart in %d ms",
             CHANGE_TRACKER_RESTART_DELAY_MS);
         workExecutor.schedule(
             new Runnable() {
               @Override
               public void run() {
                  // the replication may have been stopped by the time this scheduled task
                  // fires, so we need to check the state here.
                 if (stateMachine.isInState(ReplicationState.RUNNING)) {
                   Log.d(Log.TAG_SYNC, "%s still running, restarting change tracker", this);
                   startChangeTracker();
                 } else {
                    Log.d(
                        Log.TAG_SYNC,
                        "%s no longer running, not restarting change tracker",
                        this);
                 }
               }
             },
             CHANGE_TRACKER_RESTART_DELAY_MS,
             TimeUnit.MILLISECONDS);
       }
       break;
     default:
       Log.e(Log.TAG_SYNC, "Unknown lifecycle: %s", lifecycle);
   }
 }
  /**
   * in CBL_Puller.m - (void) changeTrackerReceivedSequence: (id)remoteSequenceID docID:
   * (NSString*)docID revIDs: (NSArray*)revIDs deleted: (BOOL)deleted
   */
  protected void processChangeTrackerChange(final Map<String, Object> change) {
    String lastSequence = change.get("seq").toString();
    String docID = (String) change.get("id");
    if (docID == null) {
      return;
    }

    if (!Document.isValidDocumentId(docID)) {
      Log.w(Log.TAG_SYNC, "%s: Received invalid doc ID from _changes: %s", this, change);
      return;
    }
    boolean deleted =
        (change.containsKey("deleted") && ((Boolean) change.get("deleted")).equals(Boolean.TRUE));
    List<Map<String, Object>> changes = (List<Map<String, Object>>) change.get("changes");
    for (Map<String, Object> changeDict : changes) {
      String revID = (String) changeDict.get("rev");
      if (revID == null) {
        continue;
      }

      PulledRevision rev = new PulledRevision(docID, revID, deleted);

      // Remember its remote sequence ID (opaque), and make up a numeric sequence
      // based on the order in which it appeared in the _changes feed:
      rev.setRemoteSequenceID(lastSequence);

      if (changes.size() > 1) rev.setConflicted(true);

      Log.d(Log.TAG_SYNC, "%s: adding rev to inbox %s", this, rev);

      Log.v(Log.TAG_SYNC, "%s: changeTrackerReceivedChange() incrementing changesCount by 1", this);

      // this is purposely done slightly differently than the iOS version
      addToChangesCount(1);

      addToInbox(rev);
    }

    pauseOrResume();
  }
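
  // --- Illustrative sketch (not part of the original class) ---
  // processChangeTrackerChange() above expects each change to be shaped like a CouchDB
  // _changes feed row. A representative entry built by hand (the values are made up);
  // assumes java.util.Map/HashMap/List/ArrayList are imported as elsewhere in the tests:
  private static Map<String, Object> exampleChangeRow() {
    Map<String, Object> change = new HashMap<String, Object>();
    change.put("seq", 42); // opaque remote sequence ID; only its toString() is used
    change.put("id", "doc-123"); // document ID
    change.put("deleted", Boolean.FALSE); // present and true only for deletions
    List<Map<String, Object>> changes = new ArrayList<Map<String, Object>>();
    Map<String, Object> leaf = new HashMap<String, Object>();
    leaf.put("rev", "2-abc123"); // one entry per leaf revision; more than one means a conflict
    changes.add(leaf);
    change.put("changes", changes);
    return change;
  }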
  public void testPersonaAssertion() {
    send("PUT", "/db", Status.CREATED, null);

    Map<String, Object> doc1 = new HashMap<String, Object>();
    String sampleAssertion =
        "eyJhbGciOiJSUzI1NiJ9.eyJwdWJsaWMta2V5Ijp7ImFsZ29yaXRobSI6IkRTIiwieSI6ImNhNWJiYTYzZmI4MDQ2OGE0MjFjZjgxYTIzN2VlMDcwYTJlOTM4NTY0ODhiYTYzNTM0ZTU4NzJjZjllMGUwMDk0ZWQ2NDBlOGNhYmEwMjNkYjc5ODU3YjkxMzBlZGNmZGZiNmJiNTUwMWNjNTk3MTI1Y2NiMWQ1ZWQzOTVjZTMyNThlYjEwN2FjZTM1ODRiOWIwN2I4MWU5MDQ4NzhhYzBhMjFlOWZkYmRjYzNhNzNjOTg3MDAwYjk4YWUwMmZmMDQ4ODFiZDNiOTBmNzllYzVlNDU1YzliZjM3NzFkYjEzMTcxYjNkMTA2ZjM1ZDQyZmZmZjQ2ZWZiZDcwNjgyNWQiLCJwIjoiZmY2MDA0ODNkYjZhYmZjNWI0NWVhYjc4NTk0YjM1MzNkNTUwZDlmMWJmMmE5OTJhN2E4ZGFhNmRjMzRmODA0NWFkNGU2ZTBjNDI5ZDMzNGVlZWFhZWZkN2UyM2Q0ODEwYmUwMGU0Y2MxNDkyY2JhMzI1YmE4MWZmMmQ1YTViMzA1YThkMTdlYjNiZjRhMDZhMzQ5ZDM5MmUwMGQzMjk3NDRhNTE3OTM4MDM0NGU4MmExOGM0NzkzMzQzOGY4OTFlMjJhZWVmODEyZDY5YzhmNzVlMzI2Y2I3MGVhMDAwYzNmNzc2ZGZkYmQ2MDQ2MzhjMmVmNzE3ZmMyNmQwMmUxNyIsInEiOiJlMjFlMDRmOTExZDFlZDc5OTEwMDhlY2FhYjNiZjc3NTk4NDMwOWMzIiwiZyI6ImM1MmE0YTBmZjNiN2U2MWZkZjE4NjdjZTg0MTM4MzY5YTYxNTRmNGFmYTkyOTY2ZTNjODI3ZTI1Y2ZhNmNmNTA4YjkwZTVkZTQxOWUxMzM3ZTA3YTJlOWUyYTNjZDVkZWE3MDRkMTc1ZjhlYmY2YWYzOTdkNjllMTEwYjk2YWZiMTdjN2EwMzI1OTMyOWU0ODI5YjBkMDNiYmM3ODk2YjE1YjRhZGU1M2UxMzA4NThjYzM0ZDk2MjY5YWE4OTA0MWY0MDkxMzZjNzI0MmEzODg5NWM5ZDViY2NhZDRmMzg5YWYxZDdhNGJkMTM5OGJkMDcyZGZmYTg5NjIzMzM5N2EifSwicHJpbmNpcGFsIjp7ImVtYWlsIjoiamVuc0Btb29zZXlhcmQuY29tIn0sImlhdCI6MTM1ODI5NjIzNzU3NywiZXhwIjoxMzU4MzgyNjM3NTc3LCJpc3MiOiJsb2dpbi5wZXJzb25hLm9yZyJ9.RnDK118nqL2wzpLCVRzw1MI4IThgeWpul9jPl6ypyyxRMMTurlJbjFfs-BXoPaOem878G8-4D2eGWS6wd307k7xlPysevYPogfFWxK_eDHwkTq3Ts91qEDqrdV_JtgULC8c1LvX65E0TwW_GL_TM94g3CvqoQnGVxxoaMVye4ggvR7eOZjimWMzUuu4Lo9Z-VBHBj7XM0UMBie57CpGwH4_Wkv0V_LHZRRHKdnl9ISp_aGwfBObTcHG9v0P3BW9vRrCjihIn0SqOJQ9obl52rMf84GD4Lcy9NIktzfyka70xR9Sh7ALotW7rWywsTzMTu3t8AzMz2MJgGjvQmx49QA~eyJhbGciOiJEUzEyOCJ9.eyJleHAiOjEzNTgyOTY0Mzg0OTUsImF1ZCI6Imh0dHA6Ly9sb2NhbGhvc3Q6NDk4NC8ifQ.4FV2TrUQffDya0MOxOQlzJQbDNvCPF2sfTIJN7KOLvvlSFPknuIo5g";
    doc1.put("assertion", sampleAssertion);

    Map<String, Object> result =
        (Map<String, Object>) sendBody("POST", "/_persona_assertion", doc1, Status.OK, null);
    Log.v(TAG, String.format("result %s", result));
    String email = (String) result.get("email");
    assertEquals("*****@*****.**", email);
  }
 /**
  * Removes a revision from the "pending" set after it's been uploaded. Advances checkpoint. -
  * (void) removePending: (CBL_Revision*)rev in CBLRestPusher.m
  */
 @InterfaceAudience.Private
 private void removePending(RevisionInternal revisionInternal) {
   long seq = revisionInternal.getSequence();
   if (pendingSequences == null || pendingSequences.isEmpty()) {
     Log.w(
         Log.TAG_SYNC,
         "%s: removePending() called w/ rev: %s, but pendingSequences empty",
         this,
         revisionInternal);
     if (revisionInternal.getBody() != null) revisionInternal.getBody().release();
     pauseOrResume();
     return;
   }
   boolean wasFirst = (seq == pendingSequences.first());
   if (!pendingSequences.contains(seq)) {
     Log.w(
         Log.TAG_SYNC,
         "%s: removePending: sequence %s not in set, for rev %s",
         this,
         seq,
         revisionInternal);
   }
   pendingSequences.remove(seq);
   if (wasFirst) {
     // If I removed the first pending sequence, can advance the checkpoint:
     long maxCompleted;
     if (pendingSequences.size() == 0) {
       maxCompleted = maxPendingSequence;
     } else {
       maxCompleted = pendingSequences.first();
       --maxCompleted;
     }
     setLastSequence(Long.toString(maxCompleted));
   }
   if (revisionInternal.getBody() != null) revisionInternal.getBody().release();
   pauseOrResume();
 }
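
 // --- Illustrative sketch (not part of the original class) ---
 // Worked example of the checkpoint advance in removePending(), assuming pendingSequences
 // behaves like a sorted set and maxPendingSequence is the highest sequence ever queued.
 // The names here (pending, maxPending, exampleCheckpointAfterRemoving) are local to this sketch.
 private static long exampleCheckpointAfterRemoving(long seq) {
   java.util.TreeSet<Long> pending =
       new java.util.TreeSet<Long>(java.util.Arrays.asList(3L, 5L, 9L));
   long maxPending = 9L;

   boolean wasFirst = (seq == pending.first());
   pending.remove(seq);
   if (!wasFirst) {
     return -1L; // checkpoint unchanged; e.g. removing 5 still leaves 3 pending
   }
   // Removing the first pending sequence lets the checkpoint advance to just below the new
   // first pending item, or all the way to maxPending when nothing is left:
   // removing 3 from {3, 5, 9} -> checkpoint 4 (5 - 1); removing the last element -> 9.
   return pending.isEmpty() ? maxPending : pending.first() - 1;
 }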
 public static String addLocation(Database database, Location local) {
   // Create a new document and add data
   Document document = database.createDocument();
   String documentId = document.getId();
   Map<String, Object> map = new HashMap<String, Object>();
   map.put("", "Big Party");
   map.put("location", "My House");
   try {
     // Save the properties to the document
     document.putProperties(map);
   } catch (CouchbaseLiteException e) {
     Log.e(TAG, "Error putting", e);
   }
   return documentId;
 }