public void waitForPendingFutures() {
    synchronized (lockWaitForPendingFutures) {
      if (waitingForPendingFutures) {
        return;
      }
      waitingForPendingFutures = true;
    }

    Log.d(
        Log.TAG_SYNC,
        "[PullerInternal.waitForPendingFutures()] STARTED - thread id: "
            + Thread.currentThread().getId());

    try {
      waitForAllTasksCompleted();
    } catch (Exception e) {
      Log.e(Log.TAG_SYNC, "Exception waiting for pending futures: %s", e);
    }

    fireTrigger(ReplicationTrigger.WAITING_FOR_CHANGES);

    Log.d(
        Log.TAG_SYNC,
        "[PullerInternal.waitForPendingFutures()] END - thread id: "
            + Thread.currentThread().getId());

    synchronized (lockWaitForPendingFutures) {
      waitingForPendingFutures = false;
    }
  }
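  /**
   * Creates and starts the ChangeTracker that listens to the remote _changes feed. The tracker
   * always starts in OneShot mode and, for continuous replications, switches to longpoll once it
   * has caught up. Filter name/params, document IDs, and request headers are passed through, and
   * POST is used for _changes when the server is Sync Gateway 0.93 or later.
   */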
  protected void startChangeTracker() {

    ChangeTracker.ChangeTrackerMode changeTrackerMode;

    // It always starts out as OneShot, but if it's a continuous replication
    // it will switch to longpoll later.
    changeTrackerMode = ChangeTracker.ChangeTrackerMode.OneShot;

    Log.d(
        Log.TAG_SYNC,
        "%s: starting ChangeTracker with since=%s mode=%s",
        this,
        lastSequence,
        changeTrackerMode);
    changeTracker = new ChangeTracker(remote, changeTrackerMode, true, lastSequence, this);
    changeTracker.setAuthenticator(getAuthenticator());
    Log.d(Log.TAG_SYNC, "%s: started ChangeTracker %s", this, changeTracker);

    if (filterName != null) {
      changeTracker.setFilterName(filterName);
      if (filterParams != null) {
        changeTracker.setFilterParams(filterParams);
      }
    }
    changeTracker.setDocIDs(documentIDs);
    changeTracker.setRequestHeaders(requestHeaders);
    changeTracker.setContinuous(lifecycle == Replication.Lifecycle.CONTINUOUS);

    changeTracker.setUsePOST(serverIsSyncGatewayVersion("0.93"));
    changeTracker.start();
  }
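  /**
   * Blocks until the batcher, the pendingFutures queue, and the downloadsToInsert batcher have
   * all drained. The three queues are re-checked in a loop, since completing work in one of them
   * can enqueue new work in another.
   */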
  private void waitForAllTasksCompleted() {
    // NOTE: Wait until all queues become empty
    while ((batcher != null && batcher.count() > 0)
        || (pendingFutures != null && pendingFutures.size() > 0)
        || (downloadsToInsert != null && downloadsToInsert.count() > 0)) {

      // Wait until the batcher has completed
      if (batcher != null) {
        // if the batcher delays task execution, wait the same amount of time (0.5 sec or 0 sec)
        try {
          Thread.sleep(batcher.getDelay());
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
        Log.d(Log.TAG_SYNC, "batcher.waitForPendingFutures()");
        batcher.waitForPendingFutures();
      }

      // wait until pending futures have completed
      Log.d(Log.TAG_SYNC, "waitPendingFuturesCompleted()");
      waitPendingFuturesCompleted();

      // wait until the downloadsToInsert batcher has completed
      if (downloadsToInsert != null) {
        // if the batcher delays task execution, wait the same amount of time (1.0 sec or 0 sec)
        try {
          Thread.sleep(downloadsToInsert.getDelay());
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
        Log.d(Log.TAG_SYNC, "downloadsToInsert.waitForPendingFutures()");
        downloadsToInsert.waitForPendingFutures();
      }
    }
  }
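 /**
  * Called when the ChangeTracker stops. For one-shot replications this triggers a graceful stop;
  * for continuous replications the tracker is restarted after CHANGE_TRACKER_RESTART_DELAY_MS,
  * unless the replicator is offline, stopping, or already stopped.
  */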
 private void processChangeTrackerStopped(ChangeTracker tracker) {
   Log.d(Log.TAG_SYNC, "changeTrackerStopped.  lifecycle: %s", lifecycle);
   switch (lifecycle) {
     case ONESHOT:
       // TODO: This is too early to fire STOP_GRACEFUL, Need to change.
       Log.d(Log.TAG_SYNC, "fire STOP_GRACEFUL");
       if (tracker.getLastError() != null) {
         setError(tracker.getLastError());
       }
       stateMachine.fire(ReplicationTrigger.STOP_GRACEFUL);
       break;
     case CONTINUOUS:
       if (stateMachine.isInState(ReplicationState.OFFLINE)) {
         // in this case, we don't want to do anything here, since
         // we told the change tracker to go offline ..
         Log.d(Log.TAG_SYNC, "Change tracker stopped because we are going offline");
       } else if (stateMachine.isInState(ReplicationState.STOPPING)
           || stateMachine.isInState(ReplicationState.STOPPED)) {
         Log.d(Log.TAG_SYNC, "Change tracker stopped because replicator is stopping or stopped.");
       } else {
         // otherwise, try to restart the change tracker, since it should
         // always be running in continuous replications
          String msg = "Change tracker stopped during continuous replication";
         Log.e(Log.TAG_SYNC, msg);
         parentReplication.setLastError(new Exception(msg));
         fireTrigger(ReplicationTrigger.WAITING_FOR_CHANGES);
         Log.d(
             Log.TAG_SYNC,
             "Scheduling change tracker restart in %d ms",
             CHANGE_TRACKER_RESTART_DELAY_MS);
         workExecutor.schedule(
             new Runnable() {
               @Override
               public void run() {
                  // the replication may have been stopped by the time this scheduled task fires,
                  // so we need to check the state here.
                  if (stateMachine.isInState(ReplicationState.RUNNING)) {
                    Log.d(
                        Log.TAG_SYNC,
                        "%s still running, restarting change tracker",
                        PullerInternal.this);
                    startChangeTracker();
                  } else {
                    Log.d(
                        Log.TAG_SYNC,
                        "%s no longer running, not restarting change tracker",
                        PullerInternal.this);
                  }
                 }
               }
             },
             CHANGE_TRACKER_RESTART_DELAY_MS,
             TimeUnit.MILLISECONDS);
       }
       break;
     default:
       Log.e(Log.TAG_SYNC, "Unknown lifecycle: %s", lifecycle);
   }
 }
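  /**
   * Stops the replicator. The wait for pending futures runs on a separate thread (see the
   * deadlock note below); once all tasks have completed, triggerStopImmediate() is fired.
   */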
  @Override
  protected void stop() {
    if (stateMachine.isInState(ReplicationState.STOPPED)) return;

    Log.d(Log.TAG_SYNC, "%s STOPPING...", toString());

    stopObserving();

    super.stop();

    // this has to run on a different thread than the replicator thread, or else it will deadlock:
    // it might be waiting for jobs that have been scheduled but not yet executed
    // (and which will never execute, because this call would block processing).
    new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  // wait until all tasks have completed
                  waitForPendingFutures();
                } catch (Exception e) {
                  Log.e(Log.TAG_SYNC, "stop.run() had exception: %s", e);
                } finally {
                  triggerStopImmediate();
                  Log.d(Log.TAG_SYNC, "PusherInternal stop.run() finished");
                }
              }
            })
        .start();
  }
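  /**
   * Gracefully stops the puller: waits for all queued tasks to complete, stops the change
   * tracker, and then triggers an immediate stop. Runs on a separate thread for the same
   * deadlock reason as stop().
   */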
  protected void stopGraceful() {
    super.stopGraceful();

    Log.d(Log.TAG_SYNC, "PullerInternal.stopGraceful() started");

    // this has to run on a different thread than the replicator thread, or else it will deadlock:
    // it might be waiting for jobs that have been scheduled but not yet executed
    // (and which will never execute, because this call would block processing).
    new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  // wait until all tasks have completed
                  waitForAllTasksCompleted();

                  // stop change tracker
                  if (changeTracker != null) {
                    Log.d(Log.TAG_SYNC, "stopping change tracker");
                    changeTracker.stop();
                    Log.d(Log.TAG_SYNC, "stopped change tracker");
                  }
                } catch (Exception e) {
                  Log.e(Log.TAG_SYNC, "stopGraceful.run() had exception: %s", e);
                  e.printStackTrace();
                } finally {
                  // stop replicator immediate
                  triggerStopImmediate();
                }
                Log.d(Log.TAG_SYNC, "PullerInternal stopGraceful.run() finished");
              }
            })
        .start();
  }
  /** https://github.com/couchbase/couchbase-lite-java-core/issues/106 */
  public void testResolveConflict() throws Exception {

    Map<String, Object> result;

    // Create a conflict on purpose
    Document doc = database.createDocument();
    SavedRevision rev1 = doc.createRevision().save();
    SavedRevision rev2a = createRevisionWithRandomProps(rev1, false);
    SavedRevision rev2b = createRevisionWithRandomProps(rev1, true);

    SavedRevision winningRev = null;
    SavedRevision losingRev = null;
    if (doc.getCurrentRevisionId().equals(rev2a.getId())) {
      winningRev = rev2a;
      losingRev = rev2b;
    } else {
      winningRev = rev2b;
      losingRev = rev2a;
    }

    assertEquals(2, doc.getConflictingRevisions().size());
    assertEquals(2, doc.getLeafRevisions().size());

    result =
        (Map<String, Object>)
            send(
                "GET",
                String.format("/%s/%s?conflicts=true", DEFAULT_TEST_DB, doc.getId()),
                Status.OK,
                null);
    List<String> conflicts = (List<String>) result.get("_conflicts");
    assertEquals(1, conflicts.size());
    String conflictingRevId = conflicts.get(0);
    assertEquals(losingRev.getId(), conflictingRevId);

    long docNumericID = database.getDocNumericID(doc.getId());
    assertTrue(docNumericID != 0);
    assertNotNull(database.getDocument(doc.getId()));

    Log.d(TAG, "docNumericID for " + doc.getId() + " is: " + docNumericID);

    result =
        (Map<String, Object>)
            send(
                "DELETE",
                String.format("/%s/%s?rev=%s", DEFAULT_TEST_DB, doc.getId(), conflictingRevId),
                Status.OK,
                null);

    result =
        (Map<String, Object>)
            send(
                "GET",
                String.format("/%s/%s?conflicts=true", DEFAULT_TEST_DB, doc.getId()),
                Status.OK,
                null);

    conflicts = (List<String>) result.get("_conflicts");
    assertEquals(0, conflicts.size());
  }
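 /**
  * Drains the pendingFutures queue, blocking on each Future's get(). Futures enqueued while
  * draining are waited on as well, because the loop re-checks isEmpty() on every iteration.
  */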
 private void waitPendingFuturesCompleted() {
   try {
     while (!pendingFutures.isEmpty()) {
       Future future = pendingFutures.take();
       try {
         Log.d(Log.TAG_SYNC, "calling future.get() on %s", future);
         future.get();
         Log.d(Log.TAG_SYNC, "done calling future.get() on %s", future);
        } catch (InterruptedException e) {
          Log.e(Log.TAG_SYNC, "InterruptedException in Future.get()", e);
        } catch (ExecutionException e) {
          Log.e(Log.TAG_SYNC, "ExecutionException in Future.get()", e);
        }
     }
   } catch (Exception e) {
     Log.e(Log.TAG_SYNC, "Exception waiting for pending futures: %s", e);
   }
 }
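 /**
  * Callback from the ChangeTracker, invoked for each change received from the _changes feed.
  * Delegates to processChangeTrackerChange() and rethrows any failure as a RuntimeException.
  */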
 @Override
 public void changeTrackerReceivedChange(final Map<String, Object> change) {
   try {
     Log.d(Log.TAG_SYNC, "changeTrackerReceivedChange: %s", change);
     processChangeTrackerChange(change);
   } catch (Exception e) {
     Log.e(Log.TAG_SYNC, "Error processChangeTrackerChange(): %s", e);
     throw new RuntimeException(e);
   }
 }
  /** Actual work of starting the replication process. */
  protected void beginReplicating() {
    Log.d(Log.TAG_SYNC, "startReplicating()");

    initPendingSequences();

    initDownloadsToInsert();

    startChangeTracker();

    // start replicator ..
  }
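 /**
  * Callback from the ChangeTracker, invoked once it has caught up with the remote _changes feed.
  * For continuous replications, pending work is drained on a new thread so the replicator can
  * transition to IDLE.
  */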
 @Override
 public void changeTrackerCaughtUp() {
   Log.d(Log.TAG_SYNC, "changeTrackerCaughtUp");
   // for continuous replications, once the change tracker is caught up, we
   // should try to go into the idle state.
   if (isContinuous()) {
      // this has to run on a different thread than the replicator thread, or else it will
      // deadlock: it might be waiting for jobs that have been scheduled but not yet executed
      // (and which will never execute, because this call would block processing).
     waitForPendingFuturesWithNewThread();
   }
 }
  /**
   * in CBL_Puller.m - (void) changeTrackerReceivedSequence: (id)remoteSequenceID docID:
   * (NSString*)docID revIDs: (NSArray*)revIDs deleted: (BOOL)deleted
   */
  protected void processChangeTrackerChange(final Map<String, Object> change) {
    String lastSequence = change.get("seq").toString();
    String docID = (String) change.get("id");
    if (docID == null) {
      return;
    }

    if (!Document.isValidDocumentId(docID)) {
      Log.w(Log.TAG_SYNC, "%s: Received invalid doc ID from _changes: %s", this, change);
      return;
    }
    boolean deleted =
        (change.containsKey("deleted") && ((Boolean) change.get("deleted")).equals(Boolean.TRUE));
    List<Map<String, Object>> changes = (List<Map<String, Object>>) change.get("changes");
    for (Map<String, Object> changeDict : changes) {
      String revID = (String) changeDict.get("rev");
      if (revID == null) {
        continue;
      }

      PulledRevision rev = new PulledRevision(docID, revID, deleted);

      // Remember its remote sequence ID (opaque), and make up a numeric sequence
      // based on the order in which it appeared in the _changes feed:
      rev.setRemoteSequenceID(lastSequence);

      if (changes.size() > 1) rev.setConflicted(true);

      Log.d(Log.TAG_SYNC, "%s: adding rev to inbox %s", this, rev);

      Log.v(Log.TAG_SYNC, "%s: processChangeTrackerChange() incrementing changesCount by 1", this);

      // this is purposefully done slightly different than the ios version
      addToChangesCount(1);

      addToInbox(rev);
    }

    pauseOrResume();
  }
  /** in CBL_Pusher.m - (CBLMultipartWriter*)multipartWriterForRevision: (CBL_Revision*)rev */
  @InterfaceAudience.Private
  private boolean uploadMultipartRevision(final RevisionInternal revision) {

    // holds inputStream for blob to close after using
    final List<InputStream> streamList = new ArrayList<InputStream>();

    MultipartEntity multiPart = null;

    Map<String, Object> revProps = revision.getProperties();

    Map<String, Object> attachments = (Map<String, Object>) revProps.get("_attachments");
    if (attachments == null) {
      // no attachments, so there is nothing to send as a multipart request
      return false;
    }
    for (String attachmentKey : attachments.keySet()) {
      Map<String, Object> attachment = (Map<String, Object>) attachments.get(attachmentKey);
      if (attachment.containsKey("follows")) {

        if (multiPart == null) {

          multiPart = new MultipartEntity();

          try {
            String json = Manager.getObjectMapper().writeValueAsString(revProps);
            Charset utf8charset = Charset.forName("UTF-8");
            byte[] uncompressed = json.getBytes(utf8charset);
            byte[] compressed = null;
            byte[] data = uncompressed;
            String contentEncoding = null;
            if (uncompressed.length > RemoteRequest.MIN_JSON_LENGTH_TO_COMPRESS
                && canSendCompressedRequests()) {
              compressed = Utils.compressByGzip(uncompressed);
              if (compressed.length < uncompressed.length) {
                data = compressed;
                contentEncoding = "gzip";
              }
            }
            // NOTE: StringBody.contentEncoding defaults to null, so passing a null
            // contentEncoding here has no effect.
            multiPart.addPart(
                "param1", new StringBody(data, "application/json", utf8charset, contentEncoding));
          } catch (IOException e) {
            throw new IllegalArgumentException(e);
          }
        }

        BlobStore blobStore = this.db.getAttachmentStore();
        String base64Digest = (String) attachment.get("digest");
        BlobKey blobKey = new BlobKey(base64Digest);
        InputStream blobStream = blobStore.blobStreamForKey(blobKey);
        if (blobStream == null) {
          Log.w(
              Log.TAG_SYNC,
              "Unable to load the blob stream for blobKey: %s - Skipping upload of multipart revision.",
              blobKey);
          return false;
        } else {
          streamList.add(blobStream);
          String contentType = null;
          if (attachment.containsKey("content_type")) {
            contentType = (String) attachment.get("content_type");
          } else if (attachment.containsKey("type")) {
            contentType = (String) attachment.get("type");
          } else if (attachment.containsKey("content-type")) {
            Log.w(
                Log.TAG_SYNC,
                "Found attachment that uses content-type"
                    + " field name instead of content_type (see couchbase-lite-android"
                    + " issue #80): %s",
                attachment);
          }

          // A null contentType causes an Exception in Apache's FileBody, so fall back to a default
          if (contentType == null) contentType = "application/octet-stream"; // default

          // NOTE: Content-Encoding might not be necessary to set. Apache's FileBody does not set
          //       Content-Encoding: it always returns null from getContentEncoding(), and the
          //       Content-Encoding header is not set on the multipart body.
          // CBL iOS:
          // https://github.com/couchbase/couchbase-lite-ios/blob/feb7ff5eda1e80bd00e5eb19f1d46c793f7a1951/Source/CBL_Pusher.m#L449-L452
          String contentEncoding = null;
          if (attachment.containsKey("encoding")) {
            contentEncoding = (String) attachment.get("encoding");
          }

          InputStreamBody inputStreamBody =
              new CustomStreamBody(blobStream, contentType, attachmentKey, contentEncoding);
          multiPart.addPart(attachmentKey, inputStreamBody);
        }
      }
    }

    if (multiPart == null) {
      return false;
    }

    final String path = String.format("/%s?new_edits=false", encodeDocumentId(revision.getDocID()));

    Log.d(Log.TAG_SYNC, "Uploading multipart request.  Revision: %s", revision);

    addToChangesCount(1);

    CustomFuture future =
        sendAsyncMultipartRequest(
            "PUT",
            path,
            multiPart,
            new RemoteRequestCompletionBlock() {
              @Override
              public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                try {
                  if (e != null) {
                    if (e instanceof HttpResponseException) {
                      // Server doesn't like multipart, eh? Fall back to JSON.
                      if (((HttpResponseException) e).getStatusCode() == 415) {
                        // status 415 = "bad_content_type"
                        dontSendMultipart = true;
                        uploadJsonRevision(revision);
                      }
                    } else {
                      Log.e(Log.TAG_SYNC, "Exception uploading multipart request", e);
                      setError(e);
                    }
                  } else {
                    Log.v(Log.TAG_SYNC, "Uploaded multipart request.  Revision: %s", revision);
                    removePending(revision);
                  }
                } finally {
                  // close all inputStreams for Blob
                  for (InputStream stream : streamList) {
                    try {
                      stream.close();
                    } catch (IOException ioe) {
                    }
                  }
                  addToCompletedChangesCount(1);
                }
              }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);

    return true;
  }
  /** - (void) beginReplicating in CBL_Replicator.m */
  @Override
  @InterfaceAudience.Private
  public void beginReplicating() {

    Log.d(Log.TAG_SYNC, "%s: beginReplicating() called", this);

    // If we're still waiting to create the remote db, do nothing now. (This method will be
    // re-invoked after that request finishes; see maybeCreateRemoteDB() above.)
    if (creatingTarget) {
      Log.d(Log.TAG_SYNC, "%s: creatingTarget == true, doing nothing", this);
      return;
    }

    pendingSequences = Collections.synchronizedSortedSet(new TreeSet<Long>());
    try {
      maxPendingSequence = Long.parseLong(lastSequence);
    } catch (NumberFormatException e) {
      Log.w(Log.TAG_SYNC, "Error converting lastSequence: %s to long.  Using 0", lastSequence);
      maxPendingSequence = 0L;
    }

    filter = compilePushReplicationFilter();
    if (filterName != null && filter == null) {
      Log.w(
          Log.TAG_SYNC,
          "%s: No ReplicationFilter registered for filter '%s'; ignoring",
          this,
          filterName);
    }

    // Process existing changes since the last push. maxPendingSequence already holds
    // lastSequence parsed as a long (or 0 if it could not be parsed), so reuse it here
    // instead of re-parsing, which could throw NumberFormatException:
    long lastSequenceLong = maxPendingSequence;
    ChangesOptions options = new ChangesOptions();
    options.setIncludeConflicts(true);
    Log.d(Log.TAG_SYNC, "%s: Getting changes since %s", this, lastSequence);
    RevisionList changes = db.changesSince(lastSequenceLong, options, filter, filterParams);
    if (changes.size() > 0) {
      Log.d(Log.TAG_SYNC, "%s: Queuing %d changes since %s", this, changes.size(), lastSequence);
      int remaining = changes.size();
      int size = batcher.getCapacity();
      int start = 0;
      while (remaining > 0) {
        if (size > remaining) size = remaining;
        RevisionList subChanges = new RevisionList(changes.subList(start, start + size));
        batcher.queueObjects(subChanges);
        start += size;
        remaining -= size;
        pauseOrResume();
        waitIfPaused();
      }
    } else {
      Log.d(Log.TAG_SYNC, "%s: No changes since %s", this, lastSequence);
    }

    // Now listen for future changes (in continuous mode):
    if (isContinuous()) {
      observing = true;
      db.addChangeListener(this);
    }
  }
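  /**
   * Waits for the batcher and all pendingFutures to complete, then transitions the replicator:
   * WAITING_FOR_CHANGES (IDLE) for continuous replications, or a graceful stop for one-shot.
   * The unsynchronized early return on waitingForPendingFutures is a best-effort guard so that
   * callers can skip the wait while another thread is already draining the queues.
   */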
  public void waitForPendingFutures() {
    if (waitingForPendingFutures) {
      return;
    }

    synchronized (lockWaitForPendingFutures) {
      waitingForPendingFutures = true;

      Log.d(
          Log.TAG_SYNC,
          "[waitForPendingFutures()] STARTED - thread id: " + Thread.currentThread().getId());

      try {

        // wait for batcher's pending futures
        if (batcher != null) {
          Log.d(Log.TAG_SYNC, "batcher.waitForPendingFutures()");
          // TODO: should we call batcher.flushAll(); here?
          batcher.waitForPendingFutures();
          Log.d(Log.TAG_SYNC, "/batcher.waitForPendingFutures()");
        }

        while (!pendingFutures.isEmpty()) {
          Future future = pendingFutures.take();
          try {
            Log.d(Log.TAG_SYNC, "calling future.get() on %s", future);
            future.get();
            Log.d(Log.TAG_SYNC, "done calling future.get() on %s", future);
          } catch (InterruptedException e) {
            Log.e(Log.TAG_SYNC, "InterruptedException in Future.get()", e);
          } catch (ExecutionException e) {
            Log.e(Log.TAG_SYNC, "ExecutionException in Future.get()", e);
          }
        }

        // since it's possible that in the process of waiting for pendingFutures,
        // new items were added to the batcher, let's wait for the batcher to
        // drain again.
        if (batcher != null) {
          Log.d(Log.TAG_SYNC, "batcher.waitForPendingFutures()");
          batcher.waitForPendingFutures();
          Log.d(Log.TAG_SYNC, "/batcher.waitForPendingFutures()");
        }

        // If the pendingFutures queue is empty, transition out of RUNNING: IDLE for continuous
        // replications, STOPPING for one-shot.
        // NOTE: when syncing many documents, new Future tasks may have been added to the queue
        //       in the meantime, which is why we re-check that the queue is empty.
        if (pendingFutures.isEmpty()) {
          Log.v(Log.TAG_SYNC, "[waitForPendingFutures()] state=" + stateMachine.getState());
          if (isContinuous()) {
            // Make state IDLE
            Log.v(
                Log.TAG_SYNC,
                "[waitForPendingFutures()] fireTrigger(ReplicationTrigger.WAITING_FOR_CHANGES);");
            fireTrigger(ReplicationTrigger.WAITING_FOR_CHANGES);
          } else {
            // Make state STOPPING
            triggerStopGraceful();
          }
        }
      } catch (Exception e) {
        Log.e(Log.TAG_SYNC, "Exception waiting for pending futures: %s", e);
      } finally {
        Log.d(
            Log.TAG_SYNC,
            "[waitForPendingFutures()] END - thread id: " + Thread.currentThread().getId());
        waitingForPendingFutures = false;
      }
    }
  }
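  /**
   * Exercises the document REST API end to end: PUT to create and update, GET, _all_docs,
   * DELETE, the _changes feed (with and without ?since), and deletion via a _deleted property.
   */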
  public void testDocs() {
    send("PUT", "/db", Status.CREATED, null);

    // PUT:
    Map<String, Object> doc1 = new HashMap<String, Object>();
    doc1.put("message", "hello");
    Map<String, Object> result =
        (Map<String, Object>) sendBody("PUT", "/db/doc1", doc1, Status.CREATED, null);
    String revID = (String) result.get("rev");
    assertTrue(revID.startsWith("1-"));

    // PUT to update:
    doc1.put("message", "goodbye");
    doc1.put("_rev", revID);
    result = (Map<String, Object>) sendBody("PUT", "/db/doc1", doc1, Status.CREATED, null);
    Log.v(TAG, String.format("PUT returned %s", result));
    revID = (String) result.get("rev");
    assertTrue(revID.startsWith("2-"));

    doc1.put("_id", "doc1");
    doc1.put("_rev", revID);
    result = (Map<String, Object>) send("GET", "/db/doc1", Status.OK, doc1);

    // Add more docs:
    Map<String, Object> docX = new HashMap<String, Object>();
    docX.put("message", "hello");
    result = (Map<String, Object>) sendBody("PUT", "/db/doc3", docX, Status.CREATED, null);
    String revID3 = (String) result.get("rev");
    result = (Map<String, Object>) sendBody("PUT", "/db/doc2", docX, Status.CREATED, null);
    String revID2 = (String) result.get("rev");

    // _all_docs:
    result = (Map<String, Object>) send("GET", "/db/_all_docs", Status.OK, null);
    assertEquals(3, result.get("total_rows"));
    assertEquals(0, result.get("offset"));

    Map<String, Object> value1 = valueMapWithRev(revID);
    Map<String, Object> value2 = valueMapWithRev(revID2);
    Map<String, Object> value3 = valueMapWithRev(revID3);

    Map<String, Object> row1 = new HashMap<String, Object>();
    row1.put("id", "doc1");
    row1.put("key", "doc1");
    row1.put("value", value1);
    Map<String, Object> row2 = new HashMap<String, Object>();
    row2.put("id", "doc2");
    row2.put("key", "doc2");
    row2.put("value", value2);
    Map<String, Object> row3 = new HashMap<String, Object>();
    row3.put("id", "doc3");
    row3.put("key", "doc3");
    row3.put("value", value3);

    List<Map<String, Object>> expectedRows = new ArrayList<Map<String, Object>>();
    expectedRows.add(row1);
    expectedRows.add(row2);
    expectedRows.add(row3);

    List<Map<String, Object>> rows = (List<Map<String, Object>>) result.get("rows");
    assertEquals(expectedRows, rows);

    // DELETE:
    result =
        (Map<String, Object>)
            send("DELETE", String.format("/db/doc1?rev=%s", revID), Status.OK, null);
    revID = (String) result.get("rev");
    assertTrue(revID.startsWith("3-"));

    send("GET", "/db/doc1", Status.NOT_FOUND, null);

    // _changes:
    List<Object> changes1 = new ArrayList<Object>();
    changes1.add(valueMapWithRevNoConflictArray(revID));
    List<Object> changes2 = new ArrayList<Object>();
    changes2.add(valueMapWithRevNoConflictArray(revID2));
    List<Object> changes3 = new ArrayList<Object>();
    changes3.add(valueMapWithRevNoConflictArray(revID3));

    Map<String, Object> result1 = new HashMap<String, Object>();
    result1.put("id", "doc1");
    result1.put("seq", 5);
    result1.put("deleted", true);
    result1.put("changes", changes1);
    Map<String, Object> result2 = new HashMap<String, Object>();
    result2.put("id", "doc2");
    result2.put("seq", 4);
    result2.put("changes", changes2);
    Map<String, Object> result3 = new HashMap<String, Object>();
    result3.put("id", "doc3");
    result3.put("seq", 3);
    result3.put("changes", changes3);

    List<Object> results = new ArrayList<Object>();
    results.add(result3);
    results.add(result2);
    results.add(result1);

    Map<String, Object> expectedChanges = new HashMap<String, Object>();
    expectedChanges.put("last_seq", 5);
    expectedChanges.put("results", results);

    send("GET", "/db/_changes", Status.OK, expectedChanges);

    // _changes with ?since:
    results.remove(result3);
    results.remove(result2);
    expectedChanges.put("results", results);
    send("GET", "/db/_changes?since=4", Status.OK, expectedChanges);

    results.remove(result1);
    expectedChanges.put("results", results);
    send("GET", "/db/_changes?since=5", Status.OK, expectedChanges);

    // Put with _deleted to delete a doc:
    Log.d(TAG, "Put with _deleted to delete a doc");
    send("GET", "/db/doc5", Status.NOT_FOUND, null);
    Map<String, Object> doc5 = new HashMap<String, Object>();
    doc5.put("message", "hello5");
    Map<String, Object> resultDoc5 =
        (Map<String, Object>) sendBody("PUT", "/db/doc5", doc5, Status.CREATED, null);
    String revIdDoc5 = (String) resultDoc5.get("rev");
    assertTrue(revIdDoc5.startsWith("1-"));
    doc5.put("_deleted", true);
    doc5.put("_rev", revIdDoc5);
    doc5.put("_id", "doc5");
    result = (Map<String, Object>) sendBody("PUT", "/db/doc5", doc5, Status.OK, null);
    send("GET", "/db/doc5", Status.NOT_FOUND, null);
    Log.d(TAG, "Finished put with _deleted to delete a doc");
  }
  /** Process a bunch of remote revisions from the _changes feed at once */
  @Override
  @InterfaceAudience.Private
  protected void processInbox(RevisionList inbox) {
    Log.d(Log.TAG_SYNC, "processInbox called");

    if (canBulkGet == null) {
      canBulkGet = serverIsSyncGatewayVersion("0.81");
    }

    // Ask the local database which of the revs are not known to it:
    String lastInboxSequence = ((PulledRevision) inbox.get(inbox.size() - 1)).getRemoteSequenceID();

    int numRevisionsRemoved = 0;
    try {
      // findMissingRevisions is the local equivalent of _revs_diff: it looks at the
      // array of revisions in "inbox" and removes the ones that already exist locally,
      // so whatever is left in "inbox" afterwards is the set of revisions that need
      // to be downloaded.
      numRevisionsRemoved = db.findMissingRevisions(inbox);
    } catch (SQLException e) {
      Log.e(Log.TAG_SYNC, String.format("%s failed to look up local revs", this), e);
      inbox = null;
    }

    // introduced in the Java version, since inbox may be null from this point on
    int inboxCount = 0;
    if (inbox != null) {
      inboxCount = inbox.size();
    }

    if (numRevisionsRemoved > 0) {
      Log.v(
          Log.TAG_SYNC,
          "%s: processInbox() setting changesCount to: %s",
          this,
          getChangesCount().get() - numRevisionsRemoved);
      // Decrease the changesCount to account for the revisions we just learned we don't
      // need to fetch.
      addToChangesCount(-1 * numRevisionsRemoved);
    }

    if (inboxCount == 0) {
      // Nothing to do. Just bump the lastSequence.
      Log.w(
          Log.TAG_SYNC,
          "%s no new remote revisions to fetch.  add lastInboxSequence (%s) to pendingSequences (%s)",
          this,
          lastInboxSequence,
          pendingSequences);
      long seq = pendingSequences.addValue(lastInboxSequence);
      pendingSequences.removeSequence(seq);
      setLastSequence(pendingSequences.getCheckpointedValue());
      pauseOrResume();
      return;
    }

    Log.v(Log.TAG_SYNC, "%s: fetching %s remote revisions...", this, inboxCount);

    // Dump the revs into the queue of revs to pull from the remote db:
    for (int i = 0; i < inbox.size(); i++) {
      PulledRevision rev = (PulledRevision) inbox.get(i);
      if (canBulkGet || (rev.getGeneration() == 1 && !rev.isDeleted() && !rev.isConflicted())) {
        bulkRevsToPull.add(rev);
      } else {
        queueRemoteRevision(rev);
      }
      rev.setSequence(pendingSequences.addValue(rev.getRemoteSequenceID()));
    }
    pullRemoteRevisions();
    pauseOrResume();
  }
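 /** Callback from the ChangeTracker, invoked when the tracker finishes; log-only here. */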
 @Override
 public void changeTrackerFinished(ChangeTracker tracker) {
   Log.d(Log.TAG_SYNC, "changeTrackerFinished");
 }
  /** This will be called when the downloadsToInsert batcher (_revsToInsert in CBL iOS) fills up: */
  @InterfaceAudience.Private
  public void insertDownloads(final List<RevisionInternal> downloads) {

    Log.d(Log.TAG_SYNC, this + " inserting " + downloads.size() + " revisions...");
    final long time = System.currentTimeMillis();
    Collections.sort(downloads, getRevisionListComparator());

    db.getStore()
        .runInTransaction(
            new TransactionalTask() {
              @Override
              public boolean run() {
                boolean success = false;
                try {
                  for (RevisionInternal rev : downloads) {
                    long fakeSequence = rev.getSequence();
                    List<String> history = db.parseCouchDBRevisionHistory(rev.getProperties());
                    if (history.isEmpty() && rev.getGeneration() > 1) {
                      Log.w(
                          Log.TAG_SYNC,
                          "%s: Missing revision history in response for: %s",
                          this,
                          rev);
                      setError(new CouchbaseLiteException(Status.UPSTREAM_ERROR));
                      continue;
                    }

                    Log.v(Log.TAG_SYNC, "%s: inserting %s %s", this, rev.getDocID(), history);

                    // Insert the revision
                    try {
                      db.forceInsert(rev, history, remote);
                    } catch (CouchbaseLiteException e) {
                      if (e.getCBLStatus().getCode() == Status.FORBIDDEN) {
                        Log.i(Log.TAG_SYNC, "%s: Remote rev failed validation: %s", this, rev);
                      } else {
                        Log.w(
                            Log.TAG_SYNC,
                            "%s: failed to write %s: status=%s",
                            this,
                            rev,
                            e.getCBLStatus().getCode());
                        setError(new HttpResponseException(e.getCBLStatus().getCode(), null));
                        continue;
                      }
                    }

                    // if(rev.getBody() != null) rev.getBody().release();
                    if (rev.getBody() != null) rev.getBody().compact();

                    // Mark this revision's fake sequence as processed:
                    pendingSequences.removeSequence(fakeSequence);
                  }

                  Log.v(
                      Log.TAG_SYNC, "%s: finished inserting %d revisions", this, downloads.size());
                  success = true;

                } catch (SQLException e) {
                  Log.e(Log.TAG_SYNC, this + ": Exception inserting revisions", e);
                } finally {
                  if (success) {

                    // Checkpoint:
                    setLastSequence(pendingSequences.getCheckpointedValue());

                    long delta = System.currentTimeMillis() - time;
                    Log.v(
                        Log.TAG_SYNC,
                        "%s: inserted %d revs in %d milliseconds",
                        this,
                        downloads.size(),
                        delta);

                    int newCompletedChangesCount =
                        getCompletedChangesCount().get() + downloads.size();
                    Log.d(
                        Log.TAG_SYNC,
                        "%s insertDownloads() updating completedChangesCount from %d -> %d ",
                        this,
                        getCompletedChangesCount().get(),
                        newCompletedChangesCount);

                    addToCompletedChangesCount(downloads.size());
                  }

                  pauseOrResume();
                  return success;
                }
              }
            });
  }
  /**
   * Fetches the contents of a revision from the remote db, including its parent revision ID. The
   * contents are stored into rev.properties.
   */
  @InterfaceAudience.Private
  public void pullRemoteRevision(final RevisionInternal rev) {

    Log.d(Log.TAG_SYNC, "%s: pullRemoteRevision with rev: %s", this, rev);

    ++httpConnectionCount;

    // Construct a query. We want the revision history, and the bodies of attachments that have
    // been added since the latest revisions we have locally.
    // See: http://wiki.apache.org/couchdb/HTTP_Document_API#Getting_Attachments_With_a_Document
    StringBuilder path = new StringBuilder("/");
    path.append(encodeDocumentId(rev.getDocID()));
    path.append("?rev=").append(URIUtils.encode(rev.getRevID()));
    path.append("&revs=true&attachments=true");

    // If the document has attachments, add an 'atts_since' param with a list of
    // already-known revisions, so the server can skip sending the bodies of any
    // attachments we already have locally:
    AtomicBoolean hasAttachment = new AtomicBoolean(false);
    List<String> knownRevs =
        db.getPossibleAncestorRevisionIDs(
            rev, PullerInternal.MAX_NUMBER_OF_ATTS_SINCE, hasAttachment);
    if (hasAttachment.get() && knownRevs != null && knownRevs.size() > 0) {
      path.append("&atts_since=");
      path.append(joinQuotedEscaped(knownRevs));
    }

    // create a final version of this variable for the log statement inside
    // FIXME find a way to avoid this
    final String pathInside = path.toString();
    CustomFuture future =
        sendAsyncMultipartDownloaderRequest(
            "GET",
            pathInside,
            null,
            db,
            new RemoteRequestCompletionBlock() {

              @Override
              public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                if (e != null) {
                  Log.e(Log.TAG_SYNC, "Error pulling remote revision", e);
                  revisionFailed(rev, e);
                } else {
                  Map<String, Object> properties = (Map<String, Object>) result;
                  PulledRevision gotRev = new PulledRevision(properties);
                  gotRev.setSequence(rev.getSequence());

                  Log.d(
                      Log.TAG_SYNC,
                      "%s: pullRemoteRevision add rev: %s to batcher: %s",
                      PullerInternal.this,
                      gotRev,
                      downloadsToInsert);

                  if (gotRev.getBody() != null) gotRev.getBody().compact();

                  // Add to the batcher ... eventually it will be fed to insertDownloads().
                  downloadsToInsert.queueObject(gotRev);
                }

                // Note that we've finished this task:
                --httpConnectionCount;

                // Start another task if there are still revisions waiting to be pulled:
                pullRemoteRevisions();
              }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);
  }