public void testPushReplicate() throws Exception {

    // create a mock sync gateway that will serve as the push target
    MockDispatcher dispatcher = new MockDispatcher();
    MockWebServer server = MockHelper.getMockWebServer(dispatcher);
    dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);

    // fake a 404 checkpoint response: the remote has no saved checkpoint yet
    MockCheckpointGet mockCheckpointGet = new MockCheckpointGet();
    dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointGet);

    server.play();

    Map<String, Object> replicateJsonMap = getPushReplicationParsedJson(server.getUrl("/db"));

    Log.v(TAG, "map: " + replicateJsonMap);
    Map<String, Object> result =
        (Map<String, Object>) sendBody("POST", "/_replicate", replicateJsonMap, Status.OK, null);
    Log.v(TAG, "result: " + result);
    assertNotNull(result.get("session_id"));

    boolean success = waitForReplicationToFinish();
    assertTrue(success);

    server.shutdown();
  }
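
  // Hedged sketch (an assumption, not the original helper): getPushReplicationParsedJson()
  // used above is expected to build the standard CouchDB-style /_replicate body with a
  // local source and remote target. Key names mirror the /_replicate API; the "Sketch"
  // suffix marks this as a hypothetical reconstruction.
  private Map<String, Object> getPushReplicationParsedJsonSketch(URL remoteUrl) {
    Map<String, Object> replicateJsonMap = new HashMap<String, Object>();
    replicateJsonMap.put("source", "db"); // local database name
    replicateJsonMap.put("target", remoteUrl.toExternalForm()); // remote endpoint URL
    return replicateJsonMap;
  }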
  public void testPullReplicate() throws Exception {

    // create a mock CouchDB pull target, preloaded with no documents
    int numMockDocsToServe = 0;
    MockDispatcher dispatcher = new MockDispatcher();
    MockWebServer server =
        MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, numMockDocsToServe, 1);
    dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
    server.setDispatcher(dispatcher);
    server.play();

    // kick off replication via REST api
    Map<String, Object> replicateJsonMap = getPullReplicationParsedJson(server.getUrl("/db"));
    Log.v(TAG, "map: " + replicateJsonMap);
    Map<String, Object> result =
        (Map<String, Object>) sendBody("POST", "/_replicate", replicateJsonMap, Status.OK, null);
    Log.v(TAG, "result: " + result);
    assertNotNull(result.get("session_id"));

    // wait for replication to finish
    boolean success = waitForReplicationToFinish();
    assertTrue(success);

    // cleanup
    server.shutdown();
  }
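
  // Hedged sketch of the waitForReplicationToFinish() helper called in both tests above
  // (a hypothetical reconstruction, not the original): poll the router's /_active_tasks
  // endpoint until no replication task remains, assuming that endpoint returns an empty
  // array once replication stops.
  private boolean waitForReplicationToFinishSketch() throws Exception {
    for (int i = 0; i < 60; i++) { // give up after roughly a minute
      List<Object> tasks = (List<Object>) send("GET", "/_active_tasks", Status.OK, null);
      if (tasks.isEmpty()) {
        return true;
      }
      Thread.sleep(1000);
    }
    return false;
  }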
 /** This method is called when a part's headers have been parsed, before its data is parsed. */
 public void startedPart(Map headers) {
   if (_docReader != null) {
     throw new IllegalStateException("_docReader is already defined");
   }
   Log.v(Log.TAG_SYNC, "%s: Starting new document; headers =%s", this, headers);
   Log.v(Log.TAG_SYNC, "%s: Starting new document; ID=%s", this, headers.get("X-Doc-Id"));
   _docReader = new MultipartDocumentReader(_db);
   _docReader.setHeaders(headers);
   _docReader.startedPart(headers);
 }
  /** - (void) maybeCreateRemoteDB in CBL_Replicator.m */
  @Override
  @InterfaceAudience.Private
  protected void maybeCreateRemoteDB() {
    if (!createTarget) {
      return;
    }
    creatingTarget = true;
    Log.v(Log.TAG_SYNC, "Remote db might not exist; creating it...");

    Future future =
        sendAsyncRequest(
            "PUT",
            "",
            null,
            new RemoteRequestCompletionBlock() {

              @Override
              public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                creatingTarget = false;
                // 412 Precondition Failed means the remote db already exists,
                // which counts as success here.
                if (e != null
                    && e instanceof HttpResponseException
                    && ((HttpResponseException) e).getStatusCode() != 412) {
                  Log.e(Log.TAG_SYNC, this + ": Failed to create remote db", e);
                  setError(e);
                  triggerStopGraceful(); // this is fatal: no db to push to!
                } else {
                  Log.v(Log.TAG_SYNC, "%s: Created remote db", this);
                  createTarget = false;
                  beginReplicating();
                }
              }
            });
    pendingFutures.add(future);
  }
 private void revisionFailed(RevisionInternal rev, Throwable throwable) {
   // Give up on the revision only for permanent errors; transient failures leave it
   // in pendingSequences so it can be retried on a later pass.
   if (!Utils.isTransientError(throwable)) {
     Log.v(Log.TAG_SYNC, "%s: giving up on %s: %s", this, rev, throwable);
     pendingSequences.removeSequence(rev.getSequence());
     pauseOrResume();
   }
   completedChangesCount.getAndIncrement();
 }
 private void setPaused(boolean paused) {
   Log.v(Log.TAG, "setPaused: " + paused);
   synchronized (pausedObj) {
     if (this.paused != paused) {
       this.paused = paused;
       pausedObj.notifyAll();
     }
   }
 }
  /** This method is called when a part is complete. */
  public void finishedPart() {
    Log.v(Log.TAG_SYNC, "%s: Finished document", this);
    if (_docReader == null) {
      throw new IllegalStateException("_docReader is not defined");
    }

    _docReader.finish();
    _onDocument.onDocument(_docReader.getDocumentProperties());
    _docReader = null;
  }
 private void waitIfPaused() {
   synchronized (pausedObj) {
     while (paused) {
       Log.v(Log.TAG, "Waiting: " + paused);
        try {
          pausedObj.wait();
        } catch (InterruptedException e) {
          // ignore; the while loop re-checks the paused flag
        }
     }
   }
 }
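
 // Illustrative, self-contained version of the pause gate formed by setPaused() and
 // waitIfPaused() above: a worker calls waitIfPaused() at the top of each loop
 // iteration, and a controller flips the flag. Names here are ours, not the replicator's.
 static class PauseGateSketch {
   private final Object pausedObj = new Object();
   private boolean paused = false;

   void setPaused(boolean paused) {
     synchronized (pausedObj) {
       if (this.paused != paused) {
         this.paused = paused;
         pausedObj.notifyAll(); // wake any worker blocked below
       }
     }
   }

   void waitIfPaused() {
     synchronized (pausedObj) {
       while (paused) { // loop guards against spurious wakeups
         try {
           pausedObj.wait();
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt(); // preserve interrupt status
           return; // stop waiting if interrupted
         }
       }
     }
   }
 }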
  public void testFacebookToken() {
    send("PUT", "/db", Status.CREATED, null);

    Map<String, Object> doc1 = new HashMap<String, Object>();
    doc1.put("email", "*****@*****.**");
    doc1.put("remote_url", getReplicationURL().toExternalForm());
    doc1.put("access_token", "fake_access_token");

    Map<String, Object> result =
        (Map<String, Object>) sendBody("POST", "/_facebook_token", doc1, Status.OK, null);
    Log.v(TAG, String.format("result %s", result));
  }
  public void testPersonaAssertion() {
    send("PUT", "/db", Status.CREATED, null);

    Map<String, Object> doc1 = new HashMap<String, Object>();
    String sampleAssertion =
        "eyJhbGciOiJSUzI1NiJ9.eyJwdWJsaWMta2V5Ijp7ImFsZ29yaXRobSI6IkRTIiwieSI6ImNhNWJiYTYzZmI4MDQ2OGE0MjFjZjgxYTIzN2VlMDcwYTJlOTM4NTY0ODhiYTYzNTM0ZTU4NzJjZjllMGUwMDk0ZWQ2NDBlOGNhYmEwMjNkYjc5ODU3YjkxMzBlZGNmZGZiNmJiNTUwMWNjNTk3MTI1Y2NiMWQ1ZWQzOTVjZTMyNThlYjEwN2FjZTM1ODRiOWIwN2I4MWU5MDQ4NzhhYzBhMjFlOWZkYmRjYzNhNzNjOTg3MDAwYjk4YWUwMmZmMDQ4ODFiZDNiOTBmNzllYzVlNDU1YzliZjM3NzFkYjEzMTcxYjNkMTA2ZjM1ZDQyZmZmZjQ2ZWZiZDcwNjgyNWQiLCJwIjoiZmY2MDA0ODNkYjZhYmZjNWI0NWVhYjc4NTk0YjM1MzNkNTUwZDlmMWJmMmE5OTJhN2E4ZGFhNmRjMzRmODA0NWFkNGU2ZTBjNDI5ZDMzNGVlZWFhZWZkN2UyM2Q0ODEwYmUwMGU0Y2MxNDkyY2JhMzI1YmE4MWZmMmQ1YTViMzA1YThkMTdlYjNiZjRhMDZhMzQ5ZDM5MmUwMGQzMjk3NDRhNTE3OTM4MDM0NGU4MmExOGM0NzkzMzQzOGY4OTFlMjJhZWVmODEyZDY5YzhmNzVlMzI2Y2I3MGVhMDAwYzNmNzc2ZGZkYmQ2MDQ2MzhjMmVmNzE3ZmMyNmQwMmUxNyIsInEiOiJlMjFlMDRmOTExZDFlZDc5OTEwMDhlY2FhYjNiZjc3NTk4NDMwOWMzIiwiZyI6ImM1MmE0YTBmZjNiN2U2MWZkZjE4NjdjZTg0MTM4MzY5YTYxNTRmNGFmYTkyOTY2ZTNjODI3ZTI1Y2ZhNmNmNTA4YjkwZTVkZTQxOWUxMzM3ZTA3YTJlOWUyYTNjZDVkZWE3MDRkMTc1ZjhlYmY2YWYzOTdkNjllMTEwYjk2YWZiMTdjN2EwMzI1OTMyOWU0ODI5YjBkMDNiYmM3ODk2YjE1YjRhZGU1M2UxMzA4NThjYzM0ZDk2MjY5YWE4OTA0MWY0MDkxMzZjNzI0MmEzODg5NWM5ZDViY2NhZDRmMzg5YWYxZDdhNGJkMTM5OGJkMDcyZGZmYTg5NjIzMzM5N2EifSwicHJpbmNpcGFsIjp7ImVtYWlsIjoiamVuc0Btb29zZXlhcmQuY29tIn0sImlhdCI6MTM1ODI5NjIzNzU3NywiZXhwIjoxMzU4MzgyNjM3NTc3LCJpc3MiOiJsb2dpbi5wZXJzb25hLm9yZyJ9.RnDK118nqL2wzpLCVRzw1MI4IThgeWpul9jPl6ypyyxRMMTurlJbjFfs-BXoPaOem878G8-4D2eGWS6wd307k7xlPysevYPogfFWxK_eDHwkTq3Ts91qEDqrdV_JtgULC8c1LvX65E0TwW_GL_TM94g3CvqoQnGVxxoaMVye4ggvR7eOZjimWMzUuu4Lo9Z-VBHBj7XM0UMBie57CpGwH4_Wkv0V_LHZRRHKdnl9ISp_aGwfBObTcHG9v0P3BW9vRrCjihIn0SqOJQ9obl52rMf84GD4Lcy9NIktzfyka70xR9Sh7ALotW7rWywsTzMTu3t8AzMz2MJgGjvQmx49QA~eyJhbGciOiJEUzEyOCJ9.eyJleHAiOjEzNTgyOTY0Mzg0OTUsImF1ZCI6Imh0dHA6Ly9sb2NhbGhvc3Q6NDk4NC8ifQ.4FV2TrUQffDya0MOxOQlzJQbDNvCPF2sfTIJN7KOLvvlSFPknuIo5g";
    doc1.put("assertion", sampleAssertion);

    Map<String, Object> result =
        (Map<String, Object>) sendBody("POST", "/_persona_assertion", doc1, Status.OK, null);
    Log.v(TAG, String.format("result %s", result));
    String email = (String) result.get("email");
    assertEquals("*****@*****.**", email);
  }
  // Invokes the transformation block if one is installed and queues the resulting CBL_Revision
  private void queueDownloadedRevision(RevisionInternal rev) {

    if (revisionBodyTransformationBlock != null) {
      // Add 'file' properties to attachments pointing to their bodies:
      Map<String, Map<String, Object>> attachmentsIn =
          (Map<String, Map<String, Object>>) rev.getProperties().get("_attachments");
      if (attachmentsIn != null) {
        for (Map.Entry<String, Map<String, Object>> entry : attachmentsIn.entrySet()) {
          Map<String, Object> attachment = entry.getValue();
          attachment.remove("file");
          if (attachment.get("follows") != null && attachment.get("data") == null) {
            String filePath = db.fileForAttachmentDict(attachment).getPath();
            if (filePath != null) attachment.put("file", filePath);
          }
        }
      }

      RevisionInternal xformed = transformRevision(rev);
      if (xformed == null) {
        Log.v(Log.TAG_SYNC, "%s: Transformer rejected revision %s", this, rev);
        pendingSequences.removeSequence(rev.getSequence());
        lastSequence = pendingSequences.getCheckpointedValue();
        pauseOrResume();
        return;
      }
      rev = xformed;

      // Clean up afterwards: strip the temporary 'file' properties again
      Map<String, Map<String, Object>> attachmentsOut =
          (Map<String, Map<String, Object>>) rev.getProperties().get("_attachments");
      if (attachmentsOut != null) {
        for (Map<String, Object> attachment : attachmentsOut.values()) {
          attachment.remove("file");
        }
      }
    }

    if (rev != null && rev.getBody() != null) rev.getBody().compact();

    downloadsToInsert.queueObject(rev);
  }
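
  // Hedged sketch of a revision transformer like the one invoked via transformRevision()
  // above (the hook's exact interface is not shown in this listing, so this is
  // illustrative only): strip a property before insertion, or return null to reject
  // the revision entirely, which the caller above treats as "skip and checkpoint past it".
  private RevisionInternal transformRevisionSketch(RevisionInternal rev) {
    Map<String, Object> props = new HashMap<String, Object>(rev.getProperties());
    props.remove("server_only_field"); // hypothetical property name
    return new RevisionInternal(props); // returning null would skip this revision
  }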
  /**
   * in CBL_Puller.m - (void) changeTrackerReceivedSequence: (id)remoteSequenceID docID:
   * (NSString*)docID revIDs: (NSArray*)revIDs deleted: (BOOL)deleted
   */
  protected void processChangeTrackerChange(final Map<String, Object> change) {
    String lastSequence = change.get("seq").toString();
    String docID = (String) change.get("id");
    if (docID == null) {
      return;
    }

    if (!Document.isValidDocumentId(docID)) {
      Log.w(Log.TAG_SYNC, "%s: Received invalid doc ID from _changes: %s", this, change);
      return;
    }
    boolean deleted = Boolean.TRUE.equals(change.get("deleted"));
    List<Map<String, Object>> changes = (List<Map<String, Object>>) change.get("changes");
    for (Map<String, Object> changeDict : changes) {
      String revID = (String) changeDict.get("rev");
      if (revID == null) {
        continue;
      }

      PulledRevision rev = new PulledRevision(docID, revID, deleted);

      // Remember its remote sequence ID (opaque), and make up a numeric sequence
      // based on the order in which it appeared in the _changes feed:
      rev.setRemoteSequenceID(lastSequence);

      if (changes.size() > 1) rev.setConflicted(true);

      Log.d(Log.TAG_SYNC, "%s: adding rev to inbox %s", this, rev);

      Log.v(Log.TAG_SYNC, "%s: changeTrackerReceivedChange() incrementing changesCount by 1", this);

      // this is purposely done slightly differently than in the iOS version
      addToChangesCount(1);

      addToInbox(rev);
    }

    pauseOrResume();
  }
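
  // For reference, a single _changes row as processChangeTrackerChange() expects it;
  // the keys match exactly what the method reads above, while the values are made up:
  //   {"seq": 5, "id": "doc1", "deleted": true, "changes": [{"rev": "3-abc"}]}
  private Map<String, Object> sampleChangeRow() {
    Map<String, Object> change = new HashMap<String, Object>();
    change.put("seq", 5);
    change.put("id", "doc1");
    change.put("deleted", Boolean.TRUE);
    List<Map<String, Object>> changeList = new ArrayList<Map<String, Object>>();
    Map<String, Object> leaf = new HashMap<String, Object>();
    leaf.put("rev", "3-abc"); // two entries here would mark the doc as conflicted
    changeList.add(leaf);
    change.put("changes", changeList);
    return change;
  }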
  @Override
  protected void executeRequest(HttpClient httpClient, HttpUriRequest request) {
    Object fullBody = null;
    Throwable error = null;
    HttpResponse response = null;

    try {
      if (request.isAborted()) {
        respondWithResult(
            fullBody,
            new Exception(String.format("%s: Request %s has been aborted", this, request)),
            response);
        return;
      }

      response = httpClient.execute(request);

      try {
        // add cookies to the global store
        if (httpClient instanceof DefaultHttpClient) {
          DefaultHttpClient defaultHttpClient = (DefaultHttpClient) httpClient;
          clientFactory.addCookies(defaultHttpClient.getCookieStore().getCookies());
        }
      } catch (Exception e) {
        Log.e(Log.TAG_REMOTE_REQUEST, "Unable to add in cookies to global store", e);
      }

      StatusLine status = response.getStatusLine();
      if (status.getStatusCode() >= 300) {
        Log.e(
            Log.TAG_REMOTE_REQUEST,
            "Got error status: %d for %s.  Reason: %s",
            status.getStatusCode(),
            request,
            status.getReasonPhrase());
        error = new HttpResponseException(status.getStatusCode(), status.getReasonPhrase());
      } else {
        HttpEntity entity = null;
        try {
          entity = response.getEntity();
          if (entity != null) {
            InputStream inputStream = null;
            try {
              inputStream = entity.getContent();

              Header contentTypeHeader = entity.getContentType();
              if (contentTypeHeader != null) {
                // multipart
                if (contentTypeHeader.getValue().contains("multipart/")) {
                  Log.v(Log.TAG_SYNC, "contentTypeHeader = %s", contentTypeHeader.getValue());
                  _topReader = new MultipartReader(contentTypeHeader.getValue(), this);
                  byte[] buffer = new byte[BUF_LEN];
                  int numBytesRead = 0;
                  while ((numBytesRead = inputStream.read(buffer)) != -1) {
                    _topReader.appendData(buffer, 0, numBytesRead);
                  }
                  _topReader.finished();
                  respondWithResult(fullBody, error, response);
                }
                // non-multipart
                else {
                  Log.v(
                      Log.TAG_SYNC,
                      "contentTypeHeader is not multipart = %s",
                      contentTypeHeader.getValue());
                  GZIPInputStream gzipStream = null;
                  try {
                    // decompress if contentEncoding is gzip
                    if (Utils.isGzip(entity)) {
                      gzipStream = new GZIPInputStream(inputStream);
                      fullBody = Manager.getObjectMapper().readValue(gzipStream, Object.class);
                    } else {
                      fullBody = Manager.getObjectMapper().readValue(inputStream, Object.class);
                    }
                    respondWithResult(fullBody, error, response);
                  } finally {
                    try {
                      if (gzipStream != null) {
                        gzipStream.close();
                      }
                    } catch (IOException e) {
                      // ignore close failures
                    }
                    gzipStream = null;
                  }
                }
              }
            } finally {
              try {
                if (inputStream != null) {
                  inputStream.close();
                }
              } catch (IOException e) {
                // ignore close failures
              }
              inputStream = null;
            }
          }
        } finally {
          if (entity != null) {
            try {
              entity.consumeContent();
            } catch (IOException e) {
              // ignore; the entity is being discarded anyway
            }
          }
          entity = null;
        }
      }
    } catch (IOException e) {
      Log.e(Log.TAG_REMOTE_REQUEST, "io exception", e);
      error = e;
    } catch (Exception e) {
      Log.e(Log.TAG_REMOTE_REQUEST, "%s: executeRequest() Exception: ", e, this);
      error = e;
    } finally {
      Log.v(Log.TAG_SYNC, "%s: BulkDownloader finally block.  url: %s", this, url);
    }

    // Catch-all: report the final outcome (notably errors and the no-entity case).
    Log.v(
        Log.TAG_SYNC,
        "%s: BulkDownloader calling respondWithResult.  url: %s, error: %s",
        this,
        url,
        error);
    respondWithResult(fullBody, error, response);
  }
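
  // Utils.isGzip() is not shown in this listing; a plausible equivalent in plain Apache
  // HttpClient terms (an assumption, not the library's actual implementation) checks the
  // entity's Content-Encoding header before wrapping the stream in a GZIPInputStream:
  private static boolean looksGzipped(HttpEntity entity) {
    Header enc = entity.getContentEncoding();
    return enc != null && "gzip".equalsIgnoreCase(enc.getValue());
  }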
  /**
   * Post the revisions to the destination. "new_edits":false means that the server should use the
   * given _rev IDs instead of making up new ones.
   *
   * <p>- (void) uploadBulkDocs: (NSArray*)docsToSend changes: (CBL_RevisionList*)changes in
   * CBLRestPusher.m
   */
  @InterfaceAudience.Private
  protected void uploadBulkDocs(List<Object> docsToSend, final RevisionList changes) {

    final int numDocsToSend = docsToSend.size();
    if (numDocsToSend == 0) {
      return;
    }

    Log.v(
        Log.TAG_SYNC,
        "%s: POSTing %d revisions to _bulk_docs: %s",
        PusherInternal.this,
        numDocsToSend,
        docsToSend);
    addToChangesCount(numDocsToSend);

    Map<String, Object> bulkDocsBody = new HashMap<String, Object>();
    bulkDocsBody.put("docs", docsToSend);
    bulkDocsBody.put("new_edits", false);

    CustomFuture future =
        sendAsyncRequest(
            "POST",
            "/_bulk_docs",
            bulkDocsBody,
            new RemoteRequestCompletionBlock() {

              @Override
              public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                if (e == null) {
                  Set<String> failedIDs = new HashSet<String>();
                  // _bulk_docs response is really an array, not a dictionary!
                  List<Map<String, Object>> items = (List) result;
                  for (Map<String, Object> item : items) {
                    Status status = statusFromBulkDocsResponseItem(item);
                    if (status.isError()) {
                      // One of the docs failed to save.
                      Log.w(Log.TAG_SYNC, "%s: _bulk_docs got an error: %s", this, item);
                      // 403/Forbidden means validation failed; don't treat it as an error
                      // because I did my job in sending the revision. Other statuses are
                      // actual replication errors.
                      if (status.getCode() != Status.FORBIDDEN) {
                        String docID = (String) item.get("id");
                        failedIDs.add(docID);
                        // TODO - port from iOS
                        // NSURL* url = docID ? [_remote URLByAppendingPathComponent: docID] : nil;
                        // error = CBLStatusToNSError(status, url);
                      }
                    }
                  }

                  // Remove from the pending list all the revs that didn't fail:
                  for (RevisionInternal revisionInternal : changes) {
                    if (!failedIDs.contains(revisionInternal.getDocID())) {
                      removePending(revisionInternal);
                    }
                  }
                }
                if (e != null) {
                  setError(e);
                } else {
                  Log.v(Log.TAG_SYNC, "%s: POSTed to _bulk_docs", PusherInternal.this);
                }
                addToCompletedChangesCount(numDocsToSend);
              }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);
  }
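
  // Illustrative _bulk_docs failure entry (hand-written, not captured traffic). The
  // response is an array with one entry per doc; a "forbidden" entry presumably maps to
  // Status.FORBIDDEN, which the handler above deliberately keeps out of failedIDs.
  private void bulkDocsErrorItemExample() {
    Map<String, Object> item = new HashMap<String, Object>();
    item.put("id", "doc1");
    item.put("error", "forbidden");
    item.put("reason", "rejected by validation function"); // made-up reason
    Status status = statusFromBulkDocsResponseItem(item);
    Log.v(Log.TAG_SYNC, "mapped status: %s", status);
  }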
  /** - (void) processInbox: (CBL_RevisionList*)changes in CBLRestPusher.m */
  @Override
  @InterfaceAudience.Private
  protected void processInbox(final RevisionList changes) {

    Log.v(Log.TAG_SYNC, "processInbox() changes=" + changes.size());

    // Generate a set of doc/rev IDs in the JSON format that _revs_diff wants:
    // <http://wiki.apache.org/couchdb/HttpPostRevsDiff>
    Map<String, List<String>> diffs = new HashMap<String, List<String>>();
    for (RevisionInternal rev : changes) {
      String docID = rev.getDocID();
      List<String> revs = diffs.get(docID);
      if (revs == null) {
        revs = new ArrayList<String>();
        diffs.put(docID, revs);
      }
      revs.add(rev.getRevID());
      addPending(rev);
    }

    // Call _revs_diff on the target db:
    Log.v(Log.TAG_SYNC, "%s: posting to /_revs_diff", this);

    CustomFuture future =
        sendAsyncRequest(
            "POST",
            "/_revs_diff",
            diffs,
            new RemoteRequestCompletionBlock() {

              @Override
              public void onCompletion(HttpResponse httpResponse, Object response, Throwable e) {

                Log.v(Log.TAG_SYNC, "%s: got /_revs_diff response", this);
                Map<String, Object> results = (Map<String, Object>) response;
                if (e != null) {
                  setError(e);
                } else {
                  if (results.size() != 0) {
                    // Go through the list of local changes again, selecting the ones
                    // the destination server said were missing, and map them to the
                    // JSON dictionary that _bulk_docs wants:
                    List<Object> docsToSend = new ArrayList<Object>();
                    RevisionList revsToSend = new RevisionList();
                    long bufferedSize = 0;
                    for (RevisionInternal rev : changes) {
                      // Is this revision in the server's 'missing' list?
                      Map<String, Object> properties = null;
                      Map<String, Object> revResults =
                          (Map<String, Object>) results.get(rev.getDocID());
                      if (revResults == null) {
                        removePending(rev);
                        continue;
                      }
                      List<String> revs = (List<String>) revResults.get("missing");
                      if (revs == null || !revs.contains(rev.getRevID())) {
                        removePending(rev);
                        continue;
                      }

                      // NOTE: force Database.loadRevisionBody() to hit the database;
                      // SQLiteStore.loadRevisionBody() skips the read when
                      // sequence != 0 && body != null.
                      rev.setSequence(0);
                      rev.setBody(null);

                      RevisionInternal loadedRev;
                      try {
                        loadedRev = db.loadRevisionBody(rev);
                      } catch (CouchbaseLiteException e1) {
                        Log.w(
                            Log.TAG_SYNC,
                            "%s: Couldn't get local contents of %s",
                            PusherInternal.this,
                            rev);
                        continue;
                      }

                      RevisionInternal populatedRev = transformRevision(loadedRev);
                      loadedRev = null;

                      List<String> possibleAncestors =
                          (List<String>) revResults.get("possible_ancestors");

                      properties = new HashMap<String, Object>(populatedRev.getProperties());
                      Map<String, Object> revisions =
                          db.getRevisionHistoryDictStartingFromAnyAncestor(
                              populatedRev, possibleAncestors);
                      properties.put("_revisions", revisions);
                      populatedRev.setProperties(properties);

                      // Strip any attachments already known to the target db:
                      if (properties.containsKey("_attachments")) {
                        // Look for the latest common ancestor and stub out older attachments:
                        int minRevPos = findCommonAncestor(populatedRev, possibleAncestors);

                        Status status = new Status(Status.OK);
                        if (!db.expandAttachments(
                            populatedRev, minRevPos + 1, !dontSendMultipart, false, status)) {
                          Log.w(
                              Log.TAG_SYNC,
                              "%s: Couldn't expand attachments of %s",
                              this,
                              populatedRev);
                          continue;
                        }

                        properties = populatedRev.getProperties();
                        if (!dontSendMultipart && uploadMultipartRevision(populatedRev)) {
                          continue;
                        }
                      }

                      if (properties == null || !properties.containsKey("_id")) {
                        throw new IllegalStateException("properties must contain a document _id");
                      }

                      revsToSend.add(rev);
                      docsToSend.add(properties);

                      bufferedSize += JSONUtils.estimate(properties);
                      if (bufferedSize > kMaxBulkDocsObjectSize) {
                        uploadBulkDocs(docsToSend, revsToSend);
                        docsToSend = new ArrayList<Object>();
                        revsToSend = new RevisionList();
                        bufferedSize = 0;
                      }
                    }

                    // Post the revisions to the destination:
                    uploadBulkDocs(docsToSend, revsToSend);

                  } else {
                    // None of the revisions are new to the remote
                    for (RevisionInternal revisionInternal : changes) {
                      removePending(revisionInternal);
                    }
                  }
                }
              }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);

    pauseOrResume();
  }
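
  // Illustrative _revs_diff exchange (shapes per the CouchDB wiki link above; values made
  // up). The request maps doc IDs to the rev IDs we hold; the response lists which of
  // those the target is missing, plus ancestors it already has:
  //   request:  {"doc1": ["2-abc", "3-def"]}
  //   response: {"doc1": {"missing": ["3-def"], "possible_ancestors": ["1-xyz"]}}
  private Map<String, List<String>> sampleRevsDiffRequest() {
    Map<String, List<String>> diffs = new HashMap<String, List<String>>();
    List<String> revs = new ArrayList<String>();
    revs.add("2-abc");
    revs.add("3-def");
    diffs.put("doc1", revs);
    return diffs;
  }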
  /** Process a bunch of remote revisions from the _changes feed at once */
  @Override
  @InterfaceAudience.Private
  protected void processInbox(RevisionList inbox) {
    Log.d(Log.TAG_SYNC, "processInbox called");

    if (canBulkGet == null) {
      canBulkGet = serverIsSyncGatewayVersion("0.81");
    }

    // Remember the last remote sequence in this batch, then ask the local database
    // which of the revs are not yet known to it:
    String lastInboxSequence = ((PulledRevision) inbox.get(inbox.size() - 1)).getRemoteSequenceID();

    int numRevisionsRemoved = 0;
    try {
      // findMissingRevisions is the local equivalent of _revs_diff: it looks at the
      // revisions in 'inbox' and removes the ones that already exist locally, so
      // whatever is left afterwards are the revisions that need to be downloaded.
      numRevisionsRemoved = db.findMissingRevisions(inbox);
    } catch (SQLException e) {
      Log.e(Log.TAG_SYNC, String.format("%s failed to look up local revs", this), e);
      inbox = null;
    }

    // Java-specific guard: inbox may be null here (see the catch above).
    int inboxCount = 0;
    if (inbox != null) {
      inboxCount = inbox.size();
    }

    if (numRevisionsRemoved > 0) {
      Log.v(
          Log.TAG_SYNC,
          "%s: processInbox() setting changesCount to: %s",
          this,
          getChangesCount().get() - numRevisionsRemoved);
      // May decrease the changesCount, to account for the revisions we just found out we don't need
      // to get.
      addToChangesCount(-1 * numRevisionsRemoved);
    }

    if (inboxCount == 0) {
      // Nothing to do. Just bump the lastSequence.
      Log.w(
          Log.TAG_SYNC,
          "%s no new remote revisions to fetch.  add lastInboxSequence (%s) to pendingSequences (%s)",
          this,
          lastInboxSequence,
          pendingSequences);
      long seq = pendingSequences.addValue(lastInboxSequence);
      pendingSequences.removeSequence(seq);
      setLastSequence(pendingSequences.getCheckpointedValue());
      pauseOrResume();
      return;
    }

    Log.v(Log.TAG_SYNC, "%s: fetching %s remote revisions...", this, inboxCount);

    // Dump the revs into the queue of revs to pull from the remote db:
    for (int i = 0; i < inbox.size(); i++) {
      PulledRevision rev = (PulledRevision) inbox.get(i);
      if (canBulkGet || (rev.getGeneration() == 1 && !rev.isDeleted() && !rev.isConflicted())) {
        bulkRevsToPull.add(rev);
      } else {
        queueRemoteRevision(rev);
      }
      rev.setSequence(pendingSequences.addValue(rev.getRemoteSequenceID()));
    }
    pullRemoteRevisions();
    pauseOrResume();
  }
  public void waitForPendingFutures() {
    if (waitingForPendingFutures) {
      return;
    }

    synchronized (lockWaitForPendingFutures) {
      waitingForPendingFutures = true;

      Log.d(
          Log.TAG_SYNC,
          "[waitForPendingFutures()] STARTED - thread id: " + Thread.currentThread().getId());

      try {

        // wait for batcher's pending futures
        if (batcher != null) {
          Log.d(Log.TAG_SYNC, "batcher.waitForPendingFutures()");
          // TODO: should we call batcher.flushAll(); here?
          batcher.waitForPendingFutures();
          Log.d(Log.TAG_SYNC, "/batcher.waitForPendingFutures()");
        }

        while (!pendingFutures.isEmpty()) {
          Future future = pendingFutures.take();
          try {
            Log.d(Log.TAG_SYNC, "calling future.get() on %s", future);
            future.get();
            Log.d(Log.TAG_SYNC, "done calling future.get() on %s", future);
          } catch (InterruptedException e) {
            Log.e(Log.TAG_SYNC, "InterruptedException in Future.get()", e);
          } catch (ExecutionException e) {
            Log.e(Log.TAG_SYNC, "ExecutionException in Future.get()", e);
          }
        }

        // since it's possible that in the process of waiting for pendingFutures,
        // new items were added to the batcher, let's wait for the batcher to
        // drain again.
        if (batcher != null) {
          Log.d(Log.TAG_SYNC, "batcher.waitForPendingFutures()");
          batcher.waitForPendingFutures();
          Log.d(Log.TAG_SYNC, "/batcher.waitForPendingFutures()");
        }

        // If the pendingFutures queue is empty and the state is RUNNING, fire the trigger to IDLE.
        // NOTE: when syncing many documents, new Future tasks can still be added to the queue,
        //       which is why we re-check that it is empty here.
        if (pendingFutures.isEmpty()) {
          Log.v(Log.TAG_SYNC, "[waitForPendingFutures()] state=" + stateMachine.getState());
          if (isContinuous()) {
            // Make state IDLE
            Log.v(
                Log.TAG_SYNC,
                "[waitForPendingFutures()] fireTrigger(ReplicationTrigger.WAITING_FOR_CHANGES);");
            fireTrigger(ReplicationTrigger.WAITING_FOR_CHANGES);
          } else {
            // Make state STOPPING
            triggerStopGraceful();
          }
        }
      } catch (Exception e) {
        Log.e(Log.TAG_SYNC, "Exception waiting for pending futures: %s", e);
      } finally {
        Log.d(
            Log.TAG_SYNC,
            "[waitForPendingFutures()] END - thread id: " + Thread.currentThread().getId());
        waitingForPendingFutures = false;
      }
    }
  }
  // Get a bunch of revisions in one bulk request. Will use _bulk_get if possible.
  protected void pullBulkRevisions(List<RevisionInternal> bulkRevs) {

    int nRevs = bulkRevs.size();
    if (nRevs == 0) {
      return;
    }
    Log.v(Log.TAG_SYNC, "%s bulk-fetching %d remote revisions...", this, nRevs);
    Log.v(Log.TAG_SYNC, "%s bulk-fetching remote revisions: %s", this, bulkRevs);

    if (!canBulkGet) {
      pullBulkWithAllDocs(bulkRevs);
      return;
    }

    Log.v(Log.TAG_SYNC, "%s: POST _bulk_get", this);
    final List<RevisionInternal> remainingRevs = new ArrayList<RevisionInternal>(bulkRevs);

    ++httpConnectionCount;

    final BulkDownloader dl;
    try {

      dl =
          new BulkDownloader(
              workExecutor,
              clientFactory,
              remote,
              bulkRevs,
              db,
              this.requestHeaders,
              new BulkDownloader.BulkDownloaderDocumentBlock() {
                public void onDocument(Map<String, Object> props) {
                  // Got a revision!
                  // Find the matching revision in 'remainingRevs' and get its sequence:
                  RevisionInternal rev;
                  if (props.get("_id") != null) {
                    rev = new RevisionInternal(props);
                  } else {
                    rev =
                        new RevisionInternal(
                            (String) props.get("id"), (String) props.get("rev"), false);
                  }

                  int pos = remainingRevs.indexOf(rev);
                  if (pos > -1) {
                    rev.setSequence(remainingRevs.get(pos).getSequence());
                    remainingRevs.remove(pos);
                  } else {
                    Log.w(Log.TAG_SYNC, "%s : Received unexpected rev rev", this);
                  }

                  if (props.get("_id") != null) {
                    // Add to batcher ... eventually it will be fed to -insertRevisions:.
                    queueDownloadedRevision(rev);
                  } else {
                    Status status = statusFromBulkDocsResponseItem(props);
                    error = new CouchbaseLiteException(status);
                    revisionFailed(rev, error);
                  }
                }
              },
              new RemoteRequestCompletionBlock() {

                public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                  // The entire _bulk_get is finished:
                  if (e != null) {
                    setError(e);
                    completedChangesCount.addAndGet(remainingRevs.size());
                  }

                  --httpConnectionCount;
                  // Start another task if there are still revisions waiting to be pulled:
                  pullRemoteRevisions();
                }
              });
    } catch (Exception e) {
      Log.e(Log.TAG_SYNC, "%s: pullBulkRevisions Exception: %s", this, e);
      return;
    }

    dl.setAuthenticator(getAuthenticator());

    // set compressed request - gzip
    dl.setCompressedRequest(canSendCompressedRequests());

    synchronized (remoteRequestExecutor) {
      if (!remoteRequestExecutor.isShutdown()) {
        Future future = remoteRequestExecutor.submit(dl);
        pendingFutures.add(future);
      }
    }
  }
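
  // Illustrative _bulk_get request body (hand-written, not captured traffic): one
  // {id, rev} entry per pending revision. Sync Gateway (>= 0.81, per the canBulkGet
  // probe above) answers with a multipart response, one part per revision, which
  // BulkDownloader feeds to onDocument().
  private Map<String, Object> sampleBulkGetBody(List<RevisionInternal> revs) {
    List<Map<String, Object>> docs = new ArrayList<Map<String, Object>>();
    for (RevisionInternal rev : revs) {
      Map<String, Object> entry = new HashMap<String, Object>();
      entry.put("id", rev.getDocID());
      entry.put("rev", rev.getRevID());
      docs.add(entry);
    }
    Map<String, Object> body = new HashMap<String, Object>();
    body.put("docs", docs);
    return body;
  }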
  public void testDocs() {
    send("PUT", "/db", Status.CREATED, null);

    // PUT:
    Map<String, Object> doc1 = new HashMap<String, Object>();
    doc1.put("message", "hello");
    Map<String, Object> result =
        (Map<String, Object>) sendBody("PUT", "/db/doc1", doc1, Status.CREATED, null);
    String revID = (String) result.get("rev");
    assertTrue(revID.startsWith("1-"));

    // PUT to update:
    doc1.put("message", "goodbye");
    doc1.put("_rev", revID);
    result = (Map<String, Object>) sendBody("PUT", "/db/doc1", doc1, Status.CREATED, null);
    Log.v(TAG, String.format("PUT returned %s", result));
    revID = (String) result.get("rev");
    assertTrue(revID.startsWith("2-"));

    doc1.put("_id", "doc1");
    doc1.put("_rev", revID);
    result = (Map<String, Object>) send("GET", "/db/doc1", Status.OK, doc1);

    // Add more docs:
    Map<String, Object> docX = new HashMap<String, Object>();
    docX.put("message", "hello");
    result = (Map<String, Object>) sendBody("PUT", "/db/doc3", docX, Status.CREATED, null);
    String revID3 = (String) result.get("rev");
    result = (Map<String, Object>) sendBody("PUT", "/db/doc2", docX, Status.CREATED, null);
    String revID2 = (String) result.get("rev");

    // _all_docs:
    result = (Map<String, Object>) send("GET", "/db/_all_docs", Status.OK, null);
    assertEquals(3, result.get("total_rows"));
    assertEquals(0, result.get("offset"));

    Map<String, Object> value1 = valueMapWithRev(revID);
    Map<String, Object> value2 = valueMapWithRev(revID2);
    Map<String, Object> value3 = valueMapWithRev(revID3);

    Map<String, Object> row1 = new HashMap<String, Object>();
    row1.put("id", "doc1");
    row1.put("key", "doc1");
    row1.put("value", value1);
    Map<String, Object> row2 = new HashMap<String, Object>();
    row2.put("id", "doc2");
    row2.put("key", "doc2");
    row2.put("value", value2);
    Map<String, Object> row3 = new HashMap<String, Object>();
    row3.put("id", "doc3");
    row3.put("key", "doc3");
    row3.put("value", value3);

    List<Map<String, Object>> expectedRows = new ArrayList<Map<String, Object>>();
    expectedRows.add(row1);
    expectedRows.add(row2);
    expectedRows.add(row3);

    List<Map<String, Object>> rows = (List<Map<String, Object>>) result.get("rows");
    assertEquals(expectedRows, rows);

    // DELETE:
    result =
        (Map<String, Object>)
            send("DELETE", String.format("/db/doc1?rev=%s", revID), Status.OK, null);
    revID = (String) result.get("rev");
    assertTrue(revID.startsWith("3-"));

    send("GET", "/db/doc1", Status.NOT_FOUND, null);

    // _changes:
    List<Object> changes1 = new ArrayList<Object>();
    changes1.add(valueMapWithRevNoConflictArray(revID));
    List<Object> changes2 = new ArrayList<Object>();
    changes2.add(valueMapWithRevNoConflictArray(revID2));
    List<Object> changes3 = new ArrayList<Object>();
    changes3.add(valueMapWithRevNoConflictArray(revID3));

    Map<String, Object> result1 = new HashMap<String, Object>();
    result1.put("id", "doc1");
    result1.put("seq", 5);
    result1.put("deleted", true);
    result1.put("changes", changes1);
    Map<String, Object> result2 = new HashMap<String, Object>();
    result2.put("id", "doc2");
    result2.put("seq", 4);
    result2.put("changes", changes2);
    Map<String, Object> result3 = new HashMap<String, Object>();
    result3.put("id", "doc3");
    result3.put("seq", 3);
    result3.put("changes", changes3);

    List<Object> results = new ArrayList<Object>();
    results.add(result3);
    results.add(result2);
    results.add(result1);

    Map<String, Object> expectedChanges = new HashMap<String, Object>();
    expectedChanges.put("last_seq", 5);
    expectedChanges.put("results", results);

    send("GET", "/db/_changes", Status.OK, expectedChanges);

    // _changes with ?since:
    results.remove(result3);
    results.remove(result2);
    expectedChanges.put("results", results);
    send("GET", "/db/_changes?since=4", Status.OK, expectedChanges);

    results.remove(result1);
    expectedChanges.put("results", results);
    send("GET", "/db/_changes?since=5", Status.OK, expectedChanges);

    // Put with _deleted to delete a doc:
    Log.d(TAG, "Put with _deleted to delete a doc");
    send("GET", "/db/doc5", Status.NOT_FOUND, null);
    Map<String, Object> doc5 = new HashMap<String, Object>();
    doc5.put("message", "hello5");
    Map<String, Object> resultDoc5 =
        (Map<String, Object>) sendBody("PUT", "/db/doc5", doc5, Status.CREATED, null);
    String revIdDoc5 = (String) resultDoc5.get("rev");
    assertTrue(revIdDoc5.startsWith("1-"));
    doc5.put("_deleted", true);
    doc5.put("_rev", revIdDoc5);
    doc5.put("_id", "doc5");
    result = (Map<String, Object>) sendBody("PUT", "/db/doc5", doc5, Status.OK, null);
    send("GET", "/db/doc5", Status.NOT_FOUND, null);
    Log.d(TAG, "Finished put with _deleted to delete a doc");
  }
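
  // Hypothetical reconstruction of the helpers used above (not shown in this listing):
  // _all_docs rows carry {"value": {"rev": ...}} and _changes rows carry
  // {"changes": [{"rev": ...}]}, so both helpers plausibly reduce to the same map.
  private Map<String, Object> valueMapWithRevSketch(String revId) {
    Map<String, Object> value = new HashMap<String, Object>();
    value.put("rev", revId);
    return value;
  }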