/**
 * - (void) dbChanged: (NSNotification*)n in CBLRestPusher.m
 */
@Override
@InterfaceAudience.Private
public void changed(Database.ChangeEvent event) {
    List<DocumentChange> changes = event.getChanges();
    try {
        java.net.URI remoteUri = remote.toURI();
        for (DocumentChange change : changes) {
            // Skip revisions that originally came from the database I'm syncing to:
            URL source = change.getSource();
            if (source != null && source.toURI().equals(remoteUri))
                return;
            RevisionInternal rev = change.getAddedRevision();
            if (getLocalDatabase().runFilter(filter, filterParams, rev)) {
                pauseOrResume();
                waitIfPaused();
                RevisionInternal nuRev = rev.copy();
                nuRev.setBody(null); // save memory
                addToInbox(nuRev);
            }
        }
    } catch (java.net.URISyntaxException uriException) {
        // Not possible since it would not be an active replicator.
        // However, until we refactor everything to use java.net,
        // I'm not sure we have a choice but to swallow this.
        Log.e(Log.TAG_SYNC, "Active replicator found with invalid URI", uriException);
    }
}
/**
 * Fallback to upload a revision if uploadMultipartRevision failed due to the server's rejecting
 * multipart format.
 * - (void) uploadJSONRevision: (CBL_Revision*)originalRev in CBLRestPusher.m
 */
private void uploadJsonRevision(final RevisionInternal rev) {
    // Get the revision's properties:
    if (!db.inlineFollowingAttachmentsIn(rev)) {
        setError(new CouchbaseLiteException(Status.BAD_ATTACHMENT));
        return;
    }

    final String path = String.format("/%s?new_edits=false", encodeDocumentId(rev.getDocID()));
    CustomFuture future = sendAsyncRequest(
            "PUT",
            path,
            rev.getProperties(),
            new RemoteRequestCompletionBlock() {
                public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                    if (e != null) {
                        setError(e);
                    } else {
                        Log.v(Log.TAG_SYNC, "%s: Sent %s (JSON), response=%s", this, rev, result);
                        removePending(rev);
                    }
                }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);
}
/**
 * Adds a local revision to the "pending" set of revisions awaiting upload.
 * - (void) addPending: (CBL_Revision*)rev in CBLRestPusher.m
 */
@InterfaceAudience.Private
private void addPending(RevisionInternal revisionInternal) {
    long seq = revisionInternal.getSequence();
    pendingSequences.add(seq);
    if (seq > maxPendingSequence) {
        maxPendingSequence = seq;
    }
}
private void revisionFailed(RevisionInternal rev, Throwable throwable) {
    if (!Utils.isTransientError(throwable)) {
        Log.v(Log.TAG_SYNC, "%s: giving up on %s: %s", this, rev, throwable);
        pendingSequences.removeSequence(rev.getSequence());
        pauseOrResume();
    }
    completedChangesCount.getAndIncrement();
}
/** Add a revision to the appropriate queue of revs to individually GET */
@InterfaceAudience.Private
protected void queueRemoteRevision(RevisionInternal rev) {
    if (rev.isDeleted()) {
        deletedRevsToPull.add(rev);
    } else {
        revsToPull.add(rev);
    }
}
/**
 * Removes a revision from the "pending" set after it's been uploaded. Advances checkpoint.
 * - (void) removePending: (CBL_Revision*)rev in CBLRestPusher.m
 */
@InterfaceAudience.Private
private void removePending(RevisionInternal revisionInternal) {
    long seq = revisionInternal.getSequence();
    if (pendingSequences == null || pendingSequences.isEmpty()) {
        Log.w(Log.TAG_SYNC, "%s: removePending() called w/ rev: %s, but pendingSequences empty",
                this, revisionInternal);
        if (revisionInternal.getBody() != null)
            revisionInternal.getBody().release();
        pauseOrResume();
        return;
    }

    boolean wasFirst = (seq == pendingSequences.first());
    if (!pendingSequences.contains(seq)) {
        Log.w(Log.TAG_SYNC, "%s: removePending: sequence %s not in set, for rev %s",
                this, seq, revisionInternal);
    }
    pendingSequences.remove(seq);

    if (wasFirst) {
        // If I removed the first pending sequence, can advance the checkpoint:
        long maxCompleted;
        if (pendingSequences.size() == 0) {
            maxCompleted = maxPendingSequence;
        } else {
            maxCompleted = pendingSequences.first();
            --maxCompleted;
        }
        setLastSequence(Long.toString(maxCompleted));
    }

    if (revisionInternal.getBody() != null)
        revisionInternal.getBody().release();
    pauseOrResume();
}
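// A minimal, self-contained sketch (hypothetical class and field names, not Couchbase Lite's own)
// of the bookkeeping addPending()/removePending() perform on the sorted pendingSequences set:
// the checkpoint only advances when the lowest pending sequence is removed.
class PendingSequenceSketch {
    private final java.util.TreeSet<Long> pending = new java.util.TreeSet<Long>();
    private long maxPending = 0;
    private long checkpoint = 0;

    void add(long seq) {               // mirrors addPending()
        pending.add(seq);
        if (seq > maxPending) maxPending = seq;
    }

    void remove(long seq) {            // mirrors removePending()
        boolean wasFirst = !pending.isEmpty() && seq == pending.first();
        pending.remove(seq);
        if (wasFirst) {
            // Everything below the new lowest pending sequence has been uploaded, so the
            // checkpoint can advance to just below it (or to maxPending if nothing is pending).
            checkpoint = pending.isEmpty() ? maxPending : pending.first() - 1;
        }
    }

    long checkpoint() { return checkpoint; }
}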
/** The revision ID of the document this row was mapped from. */
@InterfaceAudience.Public
public String getDocumentRevisionId() {
    // Get the revision id from either the embedded document contents,
    // or the '_rev' or 'rev' value key:
    String rev = null;
    if (documentRevision != null)
        rev = documentRevision.getRevID();
    if (rev == null) {
        if (value instanceof Map) {
            Map<String, Object> mapValue = (Map<String, Object>) value;
            rev = (String) mapValue.get("_rev");
            if (rev == null) {
                rev = (String) mapValue.get("rev");
            }
        }
    }
    return rev;
}
/**
 * The ID of the document described by this view row. This is not necessarily the same as the
 * document that caused this row to be emitted; see the discussion of getSourceDocumentId()
 * for details.
 */
@InterfaceAudience.Public
public String getDocumentId() {
    // Get the doc id from either the embedded document contents, or the '_id' value key.
    // Failing that, there's no document linking, so use the regular old _sourceDocID.
    String docID = null;
    if (documentRevision != null)
        docID = documentRevision.getDocID();
    if (docID == null) {
        if (value != null) {
            if (value instanceof Map) {
                Map<String, Object> props = (Map<String, Object>) value;
                docID = (String) props.get("_id");
            }
        }
    }
    if (docID == null)
        docID = sourceDocID;
    return docID;
}
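// A hedged usage sketch of the "linked documents" convention the two getters above support:
// emitting a value map that carries an "_id" key makes getDocumentId() return that linked
// document's ID, while the mapped document remains available via getSourceDocumentId().
// The view setup, document fields, and IDs below are illustrative, not from the source.
view.setMap(new Mapper() {
    @Override
    public void map(Map<String, Object> document, Emitter emitter) {
        if ("pet".equals(document.get("type"))) {
            Map<String, Object> link = new HashMap<String, Object>();
            link.put("_id", document.get("ownerId")); // e.g. "owner-123"
            emitter.emit(document.get("name"), link); // row's documentId -> "owner-123"
        }
    }
}, "1");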
/**
 * Given a revision and an array of revIDs, finds the latest common ancestor revID and returns its
 * generation #. If there is none, returns 0.
 *
 * <p>int CBLFindCommonAncestor(CBL_Revision* rev, NSArray* possibleRevIDs) in CBLRestPusher.m
 */
private static int findCommonAncestor(RevisionInternal rev, List<String> possibleRevIDs) {
    if (possibleRevIDs == null || possibleRevIDs.size() == 0) {
        return 0;
    }

    List<String> history = Database.parseCouchDBRevisionHistory(rev.getProperties());
    // rev is missing _revisions property
    assert (history != null);

    boolean changed = history.retainAll(possibleRevIDs);
    String ancestorID = history.size() == 0 ? null : history.get(0);
    if (ancestorID == null) {
        return 0;
    }
    int generation = RevisionUtils.parseRevIDNumber(ancestorID);
    return generation;
}
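// Worked example (revision IDs are made up): the history returned by parseCouchDBRevisionHistory()
// is ordered newest-first, so after retainAll() the first surviving entry is the latest ancestor
// the remote already has.
//
//   history        = ["4-d", "3-c", "2-b", "1-a"]
//   possibleRevIDs = ["2-b", "1-a"]
//   retainAll()    -> ["2-b", "1-a"]  => ancestor "2-b" => generation 2 is returned
//
// The same list logic, stand-alone:
List<String> exampleHistory = new ArrayList<String>(java.util.Arrays.asList("4-d", "3-c", "2-b", "1-a"));
exampleHistory.retainAll(java.util.Arrays.asList("2-b", "1-a"));
int exampleGeneration = Integer.parseInt(exampleHistory.get(0).split("-")[0]); // == 2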
// This invokes the transformation block if one is installed and queues the resulting CBL_Revision
private void queueDownloadedRevision(RevisionInternal rev) {
    if (revisionBodyTransformationBlock != null) {
        // Add 'file' properties to attachments pointing to their bodies:
        for (Map.Entry<String, Map<String, Object>> entry :
                ((Map<String, Map<String, Object>>) rev.getProperties().get("_attachments")).entrySet()) {
            String name = entry.getKey();
            Map<String, Object> attachment = entry.getValue();
            attachment.remove("file");
            if (attachment.get("follows") != null && attachment.get("data") == null) {
                String filePath = db.fileForAttachmentDict(attachment).getPath();
                if (filePath != null)
                    attachment.put("file", filePath);
            }
        }

        RevisionInternal xformed = transformRevision(rev);
        if (xformed == null) {
            Log.v(Log.TAG_SYNC, "%s: Transformer rejected revision %s", this, rev);
            pendingSequences.removeSequence(rev.getSequence());
            lastSequence = pendingSequences.getCheckpointedValue();
            pauseOrResume();
            return;
        }
        rev = xformed;

        // Clean up afterwards
        Map<String, Object> attachments = (Map<String, Object>) rev.getProperties().get("_attachments");
        for (Map.Entry<String, Map<String, Object>> entry :
                ((Map<String, Map<String, Object>>) rev.getProperties().get("_attachments")).entrySet()) {
            Map<String, Object> attachment = entry.getValue();
            attachment.remove("file");
        }
    }

    if (rev != null && rev.getBody() != null)
        rev.getBody().compact();
    downloadsToInsert.queueObject(rev);
}
/**
 * in CBL_Pusher.m
 * - (CBLMultipartWriter*)multipartWriterForRevision: (CBL_Revision*)rev
 */
@InterfaceAudience.Private
private boolean uploadMultipartRevision(final RevisionInternal revision) {
    // holds inputStream for blob to close after using
    final List<InputStream> streamList = new ArrayList<InputStream>();

    MultipartEntity multiPart = null;
    Map<String, Object> revProps = revision.getProperties();
    Map<String, Object> attachments = (Map<String, Object>) revProps.get("_attachments");
    for (String attachmentKey : attachments.keySet()) {
        Map<String, Object> attachment = (Map<String, Object>) attachments.get(attachmentKey);
        if (attachment.containsKey("follows")) {
            if (multiPart == null) {
                multiPart = new MultipartEntity();
                try {
                    String json = Manager.getObjectMapper().writeValueAsString(revProps);
                    Charset utf8charset = Charset.forName("UTF-8");
                    byte[] uncompressed = json.getBytes(utf8charset);
                    byte[] compressed = null;
                    byte[] data = uncompressed;
                    String contentEncoding = null;
                    if (uncompressed.length > RemoteRequest.MIN_JSON_LENGTH_TO_COMPRESS
                            && canSendCompressedRequests()) {
                        compressed = Utils.compressByGzip(uncompressed);
                        if (compressed.length < uncompressed.length) {
                            data = compressed;
                            contentEncoding = "gzip";
                        }
                    }
                    // NOTE: StringBody.contentEncoding defaults to null, so passing a null
                    //       contentEncoding has no effect.
                    multiPart.addPart("param1",
                            new StringBody(data, "application/json", utf8charset, contentEncoding));
                } catch (IOException e) {
                    throw new IllegalArgumentException(e);
                }
            }

            BlobStore blobStore = this.db.getAttachmentStore();
            String base64Digest = (String) attachment.get("digest");
            BlobKey blobKey = new BlobKey(base64Digest);
            InputStream blobStream = blobStore.blobStreamForKey(blobKey);
            if (blobStream == null) {
                Log.w(Log.TAG_SYNC,
                        "Unable to load the blob stream for blobKey: %s - Skipping upload of multipart revision.",
                        blobKey);
                return false;
            } else {
                streamList.add(blobStream);
                String contentType = null;
                if (attachment.containsKey("content_type")) {
                    contentType = (String) attachment.get("content_type");
                } else if (attachment.containsKey("type")) {
                    contentType = (String) attachment.get("type");
                } else if (attachment.containsKey("content-type")) {
                    Log.w(Log.TAG_SYNC, "Found attachment that uses content-type"
                            + " field name instead of content_type (see couchbase-lite-android"
                            + " issue #80): %s", attachment);
                }

                // contentType = null causes an Exception from Apache's FileBody.
                if (contentType == null)
                    contentType = "application/octet-stream"; // default

                // NOTE: Content-Encoding might not be necessary to set. Apache FileBody does not
                //       set Content-Encoding: it always returns null from getContentEncoding(),
                //       and no Content-Encoding header is set on the multipart part.
                // CBL iOS:
                // https://github.com/couchbase/couchbase-lite-ios/blob/feb7ff5eda1e80bd00e5eb19f1d46c793f7a1951/Source/CBL_Pusher.m#L449-L452
                String contentEncoding = null;
                if (attachment.containsKey("encoding")) {
                    contentEncoding = (String) attachment.get("encoding");
                }

                InputStreamBody inputStreamBody =
                        new CustomStreamBody(blobStream, contentType, attachmentKey, contentEncoding);
                multiPart.addPart(attachmentKey, inputStreamBody);
            }
        }
    }

    if (multiPart == null) {
        return false;
    }

    final String path = String.format("/%s?new_edits=false", encodeDocumentId(revision.getDocID()));

    Log.d(Log.TAG_SYNC, "Uploading multipart request. Revision: %s", revision);

    addToChangesCount(1);

    CustomFuture future = sendAsyncMultipartRequest(
            "PUT",
            path,
            multiPart,
            new RemoteRequestCompletionBlock() {
                @Override
                public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                    try {
                        if (e != null) {
                            if (e instanceof HttpResponseException) {
                                // Server doesn't like multipart, eh? Fall back to JSON.
                                if (((HttpResponseException) e).getStatusCode() == 415) {
                                    // status 415 = "bad_content_type"
                                    dontSendMultipart = true;
                                    uploadJsonRevision(revision);
                                }
                            } else {
                                Log.e(Log.TAG_SYNC, "Exception uploading multipart request", e);
                                setError(e);
                            }
                        } else {
                            Log.v(Log.TAG_SYNC, "Uploaded multipart request. Revision: %s", revision);
                            removePending(revision);
                        }
                    } finally {
                        // close all inputStreams for Blob
                        for (InputStream stream : streamList) {
                            try {
                                stream.close();
                            } catch (IOException ioe) {
                            }
                        }
                        addToCompletedChangesCount(1);
                    }
                }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);

    return true;
}
/**
 * - (void) processInbox: (CBL_RevisionList*)changes in CBLRestPusher.m
 */
@Override
@InterfaceAudience.Private
protected void processInbox(final RevisionList changes) {
    Log.v(Log.TAG_SYNC, "processInbox() changes=" + changes.size());

    // Generate a set of doc/rev IDs in the JSON format that _revs_diff wants:
    // <http://wiki.apache.org/couchdb/HttpPostRevsDiff>
    Map<String, List<String>> diffs = new HashMap<String, List<String>>();
    for (RevisionInternal rev : changes) {
        String docID = rev.getDocID();
        List<String> revs = diffs.get(docID);
        if (revs == null) {
            revs = new ArrayList<String>();
            diffs.put(docID, revs);
        }
        revs.add(rev.getRevID());
        addPending(rev);
    }

    // Call _revs_diff on the target db:
    Log.v(Log.TAG_SYNC, "%s: posting to /_revs_diff", this);

    CustomFuture future = sendAsyncRequest(
            "POST",
            "/_revs_diff",
            diffs,
            new RemoteRequestCompletionBlock() {
                @Override
                public void onCompletion(HttpResponse httpResponse, Object response, Throwable e) {
                    Log.v(Log.TAG_SYNC, "%s: got /_revs_diff response", this);
                    Map<String, Object> results = (Map<String, Object>) response;

                    if (e != null) {
                        setError(e);
                    } else {
                        if (results.size() != 0) {
                            // Go through the list of local changes again, selecting the ones the
                            // destination server said were missing and mapping them to a JSON
                            // dictionary in the form _bulk_docs wants:
                            List<Object> docsToSend = new ArrayList<Object>();
                            RevisionList revsToSend = new RevisionList();
                            long bufferedSize = 0;
                            for (RevisionInternal rev : changes) {
                                // Is this revision in the server's 'missing' list?
                                Map<String, Object> properties = null;
                                Map<String, Object> revResults =
                                        (Map<String, Object>) results.get(rev.getDocID());
                                if (revResults == null) {
                                    removePending(rev);
                                    continue;
                                }
                                List<String> revs = (List<String>) revResults.get("missing");
                                if (revs == null || !revs.contains(rev.getRevID())) {
                                    removePending(rev);
                                    continue;
                                }

                                // NOTE: force Database.loadRevisionBody() to load the body:
                                //       SQLiteStore.loadRevisionBody() does not read from the
                                //       database if sequence != 0 && body != null
                                rev.setSequence(0);
                                rev.setBody(null);

                                RevisionInternal loadedRev;
                                try {
                                    loadedRev = db.loadRevisionBody(rev);
                                } catch (CouchbaseLiteException e1) {
                                    Log.w(Log.TAG_SYNC, "%s Couldn't get local contents of %s",
                                            rev, PusherInternal.this);
                                    continue;
                                }

                                RevisionInternal populatedRev = transformRevision(loadedRev);
                                loadedRev = null;

                                List<String> possibleAncestors =
                                        (List<String>) revResults.get("possible_ancestors");

                                properties = new HashMap<String, Object>(populatedRev.getProperties());
                                Map<String, Object> revisions =
                                        db.getRevisionHistoryDictStartingFromAnyAncestor(
                                                populatedRev, possibleAncestors);
                                properties.put("_revisions", revisions);
                                populatedRev.setProperties(properties);

                                // Strip any attachments already known to the target db:
                                if (properties.containsKey("_attachments")) {
                                    // Look for the latest common ancestor and stub out older attachments:
                                    int minRevPos = findCommonAncestor(populatedRev, possibleAncestors);
                                    Status status = new Status(Status.OK);
                                    if (!db.expandAttachments(populatedRev, minRevPos + 1,
                                            !dontSendMultipart, false, status)) {
                                        Log.w(Log.TAG_SYNC, "%s: Couldn't expand attachments of %s",
                                                this, populatedRev);
                                        continue;
                                    }

                                    properties = populatedRev.getProperties();
                                    if (!dontSendMultipart && uploadMultipartRevision(populatedRev)) {
                                        continue;
                                    }
                                }

                                if (properties == null || !properties.containsKey("_id")) {
                                    throw new IllegalStateException("properties must contain a document _id");
                                }

                                revsToSend.add(rev);
                                docsToSend.add(properties);

                                bufferedSize += JSONUtils.estimate(properties);
                                if (bufferedSize > kMaxBulkDocsObjectSize) {
                                    uploadBulkDocs(docsToSend, revsToSend);
                                    docsToSend = new ArrayList<Object>();
                                    revsToSend = new RevisionList();
                                    bufferedSize = 0;
                                }
                            }

                            // Post the revisions to the destination:
                            uploadBulkDocs(docsToSend, revsToSend);
                        } else {
                            // None of the revisions are new to the remote
                            for (RevisionInternal revisionInternal : changes) {
                                removePending(revisionInternal);
                            }
                        }
                    }
                }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);

    pauseOrResume();
}
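/*
 * For reference, the _revs_diff exchange above looks roughly like this (document and revision
 * IDs are made up). The request body is the `diffs` map built from the inbox; the response
 * tells the pusher which revisions the target is missing and which ancestors it already has:
 *
 *   POST /_revs_diff
 *   {"doc1": ["2-abc", "3-def"], "doc2": ["1-xyz"]}
 *
 *   200 OK
 *   {"doc1": {"missing": ["3-def"], "possible_ancestors": ["2-abc"]},
 *    "doc2": {"missing": ["1-xyz"]}}
 */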
/**
 * Fetches the contents of a revision from the remote db, including its parent revision ID. The
 * contents are stored into rev.properties.
 */
@InterfaceAudience.Private
public void pullRemoteRevision(final RevisionInternal rev) {
    Log.d(Log.TAG_SYNC, "%s: pullRemoteRevision with rev: %s", this, rev);

    ++httpConnectionCount;

    // Construct a query. We want the revision history, and the bodies of attachments that have
    // been added since the latest revisions we have locally.
    // See: http://wiki.apache.org/couchdb/HTTP_Document_API#Getting_Attachments_With_a_Document
    StringBuilder path = new StringBuilder("/");
    path.append(encodeDocumentId(rev.getDocID()));
    path.append("?rev=").append(URIUtils.encode(rev.getRevID()));
    path.append("&revs=true&attachments=true");

    // If the document has attachments, add an 'atts_since' param with a list of
    // already-known revisions, so the server can skip sending the bodies of any
    // attachments we already have locally:
    AtomicBoolean hasAttachment = new AtomicBoolean(false);
    List<String> knownRevs = db.getPossibleAncestorRevisionIDs(
            rev, PullerInternal.MAX_NUMBER_OF_ATTS_SINCE, hasAttachment);
    if (hasAttachment.get() && knownRevs != null && knownRevs.size() > 0) {
        path.append("&atts_since=");
        path.append(joinQuotedEscaped(knownRevs));
    }

    // create a final version of this variable for the log statement inside
    // FIXME find a way to avoid this
    final String pathInside = path.toString();

    CustomFuture future = sendAsyncMultipartDownloaderRequest(
            "GET",
            pathInside,
            null,
            db,
            new RemoteRequestCompletionBlock() {
                @Override
                public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                    if (e != null) {
                        Log.e(Log.TAG_SYNC, "Error pulling remote revision", e);
                        revisionFailed(rev, e);
                    } else {
                        Map<String, Object> properties = (Map<String, Object>) result;
                        PulledRevision gotRev = new PulledRevision(properties);
                        gotRev.setSequence(rev.getSequence());

                        Log.d(Log.TAG_SYNC, "%s: pullRemoteRevision add rev: %s to batcher: %s",
                                PullerInternal.this, gotRev, downloadsToInsert);

                        if (gotRev.getBody() != null)
                            gotRev.getBody().compact();

                        // Add to batcher ... eventually it will be fed to -insertRevisions:.
                        downloadsToInsert.queueObject(gotRev);
                    }

                    // Note that we've finished this task:
                    --httpConnectionCount;

                    // Start another task if there are still revisions waiting to be pulled:
                    pullRemoteRevisions();
                }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);
}
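/*
 * For reference, the GET built above ends up looking roughly like this (IDs are illustrative):
 *
 *   GET /doc123?rev=3-abc&revs=true&attachments=true&atts_since=["1-xyz","2-def"]
 *
 * revs=true asks the server to include the revision history (_revisions), attachments=true
 * inlines attachment bodies, and atts_since lets the server skip bodies of attachments already
 * present in the listed ancestor revisions.
 */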
/**
 * The properties of the document this row was mapped from. To get this, you must have enabled
 * prefetch on the query; otherwise this will be null. The map returned is immutable (wrapped in
 * Collections.unmodifiableMap).
 */
@InterfaceAudience.Public
public Map<String, Object> getDocumentProperties() {
    return documentRevision != null ? documentRevision.getProperties() : null;
}