/**
 * Fallback to upload a revision if uploadMultipartRevision failed because the server rejected
 * the multipart format.
 *
 * - (void) uploadJSONRevision: (CBL_Revision*)originalRev in CBLRestPusher.m
 */
private void uploadJsonRevision(final RevisionInternal rev) {
    // Get the revision's properties:
    if (!db.inlineFollowingAttachmentsIn(rev)) {
        setError(new CouchbaseLiteException(Status.BAD_ATTACHMENT));
        return;
    }

    final String path = String.format("/%s?new_edits=false", encodeDocumentId(rev.getDocID()));
    CustomFuture future = sendAsyncRequest(
            "PUT",
            path,
            rev.getProperties(),
            new RemoteRequestCompletionBlock() {
                public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                    if (e != null) {
                        setError(e);
                    } else {
                        Log.v(Log.TAG_SYNC, "%s: Sent %s (JSON), response=%s", this, rev, result);
                        removePending(rev);
                    }
                }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);
}
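// Illustrative sketch (not taken from this code; field values are made up): after
// inlineFollowingAttachmentsIn() succeeds, the body PUT to /<docid>?new_edits=false carries
// every attachment inline as base64 "data" instead of a "follows" stub, roughly:
//
//   {
//     "_id": "doc1",
//     "_rev": "2-abc123",
//     "_revisions": { "start": 2, "ids": ["abc123", "def456"] },
//     "_attachments": {
//       "photo.png": { "content_type": "image/png", "data": "<base64-encoded bytes>" }
//     }
//   }
//
// Inlining is what makes this fallback work without multipart support on the server, and also
// why it can produce much larger requests than uploadMultipartRevision().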
/**
 * The ID of the document described by this view row. This is not necessarily the same as the
 * document that caused this row to be emitted; see the discussion of the .sourceDocumentID
 * property for details.
 */
@InterfaceAudience.Public
public String getDocumentId() {
    // Get the doc id from either the embedded document contents, or the '_id' value key.
    // Failing that, there's no document linking, so use the regular old sourceDocID.
    String docID = null;
    if (documentRevision != null)
        docID = documentRevision.getDocID();
    if (docID == null) {
        if (value != null) {
            if (value instanceof Map) {
                Map<String, Object> props = (Map<String, Object>) value;
                docID = (String) props.get("_id");
            }
        }
    }
    if (docID == null)
        docID = sourceDocID;
    return docID;
}
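/**
 * Usage sketch (illustrative only, not part of QueryRow): how a caller might resolve the
 * target document of a row. The "query" and "database" parameters are placeholders, and the
 * snippet assumes the usual com.couchbase.lite imports.
 */
private static void resolveLinkedDocuments(Query query, Database database)
        throws CouchbaseLiteException {
    QueryEnumerator rows = query.run();
    while (rows.hasNext()) {
        QueryRow row = rows.next();
        String emittingDocId = row.getSourceDocumentId();
        // For a "linked document" (a value map containing an "_id" key) the two IDs differ;
        // otherwise getDocumentId() simply returns the emitting document's ID.
        String targetDocId = row.getDocumentId();
        Document target = database.getExistingDocument(targetDocId);
        Log.v(Log.TAG_QUERY, "Row from %s links to %s", emittingDocId, target);
    }
}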
/**
 * - (CBLMultipartWriter*)multipartWriterForRevision: (CBL_Revision*)rev in CBL_Pusher.m
 */
@InterfaceAudience.Private
private boolean uploadMultipartRevision(final RevisionInternal revision) {
    // Holds the InputStream of each blob so it can be closed after the request completes.
    final List<InputStream> streamList = new ArrayList<InputStream>();

    MultipartEntity multiPart = null;
    Map<String, Object> revProps = revision.getProperties();
    Map<String, Object> attachments = (Map<String, Object>) revProps.get("_attachments");
    for (String attachmentKey : attachments.keySet()) {
        Map<String, Object> attachment = (Map<String, Object>) attachments.get(attachmentKey);
        if (attachment.containsKey("follows")) {
            if (multiPart == null) {
                multiPart = new MultipartEntity();
                try {
                    String json = Manager.getObjectMapper().writeValueAsString(revProps);
                    Charset utf8charset = Charset.forName("UTF-8");
                    byte[] uncompressed = json.getBytes(utf8charset);
                    byte[] compressed = null;
                    byte[] data = uncompressed;
                    String contentEncoding = null;
                    if (uncompressed.length > RemoteRequest.MIN_JSON_LENGTH_TO_COMPRESS
                            && canSendCompressedRequests()) {
                        compressed = Utils.compressByGzip(uncompressed);
                        if (compressed.length < uncompressed.length) {
                            data = compressed;
                            contentEncoding = "gzip";
                        }
                    }
                    // NOTE: StringBody.contentEncoding defaults to null, so passing a null
                    // contentEncoding here has no effect.
                    multiPart.addPart(
                            "param1",
                            new StringBody(data, "application/json", utf8charset, contentEncoding));
                } catch (IOException e) {
                    throw new IllegalArgumentException(e);
                }
            }

            BlobStore blobStore = this.db.getAttachmentStore();
            String base64Digest = (String) attachment.get("digest");
            BlobKey blobKey = new BlobKey(base64Digest);
            InputStream blobStream = blobStore.blobStreamForKey(blobKey);
            if (blobStream == null) {
                Log.w(
                        Log.TAG_SYNC,
                        "Unable to load the blob stream for blobKey: %s - Skipping upload of multipart revision.",
                        blobKey);
                return false;
            } else {
                streamList.add(blobStream);
                String contentType = null;
                if (attachment.containsKey("content_type")) {
                    contentType = (String) attachment.get("content_type");
                } else if (attachment.containsKey("type")) {
                    contentType = (String) attachment.get("type");
                } else if (attachment.containsKey("content-type")) {
                    Log.w(
                            Log.TAG_SYNC,
                            "Found attachment that uses content-type"
                                    + " field name instead of content_type (see couchbase-lite-android"
                                    + " issue #80): %s",
                            attachment);
                }
                // A null contentType causes an exception in Apache's FileBody, so fall back to a default.
                if (contentType == null)
                    contentType = "application/octet-stream"; // default
                // NOTE: Setting Content-Encoding may not be necessary. Apache's FileBody does not
                // set it: getContentEncoding() always returns null, and no Content-Encoding header
                // is written for the multipart part.
                // CBL iOS:
                // https://github.com/couchbase/couchbase-lite-ios/blob/feb7ff5eda1e80bd00e5eb19f1d46c793f7a1951/Source/CBL_Pusher.m#L449-L452
                String contentEncoding = null;
                if (attachment.containsKey("encoding")) {
                    contentEncoding = (String) attachment.get("encoding");
                }
                InputStreamBody inputStreamBody =
                        new CustomStreamBody(blobStream, contentType, attachmentKey, contentEncoding);
                multiPart.addPart(attachmentKey, inputStreamBody);
            }
        }
    }

    if (multiPart == null) {
        return false;
    }

    final String path = String.format("/%s?new_edits=false", encodeDocumentId(revision.getDocID()));
    Log.d(Log.TAG_SYNC, "Uploading multipart request. Revision: %s", revision);
    addToChangesCount(1);
    CustomFuture future = sendAsyncMultipartRequest(
            "PUT",
            path,
            multiPart,
            new RemoteRequestCompletionBlock() {
                @Override
                public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                    try {
                        if (e != null) {
                            if (e instanceof HttpResponseException) {
                                // Server doesn't like multipart, eh? Fall back to JSON.
                                if (((HttpResponseException) e).getStatusCode() == 415) {
                                    // status 415 = "bad_content_type"
                                    dontSendMultipart = true;
                                    uploadJsonRevision(revision);
                                }
                            } else {
                                Log.e(Log.TAG_SYNC, "Exception uploading multipart request", e);
                                setError(e);
                            }
                        } else {
                            Log.v(Log.TAG_SYNC, "Uploaded multipart request. Revision: %s", revision);
                            removePending(revision);
                        }
                    } finally {
                        // Close all blob InputStreams now that the request has completed.
                        for (InputStream stream : streamList) {
                            try {
                                stream.close();
                            } catch (IOException ioe) {
                            }
                        }
                        addToCompletedChangesCount(1);
                    }
                }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);
    return true;
}
/**
 * - (void) processInbox: (CBL_RevisionList*)changes in CBLRestPusher.m
 */
@Override
@InterfaceAudience.Private
protected void processInbox(final RevisionList changes) {
    Log.v(Log.TAG_SYNC, "processInbox() changes=" + changes.size());

    // Generate a set of doc/rev IDs in the JSON format that _revs_diff wants:
    // <http://wiki.apache.org/couchdb/HttpPostRevsDiff>
    Map<String, List<String>> diffs = new HashMap<String, List<String>>();
    for (RevisionInternal rev : changes) {
        String docID = rev.getDocID();
        List<String> revs = diffs.get(docID);
        if (revs == null) {
            revs = new ArrayList<String>();
            diffs.put(docID, revs);
        }
        revs.add(rev.getRevID());
        addPending(rev);
    }

    // Call _revs_diff on the target db:
    Log.v(Log.TAG_SYNC, "%s: posting to /_revs_diff", this);
    CustomFuture future = sendAsyncRequest(
            "POST",
            "/_revs_diff",
            diffs,
            new RemoteRequestCompletionBlock() {
                @Override
                public void onCompletion(HttpResponse httpResponse, Object response, Throwable e) {
                    Log.v(Log.TAG_SYNC, "%s: got /_revs_diff response", this);
                    Map<String, Object> results = (Map<String, Object>) response;
                    if (e != null) {
                        setError(e);
                    } else {
                        if (results.size() != 0) {
                            // Go through the list of local changes again, selecting the ones the
                            // destination server said were missing, and mapping them to a JSON
                            // dictionary in the form that _bulk_docs wants:
                            List<Object> docsToSend = new ArrayList<Object>();
                            RevisionList revsToSend = new RevisionList();
                            long bufferedSize = 0;
                            for (RevisionInternal rev : changes) {
                                // Is this revision in the server's 'missing' list?
                                Map<String, Object> properties = null;
                                Map<String, Object> revResults =
                                        (Map<String, Object>) results.get(rev.getDocID());
                                if (revResults == null) {
                                    removePending(rev);
                                    continue;
                                }
                                List<String> revs = (List<String>) revResults.get("missing");
                                if (revs == null || !revs.contains(rev.getRevID())) {
                                    removePending(rev);
                                    continue;
                                }

                                // NOTE: Force Database.loadRevisionBody() to re-read the body:
                                // SQLiteStore.loadRevisionBody() skips the database read when
                                // sequence != 0 && body != null.
                                rev.setSequence(0);
                                rev.setBody(null);

                                RevisionInternal loadedRev;
                                try {
                                    loadedRev = db.loadRevisionBody(rev);
                                } catch (CouchbaseLiteException e1) {
                                    Log.w(
                                            Log.TAG_SYNC,
                                            "%s Couldn't get local contents of %s",
                                            PusherInternal.this,
                                            rev);
                                    continue;
                                }

                                RevisionInternal populatedRev = transformRevision(loadedRev);
                                loadedRev = null;

                                List<String> possibleAncestors =
                                        (List<String>) revResults.get("possible_ancestors");

                                properties = new HashMap<String, Object>(populatedRev.getProperties());
                                Map<String, Object> revisions =
                                        db.getRevisionHistoryDictStartingFromAnyAncestor(
                                                populatedRev, possibleAncestors);
                                properties.put("_revisions", revisions);
                                populatedRev.setProperties(properties);

                                // Strip any attachments already known to the target db:
                                if (properties.containsKey("_attachments")) {
                                    // Look for the latest common ancestor and stub out older attachments:
                                    int minRevPos = findCommonAncestor(populatedRev, possibleAncestors);
                                    Status status = new Status(Status.OK);
                                    if (!db.expandAttachments(
                                            populatedRev, minRevPos + 1, !dontSendMultipart, false, status)) {
                                        Log.w(
                                                Log.TAG_SYNC,
                                                "%s: Couldn't expand attachments of %s",
                                                this,
                                                populatedRev);
                                        continue;
                                    }
                                    properties = populatedRev.getProperties();
                                    if (!dontSendMultipart && uploadMultipartRevision(populatedRev)) {
                                        continue;
                                    }
                                }

                                if (properties == null || !properties.containsKey("_id")) {
                                    throw new IllegalStateException("properties must contain a document _id");
                                }

                                revsToSend.add(rev);
                                docsToSend.add(properties);
                                bufferedSize += JSONUtils.estimate(properties);
                                if (bufferedSize > kMaxBulkDocsObjectSize) {
                                    uploadBulkDocs(docsToSend, revsToSend);
                                    docsToSend = new ArrayList<Object>();
                                    revsToSend = new RevisionList();
                                    bufferedSize = 0;
                                }
                            }

                            // Post the revisions to the destination:
                            uploadBulkDocs(docsToSend, revsToSend);
                        } else {
                            // None of the revisions are new to the remote
                            for (RevisionInternal revisionInternal : changes) {
                                removePending(revisionInternal);
                            }
                        }
                    }
                }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);

    pauseOrResume();
}
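// Protocol sketch (shape per http://wiki.apache.org/couchdb/HttpPostRevsDiff; the IDs below are
// made up): the request maps each doc ID to the revision IDs we want to push, and the response
// tells us which of those the target is missing:
//
//   POST /_revs_diff
//   { "doc1": ["2-def456"], "doc2": ["1-aaa111"] }
//
//   200 OK
//   { "doc1": { "missing": ["2-def456"], "possible_ancestors": ["1-abc123"] } }
//
// Only revisions named in "missing" are uploaded; everything else gets removePending()
// immediately. "possible_ancestors" is used both to limit the _revisions history that is sent
// and to decide which attachments can be stubbed out via findCommonAncestor().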
/**
 * Fetches the contents of a revision from the remote db, including its parent revision ID. The
 * contents are stored into rev.properties.
 */
@InterfaceAudience.Private
public void pullRemoteRevision(final RevisionInternal rev) {
    Log.d(Log.TAG_SYNC, "%s: pullRemoteRevision with rev: %s", this, rev);

    ++httpConnectionCount;

    // Construct a query. We want the revision history, and the bodies of attachments that have
    // been added since the latest revisions we have locally.
    // See: http://wiki.apache.org/couchdb/HTTP_Document_API#Getting_Attachments_With_a_Document
    StringBuilder path = new StringBuilder("/");
    path.append(encodeDocumentId(rev.getDocID()));
    path.append("?rev=").append(URIUtils.encode(rev.getRevID()));
    path.append("&revs=true&attachments=true");

    // If the document has attachments, add an 'atts_since' param with a list of
    // already-known revisions, so the server can skip sending the bodies of any
    // attachments we already have locally:
    AtomicBoolean hasAttachment = new AtomicBoolean(false);
    List<String> knownRevs =
            db.getPossibleAncestorRevisionIDs(
                    rev, PullerInternal.MAX_NUMBER_OF_ATTS_SINCE, hasAttachment);
    if (hasAttachment.get() && knownRevs != null && knownRevs.size() > 0) {
        path.append("&atts_since=");
        path.append(joinQuotedEscaped(knownRevs));
    }

    // Create a final version of this variable for the log statement inside the callback.
    // FIXME find a way to avoid this
    final String pathInside = path.toString();
    CustomFuture future = sendAsyncMultipartDownloaderRequest(
            "GET",
            pathInside,
            null,
            db,
            new RemoteRequestCompletionBlock() {
                @Override
                public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
                    if (e != null) {
                        Log.e(Log.TAG_SYNC, "Error pulling remote revision", e);
                        revisionFailed(rev, e);
                    } else {
                        Map<String, Object> properties = (Map<String, Object>) result;
                        PulledRevision gotRev = new PulledRevision(properties);
                        gotRev.setSequence(rev.getSequence());
                        Log.d(
                                Log.TAG_SYNC,
                                "%s: pullRemoteRevision add rev: %s to batcher: %s",
                                PullerInternal.this,
                                gotRev,
                                downloadsToInsert);
                        if (gotRev.getBody() != null)
                            gotRev.getBody().compact();
                        // Add to batcher ... eventually it will be fed to -insertRevisions:.
                        downloadsToInsert.queueObject(gotRev);
                    }

                    // Note that we've finished this task:
                    --httpConnectionCount;

                    // Start another task if there are still revisions waiting to be pulled:
                    pullRemoteRevisions();
                }
            });
    future.setQueue(pendingFutures);
    pendingFutures.add(future);
}
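// Request sketch (the revision IDs are made up): a single-revision pull issues a GET roughly like
//
//   GET /doc1?rev=3-abc123&revs=true&attachments=true&atts_since=["1-xyz789","2-pqr456"]
//
// revs=true asks for the revision history, attachments=true asks for the attachment bodies, and
// atts_since (only appended when the document has attachments and local ancestor revisions
// exist) lets the server omit attachment bodies this client already has.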