protected void doBatch() throws KettleException {
  WriteConcern concern = null;
  if (log.getLogLevel().getLevel() >= LogLevel.DETAILED.getLevel()) {
    concern = new WriteConcern(1);
  }

  WriteResult result = null;
  if (concern != null) {
    result = m_data.getCollection().insert(m_batch, concern);
  } else {
    result = m_data.getCollection().insert(m_batch);
  }

  CommandResult cmd = result.getLastError();
  if (cmd != null && !cmd.ok()) {
    String message = cmd.getErrorMessage();
    logError(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.MongoReported", message));
    try {
      cmd.throwOnError();
    } catch (MongoException me) {
      throw new KettleException(me.getMessage(), me);
    }
  }

  m_batch.clear();
}
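For reference, the insert-then-check pattern above can be seen in isolation. The following is a minimal standalone sketch, assuming the legacy 2.x Java driver; the host, port, database, and collection names are placeholders:

import com.mongodb.*;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;

public class BatchInsertCheck {
  public static void main(String[] args) throws UnknownHostException {
    Mongo mongo = new Mongo("localhost", 27017); // placeholder host/port
    DBCollection coll = mongo.getDB("test").getCollection("demo");

    List<DBObject> batch = new ArrayList<DBObject>();
    batch.add(new BasicDBObject("x", 1));

    // WriteConcern(1) waits for acknowledgement from the primary, so
    // getLastError() reflects the outcome of this particular insert.
    WriteResult result = coll.insert(batch, new WriteConcern(1));
    CommandResult cmd = result.getLastError();
    if (cmd != null && !cmd.ok()) {
      cmd.throwOnError(); // raises MongoException carrying the server's error
    }

    batch.clear();
    mongo.close();
  }
}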
protected void throwOnError(WriteConcern wc, WriteResult wr) {
  if (wc == null && wr.getLastConcern() == null) {
    CommandResult cr = wr.getLastError();
    if (cr != null && cr.getErrorMessage() != null && cr.getErrorMessage().length() > 0) {
      cr.throwOnError();
    }
  }
}
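A hedged usage sketch for the helper above: the caller name and the `collection` parameter are placeholders, and the helper is assumed to consult getLastError() only when no explicit WriteConcern was in play on either side:

// Hypothetical caller: insert without a concern, then check the outcome manually.
void insertAndVerify(DBCollection collection) {
  WriteConcern wc = null; // caller supplied no concern
  WriteResult wr = collection.insert(new BasicDBObject("x", 1));
  throwOnError(wc, wr);   // escalates to MongoException only if the server reported an error
}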
@Override
public Role2 add(Role2 roleBaru) {
  final DBObject dbObject = new EntityToDBObject().apply(roleBaru);
  final WriteResult result = rolePersonColl.insert(dbObject);
  log.info("Role '{}' has been added: {}", roleBaru, result.getLastError());
  return roleBaru;
}
/** Delete a {@link Task} for a particular id. */
public boolean deleteObjectById(String id) {
  WriteResult result = mongoTemplate.remove(new Query(Criteria.where("uid").is(id)), Task.class);
  if (result == null) {
    return false;
  }
  // Force a round trip so the server reports any error for this remove.
  result.getLastError();
  return result.getN() > 0;
}
@Override
public boolean deleteObject(Task object) {
  WriteResult result = mongoTemplate.remove(object);
  if (result == null) {
    return false;
  }
  // Force a round trip so the server reports any error for this remove.
  result.getLastError();
  return result.getN() > 0;
}
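Both Spring Data snippets follow the same remove-and-verify pattern: issue the remove, then use WriteResult.getN() (the number of documents affected) to decide success. A minimal self-contained sketch, assuming an already-configured MongoTemplate and the Task entity from the snippets above:

import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.data.mongodb.core.query.Criteria;
import org.springframework.data.mongodb.core.query.Query;
import com.mongodb.WriteResult;

public class TaskRepository {
  private final MongoTemplate mongoTemplate;

  public TaskRepository(MongoTemplate mongoTemplate) {
    this.mongoTemplate = mongoTemplate;
  }

  /** Returns true when at least one document was removed. */
  public boolean deleteById(String id) {
    WriteResult result =
        mongoTemplate.remove(new Query(Criteria.where("uid").is(id)), Task.class);
    // getN() reports how many documents the last operation affected.
    return result != null && result.getN() > 0;
  }
}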
@Test
@Ignore("Really slow on the delete, not a good unit test at the moment")
public void deleteBatchTest() throws Exception {
  DB db = getDb();

  int count = (int) (OpDelete.BATCH_SIZE * 1.5);
  List<DBObject> docs = new ArrayList<DBObject>(count);
  for (int i = 0; i < count; i++) {
    BasicDBObject doc = new BasicDBObject();
    doc.put("index", i);
    docs.add(doc);
  }

  WriteResult result = db.getCollection("deletebatchtests").insert(docs);
  assertNull(result.getLastError().getErrorMessage());

  // iterate over all the data to make sure it has been inserted
  DBCursor cursor = db.getCollection("deletebatchtests").find();
  for (int i = 0; i < count && cursor.hasNext(); i++) {
    int index = new BasicDBObject(cursor.next().toMap()).getInt("index");
    assertEquals(i, index);
  }

  BasicDBObject query = new BasicDBObject();
  query.put("index", new BasicDBObject("$lte", count));

  // now delete the objects
  db.getCollection("deletebatchtests").remove(query, WriteConcern.SAFE);

  // now try to iterate; there should be no results
  cursor = db.getCollection("deletebatchtests").find();
  assertFalse(cursor.hasNext());

  // check it has been deleted
  UUID appId = emf.lookupApplication("test-organization/test-app");
  EntityManager em = emf.getEntityManager(appId);

  Results results = em.searchCollection(
      new SimpleEntityRef("application", appId), "deletebatchtests", new Query());
  assertEquals(0, results.size());
}
@Test
public void testUpsertExisting() {
  DBCollection collection = newCollection();
  collection.insert(new BasicDBObject("_id", 1));

  WriteResult result = collection.update(
      new BasicDBObject("_id", 1),
      new BasicDBObject("$inc", new BasicDBObject("a", 1)),
      true, false);

  assertEquals(new BasicDBObject("_id", 1).append("a", 1), collection.findOne());
  assertTrue(result.getLastError().getBoolean("updatedExisting"));
}
private void processAndTransferWriteResult(WriteResult result, Exchange exchange) {
  // if invokeGetLastError is set, or a WriteConcern is set which implicitly calls getLastError,
  // then we have the chance to populate the MONGODB_LAST_ERROR header, as well as setting an
  // exception on the Exchange if one occurred at the MongoDB server
  if (endpoint.isInvokeGetLastError()
      || (endpoint.getWriteConcern() != null
          ? endpoint.getWriteConcern().callGetLastError()
          : false)) {
    CommandResult cr =
        result.getCachedLastError() == null ? result.getLastError() : result.getCachedLastError();
    exchange.getOut().setHeader(MongoDbConstants.LAST_ERROR, cr);
    if (!cr.ok()) {
      exchange.setException(MongoDbComponent.wrapInCamelMongoDbException(cr.getException()));
    }
  }

  // determine where to set the WriteResult: as the OUT body or as an IN message header
  if (endpoint.isWriteResultAsHeader()) {
    exchange.getOut().setHeader(MongoDbConstants.WRITERESULT, result);
  } else {
    exchange.getOut().setBody(result);
  }
}
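The distinction the comment in that method draws, between a cached and a freshly fetched last error, can be shown in isolation. A minimal sketch, assuming the legacy 2.x driver, where `coll` is a placeholder DBCollection:

void lastErrorCaching(DBCollection coll) {
  // Acknowledged write: the driver calls getLastError as part of the write,
  // so the CommandResult is already cached on the WriteResult.
  WriteResult acked = coll.insert(new BasicDBObject("a", 1), WriteConcern.SAFE);
  CommandResult cached = acked.getCachedLastError(); // no extra round trip

  // Unacknowledged write: nothing is cached, so getLastError() makes an
  // explicit round trip to the server to fetch the outcome.
  WriteResult unacked = coll.insert(new BasicDBObject("a", 2), WriteConcern.NORMAL);
  CommandResult fetched = unacked.getLastError(); // separate round trip
}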
public boolean processRow(StepMetaInterface smi, StepDataInterface sdi) throws KettleException {
  Object[] row = getRow();

  if (row == null) {
    // no more input - flush any remaining buffered objects
    if (m_batch != null && m_batch.size() > 0) {
      doBatch();
    }

    // INDEXING - http://www.mongodb.org/display/DOCS/Indexes
    // Indexing is computationally expensive - it needs to be
    // done after all data is inserted and done in the BACKGROUND.
    // UNIQUE indexes (prevent duplicates on the keys in the index)
    // and SPARSE indexes (don't index docs that don't have the key
    // field) - current limitation is that SPARSE indexes can only
    // have a single field
    List<MongoDbOutputMeta.MongoIndex> indexes = m_meta.getMongoIndexes();
    if (indexes != null && indexes.size() > 0) {
      logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.ApplyingIndexOpps"));
      m_data.applyIndexes(indexes, log, m_meta.getTruncate());
    }

    disconnect();
    setOutputDone();
    return false;
  }

  if (first) {
    first = false;

    m_batchInsertSize = 100;
    String batchInsert = environmentSubstitute(m_meta.getBatchInsertSize());
    if (!Const.isEmpty(batchInsert)) {
      m_batchInsertSize = Integer.parseInt(batchInsert);
    }
    m_batch = new ArrayList<DBObject>(m_batchInsertSize);

    // output the same as the input
    m_data.setOutputRowMeta(getInputRowMeta());

    m_mongoTopLevelStructure =
        MongoDbOutputData.checkTopLevelConsistency(m_meta.m_mongoFields, this);
    if (m_mongoTopLevelStructure == MongoDbOutputData.MongoTopLevel.INCONSISTENT) {
      throw new KettleException(
          BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.InconsistentMongoTopLevel"));
    }

    // first check our incoming fields against our meta data for fields to insert
    RowMetaInterface rmi = getInputRowMeta();
    List<MongoDbOutputMeta.MongoField> mongoFields = m_meta.getMongoFields();
    List<String> notToBeInserted = new ArrayList<String>();
    for (int i = 0; i < rmi.size(); i++) {
      ValueMetaInterface vm = rmi.getValueMeta(i);
      boolean ok = false;
      for (MongoDbOutputMeta.MongoField field : mongoFields) {
        String mongoMatch = environmentSubstitute(field.m_incomingFieldName);
        if (vm.getName().equals(mongoMatch)) {
          ok = true;
          break;
        }
      }
      if (!ok) {
        notToBeInserted.add(vm.getName());
      }
    }

    if (notToBeInserted.size() == rmi.size()) {
      throw new KettleException(
          BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.NotInsertingAnyFields"));
    }

    if (notToBeInserted.size() > 0) {
      StringBuffer b = new StringBuffer();
      for (String s : notToBeInserted) {
        b.append(s).append(" ");
      }
      logBasic(
          BaseMessages.getString(PKG, "MongoDbOutput.Messages.FieldsNotToBeInserted"),
          b.toString());
    }

    // init mongo fields
    for (MongoDbOutputMeta.MongoField m : m_meta.getMongoFields()) {
      m.init(this);
    }

    // check truncate
    if (m_meta.getTruncate()) {
      try {
        logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.TruncatingCollection"));
        m_data.getCollection().drop();

        // re-establish the collection
        String collection = environmentSubstitute(m_meta.getCollection());
        m_data.createCollection(collection);
        m_data.setCollection(m_data.getDB().getCollection(collection));
      } catch (Exception m) {
        disconnect();
        throw new KettleException(m.getMessage(), m);
      }
    }
  }

  if (!isStopped()) {
    if (m_meta.getUpsert()) {
      /* DBObject updateQuery = MongoDbOutputData.getQueryObject(m_meta.getMongoFields(),
         getInputRowMeta(), row, getParentVariableSpace(), m_mongoTopLevelStructure); */
      DBObject updateQuery =
          MongoDbOutputData.getQueryObject(m_meta.getMongoFields(), getInputRowMeta(), row, this);

      if (log.isDebug()) {
        logDebug(
            BaseMessages.getString(
                PKG, "MongoDbOutput.Messages.Debug.QueryForUpsert", updateQuery));
      }

      if (updateQuery != null) {
        // i.e. we have some non-null incoming query field values
        DBObject insertUpdate = null;

        // get the record to update the match with
        if (!m_meta.getModifierUpdate()) {
          // complete record replace or insert
          insertUpdate = MongoDbOutputData.kettleRowToMongo(
              m_meta.getMongoFields(), getInputRowMeta(), row, this, m_mongoTopLevelStructure);
        } else {
          // specific field update or insert
          insertUpdate = MongoDbOutputData.getModifierUpdateObject(
              m_meta.getMongoFields(), getInputRowMeta(), row, this, m_mongoTopLevelStructure);
          if (log.isDebug()) {
            logDebug(
                BaseMessages.getString(
                    PKG, "MongoDbOutput.Messages.Debug.ModifierUpdateObject", insertUpdate));
          }
        }

        if (insertUpdate != null) {
          WriteConcern concern = null;
          if (log.getLogLevel().getLevel() >= LogLevel.DETAILED.getLevel()) {
            concern = new WriteConcern(1);
          }

          WriteResult result = null;
          if (concern != null) {
            result = m_data
                .getCollection()
                .update(updateQuery, insertUpdate, true, m_meta.getMulti(), concern);
          } else {
            result =
                m_data.getCollection().update(updateQuery, insertUpdate, true, m_meta.getMulti());
          }

          CommandResult cmd = result.getLastError();
          if (cmd != null && !cmd.ok()) {
            String message = cmd.getErrorMessage();
            logError(
                BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.MongoReported", message));
            try {
              cmd.throwOnError();
            } catch (MongoException me) {
              throw new KettleException(me.getMessage(), me);
            }
          }
        }
      }
    } else {
      // straight insert
      DBObject mongoInsert = MongoDbOutputData.kettleRowToMongo(
          m_meta.getMongoFields(), getInputRowMeta(), row, this, m_mongoTopLevelStructure);

      if (mongoInsert != null) {
        m_batch.add(mongoInsert);
      }

      if (m_batch.size() == m_batchInsertSize) {
        logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.CommitingABatch"));
        doBatch();
      }
    }
  }

  return true;
}