private void saveEvents(Map<String, List<DBObject>> eventMap) { if (eventMap.isEmpty()) { logger.debug("eventMap is empty"); return; } for (String eventCollection : eventMap.keySet()) { List<DBObject> docs = eventMap.get(eventCollection); if (logger.isDebugEnabled()) { logger.debug("collection: {}, length: {}", eventCollection, docs.size()); } int separatorIndex = eventCollection.indexOf(NAMESPACE_SEPARATOR); String eventDb = eventCollection.substring(0, separatorIndex); String collectionName = eventCollection.substring(separatorIndex + 1); // Warning: please change the WriteConcern level if you need high datum consistence. CommandResult result = mongo .getDB(eventDb) .getCollection(collectionName) .insert(docs, WriteConcern.NORMAL) .getLastError(); if (result.ok()) { String errorMessage = result.getErrorMessage(); if (errorMessage != null) { logger.error("can't insert documents with error: {} ", errorMessage); logger.error("with exception", result.getException()); throw new MongoException(errorMessage); } } else { logger.error("can't get last error"); } } }
/**
 * Stores a new document in GridFS, taking its filename, content type and metadata
 * from the supplied JSON descriptor.
 *
 * @param in stream containing the document's binary content
 * @param json JSON object supplying NAME and CONTENT_TYPE entries; stored whole as metadata
 * @return the generated ObjectId of the stored file, as a hex string
 * @throws DocumentException if the save fails or the server reports an error
 */
public String newDocument(InputStream in, String json) throws DocumentException {
  try {
    GridFS gridFS = new GridFS(dataBase);
    GridFSInputFile gridFSInputFile = gridFS.createFile(in);
    ObjectId objectId = (ObjectId) gridFSInputFile.getId();
    String guid = objectId.toStringMongod();
    DBObject dbObject = (DBObject) JSON.parse(json);
    gridFSInputFile.setFilename((String) dbObject.get(NAME));
    gridFSInputFile.setContentType((String) dbObject.get(CONTENT_TYPE));
    gridFSInputFile.setMetaData(dbObject);
    gridFSInputFile.save();
    // Legacy-driver pattern: check the outcome of the write explicitly.
    CommandResult result = dataBase.getLastError();
    if (!result.ok()) {
      throw new DocumentException(result.getErrorMessage());
    }
    return guid;
  } catch (Exception e) {
    // Log the full stack trace through the logger instead of printStackTrace().
    log.error("newDocument error:" + e.getMessage(), e);
    throw new DocumentException(e.getMessage());
  }
}
/**
 * Flushes the accumulated batch of documents to the current MongoDB collection.
 * Any server-reported write error is logged and rethrown as a KettleException;
 * the batch buffer is cleared on success.
 *
 * @throws KettleException when MongoDB reports a write error for the batch
 */
protected void doBatch() throws KettleException {
  final boolean detailed = log.getLogLevel().getLevel() >= LogLevel.DETAILED.getLevel();

  final WriteResult result;
  if (detailed) {
    // Acknowledged writes (w=1) only when running at DETAILED logging or above.
    result = m_data.getCollection().insert(m_batch, new WriteConcern(1));
  } else {
    result = m_data.getCollection().insert(m_batch);
  }

  final CommandResult lastError = result.getLastError();
  if (lastError != null && !lastError.ok()) {
    final String message = lastError.getErrorMessage();
    logError(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.MongoReported", message));
    try {
      lastError.throwOnError();
    } catch (MongoException me) {
      throw new KettleException(me.getMessage(), me);
    }
  }

  m_batch.clear();
}
/**
 * Stores a new document in GridFS, taking its filename, content type, id and
 * document type from the supplied property map.
 *
 * @param in stream containing the document's binary content
 * @param properties DocumentConnector keys (NAME, CONTENT_TYPE, ID, DOCUMENT_TYPE)
 * @return the generated ObjectId of the stored file, as a hex string
 * @throws DocumentException if the save fails or the server reports an error
 */
public String newDocument(InputStream in, Map<String, String> properties)
    throws DocumentException {
  try {
    GridFS gridFS = new GridFS(dataBase);
    GridFSInputFile gridFSInputFile = gridFS.createFile(in);
    ObjectId id = (ObjectId) gridFSInputFile.getId();
    String guid = id.toStringMongod();
    gridFSInputFile.setFilename(properties.get(DocumentConnector.NAME));
    gridFSInputFile.setContentType(properties.get(DocumentConnector.CONTENT_TYPE));
    gridFSInputFile.put(DocumentConnector.ID, properties.get(DocumentConnector.ID));
    gridFSInputFile.put(
        DocumentConnector.DOCUMENT_TYPE, properties.get(DocumentConnector.DOCUMENT_TYPE));
    gridFSInputFile.save();
    // Legacy-driver pattern: check the outcome of the write explicitly.
    CommandResult result = dataBase.getLastError();
    if (!result.ok()) {
      throw new DocumentException(result.getErrorMessage());
    }
    return guid;
  } catch (Exception e) {
    // Log the full stack trace through the logger instead of printStackTrace().
    log.error("newDocument error:" + e.getMessage(), e);
    throw new DocumentException(e.getMessage());
  }
}
/**
 * Finds all GridFS files whose metadata ID field matches the given id.
 *
 * @param id value to match against {@code metadata.<DocumentConnector.ID>}
 * @return the ObjectIds of all matching files, as hex strings (empty if none match)
 * @throws DocumentException if the query fails or the server reports an error
 */
public List<String> findDocuments(String id) throws DocumentException {
  try {
    GridFS gridFS = new GridFS(dataBase);
    // GridFS stores user metadata under the "metadata" sub-document.
    String key = "metadata." + DocumentConnector.ID;
    BasicDBObject query = new BasicDBObject(key, id);
    List<GridFSDBFile> gridFSDBFiles = gridFS.find(query);
    CommandResult result = dataBase.getLastError();
    if (!result.ok()) {
      throw new DocumentException(result.getErrorMessage());
    }
    // Presize: we know exactly how many ids we will collect.
    List<String> objects = new ArrayList<String>(gridFSDBFiles.size());
    for (GridFSDBFile gridFSDBFile : gridFSDBFiles) {
      ObjectId objectId = (ObjectId) gridFSDBFile.getId();
      objects.add(objectId.toStringMongod());
    }
    return objects;
  } catch (Exception e) {
    // Log the full stack trace through the logger instead of printStackTrace().
    log.error("findDocuments error:" + e.getMessage(), e);
    throw new DocumentException(e.getMessage());
  }
}
/**
 * Retrieves a GridFS document by its ObjectId, optionally streaming its content,
 * and returns the document metadata serialized as JSON.
 *
 * @param GUID hex string of the document's ObjectId
 * @param out destination for the file content; may be null to fetch metadata only
 * @return the document metadata as a JSON string
 * @throws DocumentException if the document does not exist or the read fails
 */
public String getDocument(String GUID, OutputStream out) throws DocumentException {
  try {
    GridFS gridFS = new GridFS(dataBase);
    ObjectId key = new ObjectId(GUID);
    GridFSDBFile gridFSDBFile = gridFS.find(key);
    if (gridFSDBFile == null) {
      throw new DocumentException("No existe el documento");
    }
    // Content streaming is optional; callers may only want the metadata.
    if (out != null) {
      gridFSDBFile.writeTo(out);
    }
    CommandResult result = dataBase.getLastError();
    if (!result.ok()) {
      throw new DocumentException(result.getErrorMessage());
    }
    DBObject dbObject = gridFSDBFile.getMetaData();
    return JSON.serialize(dbObject);
  } catch (Exception e) {
    // Log the full stack trace through the logger instead of printStackTrace().
    log.error("getDocument error:" + e.getMessage(), e);
    throw new DocumentException(e.getMessage());
  }
}
@Test
public void testGetLastError() {
  // A successful insert must leave getLastError() in the OK state.
  DB database = newFongo().getDB("db");
  database.getCollection("coll").insert(new BasicDBObject("_id", 1));

  CommandResult lastError = database.getLastError();
  assertTrue(lastError.ok());
}
/**
 * Deletes a GridFS document by its ObjectId.
 *
 * @param GUID hex string of the document's ObjectId
 * @throws DocumentException if the removal fails or the server reports an error
 */
public void deleteDocument(String GUID) throws DocumentException {
  try {
    GridFS gridFS = new GridFS(dataBase);
    ObjectId key = new ObjectId(GUID);
    gridFS.remove(key);
    // Legacy-driver pattern: check the outcome of the write explicitly.
    CommandResult result = dataBase.getLastError();
    if (!result.ok()) {
      throw new DocumentException(result.getErrorMessage());
    }
  } catch (Exception e) {
    // Log the full stack trace through the logger instead of printStackTrace().
    log.error("deleteDocument error:" + e.getMessage(), e);
    throw new DocumentException(e.getMessage());
  }
}
private void processAndTransferWriteResult(WriteResult result, Exchange exchange) { // if invokeGetLastError is set, or a WriteConcern is set which implicitly calls getLastError, // then we have the chance to populate // the MONGODB_LAST_ERROR header, as well as setting an exception on the Exchange if one // occurred at the MongoDB server if (endpoint.isInvokeGetLastError() || (endpoint.getWriteConcern() != null ? endpoint.getWriteConcern().callGetLastError() : false)) { CommandResult cr = result.getCachedLastError() == null ? result.getLastError() : result.getCachedLastError(); exchange.getOut().setHeader(MongoDbConstants.LAST_ERROR, cr); if (!cr.ok()) { exchange.setException(MongoDbComponent.wrapInCamelMongoDbException(cr.getException())); } } // determine where to set the WriteResult: as the OUT body or as an IN message header if (endpoint.isWriteResultAsHeader()) { exchange.getOut().setHeader(MongoDbConstants.WRITERESULT, result); } else { exchange.getOut().setBody(result); } }
/**
 * Processes one incoming Kettle row.
 *
 * <p>On the first row, initializes batch size, output row meta, validates the
 * configured Mongo field structure against the incoming fields, inits the field
 * mappings and optionally truncates/recreates the target collection. Each row is
 * then either upserted (replace or modifier update, per step config) or added to
 * the insert batch, which is flushed when full. A null row means end of input:
 * any buffered batch is flushed, indexes are applied, and the step shuts down.
 *
 * @param smi step metadata (unused directly; state comes from m_meta/m_data fields)
 * @param sdi step data (unused directly)
 * @return true while more rows are expected, false once input is exhausted
 * @throws KettleException on inconsistent configuration or MongoDB write errors
 */
public boolean processRow(StepMetaInterface smi, StepDataInterface sdi) throws KettleException {
  Object[] row = getRow();

  if (row == null) {
    // no more output
    // check any remaining buffered objects
    if (m_batch != null && m_batch.size() > 0) {
      doBatch();
    }

    // INDEXING - http://www.mongodb.org/display/DOCS/Indexes
    // Indexing is computationally expensive - it needs to be
    // done after all data is inserted and done in the BACKGROUND.
    // UNIQUE indexes (prevent duplicates on the
    // keys in the index) and SPARSE indexes (don't index docs that
    // don't have the key field) - current limitation is that SPARSE
    // indexes can only have a single field
    List<MongoDbOutputMeta.MongoIndex> indexes = m_meta.getMongoIndexes();
    if (indexes != null && indexes.size() > 0) {
      logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.ApplyingIndexOpps"));
      m_data.applyIndexes(indexes, log, m_meta.getTruncate());
    }

    disconnect();
    setOutputDone();
    return false;
  }

  if (first) {
    first = false;

    // Batch size defaults to 100 unless configured (with variable substitution).
    m_batchInsertSize = 100;
    String batchInsert = environmentSubstitute(m_meta.getBatchInsertSize());
    if (!Const.isEmpty(batchInsert)) {
      m_batchInsertSize = Integer.parseInt(batchInsert);
    }
    m_batch = new ArrayList<DBObject>(m_batchInsertSize);

    // output the same as the input
    m_data.setOutputRowMeta(getInputRowMeta());

    // Validate that the configured paths form a consistent top-level document shape.
    m_mongoTopLevelStructure =
        MongoDbOutputData.checkTopLevelConsistency(m_meta.m_mongoFields, this);
    if (m_mongoTopLevelStructure == MongoDbOutputData.MongoTopLevel.INCONSISTENT) {
      throw new KettleException(
          BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.InconsistentMongoTopLevel"));
    }

    // first check our incoming fields against our meta data for fields to insert
    RowMetaInterface rmi = getInputRowMeta();
    List<MongoDbOutputMeta.MongoField> mongoFields = m_meta.getMongoFields();
    List<String> notToBeInserted = new ArrayList<String>();
    for (int i = 0; i < rmi.size(); i++) {
      ValueMetaInterface vm = rmi.getValueMeta(i);
      boolean ok = false;
      for (MongoDbOutputMeta.MongoField field : mongoFields) {
        String mongoMatch = environmentSubstitute(field.m_incomingFieldName);
        if (vm.getName().equals(mongoMatch)) {
          ok = true;
          break;
        }
      }
      if (!ok) {
        notToBeInserted.add(vm.getName());
      }
    }

    // If no incoming field maps to any Mongo field, nothing could ever be written.
    if (notToBeInserted.size() == rmi.size()) {
      throw new KettleException(
          BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.NotInsertingAnyFields"));
    }

    if (notToBeInserted.size() > 0) {
      StringBuffer b = new StringBuffer();
      for (String s : notToBeInserted) {
        b.append(s).append(" ");
      }
      logBasic(
          BaseMessages.getString(PKG, "MongoDbOutput.Messages.FieldsNotToBeInserted"),
          b.toString());
    }

    // init mongo fields
    for (MongoDbOutputMeta.MongoField m : m_meta.getMongoFields()) {
      m.init(this);
    }

    // check truncate
    if (m_meta.getTruncate()) {
      try {
        logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.TruncatingCollection"));
        m_data.getCollection().drop();

        // re-establish the collection
        String collection = environmentSubstitute(m_meta.getCollection());
        m_data.createCollection(collection);
        m_data.setCollection(m_data.getDB().getCollection(collection));
      } catch (Exception m) {
        disconnect();
        throw new KettleException(m.getMessage(), m);
      }
    }
  }

  if (!isStopped()) {
    if (m_meta.getUpsert()) {
      /*DBObject updateQuery = MongoDbOutputData.getQueryObject(m_meta.getMongoFields(), getInputRowMeta(), row, getParentVariableSpace(), m_mongoTopLevelStructure); */
      DBObject updateQuery =
          MongoDbOutputData.getQueryObject(m_meta.getMongoFields(), getInputRowMeta(), row, this);

      if (log.isDebug()) {
        logDebug(
            BaseMessages.getString(
                PKG, "MongoDbOutput.Messages.Debug.QueryForUpsert", updateQuery));
      }

      if (updateQuery != null) {
        // i.e. we have some non-null incoming query field values
        DBObject insertUpdate = null;

        // get the record to update the match with
        if (!m_meta.getModifierUpdate()) {
          // complete record replace or insert
          insertUpdate =
              MongoDbOutputData.kettleRowToMongo(
                  m_meta.getMongoFields(), getInputRowMeta(), row, this, m_mongoTopLevelStructure);
        } else {
          // specific field update or insert
          insertUpdate =
              MongoDbOutputData.getModifierUpdateObject(
                  m_meta.getMongoFields(), getInputRowMeta(), row, this, m_mongoTopLevelStructure);
          if (log.isDebug()) {
            logDebug(
                BaseMessages.getString(
                    PKG, "MongoDbOutput.Messages.Debug.ModifierUpdateObject", insertUpdate));
          }
        }

        if (insertUpdate != null) {
          // Acknowledged writes (w=1) only when running at DETAILED logging or above.
          WriteConcern concern = null;
          if (log.getLogLevel().getLevel() >= LogLevel.DETAILED.getLevel()) {
            concern = new WriteConcern(1);
          }
          WriteResult result = null;
          if (concern != null) {
            result =
                m_data
                    .getCollection()
                    .update(updateQuery, insertUpdate, true, m_meta.getMulti(), concern);
          } else {
            result =
                m_data.getCollection().update(updateQuery, insertUpdate, true, m_meta.getMulti());
          }
          // Surface any server-reported write error as a KettleException.
          CommandResult cmd = result.getLastError();
          if (cmd != null && !cmd.ok()) {
            String message = cmd.getErrorMessage();
            logError(
                BaseMessages.getString(
                    PKG, "MongoDbOutput.Messages.Error.MongoReported", message));
            try {
              cmd.throwOnError();
            } catch (MongoException me) {
              throw new KettleException(me.getMessage(), me);
            }
          }
        }
      }
    } else {
      // straight insert
      DBObject mongoInsert =
          MongoDbOutputData.kettleRowToMongo(
              m_meta.getMongoFields(), getInputRowMeta(), row, this, m_mongoTopLevelStructure);

      if (mongoInsert != null) {
        m_batch.add(mongoInsert);
      }
      // Flush when the batch reaches the configured size.
      if (m_batch.size() == m_batchInsertSize) {
        logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.CommitingABatch"));
        doBatch();
      }
    }
  }

  return true;
}
protected boolean assignCollections() { DB adminDb = mongo.getDB(MongoDBRiver.MONGODB_ADMIN_DATABASE); oplogDb = mongo.getDB(MongoDBRiver.MONGODB_LOCAL_DATABASE); if (!definition.getMongoAdminUser().isEmpty() && !definition.getMongoAdminPassword().isEmpty()) { logger.info( "Authenticate {} with {}", MongoDBRiver.MONGODB_ADMIN_DATABASE, definition.getMongoAdminUser()); CommandResult cmd = adminDb.authenticateCommand( definition.getMongoAdminUser(), definition.getMongoAdminPassword().toCharArray()); if (!cmd.ok()) { logger.error( "Autenticatication failed for {}: {}", MongoDBRiver.MONGODB_ADMIN_DATABASE, cmd.getErrorMessage()); // Can still try with mongoLocal credential if provided. // return false; } oplogDb = adminDb.getMongo().getDB(MongoDBRiver.MONGODB_LOCAL_DATABASE); } if (!definition.getMongoLocalUser().isEmpty() && !definition.getMongoLocalPassword().isEmpty() && !oplogDb.isAuthenticated()) { logger.info( "Authenticate {} with {}", MongoDBRiver.MONGODB_LOCAL_DATABASE, definition.getMongoLocalUser()); CommandResult cmd = oplogDb.authenticateCommand( definition.getMongoLocalUser(), definition.getMongoLocalPassword().toCharArray()); if (!cmd.ok()) { logger.error( "Autenticatication failed for {}: {}", MongoDBRiver.MONGODB_LOCAL_DATABASE, cmd.getErrorMessage()); return false; } } Set<String> collections = oplogDb.getCollectionNames(); if (!collections.contains(MongoDBRiver.OPLOG_COLLECTION)) { logger.error( "Cannot find " + MongoDBRiver.OPLOG_COLLECTION + " collection. Please check this link: http://goo.gl/2x5IW"); return false; } oplogCollection = oplogDb.getCollection(MongoDBRiver.OPLOG_COLLECTION); slurpedDb = mongo.getDB(definition.getMongoDb()); if (!definition.getMongoAdminUser().isEmpty() && !definition.getMongoAdminPassword().isEmpty() && adminDb.isAuthenticated()) { slurpedDb = adminDb.getMongo().getDB(definition.getMongoDb()); } // Not necessary as local user has access to all databases. 
// http://docs.mongodb.org/manual/reference/local-database/ // if (!mongoDbUser.isEmpty() && !mongoDbPassword.isEmpty() // && !slurpedDb.isAuthenticated()) { // logger.info("Authenticate {} with {}", mongoDb, mongoDbUser); // CommandResult cmd = slurpedDb.authenticateCommand(mongoDbUser, // mongoDbPassword.toCharArray()); // if (!cmd.ok()) { // logger.error("Authentication failed for {}: {}", // mongoDb, cmd.getErrorMessage()); // return false; // } // } // slurpedCollection = // slurpedDb.getCollection(definition.getMongoCollection()); // if (definition.isImportAllCollections()) { // for (String collection : slurpedDb.getCollectionNames()) { // slurpedCollections.put(collection, // slurpedDb.getCollection(collection)); // } // } else { // slurpedCollections.put(definition.getMongoCollection(), // slurpedDb.getCollection(definition.getMongoCollection())); // } return true; }