Example #1
  public List<String> findDocuments(String id) throws DocumentException {

    try {
      GridFS gridFS = new GridFS(dataBase);
      String key = "metadata." + DocumentConnector.ID;
      BasicDBObject query = new BasicDBObject(key, id);
      List<GridFSDBFile> gridFSDBFiles = gridFS.find(query);

      CommandResult result = dataBase.getLastError();
      if (!result.ok()) {
        throw new DocumentException(result.getErrorMessage());
      }

      List<String> objects = new ArrayList<String>();
      for (GridFSDBFile gridFSDBFile : gridFSDBFiles) {
        ObjectId objectId = (ObjectId) gridFSDBFile.getId();
        objects.add(objectId.toStringMongod());
      }

      return objects;
    } catch (Exception e) {
      log.error("findDocuments error:" + e.getMessage());
      e.printStackTrace();
      throw new DocumentException(e.getMessage());
    }
  }
  public WriteResult say(DB db, OutMessage m, DB.WriteConcern concern) throws MongoException {
    MyPort mp = _threadPort.get();
    DBPort port = mp.get(true);
    port.checkAuth(db);

    try {
      port.say(m);
      if (concern == DB.WriteConcern.STRICT) {
        return _checkWriteError(mp, port);
      } else {
        mp.done(port);
        return new WriteResult(db, port);
      }
    } catch (IOException ioe) {
      mp.error(ioe);
      _error(ioe);
      if (concern == DB.WriteConcern.NONE) {
        CommandResult res = new CommandResult();
        res.put("ok", false);
        res.put("$err", "NETWORK ERROR");
        return new WriteResult(res);
      }
      throw new MongoException.Network("can't say something", ioe);
    } catch (MongoException me) {
      throw me;
    } catch (RuntimeException re) {
      mp.error(re);
      throw re;
    }
  }
 /**
  * Forces the master server to fsync the RAM data to disk, then lock all writes. The database will
  * be read-only after this command returns.
  *
  * @return result of the command execution
  * @throws MongoException
  * @mongodb.driver.manual reference/command/fsync/ fsync command
  */
 public CommandResult fsyncAndLock() {
   DBObject cmd = new BasicDBObject("fsync", 1);
   cmd.put("lock", 1);
   CommandResult result = getDB(ADMIN_DATABASE_NAME).command(cmd);
   result.throwOnError();
   return result;
 }
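A brief usage sketch for fsyncAndLock() above, assuming a connected 2.x driver Mongo instance whose companion unlock() method releases the lock; backupWhileLocked is a hypothetical helper, not driver API:

  public void backupWhileLocked(Mongo mongo) {
    mongo.fsyncAndLock(); // flush dirty pages to disk and block further writes
    try {
      // take the backup / filesystem snapshot here while the server is read-only
    } finally {
      mongo.unlock(); // release the global write lock taken by fsyncAndLock()
    }
  }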
Example #4
  public String getDocument(String GUID, OutputStream out) throws DocumentException {

    try {
      GridFS gridFS = new GridFS(dataBase);

      ObjectId key = new ObjectId(GUID);
      GridFSDBFile gridFSDBFile = gridFS.find(key);

      if (gridFSDBFile == null) {
        throw new DocumentException("Document does not exist");
      }
      if (out != null) gridFSDBFile.writeTo(out);
      CommandResult result = dataBase.getLastError();
      if (!result.ok()) {
        throw new DocumentException(result.getErrorMessage());
      }

      DBObject dbObject = gridFSDBFile.getMetaData();
      return JSON.serialize(dbObject);
    } catch (Exception e) {
      log.error("getDocument error:" + e.getMessage());
      e.printStackTrace();
      throw new DocumentException(e.getMessage());
    }
  }
Example #5
  public String newDocument(InputStream in, Map<String, String> properties)
      throws DocumentException {

    try {
      GridFS gridFS = new GridFS(dataBase);

      GridFSInputFile gridFSInputFile = gridFS.createFile(in);
      ObjectId id = (ObjectId) gridFSInputFile.getId();
      String GUID = id.toStringMongod();

      gridFSInputFile.setFilename(properties.get(DocumentConnector.NAME));
      gridFSInputFile.setContentType(properties.get(DocumentConnector.CONTENT_TYPE));
      gridFSInputFile.put(DocumentConnector.ID, properties.get(DocumentConnector.ID));
      gridFSInputFile.put(
          DocumentConnector.DOCUMENT_TYPE, properties.get(DocumentConnector.DOCUMENT_TYPE));
      gridFSInputFile.save();
      CommandResult result = dataBase.getLastError();
      if (!result.ok()) {
        throw new DocumentException(result.getErrorMessage());
      }

      return GUID;
    } catch (Exception e) {
      log.error("newDocument error:" + e.getMessage());
      e.printStackTrace();
      throw new DocumentException(e.getMessage());
    }
  }
Example #6
  public String newDocument(InputStream in, String json) throws DocumentException {

    try {
      GridFS gridFS = new GridFS(dataBase);

      GridFSInputFile gridFSInputFile = gridFS.createFile(in);
      ObjectId objectId = (ObjectId) gridFSInputFile.getId();
      String GUID = objectId.toStringMongod();

      DBObject dbObject = (DBObject) JSON.parse(json);

      gridFSInputFile.setFilename((String) dbObject.get(NAME));
      gridFSInputFile.setContentType((String) dbObject.get(CONTENT_TYPE));
      gridFSInputFile.setMetaData(dbObject);
      gridFSInputFile.save();
      CommandResult result = dataBase.getLastError();
      if (!result.ok()) {
        throw new DocumentException(result.getErrorMessage());
      }

      return GUID;
    } catch (Exception e) {
      log.error("newDocument error:" + e.getMessage());
      e.printStackTrace();
      throw new DocumentException(e.getMessage());
    }
  }
Example #7
  /**
   * Executes a database command.
   *
   * @param cmd {@code DBObject} representation of the command to be executed
   * @param options query options to use
   * @param readPrefs {@link ReadPreference} for this command (node selection is the biggest part of this)
   * @param encoder {@link DBEncoder} to be used for command encoding
   * @return result of the command execution
   * @throws MongoException
   * @dochub commands
   */
  public CommandResult command(
      DBObject cmd, int options, ReadPreference readPrefs, DBEncoder encoder) {
    readPrefs = getCommandReadPreference(cmd, readPrefs);
    cmd = wrapCommand(cmd, readPrefs);

    Iterator<DBObject> i =
        getCollection("$cmd")
            .__find(
                cmd,
                new BasicDBObject(),
                0,
                -1,
                0,
                options,
                readPrefs,
                DefaultDBDecoder.FACTORY.create(),
                encoder);
    if (i == null || !i.hasNext()) return null;

    DBObject res = i.next();
    ServerAddress sa = (i instanceof Result) ? ((Result) i).getServerAddress() : null;
    CommandResult cr = new CommandResult(sa);
    cr.putAll(res);
    return cr;
  }
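A short usage sketch for the command() overload above, assuming a connected 2.x driver DB instance; collectionStats is a hypothetical helper:

  public CommandResult collectionStats(DB db, String collectionName) {
    DBObject cmd = new BasicDBObject("collStats", collectionName);
    // options = 0, prefer reading from a secondary, default command encoder
    CommandResult result =
        db.command(cmd, 0, ReadPreference.secondaryPreferred(), DefaultDBEncoder.FACTORY.create());
    result.throwOnError(); // turn an { ok: 0 } reply into a MongoException
    return result;
  }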
  private void saveEvents(Map<String, List<DBObject>> eventMap) {
    if (eventMap.isEmpty()) {
      logger.debug("eventMap is empty");
      return;
    }

    for (String eventCollection : eventMap.keySet()) {
      List<DBObject> docs = eventMap.get(eventCollection);
      if (logger.isDebugEnabled()) {
        logger.debug("collection: {}, length: {}", eventCollection, docs.size());
      }
      int separatorIndex = eventCollection.indexOf(NAMESPACE_SEPARATOR);
      String eventDb = eventCollection.substring(0, separatorIndex);
      String collectionName = eventCollection.substring(separatorIndex + 1);

      // Warning: raise the WriteConcern level if you need stronger data consistency.
      CommandResult result =
          mongo
              .getDB(eventDb)
              .getCollection(collectionName)
              .insert(docs, WriteConcern.NORMAL)
              .getLastError();
      if (result.ok()) {
        String errorMessage = result.getErrorMessage();
        if (errorMessage != null) {
          logger.error("can't insert documents with error: {} ", errorMessage);
          logger.error("with exception", result.getException());

          throw new MongoException(errorMessage);
        }
      } else {
        logger.error("can't get last error");
      }
    }
  }
 protected void throwOnError(WriteConcern wc, WriteResult wr) {
   if (wc == null && wr.getLastConcern() == null) {
     CommandResult cr = wr.getLastError();
     if (cr != null && cr.getErrorMessage() != null && cr.getErrorMessage().length() > 0)
       cr.throwOnError();
   }
 }
  protected void doBatch() throws KettleException {
    WriteConcern concern = null;

    if (log.getLogLevel().getLevel() >= LogLevel.DETAILED.getLevel()) {
      concern = new WriteConcern(1);
    }
    WriteResult result = null;

    if (concern != null) {
      result = m_data.getCollection().insert(m_batch, concern);
    } else {
      result = m_data.getCollection().insert(m_batch);
    }

    CommandResult cmd = result.getLastError();

    if (cmd != null && !cmd.ok()) {
      String message = cmd.getErrorMessage();
      logError(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.MongoReported", message));
      try {
        cmd.throwOnError();
      } catch (MongoException me) {
        throw new KettleException(me.getMessage(), me);
      }
    }

    m_batch.clear();
  }
Example #11
 @Test
 public void testUndefinedCommand() {
   Fongo fongo = newFongo();
   DB db = fongo.getDB("db");
   CommandResult result = db.command("undefined");
   assertEquals("ok should always be defined", false, result.get("ok"));
   assertEquals("undefined command: { \"undefined\" : true}", result.get("err"));
 }
Example #12
 @Test
 public void testGetLastError() {
   Fongo fongo = newFongo();
   DB db = fongo.getDB("db");
   DBCollection collection = db.getCollection("coll");
   collection.insert(new BasicDBObject("_id", 1));
   CommandResult error = db.getLastError();
   assertTrue(error.ok());
 }
Example #13
 /**
  * Creates a collection with a given name and options. If the collection does not exist, a new
  * collection is created.
  *
  * <p>Possible options:
  *
  * <ul>
  *   <li><b>capped</b> ({@code boolean}) - Enables a collection cap. False by default. If enabled,
  *       you must specify a size parameter.
  *   <li><b>size</b> ({@code int}) - If capped is true, size specifies a maximum size in bytes for
  *       the capped collection. When capped is false, you may use size to preallocate space.
  *   <li><b>max</b> ({@code int}) - Optional. Specifies a maximum "cap" in number of documents for
  *       capped collections. You must also specify size when specifying max.
  * </ul>
  *
  * <p>Note that if the {@code options} parameter is {@code null}, the creation will be deferred to
  * when the collection is written to.
  *
  * @param name the name of the collection to return
  * @param options options
  * @return the collection
  * @throws MongoException
  */
 public DBCollection createCollection(String name, DBObject options) {
   if (options != null) {
     DBObject createCmd = new BasicDBObject("create", name);
     createCmd.putAll(options);
     CommandResult result = command(createCmd);
     result.throwOnError();
   }
   return getCollection(name);
 }
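A minimal sketch of the capped-collection options documented above, assuming a connected DB instance; createCappedLog is a hypothetical helper:

  public DBCollection createCappedLog(DB db) {
    DBObject options = new BasicDBObject("capped", true)
        .append("size", 1024 * 1024) // required when capped: maximum size in bytes
        .append("max", 1000);        // optional: maximum number of documents
    return db.createCollection("log", options);
  }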
 /**
  * Forces the master server to fsync the RAM data to disk. This is done automatically by the server
  * at intervals, but can be forced for better reliability.
  *
  * @param async if true, the fsync will be done asynchronously on the server.
  * @return result of the command execution
  * @throws MongoException
  * @mongodb.driver.manual reference/command/fsync/ fsync command
  */
 public CommandResult fsync(boolean async) {
   DBObject cmd = new BasicDBObject("fsync", 1);
   if (async) {
     cmd.put("async", 1);
   }
   CommandResult result = getDB(ADMIN_DATABASE_NAME).command(cmd);
   result.throwOnError();
   return result;
 }
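A short usage sketch for fsync() above, again assuming a connected 2.x Mongo instance; flushToDisk is a hypothetical helper:

  public void flushToDisk(Mongo mongo, boolean async) {
    // async = true returns once the server has queued the fsync;
    // async = false waits until the flush has completed
    CommandResult result = mongo.fsync(async);
    System.out.println("fsync ok: " + result.ok()); // throwOnError() already ran inside fsync()
  }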
Example #15
 @Override
 protected void refreshNode() {
   // db.getStats can be slow..
    // can't use the driver's version because it doesn't use slaveOk
   CommandResult res = db.command(new BasicDBObject("dbstats", 1), db.getMongo().getOptions());
   //        CommandResult res = db.command(new BasicDBObject("profile", -1));
   res.throwOnError();
   stats = res;
   //        db.getCollection("foo").save(new BasicDBObject("a", 1));
 }
 public static String importRecord(InputStream inputFile, DBCollection collection) {
   XMLSerializer xmlSerializer = new XMLSerializer();
   JSON jsonDocument = xmlSerializer.readFromStream(inputFile);
   DBObject bson = (DBObject) com.mongodb.util.JSON.parse(jsonDocument.toString());
   CommandResult result = collection.insert(bson).getLastError();
   if (result != null) {
     return result.getErrorMessage();
   }
   return null;
 }
  private CommandResult convertToCommandResult(DBObject cmd, Response res) {
    if (res.size() == 0) return null;
    if (res.size() > 1) throw new MongoInternalException("something is wrong.  size:" + res.size());

    DBObject data = res.get(0);
    if (data == null) throw new MongoInternalException("something is wrong, no command result");

    CommandResult cr = new CommandResult(cmd, res.serverUsed());
    cr.putAll(data);
    return cr;
  }
Example #18
 @Test
 public void testCountCommand() {
   DBObject countCmd = new BasicDBObject("count", "coll");
   Fongo fongo = newFongo();
   DB db = fongo.getDB("db");
   DBCollection coll = db.getCollection("coll");
   coll.insert(new BasicDBObject());
   coll.insert(new BasicDBObject());
   CommandResult result = db.command(countCmd);
   assertEquals("The command should have been succesful", true, result.get("ok"));
   assertEquals("The count should be in the result", 2L, result.get("n"));
 }
 @Test
 public void shouldTimeOutCommand() {
   checkServerVersion(2.5);
   enableMaxTimeFailPoint();
   try {
     CommandResult res =
         getDatabase().command(new BasicDBObject("isMaster", 1).append("maxTimeMS", 1));
     res.throwOnError();
     fail("Show have thrown");
   } catch (MongoExecutionTimeoutException e) {
     assertEquals(50, e.getCode());
   } finally {
     disableMaxTimeFailPoint();
   }
 }
  WriteResult _checkWriteError(MyPort mp, DBPort port) throws MongoException {

    CommandResult e = _mongo.getDB("admin").getLastError();
    mp.done(port);

    Object foo = e.get("err");
    if (foo == null) return new WriteResult(e);

    int code = -1;
    if (e.get("code") instanceof Number) code = ((Number) e.get("code")).intValue();
    String s = foo.toString();
    if (code == 11000 || code == 11001 || s.startsWith("E11000") || s.startsWith("E11001"))
      throw new MongoException.DuplicateKey(code, s);
    throw new MongoException(code, s);
  }
Example #21
  public void deleteDocument(String GUID) throws DocumentException {

    try {
      GridFS gridFS = new GridFS(dataBase);

      ObjectId key = new ObjectId(GUID);
      gridFS.remove(key);
      CommandResult result = dataBase.getLastError();
      if (!result.ok()) {
        throw new DocumentException(result.getErrorMessage());
      }
    } catch (Exception e) {
      log.error("deleteDocument error:" + e.getMessage());
      e.printStackTrace();
      throw new DocumentException(e.getMessage());
    }
  }
  void checkAuth(DB db) throws IOException {
    DB.AuthorizationCredentials credentials = db.getAuthorizationCredentials();
    if (credentials == null) {
      if (db._name.equals("admin")) return;
      checkAuth(db._mongo.getDB("admin"));
      return;
    }
    if (_authed.containsKey(db)) return;

    CommandResult res = runCommand(db, credentials.getNonceCommand());
    res.throwOnError();

    res = runCommand(db, credentials.getAuthCommand(res.getString("nonce")));
    res.throwOnError();

    _authed.put(db, true);
  }
 @Override
 public List<DependencyStatus> status() {
   // Note: failures are tested manually for now; if you make changes, verify things still work.
   // TODO TEST add tests exercising failures
   final String version;
   try {
     final CommandResult bi = gfs.getDB().command("buildInfo");
     version = bi.getString("version");
   } catch (MongoException e) {
     LoggerFactory.getLogger(getClass()).error("Failed to connect to MongoDB", e);
     return Arrays.asList(
         new DependencyStatus(
             false, "Couldn't connect to MongoDB: " + e.getMessage(), "GridFS", "Unknown"));
   }
   return Arrays.asList(new DependencyStatus(true, "OK", "GridFS", version));
 }
  @Test
  @SuppressWarnings("unchecked")
  public void verifySetMembers() throws Exception {

    final Mongo mongo = new Mongo(new DBAddress("127.0.0.1", 27017, "admin"));

    final CommandResult result =
        mongo.getDB("admin").command(new BasicDBObject("replSetGetStatus", 1));

    final List<BasicDBObject> members = (List<BasicDBObject>) result.get("members");

    assertEquals(3, members.size());

    for (final BasicDBObject member : members) {
      // System.out.println(member);
    }
  }
  /**
   * Gets a list of the names of all databases on the connected server.
   *
   * @return list of database names
   * @throws MongoException
   */
  public List<String> getDatabaseNames() {

    BasicDBObject cmd = new BasicDBObject();
    cmd.put("listDatabases", 1);

    CommandResult res = getDB(ADMIN_DATABASE_NAME).command(cmd, getOptions());
    res.throwOnError();

    List l = (List) res.get("databases");

    List<String> list = new ArrayList<String>();

    for (Object o : l) {
      list.add(((BasicDBObject) o).getString("name"));
    }
    return list;
  }
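A brief usage sketch for getDatabaseNames() above, assuming a connected 2.x Mongo instance; printDatabases is a hypothetical helper:

  public void printDatabases(Mongo mongo) {
    for (String name : mongo.getDatabaseNames()) {
      boolean system = "admin".equals(name) || "local".equals(name) || "config".equals(name);
      System.out.println(name + (system ? " (system database)" : ""));
    }
  }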
  /**
   * Imports the sample dataset (zips.json) if necessary (e.g. if it doesn't exist yet). The dataset
   * can originally be found on the MongoDB aggregation framework example website:
   *
   * @see http://docs.mongodb.org/manual/tutorial/aggregation-examples/.
   */
  private void initSampleDataIfNecessary() {

    if (!initialized) {

      CommandResult result = mongoTemplate.executeCommand("{ buildInfo: 1 }");
      Object version = result.get("version");
      LOGGER.debug("Server uses MongoDB Version: {}", version);

      mongoTemplate.dropCollection(ZipInfo.class);
      mongoTemplate.execute(
          ZipInfo.class,
          new CollectionCallback<Void>() {

            @Override
            public Void doInCollection(DBCollection collection)
                throws MongoException, DataAccessException {

              Scanner scanner = null;
              try {
                scanner =
                    new Scanner(
                        new BufferedInputStream(
                            new ClassPathResource("zips.json").getInputStream()));
                while (scanner.hasNextLine()) {
                  String zipInfoRecord = scanner.nextLine();
                  collection.save((DBObject) JSON.parse(zipInfoRecord));
                }
              } catch (Exception e) {
                throw new RuntimeException("Could not load mongodb sample dataset!", e);
              } finally {
                // close the scanner on both the success and the failure path
                if (scanner != null) {
                  scanner.close();
                }
              }

              return null;
            }
          });

      long count = mongoTemplate.count(new Query(), ZipInfo.class);
      assertThat(count, is(29467L));

      initialized = true;
    }
  }
  protected <T> void ensureIndex(
      Class<T> clazz,
      String name,
      BasicDBObject fields,
      boolean unique,
      boolean dropDupsOnCreate,
      boolean background,
      boolean sparse) {
    // validate field names and translate them to the stored values
    BasicDBObject keys = new BasicDBObject();
    for (Entry<String, Object> entry : fields.entrySet()) {
      StringBuffer sb = new StringBuffer(entry.getKey());
      Mapper.validate(clazz, mapr, sb, FilterOperator.IN, "", true, false);
      keys.put(sb.toString(), entry.getValue());
    }

    BasicDBObjectBuilder keyOpts = new BasicDBObjectBuilder();
    if (name != null && name.length() > 0) {
      keyOpts.add("name", name);
    }
    if (unique) {
      keyOpts.add("unique", true);
      if (dropDupsOnCreate) keyOpts.add("dropDups", true);
    }

    if (background) keyOpts.add("background", true);
    if (sparse) keyOpts.add("sparse", true);

    DBCollection dbColl = getCollection(clazz);

    BasicDBObject opts = (BasicDBObject) keyOpts.get();
    if (opts.isEmpty()) {
      log.debug("Ensuring index for " + dbColl.getName() + " with keys:" + keys);
      dbColl.ensureIndex(keys);
    } else {
      log.debug(
          "Ensuring index for " + dbColl.getName() + " with keys:" + keys + " and opts:" + opts);
      dbColl.ensureIndex(keys, opts);
    }

    // TODO: remove this once using 2.4 driver does this in ensureIndex
    CommandResult cr = dbColl.getDB().getLastError();
    cr.throwOnError();
  }
  private void setup() throws UnknownHostException {
    if (isInitialized()) {
      mongoClient = new MongoClient(mongoUri);
    }

    try {
      CommandResult res = mongoClient.getDB("admin").command("buildInfo");
      Object _version = res.get("version");

      if (_version != null && _version instanceof String) {
        serverVersion = (String) _version;
      } else {
        LOGGER.warn("Cannot get the MongoDb version.");
        serverVersion = "3.x?";
      }
    } catch (Throwable t) {
      LOGGER.warn("Cannot get the MongoDb version.");
      serverVersion = "?";
    }
  }
  private void processAndTransferWriteResult(WriteResult result, Exchange exchange) {
    // if invokeGetLastError is set, or a WriteConcern is set which implicitly calls getLastError,
    // then we have the chance to populate
    // the MONGODB_LAST_ERROR header, as well as setting an exception on the Exchange if one
    // occurred at the MongoDB server
    if (endpoint.isInvokeGetLastError()
        || (endpoint.getWriteConcern() != null
            ? endpoint.getWriteConcern().callGetLastError()
            : false)) {
      CommandResult cr =
          result.getCachedLastError() == null ? result.getLastError() : result.getCachedLastError();
      exchange.getOut().setHeader(MongoDbConstants.LAST_ERROR, cr);
      if (!cr.ok()) {
        exchange.setException(MongoDbComponent.wrapInCamelMongoDbException(cr.getException()));
      }
    }

    // determine where to set the WriteResult: as the OUT body or as an IN message header
    if (endpoint.isWriteResultAsHeader()) {
      exchange.getOut().setHeader(MongoDbConstants.WRITERESULT, result);
    } else {
      exchange.getOut().setBody(result);
    }
  }
  /** Loads the initial data. */
  private void initData() {
    collectionList.clear();

    try {
      DB mongoDB = MongoDBConnection.connection(userDB);

      for (String col : mongoDB.getCollectionNames()) {

        CommandResult commandResult = mongoDB.getCollection(col).getStats();
        // logger.debug(commandResult);

        MongoDBCollectionInfoDTO info = new MongoDBCollectionInfoDTO();
        info.setName(col);

        try {
          info.setCount(commandResult.getInt("count")); // $NON-NLS-1$
          info.setSize(commandResult.getInt("size")); // $NON-NLS-1$
          info.setStorage(commandResult.getInt("storageSize")); // $NON-NLS-1$
          info.setIndex(commandResult.getInt("totalIndexSize")); // $NON-NLS-1$
          info.setAvgObj(commandResult.getDouble("avgObjSize")); // $NON-NLS-1$
          info.setPadding(commandResult.getInt("paddingFactor")); // $NON-NLS-1$
        } catch (Exception e) {
          logger.error("collection info error [" + col + "]", e); // $NON-NLS-1$ //$NON-NLS-2$
        }
        collectionList.add(info);
      }
      treeViewerCollections.setInput(collectionList);

      // Display the summary information.
      double dblSize = 0, dblStorage = 0, dblIndex = 0;
      for (MongoDBCollectionInfoDTO info : collectionList) {
        dblSize += info.getSize();
        dblStorage += info.getStorage();
        dblIndex += info.getIndex();
      }
      lblCollection.setText(collectionList.size() + " Collections"); // $NON-NLS-1$
      lblSizes.setText("Size " + NumberFormatUtils.kbMbFormat(dblSize)); // $NON-NLS-1$
      lblStorages.setText("Storage " + NumberFormatUtils.kbMbFormat(dblStorage)); // $NON-NLS-1$
      lblIndex.setText("Index " + NumberFormatUtils.kbMbFormat(dblIndex)); // $NON-NLS-1$

    } catch (Exception e) {
      logger.error("mongodb collection infomtion init", e); // $NON-NLS-1$

      Status errStatus =
          new Status(IStatus.ERROR, Activator.PLUGIN_ID, e.getMessage(), e); // $NON-NLS-1$
      ExceptionDetailsErrorDialog.openError(
          null, "Error", "MongoDB Information", errStatus); // $NON-NLS-1$ //$NON-NLS-2$
    }
  }