@Override
  public Object execute() throws Exception {

    // run the dbStats command against the configured database and pretty-print the result
    MongoDatabase adminDatabase = mongoService.getMongoClient().getDatabase(database);
    Document document = adminDatabase.runCommand(new Document("dbStats", 1));
    System.out.println(document.toJson(new JsonWriterSettings(true)));

    return null;
  }
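
The `new JsonWriterSettings(true)` constructor is deprecated in newer driver versions in favour of the builder API. A minimal equivalent sketch, assuming a 3.6+ driver where `JsonWriterSettings.builder()` and `JsonMode.RELAXED` are available (class and method names here are illustrative, not from the original source):

```java
import org.bson.Document;
import org.bson.json.JsonMode;
import org.bson.json.JsonWriterSettings;

import com.mongodb.client.MongoDatabase;

// Pretty-print the dbStats command result using the builder-based JsonWriterSettings.
public final class DbStatsPrinter {

  public static void print(MongoDatabase database) {
    Document stats = database.runCommand(new Document("dbStats", 1));
    JsonWriterSettings settings = JsonWriterSettings.builder()
        .indent(true)                  // same effect as the deprecated boolean flag
        .outputMode(JsonMode.RELAXED)  // human-readable extended JSON
        .build();
    System.out.println(stats.toJson(settings));
  }
}
```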
Example #2
    @Override
    public long getEstimatedSizeBytes(PipelineOptions pipelineOptions) {
      MongoClient mongoClient = new MongoClient(new MongoClientURI(spec.uri()));
      MongoDatabase mongoDatabase = mongoClient.getDatabase(spec.database());

      // get the Mongo collStats object
      // it gives the size for the entire collection
      BasicDBObject stat = new BasicDBObject();
      stat.append("collStats", spec.collection());
      Document stats = mongoDatabase.runCommand(stat);
      // "size" may come back as an Integer, Long or Double, so go through Number
      return ((Number) stats.get("size")).longValue();
    }
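
The snippet above opens a `MongoClient` that it never closes. A hedged variant, assuming the 3.x legacy driver where `MongoClient` implements `Closeable`, that releases the connection when done (the class and method names are illustrative, not from the original source):

```java
import org.bson.Document;

import com.mongodb.BasicDBObject;
import com.mongodb.MongoClient;
import com.mongodb.MongoClientURI;
import com.mongodb.client.MongoDatabase;

// Illustrative variant of the size estimate that closes the client when done.
public final class CollectionSizeEstimator {

  public static long estimatedSizeBytes(String uri, String databaseName, String collectionName) {
    try (MongoClient mongoClient = new MongoClient(new MongoClientURI(uri))) {
      MongoDatabase mongoDatabase = mongoClient.getDatabase(databaseName);
      // collStats returns statistics (including size) for the entire collection
      Document stats = mongoDatabase.runCommand(new BasicDBObject("collStats", collectionName));
      // "size" may be returned as an Integer, Long or Double depending on the server
      return ((Number) stats.get("size")).longValue();
    }
  }
}
```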
Example #3
    @Override
    public List<BoundedSource<Document>> splitIntoBundles(
        long desiredBundleSizeBytes, PipelineOptions options) {
      MongoClient mongoClient = new MongoClient(new MongoClientURI(spec.uri()));
      MongoDatabase mongoDatabase = mongoClient.getDatabase(spec.database());

      List<Document> splitKeys;
      if (spec.numSplits() > 0) {
        // the user defined the desired number of splits,
        // so derive the bundle size from the estimated collection size
        long estimatedSizeBytes = getEstimatedSizeBytes(options);
        desiredBundleSizeBytes = estimatedSizeBytes / spec.numSplits();
      }

      // if the desired bundle size is too small, fall back to the default chunk size of 1 MB
      if (desiredBundleSizeBytes < 1024 * 1024) {
        desiredBundleSizeBytes = 1 * 1024 * 1024;
      }

      // now that we have the bundle size (provided either by the user or by the runner),
      // use the Mongo splitVector command to get the split keys
      BasicDBObject splitVectorCommand = new BasicDBObject();
      splitVectorCommand.append("splitVector", spec.database() + "." + spec.collection());
      splitVectorCommand.append("keyPattern", new BasicDBObject().append("_id", 1));
      splitVectorCommand.append("force", false);
      // maxChunkSize is the Mongo partition size in MB
      LOGGER.debug("Splitting in chunk of {} MB", desiredBundleSizeBytes / 1024 / 1024);
      splitVectorCommand.append("maxChunkSize", desiredBundleSizeBytes / 1024 / 1024);
      Document splitVectorCommandResult = mongoDatabase.runCommand(splitVectorCommand);
      splitKeys = (List<Document>) splitVectorCommandResult.get("splitKeys");

      List<BoundedSource<Document>> sources = new ArrayList<>();
      if (splitKeys.isEmpty()) {
        LOGGER.debug("No split keys returned, using a single source");
        sources.add(this);
        return sources;
      }

      LOGGER.debug("Number of splits is {}", splitKeys.size());
      for (String shardFilter : splitKeysToFilters(splitKeys, spec.filter())) {
        sources.add(new BoundedMongoDbSource(spec.withFilter(shardFilter)));
      }

      return sources;
    }
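
The loop above depends on a `splitKeysToFilters` helper that is not shown in this snippet. As an illustration only (the real Beam MongoDbIO helper may differ in detail), such a helper could turn the ordered split keys into `_id` range filter strings, each range running from one split key up to the next:

```java
import java.util.ArrayList;
import java.util.List;

import org.bson.Document;

// Illustrative helper: turn ordered splitVector keys into _id range filter strings.
// Each split key is the upper bound of one range and the lower bound of the next.
final class SplitKeyFilters {

  static List<String> splitKeysToFilters(List<Document> splitKeys, String additionalFilter) {
    List<String> filters = new ArrayList<>();
    String lowerBound = null; // no lower bound for the first range
    for (Document splitKey : splitKeys) {
      String upperBound = splitKey.get("_id").toString();
      filters.add(rangeFilter(lowerBound, upperBound, additionalFilter));
      lowerBound = upperBound;
    }
    // last range: everything above the final split key
    filters.add(rangeFilter(lowerBound, null, additionalFilter));
    return filters;
  }

  private static String rangeFilter(String lower, String upper, String additionalFilter) {
    StringBuilder range = new StringBuilder("{ \"_id\": {");
    if (lower != null) {
      range.append(" $gte: ObjectId(\"").append(lower).append("\")");
    }
    if (lower != null && upper != null) {
      range.append(",");
    }
    if (upper != null) {
      range.append(" $lt: ObjectId(\"").append(upper).append("\")");
    }
    range.append(" } }");
    return additionalFilter == null
        ? range.toString()
        : "{ $and: [ " + range + ", " + additionalFilter + " ] }";
  }
}
```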
Example #4
  /**
   * Returns stats of a particular database in Mongo to which the user is connected.
   *
   * @param dbName name of the database
   * @return array of JSON objects, each containing one key-value pair from the dbStats output
   * @throws JSONException while building the JSON result
   * @throws DatabaseException if an error occurs while performing this operation
   * @throws ValidationException thrown as the supertype of EmptyDatabaseNameException
   */
  public JSONArray getDbStats(String dbName)
      throws DatabaseException, ValidationException, JSONException {
    if (dbName == null) {
      throw new DatabaseException(ErrorCodes.DB_NAME_EMPTY, "Database name is null");
    }
    if (dbName.isEmpty()) {
      throw new DatabaseException(ErrorCodes.DB_NAME_EMPTY, "Database Name Empty");
    }

    JSONArray dbStats = new JSONArray();
    try {
      List<String> dbList = getDbList();
      boolean dbPresent = dbList.contains(dbName);
      if (!dbPresent) {
        throw new DatabaseException(
            ErrorCodes.DB_DOES_NOT_EXISTS, "DB with name '" + dbName + "' does not exist");
      }

      MongoDatabase db = mongoInstance.getDatabase(dbName);
      Document stats = db.runCommand(new Document("dbStats", 1));

      // flatten each dbStats field into a {Key, Value, Type} JSON object
      for (String key : stats.keySet()) {
        JSONObject temp = new JSONObject();
        temp.put("Key", key);
        Object value = stats.get(key);
        temp.put("Value", value.toString());
        String type = value.getClass().toString();
        temp.put("Type", type.substring(type.lastIndexOf('.') + 1));
        dbStats.put(temp);
      }
    } catch (MongoException m) {
      throw new DatabaseException(ErrorCodes.GET_DB_STATS_EXCEPTION, m.getMessage());
    }

    return dbStats;
  }
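
A minimal usage sketch for the method above; the database name "test" and the surrounding instance (whatever class owns `getDbStats` and its connected `mongoInstance`) are assumptions for illustration:

```java
// Inside the class that owns getDbStats, with mongoInstance already connected:
JSONArray stats = getDbStats("test");  // "test" is just an example database name
for (int i = 0; i < stats.length(); i++) {
  JSONObject entry = stats.getJSONObject(i);
  System.out.println(
      entry.getString("Key") + " = " + entry.getString("Value")
          + " (" + entry.getString("Type") + ")");
}
```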
Example #5
  public void ensureSchema() {
    // ensures collections and indexes
    MongoCollection<Document> snapshots = snapshotsCollection();
    snapshots.createIndex(new BasicDBObject(GLOBAL_ID_KEY, ASC));
    snapshots.createIndex(new BasicDBObject(GLOBAL_ID_VALUE_OBJECT, ASC));
    snapshots.createIndex(new BasicDBObject(GLOBAL_ID_OWNER_ID_ENTITY, ASC));
    snapshots.createIndex(new BasicDBObject(CHANGED_PROPERTIES, ASC));
    snapshots.createIndex(
        new BasicDBObject(COMMIT_PROPERTIES + ".key", ASC)
            .append(COMMIT_PROPERTIES + ".value", ASC));

    headCollection();

    // schema migration script from JaVers 2.0 to 2.1
    dropIndexIfExists(snapshots, GLOBAL_ID_ENTITY);

    // schema migration script from JaVers 1.1 to 1.2
    Document doc = snapshots.find().first();
    if (doc != null) {
      Object stringCommitId = ((Map) doc.get("commitMetadata")).get("id");
      if (stringCommitId instanceof String) {
        logger.info("executing db migration script, from JaVers 1.1 to 1.2 ...");

        Document update =
            new Document(
                "eval",
                "function() { \n"
                    + "    db.jv_snapshots.find().forEach( \n"
                    + "      function(snapshot) { \n"
                    + "        snapshot.commitMetadata.id = Number(snapshot.commitMetadata.id); \n"
                    + "        db.jv_snapshots.save(snapshot); } \n"
                    + "    ); "
                    + "    return 'ok'; \n"
                    + "}");

        Document ret = mongo.runCommand(update);
        logger.info("result: \n " + ret.toJson());
      }
    }
  }
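
The migration above relies on the server-side `eval` command, which was deprecated in MongoDB 3.0 and removed in 4.2. A hedged, client-side sketch of the same id-to-number conversion done through the driver (an illustration, not JaVers' actual migration code):

```java
import org.bson.BsonType;
import org.bson.Document;

import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.model.Filters;
import com.mongodb.client.model.Updates;

// Illustration only: convert commitMetadata.id from a String to a numeric value
// without relying on the removed server-side eval command.
final class CommitIdMigration {

  static void migrate(MongoDatabase mongo) {
    MongoCollection<Document> snapshots = mongo.getCollection("jv_snapshots");
    // only touch snapshots whose commit id is still stored as a string
    for (Document snapshot : snapshots.find(Filters.type("commitMetadata.id", BsonType.STRING))) {
      Document commitMetadata = snapshot.get("commitMetadata", Document.class);
      // the original eval script used JavaScript Number(); parseDouble mirrors that conversion
      double numericId = Double.parseDouble(commitMetadata.getString("id"));
      snapshots.updateOne(
          Filters.eq("_id", snapshot.get("_id")),
          Updates.set("commitMetadata.id", numericId));
    }
  }
}
```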