/**
   * @param dbName the database name
   * @param collName the collection name
   * @param documentId the _id of the document to upsert
   * @param content the new content of the document
   * @param requestEtag the etag sent by the client, for optimistic concurrency control
   * @param patching true for a partial update (PATCH semantics), false for a full replace
   * @return the OperationResult with the HTTP status code of the operation
   */
  @Override
  public OperationResult upsertDocument(
      String dbName,
      String collName,
      Object documentId,
      DBObject content,
      ObjectId requestEtag,
      boolean patching) {
    DB db = client.getDB(dbName);

    DBCollection coll = db.getCollection(collName);

    ObjectId newEtag = new ObjectId();

    if (content == null) {
      content = new BasicDBObject();
    }

    content.put("_etag", newEtag);

    BasicDBObject idQuery = new BasicDBObject("_id", documentId);

    if (patching) {
      DBObject oldDocument =
          coll.findAndModify(
              idQuery, null, null, false, new BasicDBObject("$set", content), false, false);

      if (oldDocument == null) {
        return new OperationResult(HttpStatus.SC_NOT_FOUND);
      } else {
        // check the old etag (and restore the old document version if it does not match)
        return optimisticCheckEtag(coll, oldDocument, newEtag, requestEtag, HttpStatus.SC_OK);
      }
    } else {
      // we use findAndModify to get the @created_on field value from the existing document;
      // if this is an update, we'll need to write it back with a second update.
      // It is not possible to do this with a single update (even using $setOnInsert),
      // because we would also need the $set operator for the other data, which would turn
      // the operation into a partial update (patch semantics)
      DBObject oldDocument = coll.findAndModify(idQuery, null, null, false, content, false, true);

      if (oldDocument != null) { // update
        // check the old etag (and restore the old document if it does not match)
        return optimisticCheckEtag(coll, oldDocument, newEtag, requestEtag, HttpStatus.SC_OK);
      } else { // insert
        return new OperationResult(HttpStatus.SC_CREATED, newEtag);
      }
    }
  }
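The helper optimisticCheckEtag is referenced here but not shown. A minimal sketch of the idea, assuming a 412 response on a mismatch and that "restoring" means writing the old version back (a hypothetical reconstruction, not the original implementation):

  // Hypothetical reconstruction of the helper used above, not the original implementation.
  private OperationResult optimisticCheckEtag(
      DBCollection coll,
      DBObject oldDocument,
      ObjectId newEtag,
      ObjectId requestEtag,
      int httpStatusIfOk) {
    Object oldEtag = oldDocument.get("_etag");

    if (oldEtag == null || oldEtag.equals(requestEtag)) {
      // no etag stored, or the client sent the matching one: the change stands
      return new OperationResult(httpStatusIfOk, newEtag);
    }

    // etag mismatch: put the previous version back and reject the request
    coll.save(oldDocument);
    return new OperationResult(HttpStatus.SC_PRECONDITION_FAILED);
  }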
Example #2
  @Override
  public String ActualizarJson(String nombreDB, String json, int clienteId)
      throws UnknownHostException {
    if (!ExisteCliente(nombreDB, clienteId)) {
      // the client does not exist, so there is nothing to update
      return "No existe el cliente, no se puede actualizar";
    }
    MongoClient mongoClient = new MongoClient("localhost", 27017);
    DB base = mongoClient.getDB(nombreDB);
    DBCollection collection = base.getCollection("Json");

    BasicDBObject document = new BasicDBObject();
    DBObject object = (DBObject) JSON.parse(json);
    document.put("id", clienteId);
    document.put("json", object);

    BasicDBObject query = new BasicDBObject().append("id", clienteId);

    collection.findAndModify(query, document);
    mongoClient.close(); // close the client opened above to avoid leaking connections
    return "Cliente actualizado";
  }
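Note that the two-argument findAndModify above replaces the entire matched document with `document`. If the intent is to change only the `json` field and leave anything else on the document intact, a $set update is the usual alternative; a hedged sketch using the same collection, query and field names as above:

    // Partial update: only the "json" field is touched.
    BasicDBObject setJson =
        new BasicDBObject("$set", new BasicDBObject("json", JSON.parse(json)));
    collection.findAndModify(new BasicDBObject("id", clienteId), setJson);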
  public <T> T findAndDelete(Query<T> query) {
    QueryImpl<T> qi = (QueryImpl<T>) query;

    DBCollection dbColl = qi.getCollection();
    // TODO remove this after testing.
    if (dbColl == null) {
      dbColl = getCollection(qi.getEntityClass());
    }

    EntityCache cache = createCache();

    if (log.isTraceEnabled())
      log.trace("Executing findAndModify(" + dbColl.getName() + ") with delete ...");

    DBObject result =
        dbColl.findAndModify(
            qi.getQueryObject(),
            qi.getFieldsObject(),
            qi.getSortObject(),
            true,
            null,
            false,
            false);

    if (result != null) {
      T entity = (T) mapr.fromDBObject(qi.getEntityClass(), result, cache);
      return entity;
    }

    return null;
  }
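A usage sketch for findAndDelete, assuming the surrounding class is the Morphia Datastore implementation and using a hypothetical Task entity; the call atomically claims and removes one matching document, returning null when nothing matches:

  // Hypothetical entity; any Morphia-mapped class works the same way.
  @Entity("tasks")
  class Task {
    @Id ObjectId id;
    String name;
    String state;
  }

  // Claim the next queued task (query and removal happen in one findAndModify round trip).
  Query<Task> nextQueued = datastore.createQuery(Task.class).field("state").equal("queued");
  Task claimed = datastore.findAndDelete(nextQueued);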
Example #4
  /**
   * Gets the next primary-key id for the collection "collectionName".
   *
   * @param collectionName the name of the collection whose id counter should be incremented
   * @return the next id, or 0 if it could not be obtained
   */
  public Long getIndexIdFromMongo(String collectionName) {

    long indexNum = 0;
    try {
      // collection that stores the id counters
      DBCollection collection = getCollection(TableName.INDEX_TABLE_NAME);
      // look up the counter document by the application collection's name
      BasicDBObject oldDB = new BasicDBObject("name", collectionName);
      BasicDBObject update = new BasicDBObject("$inc", new BasicDBObject("id", 1));
      DBObject reFields = new BasicDBObject();
      reFields.put("id", 1);

      // atomically increment the id (upsert = true, returnNew = true)
      DBObject result = collection.findAndModify(oldDB, reFields, null, false, update, true, true);
      if (result.get("id") == null) {
        throw new RuntimeException("获取主键id异常,请重试!!!");
      }
      indexNum = Long.parseLong(result.get("id").toString());
      if (indexNum <= 0) {
        throw new RuntimeException("Failed to obtain the primary-key id, please retry");
      }
    } catch (Exception e) {
      logger.error("BaseMongoDAO.countRecord查询数据条数时发生异常,入参collectionName=" + collectionName, e);
    }
    return indexNum;
  }
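The method above is the standard atomic-counter pattern: $inc inside a findAndModify with upsert = true and returnNew = true. A minimal self-contained sketch against a local mongod, with hypothetical database and counter names:

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;

public class CounterExample {
  public static void main(String[] args) {
    MongoClient client = new MongoClient("localhost", 27017);
    DBCollection counters = client.getDB("test").getCollection("index");

    // Atomically increment the counter for "orders", creating it on first use,
    // and return the incremented value (returnNew = true, upsert = true).
    DBObject result =
        counters.findAndModify(
            new BasicDBObject("name", "orders"),           // query by counter name
            new BasicDBObject("id", 1),                    // project only the id field
            null,                                          // no sort
            false,                                         // do not remove
            new BasicDBObject("$inc", new BasicDBObject("id", 1L)),
            true,                                          // return the new document
            true);                                         // upsert if missing

    System.out.println("next id = " + result.get("id"));
    client.close();
  }
}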
Example #5
  @Test
  public void testFindAndModifyRemove() {
    DBCollection collection = newCollection();
    collection.insert(new BasicDBObject("_id", 1).append("a", 1));
    DBObject result =
        collection.findAndModify(new BasicDBObject("_id", 1), null, null, true, null, false, false);

    assertEquals(new BasicDBObject("_id", 1).append("a", 1), result);
    assertEquals(null, collection.findOne());
  }
 private static int getRange(String id, int range, DBCollection collection) {
   DBObject doc =
       collection.findAndModify(
           new BasicDBObject("_id", id),
           null,
           null,
           false,
           new BasicDBObject("$inc", new BasicDBObject("counter", range)),
           true,
           true);
   return (Integer) doc.get("counter") - range + 1;
 }
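A usage sketch for the block allocator above: reserve `range` ids in one round trip, then hand them out locally until the block is exhausted (the counter document's `_id` and the `collection` reference are whatever the caller already uses):

   // Reserve 100 ids at once, then assign them without further database round trips.
   int blockSize = 100;
   int nextId = getRange("invoice", blockSize, collection); // first id of the reserved block
   int lastId = nextId + blockSize - 1;                      // last id of the reserved block

   while (nextId <= lastId) {
     int id = nextId++;
     // ... assign id to a new record ...
   }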
Example #7
 public Integer nextId(String nameSpace) {
   return (Integer)
       collection
           .findAndModify(
               new BasicDBObject(MongoKey._id, nameSpace),
               seqField,
               MongoKey.NO_SORT,
               false,
               incSeq,
               true,
               true)
           .get(FIELD);
 }
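FIELD, seqField and incSeq are declared elsewhere in that class; one plausible set of declarations that makes the call above behave as a per-namespace counter (an assumption, not the original code):

 // Assumed declarations; the originals are not shown in this snippet.
 private static final String FIELD = "seq";
 private final DBObject seqField = new BasicDBObject(FIELD, 1);                          // projection
 private final DBObject incSeq = new BasicDBObject("$inc", new BasicDBObject(FIELD, 1)); // update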
Example #8
  @Test
  public void testFindAndModifyUpsertReturnNewFalse() {
    DBCollection collection = newCollection();

    DBObject result =
        collection.findAndModify(
            new BasicDBObject("_id", 1),
            null,
            null,
            false,
            new BasicDBObject("$inc", new BasicDBObject("a", 1)),
            false,
            true);

    assertEquals(new BasicDBObject(), result);
    assertEquals(new BasicDBObject("_id", 1).append("a", 1), collection.findOne());
  }
  /**
   * @param dbName the database name
   * @param collName the collection name
   * @param documentId the _id of the document to delete
   * @param requestEtag the etag sent by the client, for optimistic concurrency control
   * @return the OperationResult with the HTTP status code of the operation
   */
  @Override
  public OperationResult deleteDocument(
      String dbName, String collName, Object documentId, ObjectId requestEtag) {
    DB db = client.getDB(dbName);

    DBCollection coll = db.getCollection(collName);

    BasicDBObject idQuery = new BasicDBObject("_id", documentId);

    DBObject oldDocument = coll.findAndModify(idQuery, null, null, true, null, false, false);

    if (oldDocument == null) {
      return new OperationResult(HttpStatus.SC_NOT_FOUND);
    } else {
      // check the old etag (and restore the old document version if it does not match)
      return optimisticCheckEtag(coll, oldDocument, null, requestEtag, HttpStatus.SC_NO_CONTENT);
    }
  }
Example #10
  /**
   * @param dbName the database name
   * @param collName the collection name
   * @param documentId the _id of the document to upsert
   * @param content the new content of the document
   * @param requestEtag the etag sent by the client, for optimistic concurrency control
   * @return the OperationResult with the HTTP status code of the operation
   */
  @Override
  public OperationResult upsertDocumentPost(
      String dbName, String collName, Object documentId, DBObject content, ObjectId requestEtag) {
    DB db = client.getDB(dbName);

    DBCollection coll = db.getCollection(collName);

    ObjectId newEtag = new ObjectId();

    if (content == null) {
      content = new BasicDBObject();
    }

    content.put("_etag", newEtag);

    Object _idInContent = content.get("_id");

    content.removeField("_id");

    if (_idInContent == null) {
      // new document since the id was just auto-generated
      content.put("_id", documentId);

      coll.insert(content);

      return new OperationResult(HttpStatus.SC_CREATED, newEtag);
    }

    BasicDBObject idQuery = new BasicDBObject("_id", documentId);

    DBObject oldDocument = coll.findAndModify(idQuery, null, null, false, content, false, true);

    if (oldDocument != null) { // update
      // check the old etag (and restore the old document version if it does not match)
      return optimisticCheckEtag(coll, oldDocument, newEtag, requestEtag, HttpStatus.SC_OK);
    } else { // insert
      return new OperationResult(HttpStatus.SC_CREATED, newEtag);
    }
  }
Example #11
  public <T> T findAndModify(
      Query<T> query, UpdateOperations<T> ops, boolean oldVersion, boolean createIfMissing) {
    QueryImpl<T> qi = (QueryImpl<T>) query;

    DBCollection dbColl = qi.getCollection();
    // TODO remove this after testing.
    if (dbColl == null) {
      dbColl = getCollection(qi.getEntityClass());
    }

    if (log.isTraceEnabled()) {
      log.trace("Executing findAndModify(" + dbColl.getName() + ") with update");
    }

    DBObject res =
        dbColl.findAndModify(
            qi.getQueryObject(),
            qi.getFieldsObject(),
            qi.getSortObject(),
            false,
            ((UpdateOpsImpl<T>) ops).getOps(),
            !oldVersion,
            createIfMissing);

    if (res == null) {
      return null;
    }

    return (T) mapr.fromDBObject(qi.getEntityClass(), res, createCache());
  }
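A usage sketch for this wrapper, assuming a hypothetical Morphia-mapped Counter entity and a datastore exposing the method above; with oldVersion = false it returns the post-update entity, and createIfMissing = true creates the counter on first use:

  Query<Counter> q = datastore.createQuery(Counter.class).field("name").equal("orders");
  UpdateOperations<Counter> inc = datastore.createUpdateOperations(Counter.class).inc("value");

  // Returns the incremented Counter (oldVersion = false), upserting it if absent.
  Counter updated = datastore.findAndModify(q, inc, false, true);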
  /**
   * Adds events to the Elasticsearch index for events and to the MongoDB collection so they are
   * searchable for searchsuggest.
   *
   * <p>Step 1.a: try to just update the aliases. Step 1.b: if that fails, create a new entry.
   *
   * <p>Step 2: update totalfreq and doccount.
   *
   * <p>Step 3: after updating totalfreq and doccount, write to ES for every group.
   *
   * @param eventFeatures the association features to update, keyed by index and community id
   */
  public static void updateEventFeatures(
      Map<String, Map<ObjectId, AssociationFeaturePojo>> eventFeatures) {
    // Some diagnostic counters:
    int numCacheMisses = 0;
    int numCacheHits = 0;
    int numNewAssocs = 0;
    long entityAggregationTime = new Date().getTime();

    DBCollection col = DbManager.getFeature().getAssociation();

    // (This fn is normally run for a single community id)
    CommunityFeatureCaches.CommunityFeatureCache currCache = null;

    String savedSyncTime = null;
    for (Map<ObjectId, AssociationFeaturePojo> evtCommunity : eventFeatures.values()) {

      Iterator<Map.Entry<ObjectId, AssociationFeaturePojo>> it = evtCommunity.entrySet().iterator();
      while (it.hasNext()) {
        Map.Entry<ObjectId, AssociationFeaturePojo> evtFeatureKV = it.next();
        try {
          AssociationFeaturePojo evtFeature = evtFeatureKV.getValue();
          long nSavedDocCount = evtFeature.getDoccount();

          ObjectId communityID = evtFeature.getCommunityId();

          if ((null == currCache) || !currCache.getCommunityId().equals(evtFeatureKV.getKey())) {
            currCache = CommunityFeatureCaches.getCommunityFeatureCache(evtFeatureKV.getKey());
            if (_diagnosticMode) {
              if (_logInDiagnosticMode)
                System.out.println(
                    "AssociationAggregationUtils.updateEventFeatures, Opened cache for community: "
                        + evtFeatureKV.getKey());
            }
          } // TESTED (by hand)

          // Is this in our cache? If so can short cut a bunch of the DB interaction:
          AssociationFeaturePojo cachedAssoc = currCache.getCachedAssocFeature(evtFeature);
          if (null != cachedAssoc) {
            if (_incrementalMode) {
              if (_diagnosticMode) {
                if (_logInDiagnosticMode)
                  System.out.println(
                      "AssociationAggregationUtils.updateEventFeatures, skip cached: "
                          + cachedAssoc.toDb());
                // TODO (INF-2825): should be continue-ing here so can use delta more efficiently...
              }
            } else if (_diagnosticMode) {
              if (_logInDiagnosticMode)
                System.out.println(
                    "AssociationAggregationUtils.updateEventFeatures, grabbed cached: "
                        + cachedAssoc.toDb());
            }
            numCacheHits++;
          } // TESTED (by hand)
          else {
            numCacheMisses++;
          }

          // try to update
          BasicDBObject query =
              new BasicDBObject(AssociationFeaturePojo.index_, evtFeature.getIndex());
          query.put(AssociationFeaturePojo.communityId_, communityID);

          // Step1 try to update alias
          // update arrays
          BasicDBObject multiopAliasArrays = new BasicDBObject();
          // Entity1 Alias:
          if (null != evtFeature.getEntity1_index()) {
            evtFeature.addEntity1(evtFeature.getEntity1_index());
          }
          if (null != evtFeature.getEntity1()) {
            if ((null == cachedAssoc)
                || (null == cachedAssoc.getEntity1())
                || !cachedAssoc.getEntity1().containsAll(evtFeature.getEntity1())) {
              BasicDBObject multiopE =
                  new BasicDBObject(MongoDbManager.each_, evtFeature.getEntity1());
              multiopAliasArrays.put(AssociationFeaturePojo.entity1_, multiopE);
            }
          } // TESTED (by hand)

          // Entity2 Alias:
          if (null != evtFeature.getEntity2_index()) {
            evtFeature.addEntity2(evtFeature.getEntity2_index());
          }
          if (null != evtFeature.getEntity2()) {
            if ((null == cachedAssoc)
                || (null == cachedAssoc.getEntity2())
                || !cachedAssoc.getEntity2().containsAll(evtFeature.getEntity2())) {
              BasicDBObject multiopE =
                  new BasicDBObject(MongoDbManager.each_, evtFeature.getEntity2());
              multiopAliasArrays.put(AssociationFeaturePojo.entity2_, multiopE);
            }
          } // TESTED (by hand)

          // verb/verb cat alias:
          if (null != evtFeature.getVerb_category()) {
            evtFeature.addVerb(evtFeature.getVerb_category());
          }
          if (null != evtFeature.getVerb()) {
            if ((null == cachedAssoc)
                || (null == cachedAssoc.getVerb())
                || !cachedAssoc.getVerb().containsAll(evtFeature.getVerb())) {
              BasicDBObject multiopE =
                  new BasicDBObject(MongoDbManager.each_, evtFeature.getVerb());
              multiopAliasArrays.put(AssociationFeaturePojo.verb_, multiopE);
            }
          } // TESTED (by hand)

          // OK - now we can copy across the fields into the cache:
          if (null != cachedAssoc) {
            currCache.updateCachedAssocFeatureStatistics(
                cachedAssoc, evtFeature); // (evtFeature is now fully up to date)
          } // TESTED (by hand)

          BasicDBObject updateOp = new BasicDBObject();
          if (!multiopAliasArrays.isEmpty()) {
            updateOp.put(MongoDbManager.addToSet_, multiopAliasArrays);
          }
          // Document count for this event
          BasicDBObject updateFreqDocCount =
              new BasicDBObject(AssociationFeaturePojo.doccount_, nSavedDocCount);
          updateOp.put(MongoDbManager.inc_, updateFreqDocCount);

          BasicDBObject fields = new BasicDBObject(AssociationFeaturePojo.doccount_, 1);
          fields.put(AssociationFeaturePojo.entity1_, 1);
          fields.put(AssociationFeaturePojo.entity2_, 1);
          fields.put(AssociationFeaturePojo.verb_, 1);
          // (slightly annoying: we only want these if we are updating the doc count, but we
          // won't know that until after we've fetched this object)

          fields.put(AssociationFeaturePojo.db_sync_time_, 1);
          fields.put(AssociationFeaturePojo.db_sync_doccount_, 1);

          DBObject dboUpdate = null;
          if (_diagnosticMode) {
            if (null == cachedAssoc) {
              dboUpdate = col.findOne(query, fields);
            }
          } else {
            if (null != cachedAssoc) {
              col.update(query, updateOp, false, false);
            } else { // Not cached - so have to grab the feature we're either getting or creating
              dboUpdate =
                  col.findAndModify(
                      query, fields, new BasicDBObject(), false, updateOp, false, true);
              // (can use findAndModify because specify index, ie the shard key)
              // (returns event before the changes above, update the feature object below)
              // (also atomically creates the object if it doesn't exist so is "distributed-safe")
            }
          }
          if ((null != cachedAssoc)
              || ((dboUpdate != null) && !dboUpdate.keySet().isEmpty())) // (feature already exists)
          {
            AssociationFeaturePojo egp = cachedAssoc;

            if (null == egp) {
              egp = AssociationFeaturePojo.fromDb(dboUpdate, AssociationFeaturePojo.class);
              evtFeature.setDoccount(egp.getDoccount() + nSavedDocCount);
              evtFeature.setDb_sync_doccount(egp.getDb_sync_doccount());
              evtFeature.setDb_sync_time(egp.getDb_sync_time());
              if (null != egp.getEntity1()) {
                for (String ent : egp.getEntity1()) evtFeature.addEntity1(ent);
              }
              if (null != egp.getEntity2()) {
                for (String ent : egp.getEntity2()) evtFeature.addEntity2(ent);
              }
              if (null != egp.getVerb()) {
                for (String verb : egp.getVerb()) evtFeature.addVerb(verb);
              }
            } // TESTED (cached and non-cached cases)
            // (in the cached case, evtFeature has already been updated by
            // updateCachedAssocFeatureStatistics)

            if (_diagnosticMode) {
              if (_logInDiagnosticMode)
                System.out.println(
                    "AssociationAggregationUtils.updateEventFeatures, found: "
                        + ((BasicDBObject) egp.toDb()).toString());
              if (_logInDiagnosticMode)
                System.out.println(
                    "AssociationAggregationUtils.updateEventFeatures, ^^^ found from query: "
                        + query.toString()
                        + " / "
                        + updateOp.toString());
            }
            // (In background aggregation mode we update db_sync_prio when checking the -otherwise
            // unused, unlike entities- document update schedule)
          } else // (the object in memory is now an accurate representation of the database, minus
                 // some fields we'll now add)
          {
            numNewAssocs++;

            // Synchronization settings for the newly created object
            evtFeature.setDb_sync_doccount(nSavedDocCount);
            if (null == savedSyncTime) {
              savedSyncTime = Long.toString(System.currentTimeMillis());
            }
            evtFeature.setDb_sync_time(savedSyncTime);

            // This is all "distributed safe" (apart from the db_syc_xxx and it doesn't matter if
            // that is
            // out of date, the update will just be slightly out-of-date at worst) since (otherwise)
            // these fields are
            // only set here, and the findAndModify is atomic

            BasicDBObject baseFields = new BasicDBObject();
            if (null != evtFeature.getEntity1_index()) {
              baseFields.put(AssociationFeaturePojo.entity1_index_, evtFeature.getEntity1_index());
            }
            if (null != evtFeature.getEntity2_index()) {
              baseFields.put(AssociationFeaturePojo.entity2_index_, evtFeature.getEntity2_index());
            }
            if (null != evtFeature.getVerb_category()) {
              baseFields.put(AssociationFeaturePojo.verb_category_, evtFeature.getVerb_category());
            }
            baseFields.put(AssociationFeaturePojo.assoc_type_, evtFeature.getAssociation_type());
            baseFields.put(
                AssociationFeaturePojo.db_sync_doccount_, evtFeature.getDb_sync_doccount());
            baseFields.put(AssociationFeaturePojo.db_sync_time_, evtFeature.getDb_sync_time());
            baseFields.put(
                AssociationFeaturePojo.db_sync_prio_,
                1000.0); // (ensures new objects are quickly index-synchronized)

            if (!_diagnosticMode) {
              // Store the object
              col.update(query, new BasicDBObject(MongoDbManager.set_, baseFields));
            } else {
              if (_logInDiagnosticMode)
                System.out.println(
                    "AssociationAggregationUtils.updateEventFeatures, not found: "
                        + query.toString()
                        + " / "
                        + baseFields.toString()
                        + "/ orig_update= "
                        + updateOp.toString());
            }

            // (Note even in background aggregation mode we still perform the feature
            // synchronization
            //  for new entities - and it has to be right at the end because it "corrupts" the
            // objects)

          } // (end if first time seen)

          if (null == cachedAssoc) { // First time we've seen this locally, so add to cache
            currCache.addCachedAssocFeature(evtFeature);
            if (_diagnosticMode) {
              if (_logInDiagnosticMode)
                System.out.println(
                    "AssociationAggregationUtils.updateEventFeatures, added to cache: "
                        + evtFeature.toDb());
            }
          } // TESTED (by hand)
        } catch (Exception e) {
          // Exception, remove from feature list
          it.remove();

          // If an exception occurs log the error
          logger.error("Exception Message: " + e.getMessage(), e);
        }
      } // (end loop over all communities for the set of features sharing and index)
    } // (end loop over indexes)

    if ((numCacheHits > 0) || (numCacheMisses > 0)) { // ie some assocs were grabbed
      int cacheSize = 0;
      if (null != currCache) {
        cacheSize = currCache.getAssocCacheSize();
      }
      StringBuffer logMsg =
          new StringBuffer() // (should append key, but don't have that...)
              .append(" assoc_agg_time_ms=")
              .append(new Date().getTime() - entityAggregationTime)
              .append(" total_assocs=")
              .append(eventFeatures.size())
              .append(" new_assocs=")
              .append(numNewAssocs)
              .append(" cache_misses=")
              .append(numCacheMisses)
              .append(" cache_hits=")
              .append(numCacheHits)
              .append(" cache_size=")
              .append(cacheSize);

      logger.info(logMsg.toString());
    }
  } // TESTED (by eye, reasonably significant changes, but still based on proven Beta code)
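Stripped of the caching and diagnostics, the heart of the method above is a single findAndModify that both creates an association feature on first sight and grows it afterwards. A simplified sketch of that upsert pattern, with hypothetical field names and values:

  // Simplified, hypothetical version of the upsert used above.
  BasicDBObject query = new BasicDBObject("index", "acme_buys_widgetco");
  query.put("communityId", new ObjectId());

  BasicDBObject update = new BasicDBObject();
  update.put(
      "$addToSet",
      new BasicDBObject(
          "entity1", new BasicDBObject("$each", java.util.Arrays.asList("acme", "acme inc"))));
  update.put("$inc", new BasicDBObject("doccount", 1));

  // Returns the document as it was *before* this update (returnNew = false),
  // creating it atomically if it did not exist (upsert = true).
  DBObject before = col.findAndModify(query, null, null, false, update, false, true);
  boolean firstTimeSeen = (before == null) || before.keySet().isEmpty();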
  /**
   * Updates the feature entries for the list of entities that was just extracted, including
   * changing the frequency, adding aliases, etc.
   *
   * <p>This method now has 3 steps: 1. Try to update the aliases. 1.a. If that fails, create a
   * new gaz entry. 2. Update totalfreq and doccount.
   *
   * @param entFeatures the entity features to update, keyed by index and community id
   */
  public static void updateEntityFeatures(
      Map<String, Map<ObjectId, EntityFeaturePojo>> entFeatures) {
    DBCollection col = DbManager.getFeature().getEntity();
    String savedSyncTime = null;
    for (Map<ObjectId, EntityFeaturePojo> entCommunity : entFeatures.values()) {

      Iterator<Map.Entry<ObjectId, EntityFeaturePojo>> it = entCommunity.entrySet().iterator();
      while (it.hasNext()) {
        Map.Entry<ObjectId, EntityFeaturePojo> entFeatureKV = it.next();
        try {
          EntityFeaturePojo entFeature = entFeatureKV.getValue();

          long nSavedDocCount = entFeature.getDoccount();
          long nSavedFreqCount = entFeature.getTotalfreq();
          // (these should be constant across all communities, but keep them here so they can be
          //  assigned from entFeature; it's very cheap, so no need to fetch them once like the
          //  sync vars)

          ObjectId communityID = entFeature.getCommunityId();
          if (null != communityID) {
            // For each community, see if the entity feature already exists *for that community*

            BasicDBObject query =
                new BasicDBObject(EntityFeaturePojo.index_, entFeature.getIndex());
            query.put(EntityFeaturePojo.communityId_, communityID);
            BasicDBObject updateOp = new BasicDBObject();
            // Add aliases:
            BasicDBObject updateOpA = new BasicDBObject();
            BasicDBObject multiopE = new BasicDBObject(MongoDbManager.each_, entFeature.getAlias());
            updateOpA.put(EntityFeaturePojo.alias_, multiopE);
            // Add link data, if there is any:
            if ((null != entFeature.getSemanticLinks())
                && !entFeature.getSemanticLinks().isEmpty()) {
              BasicDBObject multiopF =
                  new BasicDBObject(MongoDbManager.each_, entFeature.getSemanticLinks());
              updateOpA.put(EntityFeaturePojo.linkdata_, multiopF);
            }
            updateOp.put(MongoDbManager.addToSet_, updateOpA);
            // Update frequency:
            BasicDBObject updateOpB = new BasicDBObject();
            updateOpB.put(EntityFeaturePojo.totalfreq_, nSavedFreqCount);
            updateOpB.put(EntityFeaturePojo.doccount_, nSavedDocCount);
            updateOp.put(MongoDbManager.inc_, updateOpB);

            // try to use find/modify to see if something comes back and set doc freq/totalfreq
            BasicDBObject fields = new BasicDBObject(EntityFeaturePojo.totalfreq_, 1);
            fields.put(EntityFeaturePojo.doccount_, 1);
            fields.put(EntityFeaturePojo.alias_, 1);
            fields.put(EntityFeaturePojo.linkdata_, 1);
            // (slightly annoying: we only want these 2 largish fields if we are updating the
            // frequency, but we won't know that until after we've fetched this object)
            fields.put(EntityFeaturePojo.db_sync_time_, 1);
            fields.put(EntityFeaturePojo.db_sync_doccount_, 1);

            DBObject dboUpdate = null;
            if (_diagnosticMode) {
              dboUpdate = col.findOne(query, fields);
            } else {
              dboUpdate =
                  col.findAndModify(
                      query, fields, new BasicDBObject(), false, updateOp, false, true);
              // (can use findAndModify because specify index, ie the shard key)
              // (returns entity before the changes above, update the feature object below)
              // (also atomically creates the object if it doesn't exist so is "distributed-safe")
            }
            if ((dboUpdate != null) && !dboUpdate.keySet().isEmpty()) {
              // (Update the entity feature to be correct so that it can be accurately synchronized
              // with the index)
              EntityFeaturePojo gp = EntityFeaturePojo.fromDb(dboUpdate, EntityFeaturePojo.class);
              entFeature.setTotalfreq(gp.getTotalfreq() + nSavedFreqCount);
              entFeature.setDoccount(gp.getDoccount() + nSavedDocCount);
              entFeature.setDbSyncDoccount(gp.getDbSyncDoccount());
              entFeature.setDbSyncTime(gp.getDbSyncTime());
              if (null != gp.getAlias()) {
                entFeature.addAllAlias(gp.getAlias());
              }
              if (null != gp.getSemanticLinks()) {
                entFeature.addToSemanticLinks(gp.getSemanticLinks());
              }
              if (_diagnosticMode) {
                System.out.println(
                    "EntityAggregationUtils.updateEntityFeatures, found: "
                        + ((BasicDBObject) gp.toDb()).toString());
                System.out.println(
                    "EntityAggregationUtils.updateEntityFeatures, ^^^ found from query: "
                        + query.toString()
                        + " / "
                        + updateOp.toString());
              }
            } else // (the object in memory is now an accurate representation of the database, minus
                   // some fields we'll now add)
            {
              // Synchronization settings for the newly created object
              if (null == savedSyncTime) {
                savedSyncTime = Long.toString(System.currentTimeMillis());
              }
              entFeature.setDbSyncDoccount(nSavedDocCount);
              entFeature.setDbSyncTime(savedSyncTime);

              // This is all "distributed safe" (apart from the db_syc_xxx and it doesn't matter if
              // that is
              // out of date, the update will just be slightly out-of-date at worst) since
              // (otherwise) these fields are
              // only set here, and the findAndModify is atomic

              // (Do in raw MongoDB for performance)
              BasicDBObject baseFields = new BasicDBObject();
              baseFields.put(EntityFeaturePojo.dimension_, entFeature.getDimension().toString());
              baseFields.put(EntityFeaturePojo.type_, entFeature.getType());
              baseFields.put(
                  EntityFeaturePojo.disambiguated_name_, entFeature.getDisambiguatedName());
              baseFields.put(EntityFeaturePojo.db_sync_doccount_, entFeature.getDbSyncDoccount());
              baseFields.put(EntityFeaturePojo.db_sync_time_, entFeature.getDbSyncTime());
              if ((null != entFeature.getSemanticLinks())
                  && !entFeature.getSemanticLinks().isEmpty()) {
                baseFields.put(EntityFeaturePojo.linkdata_, entFeature.getSemanticLinks());
              }

              // attempt to add geotag (makes necessary checks on util side)
              // also add ontology type if geotag is found
              EntityGeotagAggregationUtils.addEntityGeo(entFeature);
              if (entFeature.getGeotag() != null) {
                BasicDBObject geo = new BasicDBObject(GeoPojo.lat_, entFeature.getGeotag().lat);
                geo.put(GeoPojo.lon_, entFeature.getGeotag().lon);
                baseFields.put(EntityFeaturePojo.geotag_, geo);

                if (entFeature.getOntology_type() != null) {
                  baseFields.put(EntityFeaturePojo.ontology_type_, entFeature.getOntology_type());
                }
              }

              if (!_diagnosticMode) {
                // Store the object
                col.update(query, new BasicDBObject(MongoDbManager.set_, baseFields));
              } else {
                System.out.println(
                    "EntityAggregationUtils.updateEntityFeatures, not found: "
                        + query.toString()
                        + ": "
                        + baseFields.toString());
              }
              entFeature.setDbSyncTime(null); // (ensures that index re-sync will occur)
            }
          }
        } catch (Exception e) {
          // Exception, remove from feature list
          it.remove();

          // If an exception occurs log the error
          logger.error("Exception Message: " + e.getMessage(), e);
        }
      } // (end loop over communities)
    } // (end loop over indexes)
  } // TESTED (just by eye - made few changes during re-factoring)