/**
   * Updates the GridFSDBFile in the associated DB with the key/values in updateKeys.
   *
   * @param updateKeys map of tag field names ("artist", "album", "title", "track", "year") to
   *     their new values
   * @param file GridFSDBFile to update with tag data
   * @param db database holding the GridFS bucket and the "songs" collection
   * @param songId ID of the Song document to update with tag data
   * @return true if the retagged file was stored under its original ID and the Song document
   *     was updated; false otherwise
   */
  public static boolean updateFile(
      Map<String, String> updateKeys,
      GridFSDBFile file,
      DB db,
      ObjectId songId) { // TODO updateKeys?
    File audioFile = null;
    try {
      audioFile = File.createTempFile("tmp", ".mp3");
    } catch (IOException e) {
      log.error("tmp file not created", e);
      // Without a temp file there is nothing to write the tags to; bail out early
      // instead of hitting a NullPointerException below.
      return false;
    }

    audioFile.deleteOnExit();
    AudioFile f = null;
    // ID of the existing GridFS file; the retagged copy is re-stored under this same ID.
    ObjectId id = (ObjectId) file.getId();
    // ID reported for the re-inserted file; stays null if any step fails.
    ObjectId oid = null;
    try {
      // Dump the GridFS contents to disk so jaudiotagger can read and rewrite the tags.
      file.writeTo(audioFile);
      f = AudioFileIO.read(audioFile);
      Tag tag = f.getTagOrCreateAndSetDefault();
      // Query and $set update for the matching Song document in the "songs" collection.
      DBObject q = new BasicDBObject("_id", songId);
      DBObject o = new BasicDBObject("$set", new BasicDBObject(updateKeys));

      if (updateKeys.get("artist") != null) {
        tag.setField(FieldKey.ARTIST, updateKeys.get("artist"));
      }
      if (updateKeys.get("album") != null) {
        tag.setField(FieldKey.ALBUM, updateKeys.get("album"));
      }
      if (updateKeys.get("title") != null) {
        tag.setField(FieldKey.TITLE, updateKeys.get("title"));
      }
      if (updateKeys.get("track") != null) {
        tag.setField(FieldKey.TRACK, updateKeys.get("track"));
      }
      if (updateKeys.get("year") != null) {
        tag.setField(FieldKey.YEAR, updateKeys.get("year"));
      }
      // Persist the new tags, then swap the stored copy: remove the old GridFS entry and
      // re-insert the retagged file under the same ID.
      AudioFileIO.write(f);
      GridFS myFS = new GridFS(db);
      myFS.remove(id);
      GridFSInputFile inputFile =
          putSongFileInDB(f.getFile(), db, file.getContentType(), file.getFilename(), id);
      oid = (ObjectId) inputFile.getId();
      // Update the Song document only once the file is stored under its original ID.
      if (oid.equals(id)) {
        db.getCollection("songs").update(q, o);
      }
    } catch (KeyNotFoundException knfe) {
      log.error("key not found", knfe);
    } catch (FieldDataInvalidException fdie) {
      log.error("tried to set field with invalid value", fdie);
    } catch (Exception e) {
      log.error("error reading/writing file", e);
    }
    // oid is null if any step above failed; avoid an NPE and report failure.
    return oid != null && oid.equals(id);
  }
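
  /**
   * Usage sketch (illustrative, not part of the original API): retags one stored song.
   * Assumes {@code db} is connected and that {@code fileId} and {@code songId} refer to an
   * existing GridFS file and Song document; all names here are hypothetical.
   */
  public static boolean retagSongExample(DB db, ObjectId fileId, ObjectId songId) {
    GridFSDBFile file = new GridFS(db).findOne(fileId);
    if (file == null) {
      return false; // nothing to retag
    }
    Map<String, String> updates = new java.util.HashMap<String, String>();
    updates.put("artist", "New Artist");
    updates.put("title", "New Title");
    return updateFile(updates, file, db, songId);
  }
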
  private Timestamp<?> processOplogEntry(final DBObject entry, final Timestamp<?> startTimestamp)
      throws InterruptedException {
    // To support transactions, TokuMX wraps one or more operations in a single oplog entry as a
    // list. As long as clients are not transaction-aware, we can fairly safely assume the list
    // contains only one operation. Supporting genuine multi-operation transactions will require
    // a bit more logic here.
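    // Illustrative shape (an assumption about TokuMX's format, not verified here): a
    // transactional entry wraps its operations in an array, roughly
    //   { ts: ..., ops: [ { op: "i", ns: "mydb.songs", o: { _id: ..., title: "..." } } ] }
    // and flattenOps() presumably lifts that single operation back to the top level.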
    flattenOps(entry);

    if (!isValidOplogEntry(entry, startTimestamp)) {
      return startTimestamp;
    }
    Operation operation = Operation.fromString(entry.get(MongoDBRiver.OPLOG_OPERATION).toString());
    String namespace = entry.get(MongoDBRiver.OPLOG_NAMESPACE).toString();
    String collection = null;
    Timestamp<?> oplogTimestamp = Timestamp.on(entry);
    DBObject object = (DBObject) entry.get(MongoDBRiver.OPLOG_OBJECT);

    if (definition.isImportAllCollections()) {
      if (namespace.startsWith(definition.getMongoDb()) && !namespace.equals(cmdOplogNamespace)) {
        collection = getCollectionFromNamespace(namespace);
      }
    } else {
      collection = definition.getMongoCollection();
    }
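    // At this point collection is e.g. "songs" for namespace "mydb.songs" (illustrative names);
    // when importing all collections, it stays null for entries outside the configured database
    // and for command-namespace entries.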

    if (namespace.equals(cmdOplogNamespace)) {
      if (object.containsField(MongoDBRiver.OPLOG_DROP_COMMAND_OPERATION)) {
        operation = Operation.DROP_COLLECTION;
        if (definition.isImportAllCollections()) {
          collection = object.get(MongoDBRiver.OPLOG_DROP_COMMAND_OPERATION).toString();
          if (collection.startsWith("tmp.mr.")) {
            return startTimestamp;
          }
        }
      }
      if (object.containsField(MongoDBRiver.OPLOG_DROP_DATABASE_COMMAND_OPERATION)) {
        operation = Operation.DROP_DATABASE;
      }
    }
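    // For reference, the checks above follow the standard oplog command shapes: a collection
    // drop arrives as { op: "c", ns: "mydb.$cmd", o: { drop: "songs" } } and a database drop
    // as { op: "c", ns: "mydb.$cmd", o: { dropDatabase: 1 } } (names illustrative).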

    logger.trace("namespace: {} - operation: {}", namespace, operation);
    if (namespace.equals(MongoDBRiver.OPLOG_ADMIN_COMMAND) && operation == Operation.COMMAND) {
      processAdminCommandOplogEntry(entry, startTimestamp);
      return startTimestamp;
    }

    if (logger.isTraceEnabled()) {
      logger.trace("MongoDB object deserialized: {}", object.toString());
      logger.trace("collection: {}", collection);
      logger.trace("oplog entry - namespace [{}], operation [{}]", namespace, operation);
      logger.trace("oplog processing item {}", entry);
    }

    String objectId = getObjectIdFromOplogEntry(entry);
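    // Illustrative: TokuMX logs the full deleted document, e.g.
    //   { op: "d", ns: "mydb.songs", o: { _id: ..., artist: "...", title: "..." } }
    // while vanilla MongoDB logs only { _id: ... }; the branch below trims the entry to match.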
    if (operation == Operation.DELETE) {
      // Include only _id in the data, as vanilla MongoDB does, so transformation scripts
      // are not broken by TokuMX's full-document delete entries.
      if (object.containsField(MongoDBRiver.MONGODB_ID_FIELD)) {
        if (object.keySet().size() > 1) {
          object = new BasicDBObject(MongoDBRiver.MONGODB_ID_FIELD, objectId);
          entry.put(MongoDBRiver.OPLOG_OBJECT, object);
        }
      } else {
        throw new NullPointerException(MongoDBRiver.MONGODB_ID_FIELD);
      }
    }

    if (definition.isMongoGridFS()
        && namespace.endsWith(MongoDBRiver.GRIDFS_FILES_SUFFIX)
        && (operation == Operation.INSERT || operation == Operation.UPDATE)) {
      if (objectId == null) {
        throw new NullPointerException(MongoDBRiver.MONGODB_ID_FIELD);
      }
      GridFS grid = new GridFS(mongo.getDB(definition.getMongoDb()), collection);
      GridFSDBFile file = grid.findOne(new ObjectId(objectId));
      if (file != null) {
        logger.info("Caught file: {} - {}", file.getId(), file.getFilename());
        object = file;
      } else {
        logger.warn("Cannot find file from id: {}", objectId);
      }
    }
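    // e.g. an insert into "mydb.fs.files" (the GridFS metadata collection; names illustrative)
    // is re-read here as a full GridFSDBFile so the file content, not just the metadata stub,
    // reaches the stream as an attachment.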

    if (object instanceof GridFSDBFile) {
      if (objectId == null) {
        throw new NullPointerException(MongoDBRiver.MONGODB_ID_FIELD);
      }
      if (logger.isTraceEnabled()) {
        logger.trace("Add attachment: {}", objectId);
      }
      addToStream(operation, oplogTimestamp, applyFieldFilter(object), collection);
    } else if (operation == Operation.UPDATE) {
      DBObject update = (DBObject) entry.get(MongoDBRiver.OPLOG_UPDATE);
      logger.debug("Updated item: {}", update);
      addQueryToStream(operation, oplogTimestamp, update, collection);
    } else if (operation == Operation.INSERT) {
      addInsertToStream(oplogTimestamp, applyFieldFilter(object), collection);
    } else {
      addToStream(operation, oplogTimestamp, applyFieldFilter(object), collection);
    }
    return oplogTimestamp;
  }
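
  /**
   * Sketch (hypothetical helper, not the class's real implementation): illustrates the
   * "db.collection" split that getCollectionFromNamespace() above is expected to perform.
   */
  private static String collectionFromNamespaceSketch(String namespace) {
    int dot = namespace.indexOf('.');
    // Everything after the first dot is the collection part, e.g. "songs" or "fs.files".
    return dot < 0 ? null : namespace.substring(dot + 1);
  }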