@Override
 public ByteArrayFileCache getBlob(final MD5 md5, final ByteArrayFileCacheManager bafcMan)
     throws NoSuchBlobException, BlobStoreCommunicationException, FileCacheIOException,
         FileCacheLimitExceededException {
   final GridFSDBFile out;
   try {
     out = getFile(md5);
     if (out == null) {
       throw new NoSuchBlobException(
           "Attempt to retrieve non-existant blob with chksum " + md5.getMD5());
     }
     final boolean sorted;
     if (!out.containsField(Fields.GFS_SORTED)) {
       sorted = false;
     } else {
       sorted = (Boolean) out.get(Fields.GFS_SORTED);
     }
     final InputStream file = out.getInputStream();
     try {
       return bafcMan.createBAFC(file, true, sorted);
     } finally {
       try {
         file.close();
       } catch (IOException ioe) {
         throw new RuntimeException("Something is broken", ioe);
       }
     }
   } catch (MongoException me) {
     throw new BlobStoreCommunicationException("Could not read from the mongo database", me);
   }
 }
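The getFile helper used above is not part of this excerpt; a minimal sketch, assuming the blobs live in a GridFS bucket (the gfs handle is an assumption) and are looked up via the standard GridFS "md5" field:

 private GridFSDBFile getFile(final MD5 md5) throws MongoException {
   // "md5" is the checksum field GridFS maintains on every stored file.
   return gfs.findOne(new BasicDBObject("md5", md5.getMD5()));
 }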
Example #2
  public List<String> findDocuments(String id) throws DocumentException {

    try {
      GridFS gridFS = new GridFS(dataBase);
      String key = "metadata." + DocumentConnector.ID;
      BasicDBObject query = new BasicDBObject(key, id);
      List<GridFSDBFile> gridFSDBFiles = gridFS.find(query);

      CommandResult result = dataBase.getLastError();
      if (!result.ok()) {
        throw new DocumentException(result.getErrorMessage());
      }

      List<String> objects = new ArrayList<String>();
      for (GridFSDBFile gridFSDBFile : gridFSDBFiles) {
        ObjectId objectId = (ObjectId) gridFSDBFile.getId();
        objects.add(objectId.toStringMongod());
      }

      return objects;
    } catch (Exception e) {
      log.error("findDocuments error:" + e.getMessage());
      e.printStackTrace();
      throw new DocumentException(e.getMessage());
    }
  }
Example #3
  public String getDocument(String GUID, OutputStream out) throws DocumentException {

    try {
      GridFS gridFS = new GridFS(dataBase);

      ObjectId key = new ObjectId(GUID);
      GridFSDBFile gridFSDBFile = gridFS.find(key);

      if (gridFSDBFile == null) {
        throw new DocumentException("No existe el documento");
      }
      if (out != null) gridFSDBFile.writeTo(out);
      CommandResult result = dataBase.getLastError();
      if (!result.ok()) {
        throw new DocumentException(result.getErrorMessage());
      }

      DBObject dbObject = gridFSDBFile.getMetaData();
      return JSON.serialize(dbObject);
    } catch (Exception e) {
      log.error("getDocument error:" + e.getMessage());
      e.printStackTrace();
      throw new DocumentException(e.getMessage());
    }
  }
 /**
  * Return the file type or <tt>null</tt>.
  *
  * @param path the path to test
  * @return the file type, or <tt>null</tt> if no resource exists at the path
  */
 protected final Type getType(JailedResourcePath path) {
   GridFSDBFile file = getGridFSDBFile(path, false);
   if (file == null) {
     return null;
   }
   Type type = Type.valueOf((String) file.get(RESOURCE_TYPE));
   return type;
 }
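The getGridFSDBFile helper used here (and by rename and touch below) is not shown; a rough sketch, assuming resources are stored with their path as the GridFS filename and that the fs handle and getFilename helper exist as named here:

 protected GridFSDBFile getGridFSDBFile(JailedResourcePath path, boolean required) {
   GridFSDBFile file = fs.findOne(new BasicDBObject("filename", getFilename(path)));
   if (file == null && required) {
     throw new IllegalStateException("No resource stored at " + path);
   }
   return file;
 }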
 @Override
 public Resource rename(String name) {
   GridFSDBFile gridFSDBFile = getGridFSDBFile(getPath(), true);
   JailedResourcePath renamed = getPath().unjail().getParent().get(name);
   gridFSDBFile.put("filename", getFilename(renamed));
   gridFSDBFile.save();
   return getRenamedResource(renamed);
 }
  @Test
  public void testImportAttachment() throws Exception {
    logger.debug("*** testImportAttachment ***");
    byte[] content =
        copyToBytesFromClasspath(
            "/test/elasticsearch/plugin/river/mongodb/gridfs/test-attachment.html");
    logger.debug("Content in bytes: {}", content.length);
    GridFS gridFS = new GridFS(mongoDB);
    GridFSInputFile in = gridFS.createFile(content);
    in.setFilename("test-attachment.html");
    in.setContentType("text/html");
    in.save();
    in.validate();

    String id = in.getId().toString();
    logger.debug("GridFS in: {}", in);
    logger.debug("Document created with id: {}", id);

    GridFSDBFile out = gridFS.findOne(in.getFilename());
    logger.debug("GridFS from findOne: {}", out);
    out = gridFS.findOne(new ObjectId(id));
    logger.debug("GridFS from findOne: {}", out);
    Assert.assertEquals(out.getId(), in.getId());

    Thread.sleep(wait);
    refreshIndex();

    CountResponse countResponse = getNode().client().count(countRequest(getIndex())).actionGet();
    logger.debug("Index total count: {}", countResponse.getCount());
    assertThat(countResponse.getCount(), equalTo(1L));

    countResponse =
        getNode().client().count(countRequest(getIndex()).query(fieldQuery("_id", id))).actionGet();
    logger.debug("Index count for id {}: {}", id, countResponse.getCount());
    assertThat(countResponse.getCount(), equalTo(1L));

    SearchResponse response =
        getNode()
            .client()
            .prepareSearch(getIndex())
            .setQuery(QueryBuilders.queryString("Aliquam"))
            .execute()
            .actionGet();
    logger.debug("SearchResponse {}", response.toString());
    long totalHits = response.getHits().getTotalHits();
    logger.debug("TotalHits: {}", totalHits);
    assertThat(totalHits, equalTo(1L));

    gridFS.remove(new ObjectId(id));

    Thread.sleep(wait);
    refreshIndex();

    countResponse =
        getNode().client().count(countRequest(getIndex()).query(fieldQuery("_id", id))).actionGet();
    logger.debug("Count after delete request: {}", countResponse.getCount());
    assertThat(countResponse.getCount(), equalTo(0L));
  }
Example #7
 /**
  * Queries GridFS for the cover image binary and sends it to the user.
  *
  * @param isbn Match against the aliases of all files in the GridFS store.
  * @return The cover image response, or a 404 result if no matching file is found.
  */
 public static Result cover(String isbn) {
   GridFSDBFile cover = Book.findCoverByISBN(isbn);
   response().setContentType("image/gif");
   if (null != cover) {
     return ok(cover.getInputStream());
   } else {
     return notFound();
   }
 }
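Book.findCoverByISBN is not shown; a minimal sketch of the alias lookup described in the Javadoc, assuming a dedicated GridFS bucket for covers (the db handle and bucket name are assumptions):

 public static GridFSDBFile findCoverByISBN(String isbn) {
   GridFS covers = new GridFS(db, "covers");
   // Matching a scalar against the "aliases" array field returns files whose aliases contain the ISBN.
   return covers.findOne(new BasicDBObject("aliases", isbn));
 }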
  /**
   * Generates a random default avatar. If an original image exists, the avatar at each size is
   * looked up and any missing sizes are regenerated from the original.
   *
   * @param userId the id of the user
   * @param userName the user's name, used for the watermark
   */
  public void avatarGenerate(String userId, String userName) {

    // Obtain the abbreviated name
    userName = DefaultAvatarGeneratorService.obatainName(userName);

    // Check whether an initial avatar exists. If it does, crop the small avatars from it and
    // assemble them. If not, pick a random initial avatar, add a name watermark and then crop
    // the small avatars; this effectively acts as a fallback mechanism.
    GridFSDBFile initialAvatarFile =
        imageStoreService.get(AvatarIdGenerator.obtainMobileLargeAvatarId(userId));

    // The thumbnail sizes to crop
    List<Double> thumbnailSizeList = SnapFileConfigUtils.obtainAvatarScaleSizes();
    int number = 0;
    BufferedImage bimage = null;
    if (initialAvatarFile == null) {

      Random random = new Random();
      number = random.nextInt(DefaultAvatarCacheService.DEFAULT_AVATAR_NUMBER);
      number++;
      bimage = DefaultAvatarCacheService.obtainCacheRandomAvatar(number);

      // If the name is not empty, watermark the random image with it
      if (!StringUtils.isEmpty(userName)) {
        drawName(bimage, userName);
      }
    } else {
      try {
        bimage = ImageIO.read(initialAvatarFile.getInputStream());
        thumbnailSizeList = this.obtainThumbnailSizeList(userId);
      } catch (IOException e) {
        e.printStackTrace();
        return; // cannot continue without a readable initial avatar
      }
    }
    int width = bimage.getWidth();
    int height = bimage.getHeight();

    ByteArrayOutputStream bs = new ByteArrayOutputStream();
    ImageOutputStream imOut;
    try {
      imOut = ImageIO.createImageOutputStream(bs);

      ImageIO.write(bimage, "png", imOut);

      // Store the original image in the database
      imageService.uploadTempLogo(new ByteArrayInputStream(bs.toByteArray()), userId);

    } catch (IOException e) {
      Logger.error("上传随机头像出错", e);
      e.printStackTrace();
    }

    if (!thumbnailSizeList.isEmpty()) {
      // Generate the cropped thumbnails and store them in the database
      imageService.rectAndStoreAvatar(
          userId, 0, 0, width, height, thumbnailSizeList, AvatarIdGenerator.USER_TYPE);
    }
  }
  /**
   * Update the GridFSDBFile in the associated DB with the key/values in updateKeys
   *
   * @param updateKeys Map of new tag data
   * @param file GridFSDBFile to update with tag data
   * @param db the database containing the GridFS bucket and the songs collection
   * @param songId ID of Song to update with tag data
   * @return true if the rewritten file kept the original id and the song document was updated
   */
  public static boolean updateFile(
      Map<String, String> updateKeys,
      GridFSDBFile file,
      DB db,
      ObjectId songId) { // TODO updateKeys?
    File audioFile = null;
    try {
      audioFile = File.createTempFile("tmp", ".mp3");
    } catch (IOException e) {
      log.error("tmp file not created", e);
      return false; // cannot proceed without a temp file
    }

    audioFile.deleteOnExit();
    AudioFile f = null;
    ObjectId id = (ObjectId) file.getId();
    ObjectId oid = null;
    try {
      file.writeTo(audioFile);
      f = AudioFileIO.read(audioFile);
      Tag tag = f.getTagOrCreateAndSetDefault();
      DBObject q = new BasicDBObject("_id", songId);
      DBObject o = new BasicDBObject("$set", new BasicDBObject(updateKeys));

      if (updateKeys.get("artist") != null) {
        tag.setField(FieldKey.ARTIST, updateKeys.get("artist"));
      }
      if (updateKeys.get("album") != null) {
        tag.setField(FieldKey.ALBUM, updateKeys.get("album"));
      }
      if (updateKeys.get("title") != null) {
        tag.setField(FieldKey.TITLE, updateKeys.get("title"));
      }
      if (updateKeys.get("track") != null) {
        tag.setField(FieldKey.TRACK, updateKeys.get("track"));
      }
      if (updateKeys.get("year") != null) {
        tag.setField(FieldKey.YEAR, updateKeys.get("year"));
      }
      AudioFileIO.write(f);
      GridFS myFS = new GridFS(db);
      myFS.remove(id);
      GridFSInputFile inputFile =
          putSongFileInDB(f.getFile(), db, file.getContentType(), file.getFilename(), id);
      oid = (ObjectId) inputFile.getId();
      if (oid.equals(id)) {
        db.getCollection("songs").update(q, o);
      }
    } catch (KeyNotFoundException knfe) {
      log.error("key not found", knfe);
    } catch (FieldDataInvalidException fdie) {
      log.error("tried to set field with invalid value", fdie);
    } catch (Exception e) {
      log.error("error reading/writing file", e);
    }
    return oid != null && oid.equals(id);
  }
 private void writeResponse(GridFSDBFile file, HttpServletResponse response) throws IOException {
   if (file != null) {
     byte[] data = IOUtils.toByteArray(file.getInputStream());
     response.setContentType(file.getContentType());
     response.setContentLength((int) file.getLength());
     response.getOutputStream().write(data);
     response.getOutputStream().flush();
   } else {
     response.setStatus(HttpStatus.NOT_FOUND.value());
   }
 }
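For large files the intermediate byte[] above can be avoided; a sketch of a streaming variant built on GridFSDBFile.writeTo, which copies the chunks straight to the servlet output stream:

 private void streamResponse(GridFSDBFile file, HttpServletResponse response) throws IOException {
   if (file != null) {
     response.setContentType(file.getContentType());
     response.setContentLength((int) file.getLength());
     file.writeTo(response.getOutputStream()); // writes chunk by chunk, no in-memory copy
     response.getOutputStream().flush();
   } else {
     response.setStatus(HttpStatus.NOT_FOUND.value());
   }
 }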
Example #11
 public static Result getBeerImg(String id) {
   GridFSDBFile img = gfs.find(new ObjectId(id));
   ByteArrayOutputStream baos = new ByteArrayOutputStream();
   try {
     img.writeTo(baos);
   } catch (IOException e) {
     // TODO Auto-generated catch block
     e.printStackTrace();
   }
   return ok(baos.toByteArray()).as("image/png");
 }
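The static gfs handle used above is created elsewhere; it would be initialised roughly like this (the database and bucket names are assumptions):

 private static final GridFS gfs = new GridFS(new MongoClient().getDB("beers"), "images");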
 @Override
 public InputStream getAvatar(String userName) {
   GridFS avatarFS = new GridFS(m_template.getDb());
   GridFSDBFile imageForOutput = avatarFS.findOne(String.format(AVATAR_NAME, userName));
   if (imageForOutput != null) {
     return imageForOutput.getInputStream();
   }
   // try default avatar
   imageForOutput = avatarFS.findOne(String.format(AVATAR_NAME, "default"));
   if (imageForOutput != null) {
     return imageForOutput.getInputStream();
   }
   return null;
 }
Example #13
 @GET
 @Path("/{id}")
 public javax.ws.rs.core.Response getAttachment(@PathParam("id") final String id)
     throws IOException {
   final DB db = this.client.getDB("grid");
   final GridFS gridFS = new GridFS(db);
    // Note: findOne(String) matches on the filename field, so ids are assumed to be stored as filenames.
    final GridFSDBFile file = gridFS.findOne(id);
   // log.info(file);
   if (file == null) {
     throw new WebApplicationException(404);
   }
   return Response.ok(
           org.apache.commons.io.IOUtils.toByteArray(file.getInputStream()), file.getContentType())
       .build();
 }
  @Test
  public void shouldStoreFileInMultipleChunks() throws Exception {
    final byte[] data = new byte[] {1, 2, 3, 4, 5};

    final GridFSInputFile file = fs.createFile(data);
    file.setChunkSize(3); // chunk size is less than data size in order to get more than one chunk
    file.save();

    final GridFSDBFile result = fs.findOne((ObjectId) file.getId());

    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    assertEquals(data.length, result.writeTo(out));

    assertArrayEquals(data, out.toByteArray());
  }
Example #15
 @GET
 @Path("{objectid}")
 public Response getImage(
     @PathParam("objectid") String objectId, @HeaderParam("If-Modified-Since") String modified) {
   GridFSDBFile mongoFile = imageRepository.getImage(objectId);
   if (mongoFile != null) {
     if (modified != null) {
        // Note: the Date(String) constructor is deprecated; parsing If-Modified-Since with an
        // HTTP date format would be more robust.
        if (new Date(modified).before(mongoFile.getUploadDate())) {
         return Response.status(Status.NOT_MODIFIED).build();
       }
     }
     return Response.ok(mongoFile.getInputStream(), mongoFile.getContentType())
         .lastModified(mongoFile.getUploadDate())
         .build();
   }
   return Response.status(Status.NOT_FOUND).build();
 }
  @Override
  protected boolean has(String objectKey) {
    Query query = new Query(Criteria.where("filename").is(objectKey));
    GridFSDBFile file = this.template.findOne(query);
    if (file == null) {
      return false;
    }

    // check for expiry
    Long expiry = (Long) file.get(FIELD_EXPIRY);
    if (expiry != null) {
      if (expiry.longValue() < System.currentTimeMillis()) {
        this.remove(objectKey);
        return false;
      }
    }

    return true;
  }
Example #17
  public static void main(String[] args) throws Exception {

    MongoClient client = new MongoClient();
    DB db = client.getDB("course");
    FileInputStream inputStream = null;

    // Does not create anything on the server; it simply returns a handle to the bucket named "video".
    GridFS videos = new GridFS(db, "video");
    try {

      inputStream = new FileInputStream("BinarySearch.mp4");

    } catch (FileNotFoundException e) {

      e.printStackTrace();
      return; // nothing to upload if the source file is missing
    }

    GridFSInputFile video = videos.createFile(inputStream, "BinarySearch.mp4");

    BasicDBObject meta = new BasicDBObject("description", "Binary Search");

    ArrayList<String> tags = new ArrayList<String>();
    tags.add("search");
    tags.add("data structures");

    meta.append("tags", tags);

    video.setMetaData(meta);
    video.save();

    System.out.println(video.get("_id"));
    System.out.println("file saved");
    System.out.println("reading file from mongo");

    GridFSDBFile dbFile = videos.findOne(new BasicDBObject("filename", "BinarySearch.mp4"));

    FileOutputStream outputStream = new FileOutputStream("BinarySearch_copy.mp4");

    dbFile.writeTo(outputStream);
    outputStream.close();
    client.close();
  }
Example #18
  @RequestMapping(value = "/getPhoto/{code}", method = RequestMethod.GET)
  public @ResponseBody ResponseEntity<byte[]> getCodableDTO(@PathVariable("code") String code) {

    System.out.println("finding getCodableDTO: code: " + code);

    try {

      // List<GridFSDBFile> result = gridFsTemplate.find(new
      // Query().addCriteria(Criteria.where("filename").is(code)));
      GridFSDBFile gridFsFile =
          gridFsTemplate.findOne(new Query().addCriteria(Criteria.where("_id").is(code)));

      final HttpHeaders headers = new HttpHeaders();
      headers.setContentType(MediaType.IMAGE_JPEG);
      // OK (rather than CREATED) is the appropriate status for a plain read
      return new ResponseEntity<>(
          IOUtils.toByteArray(gridFsFile.getInputStream()), headers, HttpStatus.OK);

    } catch (Exception e) {
      System.out.println("error fetching photo: " + e);
      return new ResponseEntity<>(HttpStatus.NOT_FOUND);
    }
  }
  @Override
  protected UrnObject get(String objectKey) {
    Query query = new Query(Criteria.where("filename").is(objectKey));
    GridFSDBFile file = this.template.findOne(query);
    if (file == null) {
      return null;
    }

    // check for expiry
    Long expiry = (Long) file.get(FIELD_EXPIRY);
    if (expiry != null) {
      if (expiry.longValue() < System.currentTimeMillis()) {
        this.remove(objectKey);
        return null;
      }
    }

    byte[] bytes = null;
    try {
      ByteArrayOutputStream stream = new ByteArrayOutputStream();
      file.writeTo(stream);
      stream.close();
      bytes = stream.toByteArray();
    } catch (IOException e) {
      LOGGER.error("Error reading file from mongo database for key: " + objectKey, e);
      return null;
    }

    UrnObject urnObject = new UrnObject(objectKey, bytes);

    // now the metadata
    if (expiry != null) {
      urnObject.expiry = expiry.longValue();
    }

    urnObject.name = (String) file.get(FIELD_NAME);
    urnObject.mime = file.getContentType();
    urnObject.stored = file.getUploadDate().getTime();

    // return the object
    return urnObject;
  }
  private Timestamp<?> processOplogEntry(final DBObject entry, final Timestamp<?> startTimestamp)
      throws InterruptedException {
    // To support transactions, TokuMX wraps one or more operations in a single oplog entry, in a
    // list.
    // As long as clients are not transaction-aware, we can pretty safely assume there will only be
    // one operation in the list.
    // Supporting genuine multi-operation transactions will require a bit more logic here.
    flattenOps(entry);

    if (!isValidOplogEntry(entry, startTimestamp)) {
      return startTimestamp;
    }
    Operation operation = Operation.fromString(entry.get(MongoDBRiver.OPLOG_OPERATION).toString());
    String namespace = entry.get(MongoDBRiver.OPLOG_NAMESPACE).toString();
    String collection = null;
    Timestamp<?> oplogTimestamp = Timestamp.on(entry);
    DBObject object = (DBObject) entry.get(MongoDBRiver.OPLOG_OBJECT);

    if (definition.isImportAllCollections()) {
      if (namespace.startsWith(definition.getMongoDb()) && !namespace.equals(cmdOplogNamespace)) {
        collection = getCollectionFromNamespace(namespace);
      }
    } else {
      collection = definition.getMongoCollection();
    }

    if (namespace.equals(cmdOplogNamespace)) {
      if (object.containsField(MongoDBRiver.OPLOG_DROP_COMMAND_OPERATION)) {
        operation = Operation.DROP_COLLECTION;
        if (definition.isImportAllCollections()) {
          collection = object.get(MongoDBRiver.OPLOG_DROP_COMMAND_OPERATION).toString();
          if (collection.startsWith("tmp.mr.")) {
            return startTimestamp;
          }
        }
      }
      if (object.containsField(MongoDBRiver.OPLOG_DROP_DATABASE_COMMAND_OPERATION)) {
        operation = Operation.DROP_DATABASE;
      }
    }

    logger.trace("namespace: {} - operation: {}", namespace, operation);
    if (namespace.equals(MongoDBRiver.OPLOG_ADMIN_COMMAND)) {
      if (operation == Operation.COMMAND) {
        processAdminCommandOplogEntry(entry, startTimestamp);
        return startTimestamp;
      }
    }

    if (logger.isTraceEnabled()) {
      logger.trace("MongoDB object deserialized: {}", object.toString());
      logger.trace("collection: {}", collection);
      logger.trace("oplog entry - namespace [{}], operation [{}]", namespace, operation);
      logger.trace("oplog processing item {}", entry);
    }

    String objectId = getObjectIdFromOplogEntry(entry);
    if (operation == Operation.DELETE) {
      // Include only _id in data, as vanilla MongoDB does, so transformation scripts won't be
      // broken by Toku
      if (object.containsField(MongoDBRiver.MONGODB_ID_FIELD)) {
        if (object.keySet().size() > 1) {
          entry.put(
              MongoDBRiver.OPLOG_OBJECT,
              object = new BasicDBObject(MongoDBRiver.MONGODB_ID_FIELD, objectId));
        }
      } else {
        throw new NullPointerException(MongoDBRiver.MONGODB_ID_FIELD);
      }
    }

    if (definition.isMongoGridFS()
        && namespace.endsWith(MongoDBRiver.GRIDFS_FILES_SUFFIX)
        && (operation == Operation.INSERT || operation == Operation.UPDATE)) {
      if (objectId == null) {
        throw new NullPointerException(MongoDBRiver.MONGODB_ID_FIELD);
      }
      GridFS grid = new GridFS(mongo.getDB(definition.getMongoDb()), collection);
      GridFSDBFile file = grid.findOne(new ObjectId(objectId));
      if (file != null) {
        logger.info("Caught file: {} - {}", file.getId(), file.getFilename());
        object = file;
      } else {
        logger.warn("Cannot find file from id: {}", objectId);
      }
    }

    if (object instanceof GridFSDBFile) {
      if (objectId == null) {
        throw new NullPointerException(MongoDBRiver.MONGODB_ID_FIELD);
      }
      if (logger.isTraceEnabled()) {
        logger.trace("Add attachment: {}", objectId);
      }
      addToStream(operation, oplogTimestamp, applyFieldFilter(object), collection);
    } else {
      if (operation == Operation.UPDATE) {
        DBObject update = (DBObject) entry.get(MongoDBRiver.OPLOG_UPDATE);
        logger.debug("Updated item: {}", update);
        addQueryToStream(operation, oplogTimestamp, update, collection);
      } else {
        if (operation == Operation.INSERT) {
          addInsertToStream(oplogTimestamp, applyFieldFilter(object), collection);
        } else {
          addToStream(operation, oplogTimestamp, applyFieldFilter(object), collection);
        }
      }
    }
    return oplogTimestamp;
  }
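flattenOps is referenced above but not shown; a rough sketch under the assumption that TokuMX puts the wrapped operations in an "ops" list on the oplog entry:

  private void flattenOps(DBObject entry) {
    Object ops = entry.removeField("ops");
    if (ops instanceof List) {
      for (Object op : (List<?>) ops) {
        if (op instanceof DBObject) {
          // Promote the wrapped operation's fields onto the entry itself; with
          // single-operation transactions this amounts to a straight merge.
          entry.putAll((DBObject) op);
        }
      }
    }
  }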
  @Test
  public void testImportAttachment() throws Exception {
    logger.debug("*** testImportAttachment ***");
    try {
      // createDatabase();
      byte[] content = copyToBytesFromClasspath(TEST_ATTACHMENT_HTML);
      logger.debug("Content in bytes: {}", content.length);
      GridFS gridFS = new GridFS(mongoDB);
      GridFSInputFile in = gridFS.createFile(content);
      in.setFilename("test-attachment.html");
      in.setContentType("text/html");
      in.save();
      in.validate();

      String id = in.getId().toString();
      logger.debug("GridFS in: {}", in);
      logger.debug("Document created with id: {}", id);

      GridFSDBFile out = gridFS.findOne(in.getFilename());
      logger.debug("GridFS from findOne: {}", out);
      out = gridFS.findOne(new ObjectId(id));
      logger.debug("GridFS from findOne: {}", out);
      Assert.assertEquals(out.getId(), in.getId());

      Thread.sleep(wait);
      refreshIndex();

      CountResponse countResponse = getNode().client().count(countRequest(getIndex())).actionGet();
      logger.debug("Index total count: {}", countResponse.getCount());
      assertThat(countResponse.getCount(), equalTo(1L));

      GetResponse getResponse = getNode().client().get(getRequest(getIndex()).id(id)).get();
      logger.debug("Get request for id {}: {}", id, getResponse.isExists());
      assertThat(getResponse.isExists(), equalTo(true));
      //            countResponse =
      // getNode().client().count(countRequest(getIndex()).query(fieldQuery("_id",
      // id))).actionGet();
      //            logger.debug("Index count for id {}: {}", id, countResponse.getCount());
      //            assertThat(countResponse.getCount(), equalTo(1l));

      SearchResponse response =
          getNode()
              .client()
              .prepareSearch(getIndex())
              .setQuery(QueryBuilders.queryString("Aliquam"))
              .execute()
              .actionGet();
      logger.debug("SearchResponse {}", response.toString());
      long totalHits = response.getHits().getTotalHits();
      logger.debug("TotalHits: {}", totalHits);
      assertThat(totalHits, equalTo(1L));

      gridFS.remove(new ObjectId(id));

      Thread.sleep(wait);
      refreshIndex();

      getResponse = getNode().client().get(getRequest(getIndex()).id(id)).get();
      logger.debug("Get request for id {}: {}", id, getResponse.isExists());
      assertThat(getResponse.isExists(), equalTo(false));
      //            countResponse =
      // getNode().client().count(countRequest(getIndex()).query(fieldQuery("_id",
      // id))).actionGet();
      //            logger.debug("Count after delete request: {}", countResponse.getCount());
      //            assertThat(countResponse.getCount(), equalTo(0L));
    } catch (Throwable t) {
      logger.error("testImportAttachment failed.", t);
      Assert.fail("testImportAttachment failed", t);
    } finally {
      // cleanUp();
    }
  }
 @Override
 public void touch() {
   GridFSDBFile gridFSDBFile = getGridFSDBFile(getPath(), true);
   gridFSDBFile.put("uploadDate", new Date());
   gridFSDBFile.save();
 }