private FileContent getFileContentWithChunkChecksums(FileChecksum fileChecksum) {
    try (PreparedStatement preparedStatement =
        getStatement("filecontent.select.all.getFileContentByChecksumWithChunkChecksums.sql")) {
      preparedStatement.setString(1, fileChecksum.toString());

      try (ResultSet resultSet = preparedStatement.executeQuery()) {
        FileContent fileContent = null;

        while (resultSet.next()) {
          if (fileContent == null) {
            fileContent = new FileContent();

            fileContent.setChecksum(
                FileChecksum.parseFileChecksum(resultSet.getString("checksum")));
            fileContent.setSize(resultSet.getLong("size"));
          }

          // Add chunk references
          ChunkChecksum chunkChecksum =
              ChunkChecksum.parseChunkChecksum(resultSet.getString("chunk_checksum"));
          fileContent.addChunk(chunkChecksum);
        }

        return fileContent;
      }
    } catch (SQLException e) {
      throw new RuntimeException(e);
    }
  }
  private Map<FileChecksum, FileContent> createFileContents(ResultSet resultSet)
      throws SQLException {
    Map<FileChecksum, FileContent> fileContents = new HashMap<FileChecksum, FileContent>();
    FileChecksum currentFileChecksum = null;

    while (resultSet.next()) {
      FileChecksum fileChecksum = FileChecksum.parseFileChecksum(resultSet.getString("checksum"));
      FileContent fileContent = null;

      if (currentFileChecksum != null && currentFileChecksum.equals(fileChecksum)) {
        fileContent = fileContents.get(fileChecksum);
      } else {
        fileContent = new FileContent();

        fileContent.setChecksum(fileChecksum);
        fileContent.setSize(resultSet.getLong("size"));
      }

      ChunkChecksum chunkChecksum =
          ChunkChecksum.parseChunkChecksum(resultSet.getString("chunk_checksum"));
      fileContent.addChunk(chunkChecksum);

      fileContents.put(fileChecksum, fileContent);
      currentFileChecksum = fileChecksum;
    }

    return fileContents;
  }
Example #3
  private void handleGetFileRequest(GetFileRequest fileRequest) {
    try {
      FileHistoryId fileHistoryId = FileHistoryId.parseFileId(fileRequest.getFileHistoryId());
      long version = fileRequest.getVersion();

      FileVersion fileVersion = localDatabase.getFileVersion(fileHistoryId, version);
      FileContent fileContent = localDatabase.getFileContent(fileVersion.getChecksum(), true);
      Map<ChunkChecksum, MultiChunkId> multiChunks =
          localDatabase.getMultiChunkIdsByChecksums(fileContent.getChunks());

      TransferManager transferManager =
          config.getTransferPlugin().createTransferManager(config.getConnection());
      Downloader downloader = new Downloader(config, transferManager);
      Assembler assembler = new Assembler(config, localDatabase);

      downloader.downloadAndDecryptMultiChunks(new HashSet<MultiChunkId>(multiChunks.values()));

      File tempFile = assembler.assembleToCache(fileVersion);
      String tempFileToken = StringUtil.toHex(ObjectId.secureRandomBytes(40));

      GetFileResponse fileResponse =
          new GetFileResponse(fileRequest.getId(), fileRequest.getRoot(), tempFileToken);
      GetFileResponseInternal fileResponseInternal =
          new GetFileResponseInternal(fileResponse, tempFile);

      eventBus.post(fileResponseInternal);
    } catch (Exception e) {
      logger.log(Level.WARNING, "Cannot reassemble file.", e);
      eventBus.post(new BadRequestResponse(fileRequest.getId(), "Cannot reassemble file."));
    }
  }
Example #4
    private void addFileVersion(FileProperties fileProperties) {
      if (fileProperties.getChecksum() != null) {
        logger.log(
            Level.FINER,
            "- /File: {0} (checksum {1})",
            new Object[] {fileProperties.getRelativePath(), fileProperties.getChecksum()});
      } else {
        logger.log(
            Level.FINER,
            "- /File: {0} (directory/symlink/0-byte-file)",
            fileProperties.getRelativePath());
      }

      // 1. Determine if file already exists in database
      PartialFileHistory lastFileHistory = guessLastFileHistory(fileProperties);
      FileVersion lastFileVersion =
          (lastFileHistory != null) ? lastFileHistory.getLastVersion() : null;

      // 2. Create new file history/version
      PartialFileHistory fileHistory = createNewFileHistory(lastFileHistory);
      FileVersion fileVersion = createNewFileVersion(lastFileVersion, fileProperties);

      // 3. Compare new and last version
      FileProperties lastFileVersionProperties =
          fileVersionComparator.captureFileProperties(lastFileVersion);
      FileVersionComparison lastToNewFileVersionComparison =
          fileVersionComparator.compare(fileProperties, lastFileVersionProperties, true);

      boolean newVersionDiffersFromToLastVersion = !lastToNewFileVersionComparison.equals();

      if (newVersionDiffersFromToLastVersion) {
        fileHistory.addFileVersion(fileVersion);
        newDatabaseVersion.addFileHistory(fileHistory);

        logger.log(Level.INFO, "   * Added file version:    " + fileVersion);
        logger.log(Level.INFO, "     based on file version: " + lastFileVersion);
      } else {
        logger.log(Level.INFO, "   * NOT ADDING file version: " + fileVersion);
        logger.log(Level.INFO, "         b/c IDENTICAL prev.: " + lastFileVersion);
      }

      // 4. Add file content (if not a directory)
      if (fileProperties.getChecksum() != null && fileContent != null) {
        fileContent.setSize(fileProperties.getSize());
        fileContent.setChecksum(fileProperties.getChecksum());

        // Check if content already exists, throw gathered content away if it does!
        FileContent existingContent =
            localDatabase.getFileContent(fileProperties.getChecksum(), false);

        if (existingContent == null) {
          newDatabaseVersion.addFileContent(fileContent);
        } else {
          // Uses existing content (already in database); ref. by checksum
        }
      }
    }
  /**
   * Finds the multichunks that need to be downloaded for the given file version -- using the local
   * database and given winners database. Returns a set of multichunk identifiers.
   */
  private Collection<MultiChunkId> determineMultiChunksToDownload(
      FileVersion fileVersion, MemoryDatabase winnersDatabase) {
    Set<MultiChunkId> multiChunksToDownload = new HashSet<MultiChunkId>();

    // First: Check if we know this file locally!
    List<MultiChunkId> multiChunkIds = localDatabase.getMultiChunkIds(fileVersion.getChecksum());

    if (multiChunkIds.size() > 0) {
      multiChunksToDownload.addAll(multiChunkIds);
    } else {
      // Second: We don't know it locally; must be from the winners database
      FileContent winningFileContent = winnersDatabase.getContent(fileVersion.getChecksum());
      boolean winningFileHasContent = winningFileContent != null;

      if (winningFileHasContent) { // File can be empty!
        List<ChunkChecksum> fileChunks = winningFileContent.getChunks();

        // TODO [medium] Instead of just looking for multichunks to download here, we should also
        // look for chunks in local files and return the chunk positions in the local files,
        // e.g. ChunkPosition (chunk123 at file12, offset 200, size 250)

        Map<ChunkChecksum, MultiChunkId> checksumsWithMultiChunkIds =
            localDatabase.getMultiChunkIdsByChecksums(fileChunks);

        for (ChunkChecksum chunkChecksum : fileChunks) {
          MultiChunkId multiChunkIdForChunk = checksumsWithMultiChunkIds.get(chunkChecksum);
          if (multiChunkIdForChunk == null) {
            multiChunkIdForChunk = winnersDatabase.getMultiChunkIdForChunk(chunkChecksum);

            if (multiChunkIdForChunk == null) {
              throw new RuntimeException("Cannot find multichunk for chunk " + chunkChecksum);
            }
          }

          if (!multiChunksToDownload.contains(multiChunkIdForChunk)) {
            logger.log(
                Level.INFO,
                "  + Adding multichunk " + multiChunkIdForChunk + " to download list ...");
            multiChunksToDownload.add(multiChunkIdForChunk);
          }
        }
      }
    }

    return multiChunksToDownload;
  }
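
A minimal usage sketch of how the returned multichunk IDs might be handed to a Downloader, mirroring the call in Example #3 above; the downloader, winningFileVersion, and winnersDatabase variables are assumptions for illustration, not part of the method itself:

  // Hypothetical call site (names assumed); determineMultiChunksToDownload() yields the
  // multichunk IDs not yet available locally, which the Downloader then fetches and decrypts.
  Collection<MultiChunkId> unknownMultiChunks =
      determineMultiChunksToDownload(winningFileVersion, winnersDatabase);
  downloader.downloadAndDecryptMultiChunks(new HashSet<MultiChunkId>(unknownMultiChunks));
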
Example #6
  @Override
  public void onFileAddChunk(File file, Chunk chunk) {
    logger.log(
        Level.FINER,
        "- Chunk > FileContent: {0} > {1}",
        new Object[] {StringUtil.toHex(chunk.getChecksum()), file});
    fileContent.addChunk(new ChunkChecksum(chunk.getChecksum()));
  }
  /**
   * Writes a list of {@link FileContent}s to the database using <tt>INSERT</tt>s and the given
   * connection. It fills two tables, the <i>filecontent</i> table ({@link FileContent}) and the
   * <i>filecontent_chunk</i> table ({@link ChunkChecksum}).
   *
   * <p>To do the latter (write chunk references), this method calls {@link
   * #writeFileContentChunkRefs(Connection, FileContent) writeFileContentChunkRefs()} for every
   * {@link FileContent}.
   *
   * <p><b>Note:</b> This method executes, but does not commit the queries.
   *
   * @param connection The connection used to execute the statements
   * @param databaseVersionId ID of the database version the inserted file contents belong to
   * @param fileContents List of {@link FileContent}s to be inserted in the database
   * @throws SQLException If the SQL statement fails
   */
  public void writeFileContents(
      Connection connection, long databaseVersionId, Collection<FileContent> fileContents)
      throws SQLException {
    for (FileContent fileContent : fileContents) {
      PreparedStatement preparedStatement =
          getStatement(connection, "filecontent.insert.all.writeFileContents.sql");

      preparedStatement.setString(1, fileContent.getChecksum().toString());
      preparedStatement.setLong(2, databaseVersionId);
      preparedStatement.setLong(3, fileContent.getSize());

      preparedStatement.executeUpdate();
      preparedStatement.close();

      // Write chunk references
      writeFileContentChunkRefs(connection, fileContent);
    }
  }
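
As the Javadoc notes, writeFileContents() executes its statements but does not commit them. A minimal sketch of a caller, assuming a fileContentSqlDao instance of this DAO, an open JDBC connection, and a fileContents collection gathered elsewhere:

  // Hypothetical caller (names assumed); committing is the caller's responsibility.
  connection.setAutoCommit(false);
  fileContentSqlDao.writeFileContents(connection, databaseVersionId, fileContents);
  connection.commit();
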
  private void writeFileContentChunkRefs(Connection connection, FileContent fileContent)
      throws SQLException {
    PreparedStatement preparedStatement =
        getStatement(connection, "filecontent.insert.all.writeFileContentChunkRefs.sql");
    int order = 0;

    for (ChunkChecksum chunkChecksum : fileContent.getChunks()) {
      preparedStatement.setString(1, fileContent.getChecksum().toString());
      preparedStatement.setString(2, chunkChecksum.toString());
      preparedStatement.setInt(3, order);

      preparedStatement.addBatch();

      order++;
    }

    preparedStatement.executeBatch();
    preparedStatement.close();
  }
  private FileContent getFileContentWithoutChunkChecksums(FileChecksum fileChecksum) {
    try (PreparedStatement preparedStatement =
        getStatement("filecontent.select.all.getFileContentByChecksumWithoutChunkChecksums.sql")) {
      preparedStatement.setString(1, fileChecksum.toString());

      try (ResultSet resultSet = preparedStatement.executeQuery()) {
        if (resultSet.next()) {
          FileContent fileContent = new FileContent();

          fileContent.setChecksum(FileChecksum.parseFileChecksum(resultSet.getString("checksum")));
          fileContent.setSize(resultSet.getLong("size"));

          return fileContent;
        }
      }

      return null;
    } catch (SQLException e) {
      throw new RuntimeException(e);
    }
  }
Example #10
  private Collection<MultiChunkEntry> determineMultiChunksToDownload(
      FileVersion fileVersion, Database localDatabase, Database winnersDatabase) {
    Set<MultiChunkEntry> multiChunksToDownload = new HashSet<MultiChunkEntry>();

    FileContent winningFileContent = localDatabase.getContent(fileVersion.getChecksum());

    if (winningFileContent == null) {
      winningFileContent = winnersDatabase.getContent(fileVersion.getChecksum());
    }

    boolean winningFileHasContent = winningFileContent != null;

    if (winningFileHasContent) { // File can be empty!
      // TODO [medium] Instead of just looking for multichunks to download here, we should also
      // look for chunks in local files and return the chunk positions in the local files,
      // e.g. ChunkPosition (chunk123 at file12, offset 200, size 250)
      Collection<ChunkEntryId> fileChunks = winningFileContent.getChunks();

      for (ChunkEntryId chunkChecksum : fileChunks) {
        MultiChunkEntry multiChunkForChunk = localDatabase.getMultiChunkForChunk(chunkChecksum);

        if (multiChunkForChunk == null) {
          multiChunkForChunk = winnersDatabase.getMultiChunkForChunk(chunkChecksum);

          if (multiChunkForChunk == null) {
            // Chunk is in neither database; fail fast instead of hitting an NPE below
            throw new RuntimeException("Cannot find multichunk for chunk " + chunkChecksum);
          }
        }

        if (!multiChunksToDownload.contains(multiChunkForChunk)) {
          logger.log(
              Level.INFO,
              "  + Adding multichunk "
                  + StringUtil.toHex(multiChunkForChunk.getId())
                  + " to download list ...");
          multiChunksToDownload.add(multiChunkForChunk);
        }
      }
    }

    return multiChunksToDownload;
  }