Example #1
  /**
   * Get a map from author names to their ids in the database. The authors that are not in the
   * database are added to it.
   *
   * @param conn the connection to the database
   * @param history the history to get the author names from
   * @param reposId the id of the repository
   * @return a map from author names to author ids
   */
  private Map<String, Integer> getAuthors(ConnectionResource conn, History history, int reposId)
      throws SQLException {
    Map<String, Integer> map = new HashMap<>();
    PreparedStatement ps = conn.getStatement(GET_AUTHORS);
    ps.setInt(1, reposId);
    try (ResultSet rs = ps.executeQuery()) {
      while (rs.next()) {
        map.put(rs.getString(1), rs.getInt(2));
      }
    }

    // Add any authors that are missing from the database, assigning
    // them fresh ids.
    PreparedStatement insert = conn.getStatement(ADD_AUTHOR);
    insert.setInt(1, reposId);
    for (HistoryEntry entry : history.getHistoryEntries()) {
      String author = entry.getAuthor();
      if (!map.containsKey(author)) {
        int id = nextAuthorId.getAndIncrement();
        insert.setString(2, author);
        insert.setInt(3, id);
        insert.executeUpdate();
        map.put(author, id);
        conn.commit();
      }
    }

    return map;
  }
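
The GET_AUTHORS and ADD_AUTHOR statement keys and the nextAuthorId counter are defined elsewhere in the class and are not shown here. A minimal sketch of what they might look like, assuming the keys map directly to SQL over a simple AUTHORS table (the actual schema and SQL may differ):

  // Hypothetical SQL for the statement keys used above.
  private static final String GET_AUTHORS =
      "SELECT NAME, ID FROM OPENGROK.AUTHORS WHERE REPOSITORY = ?";
  private static final String ADD_AUTHOR =
      "INSERT INTO OPENGROK.AUTHORS (REPOSITORY, NAME, ID) VALUES (?, ?, ?)";

  // Id generator (java.util.concurrent.atomic.AtomicInteger), assumed
  // to be seeded from MAX(ID) in the AUTHORS table at startup.
  private final AtomicInteger nextAuthorId = new AtomicInteger();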
Example #2
 /**
  * Make sure Derby's index cardinality statistics are up to date. Otherwise, the optimizer may
  * choose a bad execution strategy for some queries. This method should be called if the size of
  * the tables has changed significantly.
  *
  * <p>This is a workaround for the problems described in <a
  * href="https://issues.apache.org/jira/browse/DERBY-269">DERBY-269</a> and <a
  * href="https://issues.apache.org/jira/browse/DERBY-3788">DERBY-3788</a>. Once automatic updating
  * of index cardinality statistics is implemented in Derby, this workaround can be removed.
  *
  * <p>Without this workaround, poor performance has been observed in {@code get()} due to bad
  * choices made by the optimizer.
  *
  * <p>Note that this method uses a system procedure introduced in Derby 10.5. If this procedure
  * does not exist, this method is a no-op.
  */
 private void updateIndexCardinalityStatistics(ConnectionResource conn) throws SQLException {
   DatabaseMetaData dmd = conn.getMetaData();
   if (procedureExists(dmd, "SYSCS_UTIL", "SYSCS_UPDATE_STATISTICS")) {
     try (PreparedStatement ps =
         conn.prepareStatement("CALL SYSCS_UTIL.SYSCS_UPDATE_STATISTICS(?, ?, NULL)")) {
       ps.setString(1, SCHEMA);
       for (String table : TABLES) {
         ps.setString(2, table);
         retry:
         for (int i = 0; ; i++) {
           try {
             ps.execute();
             // Successfully executed statement. Break out of
             // retry loop.
             break retry;
           } catch (SQLException sqle) {
             handleSQLException(sqle, i);
             conn.rollback();
           }
         }
         conn.commit();
       }
     }
   }
 }
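
The procedureExists() helper is not part of this example. A minimal sketch, assuming a lookup through standard JDBC metadata (the real helper may differ in details):

  private static boolean procedureExists(DatabaseMetaData dmd, String schema, String proc)
      throws SQLException {
    // getProcedures() treats its arguments as patterns; exact names
    // work as patterns too. A non-empty result means the procedure
    // exists in the given schema.
    try (ResultSet rs = dmd.getProcedures(null, schema, proc)) {
      return rs.next();
    }
  }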
Example #3
 /**
  * If this is a Derby database, force a checkpoint so that the disk space occupied by the
  * transaction log is freed as early as possible.
  */
 private void checkpointDatabase(ConnectionResource conn) throws SQLException {
   DatabaseMetaData dmd = conn.getMetaData();
   if (procedureExists(dmd, "SYSCS_UTIL", "SYSCS_CHECKPOINT_DATABASE")) {
     try (Statement s = conn.createStatement()) {
       s.execute("CALL SYSCS_UTIL.SYSCS_CHECKPOINT_DATABASE()");
     }
     conn.commit();
   }
 }
Example #4
 /** Helper for {@link #clear(Repository)}. */
 private void clearHistoryForRepository(Repository repository) throws SQLException {
   final ConnectionResource conn = connectionManager.getConnectionResource();
   try {
     try (PreparedStatement ps = conn.prepareStatement(getQuery("clearRepository"))) {
       ps.setInt(1, getRepositoryId(conn, repository));
       ps.execute();
       conn.commit();
     }
   } finally {
     connectionManager.releaseConnection(conn);
   }
 }
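
The getQuery() lookup used above is also defined elsewhere. A plausible sketch, assuming the named statements are kept in a java.util.Properties object loaded from a bundled resource (the actual mechanism may differ):

  // Hypothetical registry of named SQL statements.
  private static final Properties QUERIES = new Properties();

  private static String getQuery(String key) {
    String sql = QUERIES.getProperty(key);
    if (sql == null) {
      throw new IllegalArgumentException("No SQL statement defined for key: " + key);
    }
    return sql;
  }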
Example #5
  /**
   * Build maps from directory names and file names to their respective identifiers in the database.
   * The directories and files that are not already in the database are added to it.
   *
   * @param conn the connection to the database
   * @param history the history to get the file and directory names from
   * @param reposId the id of the repository
   * @param dirMap a map which will be filled with directory names and ids
   * @param fileMap a map which will be filled with file names and ids
   */
  private void getFilesAndDirectories(
      ConnectionResource conn,
      History history,
      int reposId,
      Map<String, Integer> dirMap,
      Map<String, Integer> fileMap)
      throws SQLException {

    populateFileOrDirMap(conn.getStatement(GET_DIRS), reposId, dirMap);
    populateFileOrDirMap(conn.getStatement(GET_FILES), reposId, fileMap);

    int insertCount = 0;

    PreparedStatement insDir = conn.getStatement(INSERT_DIR);
    PreparedStatement insFile = conn.getStatement(INSERT_FILE);
    for (HistoryEntry entry : history.getHistoryEntries()) {
      for (String file : entry.getFiles()) {
        String fullPath = toUnixPath(file);
        // Add the file to the database and to the map if it isn't
        // there already. Assumption: If the file is in the database,
        // all its parent directories are also there.
        if (!fileMap.containsKey(fullPath)) {
          // Get the dir id for this file, potentially adding the
          // parent directories to the db and to dirMap.
          int dir = addAllDirs(insDir, reposId, fullPath, dirMap);
          int fileId = nextFileId.getAndIncrement();
          insFile.setInt(1, dir);
          insFile.setString(2, getBaseName(fullPath));
          insFile.setInt(3, fileId);
          insFile.executeUpdate();
          fileMap.put(fullPath, fileId);

          // Commit every now and then to allow the database to free
          // resources (such as locks and transaction log), but not too
          // frequently, since that may hurt performance. It is OK not
          // to commit for every file added: the worst that can happen
          // is that we need to re-insert the files added since the
          // last commit in case of a crash.
          insertCount++;
          if (insertCount % 30 == 0) {
            conn.commit();
          }
        }
      }
    }
  }
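
A minimal sketch of the populateFileOrDirMap() helper called at the top of this method, assuming the prepared statement takes the repository id as its only parameter and returns (name, id) pairs:

  private static void populateFileOrDirMap(
      PreparedStatement ps, int reposId, Map<String, Integer> map) throws SQLException {
    ps.setInt(1, reposId);
    try (ResultSet rs = ps.executeQuery()) {
      while (rs.next()) {
        // Column 1 is the (directory or file) name, column 2 its id.
        map.put(rs.getString(1), rs.getInt(2));
      }
    }
  }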
Example #6
 @Override
 public void initialize() throws HistoryException {
   try {
     connectionManager = new ConnectionManager(jdbcDriverClass, jdbcConnectionURL);
     for (int i = 0; ; i++) {
       final ConnectionResource conn = connectionManager.getConnectionResource();
       try {
         try (Statement stmt = conn.createStatement()) {
           initDB(stmt);
         }
         conn.commit();
         // Success! Break out of the loop.
         return;
       } catch (SQLException sqle) {
         handleSQLException(sqle, i);
       } finally {
         connectionManager.releaseConnection(conn);
       }
     }
   } catch (Exception e) {
     throw new HistoryException(e);
   }
 }
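
The retry loops in this and the other examples rely on handleSQLException() to decide whether an error is worth retrying. A plausible sketch of that policy, where MAX_RETRIES and the transient-error test are assumptions (java.sql.SQLTransientException covers deadlocks and lock timeouts on drivers that map them):

  // Assumed retry limit; the real value may differ.
  private static final int MAX_RETRIES = 4;

  private static void handleSQLException(SQLException sqle, int attempt) throws SQLException {
    if (attempt < MAX_RETRIES && sqle instanceof SQLTransientException) {
      // Swallow the exception so that the caller's retry loop runs
      // another iteration after rolling back.
      return;
    }
    // Non-transient error, or too many attempts: give up.
    throw sqle;
  }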
Example #7
  private void storeHistory(ConnectionResource conn, History history, Repository repository)
      throws SQLException {

    Integer reposId = null;
    Map<String, Integer> authors = null;
    Map<String, Integer> files = null;
    Map<String, Integer> directories = null;
    PreparedStatement addChangeset = null;
    PreparedStatement addDirchange = null;
    PreparedStatement addFilechange = null;
    PreparedStatement addFilemove = null;
    RuntimeEnvironment env = RuntimeEnvironment.getInstance();

    // return immediately when there is nothing to do
    List<HistoryEntry> entries = history.getHistoryEntries();
    if (entries.isEmpty()) {
      return;
    }

    for (int i = 0; ; i++) {
      try {
        if (reposId == null) {
          reposId = getRepositoryId(conn, repository);
          conn.commit();
        }

        if (authors == null) {
          authors = getAuthors(conn, history, reposId);
          conn.commit();
        }

        if (directories == null || files == null) {
          Map<String, Integer> dirs = new HashMap<>();
          Map<String, Integer> fls = new HashMap<>();
          getFilesAndDirectories(conn, history, reposId, dirs, fls);
          conn.commit();
          directories = dirs;
          files = fls;
        }

        if (addChangeset == null) {
          addChangeset = conn.getStatement(ADD_CHANGESET);
        }

        if (addDirchange == null) {
          addDirchange = conn.getStatement(ADD_DIRCHANGE);
        }

        if (addFilechange == null) {
          addFilechange = conn.getStatement(ADD_FILECHANGE);
        }

        if (addFilemove == null) {
          addFilemove = conn.getStatement(ADD_FILEMOVE);
        }

        // Success! Break out of the loop.
        break;

      } catch (SQLException sqle) {
        handleSQLException(sqle, i);
        conn.rollback();
      }
    }

    addChangeset.setInt(1, reposId);

    // getHistoryEntries() returns the entries in reverse chronological
    // order, but we want to insert them in chronological order so that
    // their auto-generated identity column can be used as a chronological
    // ordering column. Otherwise, incremental updates will make the
    // identity column unusable for chronological ordering. Therefore,
    // we walk the list backwards.
    for (ListIterator<HistoryEntry> it = entries.listIterator(entries.size()); it.hasPrevious(); ) {
      HistoryEntry entry = it.previous();
      retry:
      for (int i = 0; ; i++) {
        try {
          addChangeset.setString(2, entry.getRevision());
          addChangeset.setInt(3, authors.get(entry.getAuthor()));
          addChangeset.setTimestamp(4, new Timestamp(entry.getDate().getTime()));
          String msg = entry.getMessage();
          // Truncate the message if it can't fit in a VARCHAR
          // (bug #11663).
          if (msg.length() > MAX_MESSAGE_LENGTH) {
            msg = truncate(msg, MAX_MESSAGE_LENGTH);
          }
          addChangeset.setString(5, msg);
          int changesetId = nextChangesetId.getAndIncrement();
          addChangeset.setInt(6, changesetId);
          addChangeset.executeUpdate();

          // Add one row for each file in FILECHANGES, and one row
          // for each path element of the directories in DIRCHANGES.
          Set<String> addedDirs = new HashSet<>();
          addDirchange.setInt(1, changesetId);
          addFilechange.setInt(1, changesetId);
          for (String file : entry.getFiles()) {
            // Skip files that the history marks as ignored.
            String repodir = "";
            try {
              repodir = env.getPathRelativeToSourceRoot(new File(repository.getDirectoryName()), 0);
            } catch (IOException ex) {
              Logger.getLogger(JDBCHistoryCache.class.getName()).log(Level.SEVERE, null, ex);
            }

            String fullPath = toUnixPath(file);
            if (!history.isIgnored(file.substring(repodir.length() + 1))) {
              int fileId = files.get(fullPath);
              addFilechange.setInt(2, fileId);
              addFilechange.executeUpdate();
            }
            String[] pathElts = splitPath(fullPath);
            for (int j = 0; j < pathElts.length; j++) {
              String dir = unsplitPath(pathElts, j);
              // Only add to DIRCHANGES if we haven't already
              // added this dir/changeset combination.
              if (!addedDirs.contains(dir)) {
                addDirchange.setInt(2, directories.get(dir));
                addDirchange.executeUpdate();
                addedDirs.add(dir);
              }
            }
          }

          conn.commit();

          // Successfully added the entry. Break out of retry loop.
          break retry;

        } catch (SQLException sqle) {
          handleSQLException(sqle, i);
          conn.rollback();
        }
      }
    }

    /*
     * Special handling for certain files - this is mainly for files
     * that have been renamed in a Mercurial repository. This ensures
     * that their complete history (follow) is saved.
     */
    for (String filename : history.getIgnoredFiles()) {
      String filePath = repository.getDirectoryName() + File.separatorChar + filename;
      File file = new File(filePath);
      String repoPath = filePath.substring(env.getSourceRootPath().length());
      History hist;
      try {
        hist = repository.getHistory(file);
      } catch (HistoryException ex) {
        Logger.getLogger(JDBCHistoryCache.class.getName()).log(Level.SEVERE, null, ex);
        continue;
      }

      int fileId = files.get(repoPath);
      for (HistoryEntry entry : hist.getHistoryEntries()) {
        retry:
        for (int i = 0; ; i++) {
          try {
            int changesetId = getIdForRevision(entry.getRevision());

            /*
             * If the file exists in the changeset under its current
             * name, record it as an ordinary file change. Otherwise,
             * record it in the table that tracks moves of the file
             * under one of its earlier names, so that it can still be
             * found when fetching the history of its directory.
             */
            if (entry.getFiles().contains(repoPath)) {
              addFilechange.setInt(1, changesetId);
              addFilechange.setInt(2, fileId);
              addFilechange.executeUpdate();
            } else {
              addFilemove.setInt(1, changesetId);
              addFilemove.setInt(2, fileId);
              addFilemove.executeUpdate();
            }

            conn.commit();
            break retry;
          } catch (SQLException sqle) {
            handleSQLException(sqle, i);
            conn.rollback();
          }
        }
      }
    }
  }
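
The small helpers referenced above (splitPath(), unsplitPath(), truncate()) are not shown in the example. Hypothetical sketches, assuming Unix-style paths with a leading slash:

  private static String[] splitPath(String fullPath) {
    // "/a/b/c.txt" -> ["a", "b", "c.txt"]; the leading slash is dropped.
    return fullPath.substring(1).split("/");
  }

  private static String unsplitPath(String[] elts, int n) {
    // Rebuild the directory path from the first n elements, e.g.
    // unsplitPath(new String[] {"a", "b", "c.txt"}, 2) -> "/a/b";
    // n == 0 yields the repository root "/".
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < n; i++) {
      sb.append('/').append(elts[i]);
    }
    return sb.length() == 0 ? "/" : sb.toString();
  }

  private static String truncate(String s, int length) {
    // Callers only invoke this when s is too long for the column.
    return s.length() <= length ? s : s.substring(0, length);
  }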