/**
 * Get a map from author names to their ids in the database. Authors
 * that are not already in the database are added to it.
 *
 * @param conn the connection to the database
 * @param history the history to get the author names from
 * @param reposId the id of the repository
 * @return a map from author names to author ids
 */
private Map<String, Integer> getAuthors(
        ConnectionResource conn, History history, int reposId)
        throws SQLException {
    HashMap<String, Integer> map = new HashMap<String, Integer>();
    PreparedStatement ps = conn.getStatement(GET_AUTHORS);
    ps.setInt(1, reposId);
    try (ResultSet rs = ps.executeQuery()) {
        while (rs.next()) {
            map.put(rs.getString(1), rs.getInt(2));
        }
    }

    PreparedStatement insert = conn.getStatement(ADD_AUTHOR);
    insert.setInt(1, reposId);
    for (HistoryEntry entry : history.getHistoryEntries()) {
        String author = entry.getAuthor();
        if (!map.containsKey(author)) {
            int id = nextAuthorId.getAndIncrement();
            insert.setString(2, author);
            insert.setInt(3, id);
            insert.executeUpdate();
            map.put(author, id);
            conn.commit();
        }
    }

    return map;
}
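// A hedged sketch: GET_AUTHORS and ADD_AUTHOR are named statements defined
// elsewhere in this class. Judging from the parameter indexes bound above
// (the repository id first, then name and id for the insert), their SQL
// presumably resembles the constants below. The table and column names are
// assumptions for illustration, not the actual definitions.
private static final String GET_AUTHORS_SQL_SKETCH =
        "SELECT NAME, ID FROM AUTHORS WHERE REPOSITORY = ?";
private static final String ADD_AUTHOR_SQL_SKETCH =
        "INSERT INTO AUTHORS(REPOSITORY, NAME, ID) VALUES (?, ?, ?)";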
/**
 * Make sure Derby's index cardinality statistics are up to date.
 * Otherwise, the optimizer may choose a bad execution strategy for
 * some queries. This method should be called if the size of the
 * tables has changed significantly.
 *
 * <p>This is a workaround for the problems described in
 * <a href="https://issues.apache.org/jira/browse/DERBY-269">DERBY-269</a>
 * and
 * <a href="https://issues.apache.org/jira/browse/DERBY-3788">DERBY-3788</a>.
 * When automatic update of index cardinality statistics has been
 * implemented in Derby, this workaround may be removed.
 *
 * <p>Without this workaround, poor performance has been observed in
 * {@code get()} due to bad choices made by the optimizer.
 *
 * <p>Note that this method uses a system procedure introduced in
 * Derby 10.5. If the procedure does not exist, this method is a no-op.
 */
private void updateIndexCardinalityStatistics(ConnectionResource conn)
        throws SQLException {
    DatabaseMetaData dmd = conn.getMetaData();
    if (procedureExists(dmd, "SYSCS_UTIL", "SYSCS_UPDATE_STATISTICS")) {
        try (PreparedStatement ps = conn.prepareStatement(
                "CALL SYSCS_UTIL.SYSCS_UPDATE_STATISTICS(?, ?, NULL)")) {
            ps.setString(1, SCHEMA);
            for (String table : TABLES) {
                ps.setString(2, table);
                retry:
                for (int i = 0; ; i++) {
                    try {
                        ps.execute();
                        // Successfully executed the statement. Break
                        // out of the retry loop.
                        break retry;
                    } catch (SQLException sqle) {
                        handleSQLException(sqle, i);
                        conn.rollback();
                    }
                }
                conn.commit();
            }
        }
    }
}
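// For reference: SYSCS_UTIL.SYSCS_UPDATE_STATISTICS takes a schema name, a
// table name, and an index name; passing NULL for the index name updates the
// statistics for all indexes on the table. Run manually from Derby's ij
// tool, a call for one of the tables would look like the line below
// (assuming the SCHEMA constant is 'OPENGROK'; the schema name here is an
// assumption for illustration):
//
//   CALL SYSCS_UTIL.SYSCS_UPDATE_STATISTICS('OPENGROK', 'FILECHANGES', NULL);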
/** Helper for {@link #get(File, Repository)}. */
private History getHistory(File file, Repository repository, boolean withFiles)
        throws HistoryException, SQLException {
    final String filePath = getSourceRootRelativePath(file);
    final String reposPath = toUnixPath(repository.getDirectoryName());
    final ArrayList<HistoryEntry> entries = new ArrayList<HistoryEntry>();
    final ConnectionResource conn = connectionManager.getConnectionResource();
    try {
        final PreparedStatement ps;
        if (file.isDirectory()) {
            // Fetch history for all files under this directory.
            ps = conn.getStatement(GET_DIR_HISTORY);
            ps.setString(2, filePath);
        } else {
            // Fetch history for a single file only.
            ps = conn.getStatement(GET_FILE_HISTORY);
            ps.setString(2, getParentPath(filePath));
            ps.setString(3, getBaseName(filePath));
        }
        ps.setString(1, reposPath);

        final PreparedStatement filePS =
                withFiles ? conn.getStatement(GET_CS_FILES) : null;

        try (ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
                // Get the information about a changeset.
                String revision = rs.getString(1);
                String author = rs.getString(2);
                Timestamp time = rs.getTimestamp(3);
                String message = rs.getString(4);
                HistoryEntry entry = new HistoryEntry(
                        revision, time, author, null, message, true);
                entries.add(entry);

                // Fill the list of files touched by the changeset,
                // if requested.
                if (withFiles) {
                    int changeset = rs.getInt(5);
                    filePS.setInt(1, changeset);
                    try (ResultSet fileRS = filePS.executeQuery()) {
                        while (fileRS.next()) {
                            entry.addFile(fileRS.getString(1));
                        }
                    }
                }
            }
        }
    } finally {
        connectionManager.releaseConnection(conn);
    }

    History history = new History();
    history.setHistoryEntries(entries);

    RuntimeEnvironment env = RuntimeEnvironment.getInstance();
    if (env.isTagsEnabled() && repository.hasFileBasedTags()) {
        repository.assignTagsInHistory(history);
    }

    return history;
}
/**
 * If this is a Derby database, force a checkpoint so that the disk
 * space occupied by the transaction log is freed as early as possible.
 */
private void checkpointDatabase(ConnectionResource conn) throws SQLException {
    DatabaseMetaData dmd = conn.getMetaData();
    if (procedureExists(dmd, "SYSCS_UTIL", "SYSCS_CHECKPOINT_DATABASE")) {
        try (Statement s = conn.createStatement()) {
            s.execute("CALL SYSCS_UTIL.SYSCS_CHECKPOINT_DATABASE()");
        }
        conn.commit();
    }
}
/**
 * Get the integer id for a revision string by querying the database.
 *
 * @param revision the revision string to look up
 * @return the id of the revision, or -1 if it is not in the database
 */
private int getIdForRevision(String revision) throws SQLException {
    final ConnectionResource conn = connectionManager.getConnectionResource();
    try {
        PreparedStatement ps = conn.getStatement(GET_REV_ID);
        ps.setString(1, revision);
        // Make sure the result set is closed, and read the id directly
        // as an int instead of converting it through a string.
        try (ResultSet rs = ps.executeQuery()) {
            return rs.next() ? rs.getInt(1) : -1;
        }
    } finally {
        connectionManager.releaseConnection(conn);
    }
}
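// A hedged sketch: GET_REV_ID is defined elsewhere in this class. Based on
// the single string parameter bound above and the integer id read back, its
// SQL presumably resembles the constant below (assumed table and column
// names, for illustration only).
private static final String GET_REV_ID_SQL_SKETCH =
        "SELECT ID FROM CHANGESETS WHERE REVISION = ?";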
/** Helper for {@link #clear(Repository)}. */
private void clearHistoryForRepository(Repository repository) throws SQLException {
    final ConnectionResource conn = connectionManager.getConnectionResource();
    try {
        try (PreparedStatement ps =
                conn.prepareStatement(getQuery("clearRepository"))) {
            ps.setInt(1, getRepositoryId(conn, repository));
            ps.execute();
            conn.commit();
        }
    } finally {
        connectionManager.releaseConnection(conn);
    }
}
/** Helper for {@link #getLatestCachedRevision(Repository)}. */
private String getLatestRevisionForRepository(Repository repository)
        throws SQLException {
    final ConnectionResource conn = connectionManager.getConnectionResource();
    try {
        PreparedStatement ps = conn.getStatement(GET_LATEST_REVISION);
        ps.setString(1, toUnixPath(repository.getDirectoryName()));
        try (ResultSet rs = ps.executeQuery()) {
            return rs.next() ? rs.getString(1) : null;
        }
    } finally {
        connectionManager.releaseConnection(conn);
    }
}
/**
 * Get the id of a repository in the database. If the repository is not
 * stored in the database, add it and return its id.
 *
 * @param conn the connection to the database
 * @param repository the repository whose id to get
 * @return the id of the repository
 */
private int getRepositoryId(ConnectionResource conn, Repository repository)
        throws SQLException {
    String reposPath = toUnixPath(repository.getDirectoryName());
    PreparedStatement reposIdPS = conn.getStatement(GET_REPOSITORY);
    reposIdPS.setString(1, reposPath);
    try (ResultSet reposIdRS = reposIdPS.executeQuery()) {
        if (reposIdRS.next()) {
            return reposIdRS.getInt(1);
        }
    }

    // The repository is not in the database. Add it.
    PreparedStatement insert = conn.getStatement(INSERT_REPOSITORY);
    insert.setString(1, reposPath);
    insert.executeUpdate();
    return getGeneratedIntKey(insert);
}
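// A hedged sketch: based on the usage above, GET_REPOSITORY and
// INSERT_REPOSITORY presumably resemble the SQL below, with the id column
// generated by the database so that getGeneratedIntKey(insert) can read it
// back through Statement.getGeneratedKeys(). Table and column names are
// assumptions for illustration, not the actual definitions.
private static final String GET_REPOSITORY_SQL_SKETCH =
        "SELECT ID FROM REPOSITORIES WHERE PATH = ?";
private static final String INSERT_REPOSITORY_SQL_SKETCH =
        "INSERT INTO REPOSITORIES(PATH) VALUES (?)";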
/**
 * Build maps from directory names and file names to their respective
 * identifiers in the database. Directories and files that are not
 * already in the database are added to it.
 *
 * @param conn the connection to the database
 * @param history the history to get the file and directory names from
 * @param reposId the id of the repository
 * @param dirMap a map which will be filled with directory names and ids
 * @param fileMap a map which will be filled with file names and ids
 */
private void getFilesAndDirectories(
        ConnectionResource conn, History history, int reposId,
        Map<String, Integer> dirMap, Map<String, Integer> fileMap)
        throws SQLException {

    populateFileOrDirMap(conn.getStatement(GET_DIRS), reposId, dirMap);
    populateFileOrDirMap(conn.getStatement(GET_FILES), reposId, fileMap);

    int insertCount = 0;

    PreparedStatement insDir = conn.getStatement(INSERT_DIR);
    PreparedStatement insFile = conn.getStatement(INSERT_FILE);
    for (HistoryEntry entry : history.getHistoryEntries()) {
        for (String file : entry.getFiles()) {
            String fullPath = toUnixPath(file);
            // Add the file to the database and to the map if it isn't
            // there already. Assumption: if the file is in the
            // database, all its parent directories are also there.
            if (!fileMap.containsKey(fullPath)) {
                // Get the dir id for this file, potentially adding the
                // parent directories to the db and to dirMap.
                int dir = addAllDirs(insDir, reposId, fullPath, dirMap);
                int fileId = nextFileId.getAndIncrement();
                insFile.setInt(1, dir);
                insFile.setString(2, getBaseName(fullPath));
                insFile.setInt(3, fileId);
                insFile.executeUpdate();
                fileMap.put(fullPath, fileId);

                // Commit every now and then to allow the database to
                // free resources (like locks and transaction log), but
                // not too frequently, since that may hurt performance.
                // It is OK not to commit for every file added, since
                // the worst that can happen is that we need to
                // re-insert the files added since the last commit in
                // case of a crash.
                insertCount++;
                if (insertCount % 30 == 0) {
                    conn.commit();
                }
            }
        }
    }
}
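// A hedged sketch of the populateFileOrDirMap helper used above: it binds
// the repository id and copies the (name, id) pairs from the result set
// into the map. This is an assumption based on how the method is called,
// not the actual implementation, which is defined elsewhere in this class.
private static void populateFileOrDirMapSketch(
        PreparedStatement ps, int reposId, Map<String, Integer> map)
        throws SQLException {
    ps.setInt(1, reposId);
    try (ResultSet rs = ps.executeQuery()) {
        while (rs.next()) {
            map.put(rs.getString(1), rs.getInt(2));
        }
    }
}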
/**
 * Get the times of the latest modifications of all files under the
 * given directory in the given repository.
 *
 * @param directory the directory whose files to fetch timestamps for
 * @param repository the repository in which the directory lives
 * @return a map from file names to the times at which the files were
 * last modified
 */
private Map<String, Date> getLastModifiedTimesForAllFiles(
        File directory, Repository repository)
        throws HistoryException, SQLException {
    final Map<String, Date> map = new HashMap<String, Date>();

    final ConnectionResource conn = connectionManager.getConnectionResource();
    try {
        PreparedStatement ps = conn.getStatement(GET_LAST_MODIFIED_TIMES);
        ps.setString(1, toUnixPath(repository.getDirectoryName()));
        ps.setString(2, getSourceRootRelativePath(directory));
        try (ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
                map.put(rs.getString(1), rs.getTimestamp(2));
            }
        }
    } finally {
        connectionManager.releaseConnection(conn);
    }

    return map;
}
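// A hedged sketch: GET_LAST_MODIFIED_TIMES is defined elsewhere in this
// class. Given that it maps each file name to a single timestamp, it
// presumably aggregates the newest changeset per file, along these lines
// (assumed table and column names, shown in outline only):
//
//   SELECT <file name>, MAX(<changeset time>)
//     FROM <files joined with changesets through FILECHANGES>
//    WHERE <repository path> = ? AND <directory> = ?
//    GROUP BY <file name>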
@Override
public void initialize() throws HistoryException {
    try {
        connectionManager =
                new ConnectionManager(jdbcDriverClass, jdbcConnectionURL);
        for (int i = 0; ; i++) {
            final ConnectionResource conn =
                    connectionManager.getConnectionResource();
            try {
                try (Statement stmt = conn.createStatement()) {
                    initDB(stmt);
                }
                conn.commit();
                // Success! Break out of the loop.
                return;
            } catch (SQLException sqle) {
                handleSQLException(sqle, i);
            } finally {
                connectionManager.releaseConnection(conn);
            }
        }
    } catch (Exception e) {
        throw new HistoryException(e);
    }
}
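// A hedged sketch of the handleSQLException(sqle, attempt) helper that the
// retry loops in this class rely on: retry a bounded number of times on
// transient errors (e.g. deadlocks or lock timeouts) after a short delay,
// and rethrow otherwise. The retry budget, the backoff, and the
// transient-error test are assumptions for illustration; the actual helper
// is defined elsewhere. (Uses java.sql.SQLTransientException.)
private static void handleSQLExceptionSketch(SQLException sqle, int attempt)
        throws SQLException {
    final int maxRetries = 3; // assumed retry budget
    boolean transientError = sqle instanceof SQLTransientException;
    if (attempt < maxRetries && transientError) {
        try {
            Thread.sleep(100L * (attempt + 1)); // simple linear backoff
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
        }
        return; // returning lets the caller's for-loop retry the statement
    }
    throw sqle; // give up and propagate the failure
}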
// We do check the return value from ResultSet.next(), but PMD doesn't
// understand it, so suppress the warning.
@SuppressWarnings("PMD.CheckResultSet")
@Override
public boolean hasCacheForDirectory(File file, Repository repository)
        throws HistoryException {
    assert file.isDirectory();
    try {
        for (int i = 0; ; i++) {
            final ConnectionResource conn =
                    connectionManager.getConnectionResource();
            try {
                PreparedStatement ps = conn.getStatement(IS_DIR_IN_CACHE);
                ps.setString(1, toUnixPath(repository.getDirectoryName()));
                ps.setString(2, getSourceRootRelativePath(file));
                try (ResultSet rs = ps.executeQuery()) {
                    return rs.next();
                }
            } catch (SQLException sqle) {
                handleSQLException(sqle, i);
            } finally {
                connectionManager.releaseConnection(conn);
            }
        }
    } catch (SQLException sqle) {
        throw new HistoryException(sqle);
    }
}
/** Store the given history for a repository in the database. */
private void storeHistory(ConnectionResource conn, History history, Repository repository)
        throws SQLException {

    Integer reposId = null;
    Map<String, Integer> authors = null;
    Map<String, Integer> files = null;
    Map<String, Integer> directories = null;
    PreparedStatement addChangeset = null;
    PreparedStatement addDirchange = null;
    PreparedStatement addFilechange = null;
    PreparedStatement addFilemove = null;
    RuntimeEnvironment env = RuntimeEnvironment.getInstance();

    // Return immediately when there is nothing to do.
    List<HistoryEntry> entries = history.getHistoryEntries();
    if (entries.isEmpty()) {
        return;
    }

    for (int i = 0; ; i++) {
        try {
            if (reposId == null) {
                reposId = getRepositoryId(conn, repository);
                conn.commit();
            }

            if (authors == null) {
                authors = getAuthors(conn, history, reposId);
                conn.commit();
            }

            if (directories == null || files == null) {
                Map<String, Integer> dirs = new HashMap<String, Integer>();
                Map<String, Integer> fls = new HashMap<String, Integer>();
                getFilesAndDirectories(conn, history, reposId, dirs, fls);
                conn.commit();
                directories = dirs;
                files = fls;
            }

            if (addChangeset == null) {
                addChangeset = conn.getStatement(ADD_CHANGESET);
            }

            if (addDirchange == null) {
                addDirchange = conn.getStatement(ADD_DIRCHANGE);
            }

            if (addFilechange == null) {
                addFilechange = conn.getStatement(ADD_FILECHANGE);
            }

            if (addFilemove == null) {
                addFilemove = conn.getStatement(ADD_FILEMOVE);
            }

            // Success! Break out of the loop.
            break;
        } catch (SQLException sqle) {
            handleSQLException(sqle, i);
            conn.rollback();
        }
    }

    addChangeset.setInt(1, reposId);

    // getHistoryEntries() returns the entries in reverse chronological
    // order, but we want to insert them in chronological order so that
    // their auto-generated identity column can be used as a
    // chronological ordering column. Otherwise, incremental updates
    // would make the identity column unusable for chronological
    // ordering. Therefore we walk the list backwards.
    for (ListIterator<HistoryEntry> it = entries.listIterator(entries.size());
            it.hasPrevious(); ) {
        HistoryEntry entry = it.previous();
        retry:
        for (int i = 0; ; i++) {
            try {
                addChangeset.setString(2, entry.getRevision());
                addChangeset.setInt(3, authors.get(entry.getAuthor()));
                addChangeset.setTimestamp(4,
                        new Timestamp(entry.getDate().getTime()));
                String msg = entry.getMessage();
                // Truncate the message if it can't fit in a VARCHAR
                // (bug #11663).
                if (msg.length() > MAX_MESSAGE_LENGTH) {
                    msg = truncate(msg, MAX_MESSAGE_LENGTH);
                }
                addChangeset.setString(5, msg);
                int changesetId = nextChangesetId.getAndIncrement();
                addChangeset.setInt(6, changesetId);
                addChangeset.executeUpdate();

                // Add one row for each file in FILECHANGES, and one
                // row for each path element of the directories in
                // DIRCHANGES.
                Set<String> addedDirs = new HashSet<String>();
                addDirchange.setInt(1, changesetId);
                addFilechange.setInt(1, changesetId);
                for (String file : entry.getFiles()) {
                    // Skip files that the history marks as ignored.
                    String repodir = "";
                    try {
                        repodir = env.getPathRelativeToSourceRoot(
                                new File(repository.getDirectoryName()), 0);
                    } catch (IOException ex) {
                        Logger.getLogger(JDBCHistoryCache.class.getName())
                                .log(Level.SEVERE, null, ex);
                    }
                    String fullPath = toUnixPath(file);
                    if (!history.isIgnored(
                            file.substring(repodir.length() + 1))) {
                        int fileId = files.get(fullPath);
                        addFilechange.setInt(2, fileId);
                        addFilechange.executeUpdate();
                    }
                    String[] pathElts = splitPath(fullPath);
                    for (int j = 0; j < pathElts.length; j++) {
                        String dir = unsplitPath(pathElts, j);
                        // Only add to DIRCHANGES if we haven't already
                        // added this dir/changeset combination.
                        if (!addedDirs.contains(dir)) {
                            addDirchange.setInt(2, directories.get(dir));
                            addDirchange.executeUpdate();
                            addedDirs.add(dir);
                        }
                    }
                }

                conn.commit();

                // Successfully added the entry. Break out of the
                // retry loop.
                break retry;
            } catch (SQLException sqle) {
                handleSQLException(sqle, i);
                conn.rollback();
            }
        }
    }

    /*
     * Special handling for certain files - this is mainly for files
     * which have been renamed in a Mercurial repository. This ensures
     * that their complete history (follow) will be saved.
     */
    for (String filename : history.getIgnoredFiles()) {
        String filePath = repository.getDirectoryName()
                + File.separatorChar + filename;
        File file = new File(filePath);
        String repoPath = filePath.substring(
                env.getSourceRootPath().length());
        History hist;
        try {
            hist = repository.getHistory(file);
        } catch (HistoryException ex) {
            Logger.getLogger(JDBCHistoryCache.class.getName())
                    .log(Level.SEVERE, null, ex);
            continue;
        }

        int fileId = files.get(repoPath);
        for (HistoryEntry entry : hist.getHistoryEntries()) {
            retry:
            for (int i = 0; ; i++) {
                try {
                    int changesetId = getIdForRevision(entry.getRevision());

                    /*
                     * If the file exists in the changeset, store it in
                     * FILECHANGES. Otherwise, store it in the table
                     * tracking moves of the file when it had one of its
                     * previous names, so that it can still be found
                     * when fetching the history of a directory.
                     */
                    if (entry.getFiles().contains(repoPath)) {
                        addFilechange.setInt(1, changesetId);
                        addFilechange.setInt(2, fileId);
                        addFilechange.executeUpdate();
                    } else {
                        addFilemove.setInt(1, changesetId);
                        addFilemove.setInt(2, fileId);
                        addFilemove.executeUpdate();
                    }

                    conn.commit();
                    break retry;
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                    conn.rollback();
                }
            }
        }
    }
}
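// A hedged sketch of the splitPath/unsplitPath pair used in storeHistory:
// splitPath presumably breaks a source-root-relative Unix path into its
// elements, and unsplitPath(elements, n) rejoins the first n elements into
// the enclosing directory path. With n running from 0 to
// elements.length - 1, the DIRCHANGES loop above would visit every parent
// directory of the changed file, from the repository root down. This is an
// assumption based on the call sites, not the actual implementation.
private static String unsplitPathSketch(String[] pathElements, int n) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < n; i++) {
        sb.append('/').append(pathElements[i]);
    }
    return sb.toString(); // "" for n == 0 (the repository root)
}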