/**
 * This will look through the completed_txn_components table and look for partitions or tables
 * that may be ready for compaction.  Also, look through txns and txn_components tables for
 * aborted transactions that we should add to the list.
 *
 * @param maxAborted Maximum number of aborted transactions to allow before marking this as a
 *                   potential compaction.
 * @return set of CompactionInfo structs.  These will not have id, type, or runAs set since
 *         these are only potential compactions, not actual ones.
 */
public Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException {
  Connection dbConn = getDbConn();
  Set<CompactionInfo> response = new HashSet<CompactionInfo>();
  try {
    Statement stmt = dbConn.createStatement();
    // Check for completed transactions
    String s = "select distinct ctc_database, ctc_table, " +
        "ctc_partition from COMPLETED_TXN_COMPONENTS";
    LOG.debug("Going to execute query <" + s + ">");
    ResultSet rs = stmt.executeQuery(s);
    while (rs.next()) {
      CompactionInfo info = new CompactionInfo();
      info.dbname = rs.getString(1);
      info.tableName = rs.getString(2);
      info.partName = rs.getString(3);
      response.add(info);
    }

    // Check for aborted txns
    s = "select tc_database, tc_table, tc_partition " +
        "from TXNS, TXN_COMPONENTS " +
        "where txn_id = tc_txnid and txn_state = '" + TXN_ABORTED + "' " +
        "group by tc_database, tc_table, tc_partition " +
        "having count(*) > " + maxAborted;
    LOG.debug("Going to execute query <" + s + ">");
    rs = stmt.executeQuery(s);
    while (rs.next()) {
      CompactionInfo info = new CompactionInfo();
      info.dbname = rs.getString(1);
      info.tableName = rs.getString(2);
      info.partName = rs.getString(3);
      info.tooManyAborts = true;
      response.add(info);
    }
    LOG.debug("Going to rollback");
    dbConn.rollback();
  } catch (SQLException e) {
    LOG.error("Unable to find potential compactions, " + e.getMessage());
  } finally {
    closeDbConn(dbConn);
  }
  return response;
}
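// Illustrative sketch (an assumption, not part of the original class): one way an
// initiator-style caller might consume findPotentialCompactions.  The threshold value
// and the logging-only body are hypothetical; a real caller would go on to queue a
// compaction request for each candidate it decides to compact.
private void logPotentialCompactions(int abortedThreshold) throws MetaException {
  for (CompactionInfo ci : findPotentialCompactions(abortedThreshold)) {
    // id, type, and runAs are unset here; only dbname/tableName/partName (and possibly
    // tooManyAborts) identify the candidate.
    LOG.info("Potential compaction: " + ci.dbname + "." + ci.tableName +
        (ci.partName == null ? "" : "." + ci.partName) +
        (ci.tooManyAborts ? " (too many aborted txns)" : ""));
  }
}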
/**
 * Find entries in the queue that are ready to be cleaned.
 *
 * @return information on each entry in the queue that is ready to be cleaned.
 */
public List<CompactionInfo> findReadyToClean() throws MetaException {
  Connection dbConn = getDbConn();
  List<CompactionInfo> rc = new ArrayList<CompactionInfo>();
  try {
    Statement stmt = dbConn.createStatement();
    String s = "select cq_id, cq_database, cq_table, cq_partition, " +
        "cq_type, cq_run_as from COMPACTION_QUEUE where cq_state = '" + READY_FOR_CLEANING + "'";
    LOG.debug("Going to execute query <" + s + ">");
    ResultSet rs = stmt.executeQuery(s);
    while (rs.next()) {
      CompactionInfo info = new CompactionInfo();
      info.id = rs.getLong(1);
      info.dbname = rs.getString(2);
      info.tableName = rs.getString(3);
      info.partName = rs.getString(4);
      switch (rs.getString(5).charAt(0)) {
        case MAJOR_TYPE: info.type = CompactionType.MAJOR; break;
        case MINOR_TYPE: info.type = CompactionType.MINOR; break;
        default: throw new MetaException("Unexpected compaction type " + rs.getString(5));
      }
      info.runAs = rs.getString(6);
      rc.add(info);
    }
    LOG.debug("Going to rollback");
    dbConn.rollback();
    return rc;
  } catch (SQLException e) {
    LOG.error("Unable to select next element for cleaning, " + e.getMessage());
    try {
      LOG.debug("Going to rollback");
      dbConn.rollback();
    } catch (SQLException e1) {
      // Nothing more we can do if the rollback itself fails.
    }
    throw new MetaException("Unable to connect to transaction database " +
        StringUtils.stringifyException(e));
  } finally {
    closeDbConn(dbConn);
  }
}
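// Illustrative sketch (an assumption, not part of the original class): how a
// cleaner-style caller might walk the READY_FOR_CLEANING entries.  The logging-only
// body is hypothetical; a real cleaner would remove obsolete files for each entry and
// then mark it cleaned.
private void sketchCleanerPass() throws MetaException {
  for (CompactionInfo ci : findReadyToClean()) {
    // Unlike findPotentialCompactions, these entries carry id, type, and runAs,
    // because they were fully resolved when the compaction was queued and run.
    LOG.info("Ready to clean: id=" + ci.id + " " + ci.dbname + "." + ci.tableName +
        (ci.partName == null ? "" : "." + ci.partName) +
        " type=" + ci.type + " runAs=" + ci.runAs);
  }
}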
/**
 * This will grab the next compaction request off of the queue and assign it to the worker.
 *
 * @param workerId id of the worker calling this; it will be recorded in the db
 * @return an info element for this compaction request, or null if there is no work to do now.
 */
public CompactionInfo findNextToCompact(String workerId) throws MetaException {
  try {
    Connection dbConn = getDbConn();
    CompactionInfo info = new CompactionInfo();
    try {
      Statement stmt = dbConn.createStatement();
      String s = "select cq_id, cq_database, cq_table, cq_partition, " +
          "cq_type from COMPACTION_QUEUE where cq_state = '" + INITIATED_STATE + "' for update";
      LOG.debug("Going to execute query <" + s + ">");
      ResultSet rs = stmt.executeQuery(s);
      if (!rs.next()) {
        LOG.debug("No compactions found ready to compact");
        dbConn.rollback();
        return null;
      }
      info.id = rs.getLong(1);
      info.dbname = rs.getString(2);
      info.tableName = rs.getString(3);
      info.partName = rs.getString(4);
      switch (rs.getString(5).charAt(0)) {
        case MAJOR_TYPE: info.type = CompactionType.MAJOR; break;
        case MINOR_TYPE: info.type = CompactionType.MINOR; break;
        default: throw new MetaException("Unexpected compaction type " + rs.getString(5));
      }

      // Now, update this record as being worked on by this worker.
      long now = System.currentTimeMillis();
      s = "update COMPACTION_QUEUE set cq_worker_id = '" + workerId + "', " +
          "cq_start = " + now + ", cq_state = '" + WORKING_STATE + "' where cq_id = " + info.id;
      LOG.debug("Going to execute update <" + s + ">");
      if (stmt.executeUpdate(s) != 1) {
        LOG.error("Unable to update compaction record");
        LOG.debug("Going to rollback");
        dbConn.rollback();
        // The record was not claimed, so don't hand it to the worker.
        return null;
      }
      LOG.debug("Going to commit");
      dbConn.commit();
      return info;
    } catch (SQLException e) {
      LOG.error("Unable to select next element for compaction, " + e.getMessage());
      try {
        LOG.debug("Going to rollback");
        dbConn.rollback();
      } catch (SQLException e1) {
        // Nothing more we can do if the rollback itself fails.
      }
      detectDeadlock(e, "findNextToCompact");
      throw new MetaException("Unable to connect to transaction database " +
          StringUtils.stringifyException(e));
    } finally {
      closeDbConn(dbConn);
    }
  } catch (DeadlockException e) {
    return findNextToCompact(workerId);
  } finally {
    deadlockCnt = 0;
  }
}
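// Illustrative sketch (an assumption, not part of the original class): how a
// worker-style caller might claim work.  The workerId format and the logging-only
// body are hypothetical; a real worker would perform the compaction after claiming
// the entry.
private void sketchWorkerPoll(String workerId) throws MetaException {
  CompactionInfo ci = findNextToCompact(workerId);
  if (ci == null) {
    // Nothing in the INITIATED state right now; a real worker would sleep and retry.
    LOG.debug("No compaction work available for worker " + workerId);
    return;
  }
  // The entry is now marked WORKING with this workerId and a start time, so other
  // workers selecting with "for update" will not pick it up.
  LOG.info("Worker " + workerId + " claimed compaction id=" + ci.id + " on " +
      ci.dbname + "." + ci.tableName +
      (ci.partName == null ? "" : "." + ci.partName) + " type=" + ci.type);
}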