  /**
   * Retrieves the minimum value, maximum value, and row count of a table's primary key. The table
   * must have a single-column numeric primary key for this to work correctly.
   */
  private MinMax retrieveMinMaxCountPK(Database conn, Table table) throws ReplicatorException {

    String pkName = table.getPrimaryKey().getColumns().get(0).getName();
    String sql =
        String.format(
            "SELECT MIN(%s),MAX(%s), COUNT(%s) FROM %s",
            pkName,
            pkName,
            pkName,
            conn.getDatabaseObjectName(table.getSchema())
                + '.'
                + conn.getDatabaseObjectName(table.getName()));
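    // If an event id (an Oracle SCN) is set, read the table as of that SCN with a flashback
    // query so the returned values are consistent with that position.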
    if (eventId != null) sql += " AS OF SCN " + eventId;

    Statement st = null;
    ResultSet rs = null;
    try {
      st = conn.createStatement();
      rs = st.executeQuery(sql);
      if (rs.next()) {
        Object min = rs.getObject(1);
        Object max = rs.getObject(2);
        if (min instanceof Long && max instanceof Long) {
          return new MinMax(((Long) min), ((Long) max), rs.getLong(3));

        } else if (min instanceof BigDecimal && max instanceof BigDecimal) {
          return new MinMax(((BigDecimal) min), ((BigDecimal) max), rs.getLong(3));
        }
        return null;
      }
    } catch (Exception e) {
      logger.warn(
          "Failed to retrieve min, max and count values for PK "
              + pkName
              + " in table "
              + conn.getDatabaseObjectName(table.getSchema())
              + '.'
              + conn.getDatabaseObjectName(table.getName()),
          e);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException e) {
          logger.warn("Error while closing resultset", e);
        }
      }
      if (st != null) {
        try {
          st.close();
        } catch (SQLException e) {
          logger.warn("Error while closing statement", e);
        }
      }
    }
    return null;
  }
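  /** Set up the heartbeat staging table, creating it if it does not already exist. */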
  public void initializeHeartbeatStageTable(Database database) throws SQLException {
    if (logger.isDebugEnabled()) logger.debug("Initializing heartbeat staging table");

    // Create the table if it does not exist.
    if (database.findTable(hbStageTable.getSchema(), hbStageTable.getName()) == null) {
      database.createTable(
          this.hbStageTable, false, this.hbStageTable.getSchema(), tableType, serviceName);
    }
  }
  /** Set up the heartbeat table on the master. */
  public void initializeHeartbeatTable(Database database) throws SQLException {
    if (logger.isDebugEnabled()) logger.debug("Initializing heartbeat table");

    // Create the table if it does not exist.
    if (database.findTungstenTable(hbTable.getSchema(), hbTable.getName()) == null) {
      database.createTable(this.hbTable, false, this.hbTable.getSchema(), tableType, serviceName);
    }

    // Add an initial heartbeat value if needed
    ResultSet res = null;
    PreparedStatement hbRowCount = null;
    int rows = 0;

    try {
      hbRowCount =
          database.prepareStatement(
              "SELECT count(*) from " + this.hbTable.getSchema() + "." + this.hbTable.getName());
      res = hbRowCount.executeQuery();
      if (res.next()) {
        rows = res.getInt(1);
      }
    } finally {
      if (res != null) {
        try {
          res.close();
        } catch (SQLException e) {
          logger.warn("Error while closing resultset", e);
        }
      }
      if (hbRowCount != null) {
        try {
          hbRowCount.close();
        } catch (Exception e) {
          logger.warn("Error while closing statement", e);
        }
      }
    }

    if (rows == 0) {

      hbId.setValue(KEY);
      hbSourceTstamp.setValue(new Timestamp(System.currentTimeMillis()));
      hbSalt.setValue(saltValue.getAndIncrement());
      database.insert(hbTable);
    }
  }
  /**
   * Generates chunk definitions for the whole given schema.
   *
   * @param schemaName Name of the schema for which chunk definitions are generated.
   */
  private void generateChunksForSchema(String schemaName) {
    try {
      if (logger.isDebugEnabled()) logger.debug("Getting list of tables from " + schemaName);

      ArrayList<Table> tablesFromSchema = connection.getTables(schemaName, true, true);
      if (logger.isDebugEnabled()) logger.debug("Tables : " + tablesFromSchema);
      if (tablesFromSchema != null && tablesFromSchema.size() > 0) {
        for (Table table : tablesFromSchema) {
          ChunkRequest tableReq = new ChunkRequest(table.getSchema(), table.getName());
          if (!ignoreTablesDefinition.getChunksDefinitions().contains(tableReq)) {
            generateChunksForTable(table, -1, null);
          }
        }
      }
    } catch (Exception e) {
      logger.warn("Error while generating chunks for schema " + schemaName, e);
    }
  }
  /**
   * Generates chunk definitions for the given table, optionally using a given chunk size and a
   * given column list.
   *
   * @param table Table for which chunk definitions are generated.
   * @param tableChunkSize Chunk size to be used for the given table.
   * @param columns List of columns to be extracted for the given table.
   * @throws ReplicatorException
   * @throws InterruptedException
   */
  private void generateChunksForTable(Table table, long tableChunkSize, String[] columns)
      throws ReplicatorException, InterruptedException {
    long chunkSize;

    Integer pkType = getPKType(table);

    if (tableChunkSize == 0) {
      // No chunking for this table: the whole table is processed at once.
      chunks.put(new NoChunk(table, columns));
      return;
    } else if (tableChunkSize < 0) {
      // Use default chunk size
      chunkSize = this.chunkSize;
    } else {
      chunkSize = tableChunkSize;
    }

    if (logger.isDebugEnabled())
      logger.debug("Processing table " + table.getSchema() + "." + table.getName());

    if (pkType == null) chunkLimit(table);
    else if (pkType == Types.NUMERIC) chunkNumericPK(table, columns, chunkSize);
    else if (pkType == Types.VARCHAR) chunkVarcharPK(table);
  }
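  /**
   * Builds and prepares the ordered SELECT statement used to walk a table for LIMIT-based
   * chunking, ordering rows by the columns of a unique index when no dedicated primary key
   * chunking applies.
   */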
  private void generateChunkingPreparedStatement(Table table, long blockSize) throws SQLException {
    String fqnTable =
        connection.getDatabaseObjectName(table.getSchema())
            + '.'
            + connection.getDatabaseObjectName(table.getName());

    StringBuffer sqlBuffer = new StringBuffer("SELECT ");
    StringBuffer colBuf = new StringBuffer();
    whereClause = "";

    if (table.getPrimaryKey() != null) {
      // TODO
      // No dedicated chunking algorithm for this type of pk (either
      // composite or datatype not handled)
    } else {
      if (logger.isDebugEnabled()) logger.debug("Handling table " + table.toExtendedString());
      // This is a unique key that can be used
      Key key = table.getPKFromUniqueIndex();

      if (key == null) {
        // Bail out rather than dereferencing a null key below.
        logger.warn("getPKFromUniqueIndex returned null key for table " + fqnTable);
        throw new SQLException("No unique index usable for chunking on table " + fqnTable);
      }
      ArrayList<Column> colsList = key.getColumns();

      if (logger.isDebugEnabled()) logger.debug("colsList = " + colsList);

      Column[] columns = new Column[colsList.size()];
      int i = 0;
      for (Column column : colsList) {
        columns[i] = column;
        i++;
      }

      whereClause = buildWhereClause(columns, 0);

      for (int j = 0; j < columns.length; j++) {
        if (j > 0) {
          colBuf.append(", ");
        }
        colBuf.append(columns[j].getName());
      }
      sqlBuffer.append(colBuf);
    }

    sqlBuffer.append(" FROM ");
    sqlBuffer.append(fqnTable);
    if (eventId != null) {
      sqlBuffer.append(" AS OF SCN ");
      sqlBuffer.append(eventId);
    }

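    // Order by the selected key columns so that successive reads walk the table deterministically.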
    sqlBuffer.append(" ORDER BY ");
    sqlBuffer.append(colBuf);

    String sql = sqlBuffer.toString();
    if (logger.isDebugEnabled()) logger.debug("Generated statement :" + sql);
    pStmt = connection.prepareStatement(sql);

    // TODO : have a setting ?
    pStmt.setFetchSize(100);
  }
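  /**
   * Generates chunk definitions for a table that has no primary key usable for dedicated
   * chunking, reading the table in order and emitting one chunk definition per block of rows.
   *
   * @param table Table for which chunk definitions are generated.
   */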
  private void chunkLimit(Table table) throws InterruptedException {
    // Table does not have a primary key. Let's chunk using limit.
    String fqnTable =
        connection.getDatabaseObjectName(table.getSchema())
            + '.'
            + connection.getDatabaseObjectName(table.getName());

    // Get Count
    String sql = String.format("SELECT COUNT(*) as cnt FROM %s", fqnTable);
    if (eventId != null) sql += " AS OF SCN " + eventId;

    long count = 0;
    Statement st = null;
    ResultSet rs = null;
    try {
      st = connection.createStatement();
      rs = st.executeQuery(sql);
      if (rs.next()) {
        count = rs.getLong("cnt");
      }
    } catch (Exception e) {
      logger.warn("Failed to retrieve row count values for table " + fqnTable, e);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException e) {
          logger.warn("Error while closing resultset", e);
        }
      }
      if (st != null) {
        try {
          st.close();
        } catch (SQLException e) {
          logger.warn("Error while closing statement", e);
        }
      }
    }
    if (count == 0) return;

    if (count <= chunkSize) {
      chunks.put(new LimitChunk(table));
      return;
    }

    // Else (count > chunkSize): split the table into smaller chunks.
    long nbBlocks = count / chunkSize;

    if (count % chunkSize > 0) nbBlocks++;

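    // Spread rows evenly across the computed number of blocks.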
    long blockSize = (long) Math.ceil((double) count / (double) nbBlocks);

    try {
      generateChunkingPreparedStatement(table, blockSize);
    } catch (SQLException e) {
      logger.warn("Error while preparing chunking prepared statement", e);
      // The prepared statement is not usable: skip this table.
      return;
    }

    Object[] fromValues = null, toValues = null;

    ResultSet result = null;
    try {
      result = pStmt.executeQuery();

      // Row counter, named to avoid shadowing the chunkSize field.
      long rowCount = 0;
      while (result.next()) {
        rowCount++;
        if (rowCount % blockSize == 0) {
          // We reached the desired chunk size: send the chunk definition.
          toValues = new Object[result.getMetaData().getColumnCount()];
          for (int j = 0; j < toValues.length; j++) {
            toValues[j] = result.getObject(j + 1);
          }

          chunks.put(
              new LimitChunk(
                  table, 0, 0 + blockSize, nbBlocks, fromValues, toValues, whereClause, blockSize));

          fromValues = toValues;
        } else if (rowCount >= count) {
          // Last chunk: we may not have reached the chunk size, but the
          // table was fully processed, so send the last chunk definition.
          chunks.put(
              new LimitChunk(
                  table, 0, 0 + blockSize, nbBlocks, fromValues, null, whereClause, blockSize));
        }
      }
    } catch (SQLException e1) {
      logger.warn("Error while executing chunking query", e1);
    } finally {
      if (result != null) {
        try {
          result.close();
        } catch (SQLException e) {
          logger.warn("Error while closing resultset", e);
        }
      }
      if (pStmt != null) {
        try {
          pStmt.close();
        } catch (SQLException e) {
          logger.warn("Error while closing chunking prepared statement", e);
        }
      }
    }
  }
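  /**
   * Generates chunk definitions for a table whose single-column primary key is a varchar, using
   * ROWNUM-based pagination to compute the min/max key boundaries of each block.
   *
   * @param table Table for which chunk definitions are generated.
   */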
  private void chunkVarcharPK(Table table) throws InterruptedException {
    String pkName = table.getPrimaryKey().getColumns().get(0).getName();
    String fqnTable =
        connection.getDatabaseObjectName(table.getSchema())
            + '.'
            + connection.getDatabaseObjectName(table.getName());
    // Get Count
    String sql = String.format("SELECT COUNT(%s) as cnt FROM %s", pkName, fqnTable);
    if (eventId != null) sql += " AS OF SCN " + eventId;

    // if count <= Chunk size, we are done
    long count = 0;
    Statement st = null;
    ResultSet rs = null;
    try {
      st = connection.createStatement();
      rs = st.executeQuery(sql);
      if (rs.next()) {
        count = rs.getLong("cnt");
      }
    } catch (Exception e) {
      logger.warn("Failed to retrieve count value for PK " + pkName + " in table " + fqnTable, e);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException e) {
          logger.warn("Error while closing resultset", e);
        }
      }
      if (st != null) {
        try {
          st.close();
        } catch (SQLException e) {
          logger.warn("Error while closing statement", e);
        }
      }
    }

    if (count == 0) return;

    if (count <= chunkSize) {
      chunks.put(new StringChunk(table, null, null));
      return;
    }

    // Else (count > chunkSize): split the table into smaller chunks.
    long nbBlocks = count / chunkSize;

    if (count % chunkSize > 0) nbBlocks++;

    long blockSize = count / nbBlocks;

    PreparedStatement pstmt = null;

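    // Build an Oracle ROWNUM pagination query of the form:
    //   SELECT MIN(pk) as min, MAX(pk) as max, COUNT(pk) as cnt
    //   FROM (SELECT sub.*, ROWNUM rnum
    //         FROM (SELECT pk FROM schema.table [AS OF SCN n] ORDER BY pk) sub
    //         WHERE ROWNUM <= ?)
    //   WHERE rnum >= ?
    // so that the min and max key values of each block can be fetched in turn.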
    StringBuffer sqlBuf = new StringBuffer("SELECT MIN(");
    sqlBuf.append(pkName);
    sqlBuf.append(") as min, MAX(");
    sqlBuf.append(pkName);
    sqlBuf.append(") as max, COUNT(");
    sqlBuf.append(pkName);
    sqlBuf.append(") as cnt FROM ( select sub.*, ROWNUM rnum from ( SELECT ");
    sqlBuf.append(pkName);
    sqlBuf.append(" FROM ");
    sqlBuf.append(fqnTable);
    sqlBuf.append(" ORDER BY ");
    sqlBuf.append(pkName);

    if (eventId != null) {
      sqlBuf.append(" AS OF SCN ");
      sqlBuf.append(eventId);
    }

    sqlBuf.append(") sub where ROWNUM <= ? ) where rnum >= ?");

    sql = sqlBuf.toString();

    try {
      pstmt = connection.prepareStatement(sql);
    } catch (SQLException e) {
      logger.warn("Error while preparing varchar chunking statement", e);
      // The prepared statement is not usable: skip this table.
      return;
    }

    try {
      for (long i = 0; i < count; i += blockSize + 1) {
        try {
          pstmt.setLong(1, i + blockSize);
          pstmt.setLong(2, i);
          rs = pstmt.executeQuery();

          if (rs.next())
            chunks.put(new StringChunk(table, rs.getString("min"), rs.getString("max"), nbBlocks));

        } catch (SQLException e) {
          logger.warn("Error while retrieving chunk boundaries for table " + fqnTable, e);
        } finally {
          try {
            rs.close();
          } catch (SQLException e) {
            logger.warn("Error while closing resultset", e);
          }
        }
      }
    } finally {
      try {
        pstmt.close();
      } catch (SQLException e) {
        logger.warn("Error while closing prepared statement", e);
      }
    }
  }