/**
   * Prepare prefetch store. {@inheritDoc}
   *
   * @see
   *     com.continuent.tungsten.replicator.plugin.ReplicatorPlugin#prepare(com.continuent.tungsten.replicator.plugin.PluginContext)
   */
  public void prepare(PluginContext context) throws ReplicatorException {
    // Perform super-class prepare.
    super.prepare(context);

    logger.info("Preparing PrefetchStore for slave catalog schema: " + slaveCatalogSchema);
    // Load defaults for connection
    if (url == null) url = context.getJdbcUrl("tungsten_" + context.getServiceName());
    if (user == null) user = context.getJdbcUser();
    if (password == null) password = context.getJdbcPassword();

    // Connect.
    try {
      conn = DatabaseFactory.createDatabase(url, user, password);
      conn.connect(true);

      seqnoStatement =
          conn.prepareStatement(
              "select seqno, fragno, last_Frag, source_id, epoch_number, eventid, applied_latency from "
                  + slaveCatalogSchema
                  + "."
                  + CommitSeqnoTable.TABLE_NAME);
    } catch (SQLException e) {
      throw new ReplicatorException(e);
    }

    // Show that we have started.
    startTimeMillis = System.currentTimeMillis();
    prefetchState = PrefetchState.active;
  }
  /** Set up the heartbeat table on the master. */
  public void initializeHeartbeatTable(Database database) throws SQLException {
    if (logger.isDebugEnabled()) logger.debug("Initializing heartbeat table");

    // Create the table if it does not exist.
    if (database.findTungstenTable(hbTable.getSchema(), hbTable.getName()) == null) {
      database.createTable(this.hbTable, false, this.hbTable.getSchema(), tableType, serviceName);
    }

    // Add an initial heartbeat value if needed
    ResultSet res = null;
    PreparedStatement hbRowCount = null;
    int rows = 0;

    try {
      hbRowCount =
          database.prepareStatement(
              "SELECT count(*) from " + this.hbTable.getSchema() + "." + this.hbTable.getName());
      res = hbRowCount.executeQuery();
      if (res.next()) {
        rows = res.getInt(1);
      }
    } finally {
      if (res != null) {
        try {
          res.close();
        } catch (SQLException e) {
          // Ignore failures while closing the result set; nothing further can be done here.
        }
      }
      if (hbRowCount != null) {
        try {
          hbRowCount.close();
        } catch (Exception e) {
          // Ignore failures while closing the statement.
        }
      }
    }

    if (rows == 0) {

      hbId.setValue(KEY);
      hbSourceTstamp.setValue(new Timestamp(System.currentTimeMillis()));
      hbSalt.setValue(saltValue.getAndIncrement());
      database.insert(hbTable);
    }
  }
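  /**
   * Builds the prepared statement used to fetch rows for a chunk: a SELECT over the chunking key
   * columns, optionally pinned to a snapshot with an Oracle flashback (AS OF SCN) clause, and
   * ordered by those columns so that chunk boundaries are deterministic.
   */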
  private void generateChunkingPreparedStatement(Table table, long blockSize) throws SQLException {
    String fqnTable =
        connection.getDatabaseObjectName(table.getSchema())
            + '.'
            + connection.getDatabaseObjectName(table.getName());

    StringBuffer sqlBuffer = new StringBuffer("SELECT ");
    StringBuffer colBuf = new StringBuffer();
    whereClause = "";

    if (table.getPrimaryKey() != null) {
      // TODO
      // No dedicated chunking algorithm for this type of pk (either
      // composite or datatype not handled)
    } else {
      if (logger.isDebugEnabled()) logger.debug("Handling table " + table.toExtendedString());
      // This is a unique key that can be used
      Key key = table.getPKFromUniqueIndex();

      if (key == null) {
        logger.warn("getPKFromUniqueIndex returned null key for " + fqnTable + "; cannot chunk");
        return;
      }
      ArrayList<Column> colsList = key.getColumns();

      if (logger.isDebugEnabled()) logger.debug("colsList = " + colsList);

      Column[] columns = colsList.toArray(new Column[0]);

      whereClause = buildWhereClause(columns, 0);

      for (int j = 0; j < columns.length; j++) {
        if (j > 0) {
          colBuf.append(", ");
        }
        colBuf.append(columns[j].getName());
      }
      sqlBuffer.append(colBuf);
    }

    sqlBuffer.append(" FROM ");
    sqlBuffer.append(fqnTable);
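    // When an event ID (an Oracle SCN) is available, pin the query to that snapshot with a
    // flashback clause so every chunk reads consistent data.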
    if (eventId != null) {
      sqlBuffer.append(" AS OF SCN ");
      sqlBuffer.append(eventId);
    }

    sqlBuffer.append(" ORDER BY ");
    sqlBuffer.append(colBuf);

    String sql = sqlBuffer.toString();
    if (logger.isDebugEnabled()) logger.debug("Generated statement :" + sql);
    pStmt = connection.prepareStatement(sql);

    // TODO: make the fetch size configurable?
    pStmt.setFetchSize(100);
  }
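  /**
   * Splits a table with a VARCHAR primary key into chunks, using the first key column as the
   * chunking key. The row count is retrieved first; if the table fits within the chunk size a
   * single chunk covers it, otherwise ROWNUM-based windows are used to compute the MIN/MAX key
   * values bounding each chunk.
   */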
  private void chunkVarcharPK(Table table) throws InterruptedException {
    String pkName = table.getPrimaryKey().getColumns().get(0).getName();
    String fqnTable =
        connection.getDatabaseObjectName(table.getSchema())
            + '.'
            + connection.getDatabaseObjectName(table.getName());
    // Get Count
    String sql = String.format("SELECT COUNT(%s) as cnt FROM %s", pkName, fqnTable);
    if (eventId != null) sql += " AS OF SCN " + eventId;

    // If count <= chunkSize, the whole table fits in a single chunk.
    long count = 0;
    Statement st = null;
    ResultSet rs = null;
    try {
      st = connection.createStatement();
      rs = st.executeQuery(sql);
      if (rs.next()) {
        count = rs.getLong("cnt");
      }
    } catch (Exception e) {
      logger.warn("Failed to retrieve count value for PK " + pkName + " in table " + fqnTable, e);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException e) {
          logger.warn("Failed to close result set", e);
        }
      }
      if (st != null) {
        try {
          st.close();
        } catch (SQLException e) {
          logger.warn("Failed to close statement", e);
        }
      }
    }

    if (count == 0) return;

    if (count <= chunkSize) {
      chunks.put(new StringChunk(table, null, null));
      return;
    }

    // Else (count > chunkSize): split the key range into smaller, roughly equal blocks
    long nbBlocks = count / chunkSize;

    if (count % chunkSize > 0) nbBlocks++;

    long blockSize = count / nbBlocks;

    PreparedStatement pstmt = null;

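    // For each ROWNUM window over the ordered key, fetch the MIN and MAX values that will bound
    // the corresponding string chunk.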
    StringBuffer sqlBuf = new StringBuffer("SELECT MIN(");
    sqlBuf.append(pkName);
    sqlBuf.append(") as min, MAX(");
    sqlBuf.append(pkName);
    sqlBuf.append(") as max, COUNT(");
    sqlBuf.append(pkName);
    sqlBuf.append(") as cnt FROM ( select sub.*, ROWNUM rnum from ( SELECT ");
    sqlBuf.append(pkName);
    sqlBuf.append(" FROM ");
    sqlBuf.append(fqnTable);
    sqlBuf.append(" ORDER BY ");
    sqlBuf.append(pkName);

    if (eventId != null) {
      sqlBuf.append(" AS OF SCN ");
      sqlBuf.append(eventId);
    }

    sqlBuf.append(") sub where ROWNUM <= ? ) where rnum >= ?");

    sql = sqlBuf.toString();

    try {
      pstmt = connection.prepareStatement(sql);
    } catch (SQLException e) {
      logger.warn("Failed to prepare chunking statement for table " + fqnTable, e);
      return;
    }

    try {
      for (long i = 0; i < count; i += blockSize + 1) {
        try {
          pstmt.setLong(1, i + blockSize);
          pstmt.setLong(2, i);
          rs = pstmt.executeQuery();

          if (rs.next())
            chunks.put(new StringChunk(table, rs.getString("min"), rs.getString("max"), nbBlocks));

        } catch (SQLException e) {
          logger.warn("Failed to compute chunk boundaries for table " + fqnTable, e);
        } finally {
          if (rs != null) {
            try {
              rs.close();
            } catch (SQLException e) {
              logger.warn("Failed to close result set", e);
            }
          }
        }
      }
    } finally {
      try {
        pstmt.close();
      } catch (SQLException e) {
        logger.warn("Failed to close prepared statement", e);
      }
    }
  }