/**
   * Prepare prefetch store. {@inheritDoc}
   *
   * @see
   *     com.continuent.tungsten.replicator.plugin.ReplicatorPlugin#prepare(com.continuent.tungsten.replicator.plugin.PluginContext)
   */
  public void prepare(PluginContext context) throws ReplicatorException {
    // Perform super-class prepare.
    super.prepare(context);

    logger.info("Preparing PrefetchStore for slave catalog schema: " + slaveCatalogSchema);
    // Load defaults for connection
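    // (e.g., illustrative: a service named "alpha" would fall back to the JDBC
    // URL registered for "tungsten_alpha")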
    if (url == null) url = context.getJdbcUrl("tungsten_" + context.getServiceName());
    if (user == null) user = context.getJdbcUser();
    if (password == null) password = context.getJdbcPassword();

    // Connect.
    try {
      conn = DatabaseFactory.createDatabase(url, user, password);
      conn.connect(true);

      seqnoStatement =
          conn.prepareStatement(
              "select seqno, fragno, last_Frag, source_id, epoch_number, eventid, applied_latency from "
                  + slaveCatalogSchema
                  + "."
                  + CommitSeqnoTable.TABLE_NAME);
    } catch (SQLException e) {
      throw new ReplicatorException(e);
    }

    // Show that we have started.
    startTimeMillis = System.currentTimeMillis();
    prefetchState = PrefetchState.active;
  }
  /**
   * Retrieves the minimum and maximum values and the row count of a table's primary key. The
   * table must have a single-column numeric key for this to work correctly.
   */
  private MinMax retrieveMinMaxCountPK(Database conn, Table table) throws ReplicatorException {

    String pkName = table.getPrimaryKey().getColumns().get(0).getName();
    String sql =
        String.format(
            "SELECT MIN(%s),MAX(%s), COUNT(%s) FROM %s",
            pkName,
            pkName,
            pkName,
            conn.getDatabaseObjectName(table.getSchema())
                + '.'
                + conn.getDatabaseObjectName(table.getName()));
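    // Optional Oracle flashback clause: compute min/max/count as of the given
    // system change number rather than the current table contents.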
    if (eventId != null) sql += " AS OF SCN " + eventId;

    Statement st = null;
    ResultSet rs = null;
    try {
      st = conn.createStatement();
      rs = st.executeQuery(sql);
      if (rs.next()) {
        Object min = rs.getObject(1);
        Object max = rs.getObject(2);
        if (min instanceof Long && max instanceof Long) {
          return new MinMax(((Long) min), ((Long) max), rs.getLong(3));

        } else if (min instanceof BigDecimal && max instanceof BigDecimal) {
          return new MinMax(((BigDecimal) min), ((BigDecimal) max), rs.getLong(3));
        }
        return null;
      }
    } catch (Exception e) {
      logger.warn(
          "Failed to retrieve min, max and count values for PK "
              + pkName
              + " in table "
              + conn.getDatabaseObjectName(table.getSchema())
              + '.'
              + conn.getDatabaseObjectName(table.getName()),
          e);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException e) {
          logger.warn("Error while closing resultset", e);
        }
      }
      if (st != null) {
        try {
          st.close();
        } catch (SQLException e) {
          logger.warn("Error while closing statement", e);
        }
      }
    }
    return null;
  }
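
  // Illustrative example (hypothetical names): for a table "orders" in schema
  // "shop" with numeric primary key "order_id", retrieveMinMaxCountPK issues
  //   SELECT MIN(order_id),MAX(order_id), COUNT(order_id) FROM shop.orders
  // and returns a MinMax wrapping those three values, or null when the key
  // type is neither Long nor BigDecimal.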
  public void initializeHeartbeatStageTable(Database database) throws SQLException {
    if (logger.isDebugEnabled()) logger.debug("Initializing heartbeat staging table");

    // Create the table if it does not exist.
    if (database.findTable(hbStageTable.getSchema(), hbStageTable.getName()) == null) {
      database.createTable(
          this.hbStageTable, false, this.hbStageTable.getSchema(), tableType, serviceName);
    }
  }
 /** Wrapper for startHeartbeat() call. */
 public void startHeartbeat(
     String url, String user, String password, String name, String initScript)
     throws SQLException {
   Database db = null;
   try {
     db = DatabaseFactory.createDatabase(url, user, password);
     if (initScript != null) db.setInitScript(initScript);
     db.connect();
     startHeartbeat(db, name);
   } finally {
     if (db != null) db.close();
   }
 }
  /**
   * Execute this call to fill in heartbeat data on the slave. This call must be invoked after a
   * heartbeat event is applied.
   */
  public void completeHeartbeat(Database database, long seqno, String eventId) throws SQLException {
    if (logger.isDebugEnabled()) logger.debug("Processing slave heartbeat update");

    Statement st = null;
    ResultSet rs = null;
    Timestamp sts = new Timestamp(0);
    Timestamp now = new Timestamp(System.currentTimeMillis());
    ArrayList<Column> whereClause = new ArrayList<Column>();
    ArrayList<Column> values = new ArrayList<Column>();

    if (logger.isDebugEnabled()) logger.debug("Processing slave heartbeat update: " + now);

    // Get the source timestamp.
    try {
      st = database.createStatement();
      rs = st.executeQuery(sourceTsQuery);
      if (rs.next()) sts = rs.getTimestamp(1);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException ignore) {
          if (logger.isDebugEnabled()) logger.debug("Failed to close resultset", ignore);
        }
      }
      if (st != null) {
        try {
          st.close();
        } catch (SQLException ignore) {
          if (logger.isDebugEnabled()) logger.debug("Failed to close statement", ignore);
        }
      }
    }

    // Compute the difference between source and target.
    long lagMillis = now.getTime() - sts.getTime();
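    // e.g. source timestamp 12:00:00.000 and slave time 12:00:01.250 yield a
    // lag of 1250 ms; a negative value indicates clock skew between hosts.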

    // Update the heartbeat record with target time and difference.
    hbId.setValue(KEY);
    whereClause.add(hbId);

    hbSeqno.setValue(seqno);
    hbEventId.setValue(eventId);
    hbTargetTstamp.setValue(now);
    hbLagMillis.setValue(lagMillis);
    values.add(hbSeqno);
    values.add(hbEventId);
    values.add(hbTargetTstamp);
    values.add(hbLagMillis);

    database.update(hbTable, whereClause, values);
  }
 /**
  * Release queue. {@inheritDoc}
  *
  * @see
  *     com.continuent.tungsten.replicator.plugin.ReplicatorPlugin#release(com.continuent.tungsten.replicator.plugin.PluginContext)
  */
 public void release(PluginContext context) throws ReplicatorException {
   queue = null;
   if (conn != null) {
     conn.close();
     conn = null;
   }
 }
  /** Set up the heartbeat table on the master. */
  public void initializeHeartbeatTable(Database database) throws SQLException {
    if (logger.isDebugEnabled()) logger.debug("Initializing heartbeat table");

    // Create the table if it does not exist.
    if (database.findTungstenTable(hbTable.getSchema(), hbTable.getName()) == null) {
      database.createTable(this.hbTable, false, this.hbTable.getSchema(), tableType, serviceName);
    }

    // Add an initial heartbeat value if needed
    ResultSet res = null;
    PreparedStatement hbRowCount = null;
    int rows = 0;

    try {
      hbRowCount =
          database.prepareStatement(
              "SELECT count(*) from " + this.hbTable.getSchema() + "." + this.hbTable.getName());
      res = hbRowCount.executeQuery();
      if (res.next()) {
        rows = res.getInt(1);
      }
    } finally {
      if (res != null) {
        try {
          res.close();
        } catch (SQLException ignore) {
          if (logger.isDebugEnabled()) logger.debug("Failed to close resultset", ignore);
        }
      }
      if (hbRowCount != null) {
        try {
          hbRowCount.close();
        } catch (SQLException ignore) {
          if (logger.isDebugEnabled()) logger.debug("Failed to close statement", ignore);
        }
      }
    }

    if (rows == 0) {

      hbId.setValue(KEY);
      hbSourceTstamp.setValue(new Timestamp(System.currentTimeMillis()));
      hbSalt.setValue(saltValue.getAndIncrement());
      database.insert(hbTable);
    }
  }
  /**
   * {@inheritDoc}
   *
   * @see
   *     com.continuent.tungsten.replicator.plugin.ReplicatorPlugin#release(com.continuent.tungsten.replicator.plugin.PluginContext)
   */
  @Override
  public void release(PluginContext context) throws ReplicatorException, InterruptedException {
    // Step 10 End the subscription, then close the connection.
    if (connection != null) {
      executeQuery(
          "BEGIN DBMS_CDC_SUBSCRIBE.DROP_SUBSCRIPTION("
              + "subscription_name => 'TUNGSTEN_PUB');END;",
          false);
      connection.close();
      connection = null;
    }
  }
  /**
   * Applies a heartbeat update on the slave. This call is designed for data warehouses that cannot
   * apply a heartbeat using batch loading mechanisms.
   */
  public void applyHeartbeat(Database database, Timestamp sourceTimestamp, String name)
      throws SQLException {
    ArrayList<Column> whereClause = new ArrayList<Column>();
    ArrayList<Column> values = new ArrayList<Column>();

    if (logger.isDebugEnabled())
      logger.debug(
          "Applying heartbeat to slave: name=" + name + " sourceTstamp=" + sourceTimestamp);

    hbId.setValue(KEY);
    whereClause.add(hbId);
    hbSourceTstamp.setValue(sourceTimestamp);
    hbName.setValue(name);
    values.add(hbSourceTstamp);
    values.add(hbName);
    database.update(hbTable, whereClause, values);
  }
 /** Executes the given query, optionally ignoring any SQL error. */
 private void executeQuery(String query, boolean ignoreError) throws ReplicatorException {
   Statement stmt = null;
   ResultSet resultset = null;
   try {
     stmt = connection.getConnection().createStatement();
     resultset = stmt.executeQuery(query);
   } catch (SQLException e) {
     if (!ignoreError) throw new ReplicatorException("Failed to execute query " + query, e);
     else logger.warn("Ignoring exception : " + e.getMessage());
   } finally {
     try {
       if (resultset != null) resultset.close();
       if (stmt != null) stmt.close();
     } catch (SQLException ignore) {
       if (logger.isDebugEnabled()) logger.debug("Failed to close resultset", ignore);
     }
   }
 }
  /**
   * Execute this call to start a named heartbeat on the master. The heartbeat table update must be
   * logged as we will expect to see it as a DBMSEvent.
   */
  public void startHeartbeat(Database database, String name) throws SQLException {
    ArrayList<Column> whereClause = new ArrayList<Column>();
    ArrayList<Column> values = new ArrayList<Column>();
    Timestamp now = new Timestamp(System.currentTimeMillis());

    if (logger.isDebugEnabled())
      logger.debug("Processing master heartbeat update: name=" + name + " time=" + now);

    hbId.setValue(KEY);
    whereClause.add(hbId);
    hbSourceTstamp.setValue(now);
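    // The incrementing salt ensures consecutive heartbeat updates differ even
    // within the same millisecond, presumably so that each update is logged
    // and appears as a distinct DBMSEvent (see the javadoc above).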
    hbSalt.setValue(saltValue.getAndIncrement());
    hbName.setValue(name);
    values.add(hbSourceTstamp);
    values.add(hbSalt);
    values.add(hbName);

    database.update(hbTable, whereClause, values);
  }
  /**
   * Generates chunk definitions for the whole given schema.
   *
   * @param schemaName Name of the schema for which chunk definitions are generated.
   */
  private void generateChunksForSchema(String schemaName) {
    try {
      if (logger.isDebugEnabled()) logger.debug("Getting list of tables from " + schemaName);

      ArrayList<Table> tablesFromSchema = connection.getTables(schemaName, true, true);
      if (logger.isDebugEnabled()) logger.debug("Tables : " + tablesFromSchema);
      if (tablesFromSchema != null && tablesFromSchema.size() > 0) {
        for (Table table : tablesFromSchema) {
          ChunkRequest tableReq = new ChunkRequest(table.getSchema(), table.getName());
          if (!ignoreTablesDefinition.getChunksDefinitions().contains(tableReq)) {
            generateChunksForTable(table, -1, null);
          }
        }
      }
    } catch (Exception e) {
      logger.error("Failed to generate chunks for schema " + schemaName, e);
    }
  }
  /**
   * {@inheritDoc}
   *
   * @see
   *     com.continuent.tungsten.replicator.plugin.ReplicatorPlugin#prepare(com.continuent.tungsten.replicator.plugin.PluginContext)
   */
  @Override
  public void prepare(PluginContext context) throws ReplicatorException, InterruptedException {

    try {
      // Oracle JDBC URL, for example:
      // jdbc:oracle:thin:@192.168.0.60:1521:ORCL
      connection = DatabaseFactory.createDatabase(url, user, password);
    } catch (SQLException e) {
      throw new ReplicatorException("Unable to create database connection", e);
    }

    try {
      connection.connect();
    } catch (SQLException e) {
      throw new ReplicatorException("Unable to connect to Oracle", e);
    }

    Statement stmt = null;
    try {
      stmt = connection.createStatement();
    } catch (SQLException e) {
      throw new ReplicatorException("Unable to create a statement object", e);
    }

    // Step 1 Find the source tables for which the subscriber has access
    // privileges.
    ResultSet rs = null;
    try {
      rs = stmt.executeQuery("SELECT * FROM ALL_SOURCE_TABLES");
      sources = new ArrayList<OracleCDCSource>();
      while (rs.next()) {
        String srcSchema = rs.getString("SOURCE_SCHEMA_NAME");
        String srcTable = rs.getString("SOURCE_TABLE_NAME");
        if (logger.isDebugEnabled()) logger.debug("Subscribing to " + srcSchema + "." + srcTable);
        sources.add(new OracleCDCSource(srcSchema, srcTable));
      }
    } catch (SQLException e) {
      throw new ReplicatorException("Unable to connect to query source tables", e);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException ignore) {
          if (logger.isDebugEnabled()) logger.debug("Failed to close resultset", ignore);
        }
        rs = null;
      }
    }

    Set<String> changeSets = new LinkedHashSet<String>();

    // Step 2 Find the change set names and columns for which the subscriber
    // has access privileges.
    for (Iterator<OracleCDCSource> iterator = sources.iterator(); iterator.hasNext(); ) {
      OracleCDCSource src = iterator.next();
      try {
        String changeSetQuery =
            "SELECT UNIQUE CHANGE_SET_NAME, PUB.COLUMN_NAME,"
                + " PUB_ID, COL.COLUMN_ID "
                + " FROM ALL_PUBLISHED_COLUMNS PUB, ALL_TAB_COLUMNS COL "
                + " WHERE SOURCE_SCHEMA_NAME = '"
                + src.getSchema()
                + "'"
                + " AND SOURCE_TABLE_NAME = '"
                + src.getTable()
                + "'"
                + " AND SOURCE_SCHEMA_NAME = COL.OWNER "
                + " AND SOURCE_TABLE_NAME = COL.TABLE_NAME"
                + " AND PUB.COLUMN_NAME = COL.COLUMN_NAME"
                + " ORDER BY COL.COLUMN_ID";

        if (logger.isDebugEnabled()) logger.debug("Executing " + changeSetQuery);

        rs = stmt.executeQuery(changeSetQuery);

        while (rs.next()) {
          String changeSetName = rs.getString("CHANGE_SET_NAME");
          String columnName = rs.getString("COLUMN_NAME");
          long pubId = rs.getLong("PUB_ID");
          src.addPublication(changeSetName, columnName, pubId);

          changeSets.add(changeSetName);
          if (logger.isDebugEnabled())
            logger.debug("Found column " + changeSetName + "\t" + columnName + "\t" + pubId);
        }
      } catch (SQLException e) {
        throw new ReplicatorException("Unable to fetch change set definition", e);

      } finally {
        if (rs != null)
          try {
            rs.close();
          } catch (SQLException ignore) {
            if (logger.isDebugEnabled()) logger.debug("Failed to close resultset", ignore);
          }
      }
    }

    if (stmt != null)
      try {
        stmt.close();
      } catch (SQLException ignore) {
        if (logger.isDebugEnabled()) logger.debug("Failed to close statement object", ignore);
      }

    // Step 3 Create subscriptions.

    // For each publication, create the subscription to the publication if
    // not already done.
    // Then, subscribe
    int i = 1;
    subscriberViews = new HashMap<String, OracleCDCSource>();

    for (Iterator<OracleCDCSource> iterator = sources.iterator(); iterator.hasNext(); ) {
      OracleCDCSource src = iterator.next();
      Map<Long, OracleCDCPublication> publications = src.getPublications();

      StringBuffer subscribeStmt = new StringBuffer();
      for (OracleCDCPublication pub : publications.values()) {
        if (changeSets.remove(pub.getPublicationName())) {
          if (logger.isDebugEnabled())
            logger.debug("Creating subscription to " + pub.getPublicationName());

          /*
           * Dropping subscription if it already exists : this can
           * happen if release code was not called
           */
          executeQuery(
              "BEGIN DBMS_CDC_SUBSCRIBE.DROP_SUBSCRIPTION("
                  + "subscription_name => 'TUNGSTEN_PUB');END;",
              true);

          executeQuery(
              "BEGIN DBMS_CDC_SUBSCRIBE.CREATE_SUBSCRIPTION("
                  + "change_set_name => '"
                  + pub.getPublicationName()
                  + "', description => 'Change data used by Tungsten', "
                  + "subscription_name => 'TUNGSTEN_PUB"
                  + "');end;",
              false);
        }

        // Step 4 Subscribe to a source table and the columns in the
        // source table.
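        // One subscriber view is created per publication: VW_TUNGSTEN_CDC1,
        // VW_TUNGSTEN_CDC2, and so on.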
        String viewName = "VW_TUNGSTEN_CDC" + i;
        subscribeStmt.append(
            "DBMS_CDC_SUBSCRIBE.SUBSCRIBE(subscription_name => 'TUNGSTEN_PUB"
                + "', "
                + "publication_id    => "
                + pub.getPublicationId()
                + ","
                + "column_list => '"
                + pub.getColumnList()
                + "',"
                + "subscriber_view => '"
                + viewName
                + "');");

        subscriberViews.put(viewName, src);
        src.setSubscriptionView(viewName, pub.getPublicationId());

        if (logger.isDebugEnabled())
          logger.debug(
              "Creating change view "
                  + viewName
                  + " - Now handling "
                  + subscriberViews.keySet().size()
                  + " views");

        i++;
      }

      executeQuery("BEGIN " + subscribeStmt.toString() + " END;", false);
    }
    // Step 5 Activate the subscription.
    executeQuery(
        "BEGIN DBMS_CDC_SUBSCRIBE.ACTIVATE_SUBSCRIPTION("
            + "subscription_name => 'TUNGSTEN_PUB'"
            + ");END;",
        false);
  }
  /**
   * Runs the chunking task: connects to the database, generates chunk definitions and posts one
   * job-complete marker per extract channel when done.
   */
  private void runTask() {
    connection = null;
    try {
      connection = DatabaseFactory.createDatabase(url, user, password);
      connection.connect();
    } catch (SQLException e) {
      logger.error("Unable to connect to the database", e);
      return;
    }

    if (ignoreTablesFile != null) {
      ignoreTablesDefinition = new ChunkDefinitions(ignoreTablesFile);
      try {
        ignoreTablesDefinition.parseFile();
      } catch (Exception e) {
        logger.error("Failed to parse ignore-tables definition file " + ignoreTablesFile, e);
      }
    }

    // Check whether we have to use a chunk definition file
    if (chunkDefFile != null) {
      logger.info("Using definition from file " + chunkDefFile);
      chunkDefinition = new ChunkDefinitions(chunkDefFile);
      try {
        chunkDefinition.parseFile();
      } catch (IOException e) {
        logger.error("Failed to read chunk definition file " + chunkDefFile, e);
      } catch (ReplicatorException e) {
        logger.error("Failed to parse chunk definition file " + chunkDefFile, e);
      }

      LinkedList<ChunkRequest> chunksDefinitions = chunkDefinition.getChunksDefinitions();
      for (ChunkRequest chunkRequest : chunksDefinitions) {
        if (chunkRequest.getTable() != null) {
          try {
            Table table =
                connection.findTable(chunkRequest.getSchema(), chunkRequest.getTable(), true);

            if (table != null)
              generateChunksForTable(table, chunkRequest.getChunkSize(), chunkRequest.getColumns());
            else
              logger.warn(
                  "Failed while processing table "
                      + chunkRequest.getSchema()
                      + "."
                      + chunkRequest.getTable()
                      + " : table not found.");
          } catch (SQLException e) {
            logger.warn(
                "Failed to look up table "
                    + chunkRequest.getSchema()
                    + "."
                    + chunkRequest.getTable(),
                e);
          } catch (ReplicatorException e) {
            logger.warn(
                "Failed to generate chunks for table "
                    + chunkRequest.getSchema()
                    + "."
                    + chunkRequest.getTable(),
                e);
          } catch (InterruptedException e) {
            logger.warn("Interrupted while generating chunks", e);
            Thread.currentThread().interrupt();
          }
        } else if (chunkRequest.getSchema() != null) {
          generateChunksForSchema(chunkRequest.getSchema());
        }
      }
    } else {
      try {
        DatabaseMetaData databaseMetaData = connection.getDatabaseMetaData();
        ResultSet schemasRs = databaseMetaData.getSchemas();
        while (schemasRs.next()) {
          String schemaName = schemasRs.getString("TABLE_SCHEM");
          // TODO: System schemas could be needed -> this needs a
          // setting
          if (!connection.isSystemSchema(schemaName)) {
            generateChunksForSchema(schemaName);
          }
        }
        schemasRs.close();
      } catch (Exception e) {
        logger.error("Failed to generate chunks from schema list", e);
      }
    }

    // Stop threads
    for (int i = 0; i < extractChannels; i++) {
      logger.info("Posting job complete request " + i);
      try {
        chunks.put(new NumericChunk());
      } catch (InterruptedException e) {
        logger.warn("Interrupted while posting job complete request", e);
        Thread.currentThread().interrupt();
      }
    }

    if (logger.isDebugEnabled()) logger.debug(this.getName() + " done.");
  }
  /**
   * {@inheritDoc}
   *
   * @see com.continuent.tungsten.replicator.extractor.RawExtractor#extract()
   */
  @Override
  public DBMSEvent extract() throws ReplicatorException, InterruptedException {
    ArrayList<DBMSData> data = new ArrayList<DBMSData>();
    long maxSCN = -1;
    Timestamp sourceTStamp = null;

    boolean noData = true;

    try {
      if (logger.isDebugEnabled()) logger.debug("Extending Window");

      executeQuery(
          "BEGIN DBMS_CDC_SUBSCRIBE.EXTEND_WINDOW(subscription_name => 'TUNGSTEN_PUB');END;",
          false);
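      // EXTEND_WINDOW makes newly committed change data visible in the
      // subscriber views; once they are drained below, PURGE_WINDOW releases
      // the consumed rows.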

      if (logger.isDebugEnabled())
        logger.debug("Handling " + subscriberViews.keySet().size() + "views");
      for (String view : subscriberViews.keySet()) {
        OracleCDCSource cdcSource = subscriberViews.get(view);

        Statement stmt = connection.getConnection().createStatement();

        String statement;
        if (lastSCN != null)
          statement =
              "SELECT "
                  + cdcSource.getPublication(view).getColumnList()
                  + " , CSCN$, COMMIT_TIMESTAMP$, OPERATION$"
                  + " from "
                  + view
                  + " where cscn$ > "
                  + lastSCN
                  + "  order by cscn$, rsid$";
        else
          statement =
              "SELECT "
                  + cdcSource.getPublication(view).getColumnList()
                  + " , CSCN$, COMMIT_TIMESTAMP$, OPERATION$"
                  + " from "
                  + view
                  + "  order by cscn$, rsid$";

        resultset = stmt.executeQuery(statement);

        int userColumns = cdcSource.getPublication(view).getColumnsCount();

        if (logger.isDebugEnabled()) logger.debug("Running " + statement);
        OneRowChange updateRowChange = null;
        OneRowChange oneRowChange = null;

        while (resultset.next()) {
          noData = false;
          long currentSCN = resultset.getLong("CSCN$");
          if (maxSCN < currentSCN) maxSCN = currentSCN;

          if (sourceTStamp == null) sourceTStamp = resultset.getTimestamp("COMMIT_TIMESTAMP$");

          if (logger.isDebugEnabled()) {
            logger.debug("Receiving data");
            StringBuffer buffer = new StringBuffer();

            for (int i = 1; i <= resultset.getMetaData().getColumnCount(); i++) {
              if (buffer.length() > 0) buffer.append('\t');
              buffer.append(resultset.getString(i));
            }
            logger.debug("Received : " + buffer.toString());
          }

          String operation = resultset.getString("OPERATION$").trim();
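          // OPERATION$ encodes the change type: I = insert, D = delete, and an
          // update arrives as a UO/UN pair carrying the old and new row images,
          // which is why both feed the same OneRowChange below.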

          RowChangeData rowData = new RowChangeData();

          if (operation.equals("I")) {
            if (oneRowChange == null || !oneRowChange.getAction().equals(ActionType.INSERT)) {
              oneRowChange =
                  new OneRowChange(cdcSource.getSchema(), cdcSource.getTable(), ActionType.INSERT);
              rowData.appendOneRowChange(oneRowChange);
              data.add(rowData);
            }
            parseRowEvent(oneRowChange, false, userColumns);
          } else if (operation.equals("D")) {
            if (oneRowChange == null || !oneRowChange.getAction().equals(ActionType.DELETE)) {
              oneRowChange =
                  new OneRowChange(cdcSource.getSchema(), cdcSource.getTable(), ActionType.DELETE);
              rowData.appendOneRowChange(oneRowChange);
              data.add(rowData);
            }
            parseRowEvent(oneRowChange, true, userColumns);
          } else if (operation.startsWith("U")) {
            if (updateRowChange == null) {
              updateRowChange =
                  new OneRowChange(cdcSource.getSchema(), cdcSource.getTable(), ActionType.UPDATE);
              rowData.appendOneRowChange(updateRowChange);
              data.add(rowData);
              if (operation.equals("UO")) {
                parseRowEvent(updateRowChange, true, userColumns);
              } else if (operation.equals("UN")) {
                parseRowEvent(updateRowChange, false, userColumns);
              }
            } else {
              if (operation.equals("UO")) {
                parseRowEvent(updateRowChange, true, userColumns);
              } else if (operation.equals("UN")) {
                parseRowEvent(updateRowChange, false, userColumns);
              }
            }
          } else {
            logger.error(
                "Unable to extract data from CDC (operation should be I, D, UO or UN - found "
                    + operation
                    + ")");
          }
        }
        resultset.close();
        resultset = null;
        stmt.close();
      }
      lastSCN = null;
    } catch (SQLException e) {
      throw new ReplicatorException(e);
    } finally {
      if (resultset != null)
        try {
          resultset.close();
        } catch (SQLException e) {
          logger.warn("Failed to close resultset");
        }
      resultset = null;
    }
    if (noData) {
      logger.warn("Retrieved empty resultset... no data available... sleeping");
      Thread.sleep(1000);
    }
    if (logger.isDebugEnabled()) logger.debug("Purging window");

    executeQuery(
        "BEGIN DBMS_CDC_SUBSCRIBE.PURGE_WINDOW(subscription_name => 'TUNGSTEN_PUB');END;", false);

    if (data.size() > 0) {
      DBMSEvent event = new DBMSEvent(String.valueOf(maxSCN), data, sourceTStamp);

      // Mark the event as coming from Oracle.
      event.setMetaDataOption(ReplOptionParams.DBMS_TYPE, Database.ORACLE);

      // Strings are converted to UTF8 rather than using bytes for this
      // extractor.
      event.setMetaDataOption(ReplOptionParams.STRINGS, "utf8");

      return event;
    } else {
      return null;
    }
  }
  private void chunkVarcharPK(Table table) throws InterruptedException {
    String pkName = table.getPrimaryKey().getColumns().get(0).getName();
    String fqnTable =
        connection.getDatabaseObjectName(table.getSchema())
            + '.'
            + connection.getDatabaseObjectName(table.getName());
    // Get Count
    String sql = String.format("SELECT COUNT(%s) as cnt FROM %s", pkName, fqnTable);
    if (eventId != null) sql += " AS OF SCN " + eventId;

    // if count <= Chunk size, we are done
    long count = 0;
    Statement st = null;
    ResultSet rs = null;
    try {
      st = connection.createStatement();
      rs = st.executeQuery(sql);
      if (rs.next()) {
        count = rs.getLong("cnt");
      }
    } catch (Exception e) {
      logger.warn("Failed to retrieve count value for PK " + pkName + " in table " + fqnTable, e);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException e) {
          logger.warn("Error while closing resultset", e);
        }
      }
      if (st != null) {
        try {
          st.close();
        } catch (SQLException e) {
          logger.warn("Error while closing statement", e);
        }
      }
    }

    if (count == 0) return;

    if (count <= chunkSize) {
      chunks.put(new StringChunk(table, null, null));
      return;
    }

    // Else (count > CHUNK_SIZE) : chunk again in smaller parts
    long nbBlocks = count / chunkSize;

    if (count % chunkSize > 0) nbBlocks++;

    long blockSize = count / nbBlocks;
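    // e.g. count = 1050, chunkSize = 100 -> nbBlocks = 11 and
    // blockSize = 1050 / 11 = 95 (integer division).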

    PreparedStatement pstmt = null;

    StringBuffer sqlBuf = new StringBuffer("SELECT MIN(");
    sqlBuf.append(pkName);
    sqlBuf.append(") as min, MAX(");
    sqlBuf.append(pkName);
    sqlBuf.append(") as max, COUNT(");
    sqlBuf.append(pkName);
    sqlBuf.append(") as cnt FROM ( select sub.*, ROWNUM rnum from ( SELECT ");
    sqlBuf.append(pkName);
    sqlBuf.append(" FROM ");
    sqlBuf.append(fqnTable);
    sqlBuf.append(" ORDER BY ");
    sqlBuf.append(pkName);

    if (eventId != null) {
      sqlBuf.append(" AS OF SCN ");
      sqlBuf.append(eventId);
    }

    sqlBuf.append(") sub where ROWNUM <= ? ) where rnum >= ?");

    sql = sqlBuf.toString();
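    // Illustrative (hypothetical names): for pkName "id" and fqnTable "s.t"
    // the resulting query is classic ROWNUM pagination over the sorted key:
    //   SELECT MIN(id) as min, MAX(id) as max, COUNT(id) as cnt
    //   FROM ( select sub.*, ROWNUM rnum from ( SELECT id FROM s.t
    //          ORDER BY id ) sub where ROWNUM <= ? ) where rnum >= ?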

    try {
      pstmt = connection.prepareStatement(sql);
    } catch (SQLException e) {
      logger.warn("Failed to prepare chunking statement", e);
      return;
    }

    try {
      for (long i = 0; i < count; i += blockSize + 1) {
        try {
          pstmt.setLong(1, i + blockSize);
          pstmt.setLong(2, i);
          rs = pstmt.executeQuery();

          if (rs.next())
            chunks.put(new StringChunk(table, rs.getString("min"), rs.getString("max"), nbBlocks));

        } catch (SQLException e) {
          logger.warn("Error while retrieving chunk boundaries", e);
        } finally {
          if (rs != null) {
            try {
              rs.close();
            } catch (SQLException e) {
              logger.warn("Error while closing resultset", e);
            }
          }
        }
      }
    } finally {
      try {
        pstmt.close();
      } catch (SQLException e) {
        logger.warn("Error while closing chunking prepared statement", e);
      }
    }
  }
  private void generateChunkingPreparedStatement(Table table, long blockSize) throws SQLException {
    String fqnTable =
        connection.getDatabaseObjectName(table.getSchema())
            + '.'
            + connection.getDatabaseObjectName(table.getName());

    StringBuffer sqlBuffer = new StringBuffer("SELECT ");
    StringBuffer colBuf = new StringBuffer();
    whereClause = "";

    if (table.getPrimaryKey() != null) {
      // TODO
      // No dedicated chunking algorithm for this type of pk (either
      // composite or datatype not handled)
    } else {
      if (logger.isDebugEnabled()) logger.debug("Handling table " + table.toExtendedString());
      // This is a unique key that can be used
      Key key = table.getPKFromUniqueIndex();

      if (key == null) {
        logger.warn("getPKFromUniqueIndex returned null key for table " + fqnTable);
        throw new SQLException("Unable to chunk table " + fqnTable + ": no usable unique key");
      }
      ArrayList<Column> colsList = key.getColumns();

      if (logger.isDebugEnabled()) logger.debug("colsList = " + colsList);

      Column[] columns = new Column[colsList.size()];
      int i = 0;
      for (Column column : colsList) {
        columns[i] = column;
        i++;
      }

      whereClause = buildWhereClause(columns, 0);

      for (int j = 0; j < columns.length; j++) {
        if (j > 0) {
          colBuf.append(", ");
        }
        colBuf.append(columns[j].getName());
      }
      sqlBuffer.append(colBuf);
    }

    sqlBuffer.append(" FROM ");
    sqlBuffer.append(fqnTable);
    if (eventId != null) {
      sqlBuffer.append(" AS OF SCN ");
      sqlBuffer.append(eventId);
    }

    sqlBuffer.append(" ORDER BY ");
    sqlBuffer.append(colBuf);

    String sql = sqlBuffer.toString();
    if (logger.isDebugEnabled()) logger.debug("Generated statement :" + sql);
    pStmt = connection.prepareStatement(sql);

    // TODO : have a setting ?
    pStmt.setFetchSize(100);
  }
  private void chunkLimit(Table table) throws InterruptedException {
    // Table does not have a primary key. Let's chunk using limit.
    String fqnTable =
        connection.getDatabaseObjectName(table.getSchema())
            + '.'
            + connection.getDatabaseObjectName(table.getName());

    // Get Count
    String sql = String.format("SELECT COUNT(*) as cnt FROM %s", fqnTable);
    if (eventId != null) sql += " AS OF SCN " + eventId;

    long count = 0;
    Statement st = null;
    ResultSet rs = null;
    try {
      st = connection.createStatement();
      rs = st.executeQuery(sql);
      if (rs.next()) {
        count = rs.getLong("cnt");
      }
    } catch (Exception e) {
      logger.warn("Failed to retrieve row count values for table " + fqnTable, e);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException e) {
          logger.warn("Error while closing resultset", e);
        }
      }
      if (st != null) {
        try {
          st.close();
        } catch (SQLException e) {
          logger.warn("Error while closing statement", e);
        }
      }
    }
    if (count == 0) return;

    if (count <= chunkSize) {
      chunks.put(new LimitChunk(table));
      return;
    }

    // Else (count > CHUNK_SIZE) : chunk again in smaller parts
    long nbBlocks = count / chunkSize;

    if (count % chunkSize > 0) nbBlocks++;

    long blockSize = (long) Math.ceil((double) count / (double) nbBlocks);
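    // e.g. count = 1050, chunkSize = 100 -> nbBlocks = 11 and
    // blockSize = ceil(1050 / 11.0) = 96; unlike the varchar chunking above,
    // this rounds up rather than down.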

    try {
      generateChunkingPreparedStatement(table, blockSize);
    } catch (SQLException e) {
      logger.warn("Error while preparing chunking prepared statement", e);
      return;
    }

    Object[] fromValues = null, toValues = null;

    ResultSet result = null;
    try {
      result = pStmt.executeQuery();

      long processedRows = 0;
      while (result.next()) {
        processedRows++;
        if (processedRows % blockSize == 0) {
          // We reached the desired chunk size : send the chunk
          // definition.
          toValues = new Object[result.getMetaData().getColumnCount()];
          for (int j = 0; j < toValues.length; j++) {
            toValues[j] = result.getObject(j + 1);
          }

          chunks.put(
              new LimitChunk(
                  table, 0, 0 + blockSize, nbBlocks, fromValues, toValues, whereClause, blockSize));
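          // The upper bound of this chunk seeds the lower bound of the next,
          // so consecutive chunks tile the key space without gaps or overlap.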

          fromValues = toValues;
        } else if (processedRows >= count) {
          // Last chunk : we eventually did not reach the chunk size,
          // but the table was fully processed : send the last chunk
          // definition.
          chunks.put(
              new LimitChunk(
                  table, 0, 0 + blockSize, nbBlocks, fromValues, null, whereClause, blockSize));
        }
      }
    } catch (SQLException e1) {
      logger.warn("Error while executing chunking query", e1);
    } finally {
      if (result != null) {
        try {
          result.close();
        } catch (SQLException e) {
          logger.warn("Error while closing resultset", e);
        }
      }
      try {
        pStmt.close();
      } catch (SQLException e) {
        logger.warn("Error while closing chunking prepared statement", e);
      }
    }
  }