/**
   * Prepare prefetch store. {@inheritDoc}
   *
   * @see
   *     com.continuent.tungsten.replicator.plugin.ReplicatorPlugin#prepare(com.continuent.tungsten.replicator.plugin.PluginContext)
   */
  public void prepare(PluginContext context) throws ReplicatorException {
    // Let the super-class perform its own preparation first.
    super.prepare(context);

    logger.info("Preparing PrefetchStore for slave catalog schema: " + slaveCatalogSchema);

    // Any connection setting left unconfigured falls back to the service defaults.
    if (url == null) {
      url = context.getJdbcUrl("tungsten_" + context.getServiceName());
    }
    if (user == null) {
      user = context.getJdbcUser();
    }
    if (password == null) {
      password = context.getJdbcPassword();
    }

    // Open the catalog connection and pre-compile the commit-seqno query.
    try {
      conn = DatabaseFactory.createDatabase(url, user, password);
      conn.connect(true);

      String seqnoQuery =
          "select seqno, fragno, last_Frag, source_id, epoch_number, eventid, applied_latency from "
              + slaveCatalogSchema
              + "."
              + CommitSeqnoTable.TABLE_NAME;
      seqnoStatement = conn.prepareStatement(seqnoQuery);
    } catch (SQLException e) {
      throw new ReplicatorException(e);
    }

    // Record the start time and mark the store active.
    startTimeMillis = System.currentTimeMillis();
    prefetchState = PrefetchState.active;
  }
  /**
   * Prepares connection to the filtering server and parses definition file. {@inheritDoc}
   *
   * @see
   *     com.continuent.tungsten.replicator.plugin.ReplicatorPlugin#prepare(com.continuent.tungsten.replicator.plugin.PluginContext)
   */
  public void prepare(PluginContext context) throws ReplicatorException {
    final String serviceName = context.getServiceName();

    // Messages exchanged with the filter server are tagged with the service name.
    messageGenerator = new ClientMessageGenerator(serviceName);

    // Parse the definitions file, open the connection, then greet the server.
    initDefinitionsFile();
    initConnection();
    doHandshake(serviceName);
  }
 /**
  * Connect to underlying queue. {@inheritDoc}
  *
  * <p>Looks up the named store in the plugin context and verifies that it is an {@code
  * InMemoryMultiQueue}.
  *
  * @param context runtime context used to resolve the store by name
  * @throws ReplicatorException if the store is missing or has an unexpected class, which usually
  *     indicates a configuration error
  * @see
  *     com.continuent.tungsten.replicator.plugin.ReplicatorPlugin#prepare(com.continuent.tungsten.replicator.plugin.PluginContext)
  */
 public void prepare(PluginContext context) throws ReplicatorException {
   try {
     queueStore = (InMemoryMultiQueue) context.getStore(storeName);
   } catch (ClassCastException e) {
     // Chain the cast failure as the cause so the stack trace is not lost.
     throw new ReplicatorException(
         "Invalid storage class; configuration may be in error: "
             + context.getStore(storeName).getClass().getName(),
         e);
   }
   if (queueStore == null) {
     throw new ReplicatorException(
         "Unknown storage name; configuration may be in error: " + storeName);
   }
 }
 /**
  * {@inheritDoc}
  *
  * @see
  *     com.continuent.tungsten.replicator.plugin.ReplicatorPlugin#configure(com.continuent.tungsten.replicator.plugin.PluginContext)
  */
 public void configure(PluginContext context) throws ReplicatorException {
   // Default the Tungsten metadata schema from replicator properties when unset.
   if (tungstenSchema == null) {
     String schema = context.getReplicatorProperties().getString(ReplicatorConf.METADATA_SCHEMA);
     tungstenSchema = schema;
   }
   // A definitions file is mandatory; fail fast when it was not configured.
   if (definitionsFile == null) {
     throw new ReplicatorException(
         "definitionsFile property not set - specify a path to JSON file");
   }
 }
  /**
   * {@inheritDoc}
   *
   * @see
   *     com.continuent.tungsten.replicator.plugin.ReplicatorPlugin#prepare(com.continuent.tungsten.replicator.plugin.PluginContext)
   */
  public synchronized void prepare(PluginContext context)
      throws ReplicatorException, InterruptedException {
    // Catalog tables are optional: a missing/blank URL disables them.
    if (url == null || url.trim().isEmpty()) {
      logger.info("SQL catalog tables are disabled");
    } else {
      logger.info("Preparing SQL catalog tables");
      ReplicatorRuntime runtime = (ReplicatorRuntime) context;
      catalog = new CatalogManager(runtime);
      catalog.connect(url, user, password, context.getReplicatorSchemaName(), vendor);
      catalog.prepareSchema();
    }

    // Configure the on-disk transaction log from the store's properties.
    diskLog = new DiskLog();
    diskLog.setDoChecksum(doChecksum);
    diskLog.setEventSerializerClass(eventSerializer);
    diskLog.setLogDir(logDir);
    diskLog.setLogFileSize(logFileSize);
    diskLog.setLogFileRetainMillis(logFileRetainMillis);
    diskLog.setLogConnectionTimeoutMillis(logConnectionTimeout * 1000);
    diskLog.setBufferSize(bufferSize);
    diskLog.setFsyncOnFlush(fsyncOnFlush);
    if (fsyncOnFlush) {
      // The flush interval is only meaningful when fsync is enabled.
      diskLog.setFlushIntervalMillis(flushIntervalMillis);
    }
    diskLog.setReadOnly(readOnly);
    diskLog.prepare();
    logger.info("Log preparation is complete");

    // Only local (non-remote) services accept THL client connections.
    if (!context.isRemoteService()) {
      try {
        server = new Server(context, sequencer, this);
        server.start();
      } catch (IOException e) {
        throw new ReplicatorException("Unable to start THL server", e);
      }
    }
  }