  /** @param sinceSCN the SCN to start from; values <= 0 fall back to the persisted maxScn */
  @Override
  public synchronized void start(long sinceSCN) {
    _log.info("Start golden gate evert producer requested.");
    if (_currentState == State.RUNNING) {
      _log.error("Thread already running! ");
      return;
    }
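    // Default to the latest scn in the trail files; overridden below when an
    // explicit sinceSCN or a persisted checkpoint is available.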
    _scn.set(TrailFilePositionSetter.USE_LATEST_SCN);

    if (sinceSCN > 0) {
      _scn.set(sinceSCN);
    } else {
      if (getMaxScnReaderWriter() != null) {
        try {
          long scn = getMaxScnReaderWriter().getMaxScn();

          // If the max scn is greater than 0, then honor it.
          if (scn > 0) {
            // apply the restart SCN offset
            long newScn =
                (scn >= _pConfig.getRestartScnOffset()) ? scn - _pConfig.getRestartScnOffset() : 0;
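            // Worked example (illustrative): a checkpointed scn of 5000 with a
            // restartScnOffset of 100 restarts the relay at scn 4900; if the offset
            // exceeds the checkpoint, newScn clamps to 0 and the USE_LATEST_SCN
            // default set above stands.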
            _log.info(
                "Checkpoint read = "
                    + scn
                    + " restartScnOffset= "
                    + _pConfig.getRestartScnOffset()
                    + " Adjusted SCN= "
                    + newScn);
            if (newScn > 0) {
              _scn.set(newScn);
            }
          } else {
            // If the max scn is < 0, this is a special case used to tell the trail
            // file notifier that we want to override the default behaviour of
            // starting with the latest scn.
            _log.info(
                "Overriding default behaviour (start with latest scn); using scn "
                    + scn
                    + " to start the relay");
            if (scn != TrailFilePositionSetter.USE_EARLIEST_SCN
                && scn != TrailFilePositionSetter.USE_LATEST_SCN) {
              throw new DatabusException(
                  "The scn passed is neither the EARLIEST nor the LATEST setting; cannot proceed with this scn");
            }

            _scn.set(scn);
          }

        } catch (DatabusException e) {
          _log.warn("Could not read saved maxScn: Defaulting to startSCN=" + _scn.get());
        }
      }
    }

    // Spawn the worker thread only if one has not been created yet.
    if (_worker == null) {
      _log.info("Starting with scn = " + _scn.get());
      _worker = new WorkerThread();
      _worker.setDaemon(true);
      _worker.start();
    }
  }
  /**
   * @param pConfig The physical source config for which the event producer is configured.
   * @param schemaRegistryService Schema registry to fetch schemas
   * @param dbusEventBuffer An event buffer to which the producer can write/append events.
   * @param statsCollector Collector for reporting event statistics
   * @param maxScnReaderWriters Reads/writes the maxScn from/to the maxScn file
   * @throws DatabusException
   */
  public GoldenGateEventProducer(
      PhysicalSourceStaticConfig pConfig,
      SchemaRegistryService schemaRegistryService,
      DbusEventBufferAppendable dbusEventBuffer,
      DbusEventsStatisticsCollector statsCollector,
      MaxSCNReaderWriter maxScnReaderWriters)
      throws DatabusException {
    super(dbusEventBuffer, maxScnReaderWriters, pConfig, null);
    _pConfig = pConfig;
    _schemaRegistryService = schemaRegistryService;
    _statsCollector = statsCollector;
    _currentState = State.INIT;
    _partitionFunctionHashMap = new HashMap<Integer, PartitionFunction>();
    _eventsLog = Logger.getLogger("com.linkedin.databus2.producers.db.events." + pConfig.getName());

    if (_pConfig != null) {
      long eventRatePerSec = pConfig.getEventRatePerSec();
      long maxThrottleDurationInSecs = pConfig.getMaxThrottleDurationInSecs();

      if ((eventRatePerSec > 0) && (maxThrottleDurationInSecs > 0)) {
        _rc = new RateControl(eventRatePerSec, maxThrottleDurationInSecs);
      } else {
        // Disable rate control
        _rc = new RateControl(Long.MIN_VALUE, Long.MIN_VALUE);
      }
    }

    final String MODULE = GoldenGateEventProducer.class.getName();
    _log = Logger.getLogger(MODULE + "." + getName());

    // Build the logical source id ==> PartitionFunction map, which supplies the
    // logical partition id at event-creation time, and a map of
    // GGMonitoredSourceInfo objects to monitor GGEventProducer progress.
    for (int i = 0; i < _pConfig.getSources().length; i++) {
      LogicalSourceStaticConfig logicalSourceStaticConfig = _pConfig.getSources()[i];
      GGMonitoredSourceInfo source =
          buildGGMonitoredSourceInfo(logicalSourceStaticConfig, _pConfig);
      _monitoredSources.put(source.getSourceId(), source);
    }

    // get one fake global source for total stats
    LogicalSourceStaticConfig logicalSourceStaticConfig =
        new LogicalSourceStaticConfig(
            GLOBAL_SOURCE_ID,
            _pConfig.getName(),
            "",
            "constant:1",
            (short) 0,
            false,
            null,
            null,
            null);
    GGMonitoredSourceInfo source = buildGGMonitoredSourceInfo(logicalSourceStaticConfig, _pConfig);
    _monitoredSources.put(source.getSourceId(), source);

    // create stats collector for parser
    _ggParserStats = new GGParserStatistics(_pConfig.getName());
    registerParserMbean(_ggParserStats);
  }
  /**
   * Takes an input stream and wraps it with XML tags, using the XML encoding and
   * version specified in the physical sources config.
   *
   * @param compositeInputStream The input stream to be wrapped with the xml tags
   * @return the wrapped stream: XML declaration, root open tag, the original stream,
   *     and root close tag, read in sequence
   */
  private InputStream wrapStreamWithXmlTags(InputStream compositeInputStream) {

    String xmlVersion = _pConfig.getXmlVersion();
    String xmlEncoding = _pConfig.getXmlEncoding();
    String xmlStart =
        "<?xml version=\"" + xmlVersion + "\" encoding=\"" + xmlEncoding + "\"?>\n<root>";
    String xmlEnd = "</root>";
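    // The wrapped stream therefore reads as (illustrative; version and encoding
    // come from the physical source config):
    //   <?xml version="1.0" encoding="UTF-8"?>
    //   <root>...trail file contents...</root>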
    _log.info("The xml start tag used is:" + xmlStart);
    List xmlTagsList =
        Arrays.asList(
            new InputStream[] {
              new ByteArrayInputStream(xmlStart.getBytes(Charset.forName(xmlEncoding))),
              compositeInputStream,
              new ByteArrayInputStream(xmlEnd.getBytes(Charset.forName(xmlEncoding))),
            });
    Enumeration<InputStream> streams = Collections.enumeration(xmlTagsList);
    SequenceInputStream seqStream = new SequenceInputStream(streams);
    return seqStream;
  }
  private void registerParserMbean(GGParserStatisticsMBean parserBean) throws DatabusException {
    try {
      Hashtable<String, String> props = new Hashtable<String, String>();
      props.put("type", "GGParserStatistics");
      props.put("name", _pConfig.getName());
      ObjectName objectName = new ObjectName(ServerContainer.JMX_DOMAIN, props);
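      // Resulting name (illustrative):
      //   <ServerContainer.JMX_DOMAIN>:type=GGParserStatistics,name=<physical source name>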

      if (_mbeanServer.isRegistered(objectName)) {
        _log.warn("Unregistering old ggparser statistics mbean: " + objectName);
        _mbeanServer.unregisterMBean(objectName);
      }

      _mbeanServer.registerMBean(parserBean, objectName);
      _log.info("Registered gg-parser statistics mbean: " + objectName);
      _registeredMbeans.add(objectName);
    } catch (Exception ex) {
      _log.error(
          "Failed to register the GGparser statistics mbean for db = "
              + _pConfig.getName()
              + " due to an exception.",
          ex);
      throw new DatabusException("Failed to initialize GGparser statistics mbean.", ex);
    }
  }
  /**
   * @param dbUpdates The dbUpdates present in the current transaction
   * @param ti The meta information about the transaction. (See TransactionInfo class for more
   *     details).
   * @throws DatabusException
   * @throws UnsupportedKeyException
   */
  protected void addEventToBuffer(
      List<TransactionState.PerSourceTransactionalUpdate> dbUpdates, TransactionInfo ti)
      throws DatabusException, UnsupportedKeyException {
    if (dbUpdates.size() == 0) throw new DatabusException("Cannot handle empty dbUpdates");

    long scn = ti.getScn();
    long timestamp = ti.getTransactionTimeStampNs();
    EventSourceStatistics globalStats = getSource(GLOBAL_SOURCE_ID).getStatisticsBean();

    /**
     * We skip the start scn of the relay because we have already added an EOP for
     * this SCN in the buffer. Why is this not a problem? There are two cases: 1. We
     * use the earliest/latest scn when there is no maxScn (there is no real start
     * point), so it is OK to miss the first event. 2. If it is the maxSCN, the event
     * was already seen by the relay.
     */
    if (scn == _startPrevScn.get()) {
      _log.info("Skipping this transaction, EOP already send for this event");
      return;
    }

    // Begin an event window for this transaction; it is closed below by
    // endEvents(scn, ...) once all updates have been appended.
    getEventBuffer().startEvents();

    int eventsInTransactionCount = 0;

    List<EventReaderSummary> summaries = new ArrayList<EventReaderSummary>();

    for (int i = 0; i < dbUpdates.size(); ++i) {
      GenericRecord record = null;
      TransactionState.PerSourceTransactionalUpdate perSourceUpdate = dbUpdates.get(i);
      short sourceId = (short) perSourceUpdate.getSourceId();
      // prepare stats collection per source
      EventSourceStatistics perSourceStats = getSource(sourceId).getStatisticsBean();

      Iterator<DbUpdateState.DBUpdateImage> dbUpdateIterator =
          perSourceUpdate.getDbUpdatesSet().iterator();
      int eventsInDbUpdate = 0;
      long dbUpdatesEventsSize = 0;
      long startDbUpdatesMs = System.currentTimeMillis();

      // TODO: verify if there is any case where we need to roll back.
      while (dbUpdateIterator.hasNext()) {
        DbUpdateState.DBUpdateImage dbUpdate = dbUpdateIterator.next();

        // Construct the Databus Event key, determine the key type and construct the key
        Object keyObj = obtainKey(dbUpdate);
        DbusEventKey eventKey = new DbusEventKey(keyObj);

        // Get the logical partition id
        PartitionFunction partitionFunction = _partitionFunctionHashMap.get((int) sourceId);
        short lPartitionId = partitionFunction.getPartition(eventKey);

        record = dbUpdate.getGenericRecord();
        // Write the event to the buffer
        if (record == null)
          throw new DatabusException("Cannot write the event to the buffer because the record is null");

        if (record.getSchema() == null)
          throw new DatabusException("The record does not have a schema (null schema)");

        try {
          // Collect stats on number of dbUpdates for one source
          eventsInDbUpdate++;

          // Count of all the events in the current transaction
          eventsInTransactionCount++;
          // Serialize the row
          ByteArrayOutputStream bos = new ByteArrayOutputStream();
          Encoder encoder = new BinaryEncoder(bos);
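          // Note: the BinaryEncoder constructor above is the legacy (Avro 1.4-era)
          // API; newer Avro versions obtain an encoder via
          // EncoderFactory.get().binaryEncoder(bos, null).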
          GenericDatumWriter<GenericRecord> writer =
              new GenericDatumWriter<GenericRecord>(record.getSchema());
          writer.write(record, encoder);
          byte[] serializedValue = bos.toByteArray();

          // Get the md5 for the schema
          SchemaId schemaId = SchemaId.createWithMd5(dbUpdate.getSchema());

          // Determine the operation type and convert to dbus opcode
          DbusOpcode opCode;
          if (dbUpdate.getOpType() == DbUpdateState.DBUpdateImage.OpType.INSERT
              || dbUpdate.getOpType() == DbUpdateState.DBUpdateImage.OpType.UPDATE) {
            opCode = DbusOpcode.UPSERT;
            if (_log.isDebugEnabled())
              _log.debug("The event with scn " + scn + " is INSERT/UPDATE");
          } else if (dbUpdate.getOpType() == DbUpdateState.DBUpdateImage.OpType.DELETE) {
            opCode = DbusOpcode.DELETE;
            if (_log.isDebugEnabled()) _log.debug("The event with scn " + scn + " is DELETE");
          } else {
            throw new DatabusException("Unknown opcode from dbUpdate for event with scn:" + scn);
          }

          // Construct the dbusEvent info
          DbusEventInfo dbusEventInfo =
              new DbusEventInfo(
                  opCode,
                  scn,
                  (short) _pConfig.getId(),
                  lPartitionId,
                  timestamp,
                  sourceId,
                  schemaId.getByteArray(),
                  serializedValue,
                  false,
                  false);
          dbusEventInfo.setReplicated(dbUpdate.isReplicated());
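          // Propagate the replicated flag from the update image (presumably so
          // downstream consumers can distinguish replicated rows).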

          perSourceStats.addEventCycle(1, ti.getTransactionTimeRead(), serializedValue.length, scn);
          globalStats.addEventCycle(1, ti.getTransactionTimeRead(), serializedValue.length, scn);

          long tsEnd = System.currentTimeMillis();
          perSourceStats.addTimeOfLastDBAccess(tsEnd);
          globalStats.addTimeOfLastDBAccess(tsEnd);

          // Append to the event buffer
          getEventBuffer().appendEvent(eventKey, dbusEventInfo, _statsCollector);
          _rc.incrementEventCount();
          dbUpdatesEventsSize += serializedValue.length;
        } catch (IOException io) {
          perSourceStats.addError();
          globalStats.addEmptyEventCycle();
          _log.error("Cannot create byte stream payload: " + dbUpdates.get(i).getSourceId());
        }
      }
      long endDbUpdatesMs = System.currentTimeMillis();
      long dbUpdatesElapsedTimeMs = endDbUpdatesMs - startDbUpdatesMs;

      // Log Event Summary at logical source level
      EventReaderSummary summary =
          new EventReaderSummary(
              sourceId,
              _monitoredSources.get(sourceId).getSourceName(),
              scn,
              eventsInDbUpdate,
              dbUpdatesEventsSize,
              -1L /* Not supported */,
              dbUpdatesElapsedTimeMs,
              timestamp,
              timestamp,
              -1L /* Not supported */);
      if (_eventsLog.isInfoEnabled()) {
        _eventsLog.info(summary.toString());
      }
      summaries.add(summary);

      if (_log.isDebugEnabled())
        _log.debug("There are " + eventsInDbUpdate + " events seen in the current dbUpdate");
    }

    // update stats
    _ggParserStats.addTransactionInfo(ti, eventsInTransactionCount);

    // Log Event Summary at Physical source level
    ReadEventCycleSummary summary =
        new ReadEventCycleSummary(
            _pConfig.getName(),
            summaries,
            scn,
            -1 /* Overall time including query time not calculated */);

    if (_eventsLog.isInfoEnabled()) {
      _eventsLog.info(summary.toString());
    }

    _log.info("Writing " + eventsInTransactionCount + " events from transaction with scn: " + scn);
    if (scn <= 0)
      throw new DatabusException(
          "Unable to write events to buffer because of negative/zero scn: " + scn);

    // Close the event window, which writes the end-of-period (EOP) marker for this scn.
    getEventBuffer().endEvents(scn, _statsCollector);
    _scn.set(scn);

    if (getMaxScnReaderWriter() != null) {
      try {
        getMaxScnReaderWriter().saveMaxScn(_scn.get());
      } catch (DatabusException e) {
        _log.error("Cannot save scn = " + _scn + " for physical source = " + getName(), e);
      }
    }
  }
  /** Returns the name of the source for which this relay is configured. */
  @Override
  public String getName() {
    return (_pConfig != null) ? _pConfig.getName() : "NONE";
  }