@Override
 protected void doShutdown() throws Exception {
   // Tear down our private worker pool (when one was created) before
   // delegating the remainder of the shutdown to the superclass.
   final boolean hasPool = workerPool != null;
   if (hasPool) {
     workerPool.shutdown();
   }
   super.doShutdown();
 }
Example #2
0
 /*
  * Starts this component: runs the superclass start, then announces at INFO
  * that the Kafka producer component is up.
  *
  * NOTE(review): the original @see referenced
  * org.apache.camel.impl.DefaultConsumer#doStart(), but the log message says
  * "Kafka Producer Component" — looks like a copy/paste slip; confirm the
  * actual superclass and fix the reference accordingly.
  */
 @Override
 protected void doStart() throws Exception {
   super.doStart();
   // Guard is redundant for a constant message, but kept as written.
   if (LOG.isInfoEnabled()) {
     LOG.info("Kafka Producer Component started");
   }
 }
 /**
  * Starts the endpoint, opening the connection eagerly unless lazy session
  * creation is enabled (in which case the connection is deferred to first use).
  */
 @Override
 protected void doStart() throws Exception {
   super.doStart();
   final boolean connectEagerly = !lazySessionCreation;
   if (connectEagerly) {
     openConnection();
   }
 }
Example #4
0
  /**
   * Starts the Nats producer: completes the superclass start, then obtains the
   * Nats connection and stores it in the {@code connection} field.
   */
  @Override
  protected void doStart() throws Exception {
    super.doStart();
    LOG.debug("Starting Nats Producer");

    LOG.debug("Getting Nats Connection");
    // Connection is acquired here so it is ready before any exchange arrives.
    connection = getConnection();
  }
 /**
  * Stops the endpoint: logs the connector/address pair, closes the underlying
  * connection, and finally runs the superclass stop.
  */
 @Override
 protected void doStop() throws Exception {
   // Guard kept so the varargs debug call is skipped entirely when DEBUG is off.
   if (LOG.isDebugEnabled()) {
     LOG.debug("Stopping connector: {} at address: {}", connector, address);
   }
   closeConnection();
   super.doStop();
 }
  /**
   * Closes the client (when one exists) and then performs the superclass stop.
   *
   * NOTE(review): if {@code client.close()} throws, {@code super.doStop()} is
   * skipped — confirm whether the base stop should run in a finally block.
   */
  @Override
  protected void doStop() throws Exception {
    if (client != null) {
      client.close();
    }

    super.doStop();
  }
Example #7
0
 /**
  * Stops the producer: base stop first, then releases the idle-check scheduler
  * via the Camel executor-service manager and closes the HDFS output stream.
  * Each field is nulled after release so a subsequent start re-creates it.
  */
 @Override
 protected void doStop() throws Exception {
   super.doStop();
   if (scheduler != null) {
     getEndpoint().getCamelContext().getExecutorServiceManager().shutdown(scheduler);
     scheduler = null;
   }
   if (ostream != null) {
     // IOHelper.close logs (rather than propagates) any close failure.
     IOHelper.close(ostream, "output stream", log);
     ostream = null;
   }
 }
Example #8
0
  /**
   * Stops the Nats producer. After the superclass stop completes, the Nats
   * connection is optionally flushed (when the endpoint configuration asks for
   * it) and then closed.
   */
  @Override
  protected void doStop() throws Exception {
    super.doStop();
    LOG.debug("Stopping Nats Producer");

    LOG.debug("Closing Nats Connection");
    // Nothing to do when the connection was never opened or is already closed.
    if (connection == null || connection.isClosed()) {
      return;
    }
    if (getEndpoint().getNatsConfiguration().isFlushConnection()) {
      LOG.debug("Flushing Nats Connection");
      connection.flush(getEndpoint().getNatsConfiguration().getFlushTimeout());
    }
    connection.close();
  }
Example #9
0
  /**
   * Starts the producer. The JAAS configuration is captured up front and
   * restored in a finally block because Hadoop overwrites it during startup,
   * which would otherwise leave authentication broken afterwards.
   */
  @Override
  protected void doStart() throws Exception {
    Configuration auth = HdfsComponent.getJAASConfiguration();
    try {
      super.doStart();

      // Optionally establish the HDFS connection eagerly at startup.
      if (getEndpoint().getConfig().isConnectOnStartup()) {
        ostream = setupHdfs(true);
      }

      // An IDLE split strategy (when configured) drives a periodic idle check.
      SplitStrategy idleStrategy =
          config.getSplitStrategies().stream()
              .filter(strategy -> strategy.type == SplitStrategyType.IDLE)
              .findFirst()
              .orElse(null);
      if (idleStrategy != null) {
        scheduler =
            getEndpoint()
                .getCamelContext()
                .getExecutorServiceManager()
                .newSingleThreadScheduledExecutor(this, "HdfsIdleCheck");
        log.debug(
            "Creating IdleCheck task scheduled to run every {} millis",
            config.getCheckIdleInterval());
        scheduler.scheduleAtFixedRate(
            new IdleCheck(idleStrategy),
            config.getCheckIdleInterval(),
            config.getCheckIdleInterval(),
            TimeUnit.MILLISECONDS);
      }
    } finally {
      // Always restore the remembered JAAS configuration, even on failure.
      HdfsComponent.setJAASConfiguration(auth);
    }
  }
 /** Stops the managed lock services, then lets the superclass finish the stop. */
 @Override
 protected void doStop() throws Exception {
   ServiceHelper.stopService(locks);
   super.doStop();
 }
 /** Runs the superclass start, then brings the managed lock services up. */
 @Override
 protected void doStart() throws Exception {
   super.doStart();
   ServiceHelper.startService(locks);
 }