private static void doWork(String task) {
    // Sleep one second for every '.' in the task description.
    for (char ch : task.toCharArray()) {
        if (ch == '.') {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can observe the interruption.
                Thread.currentThread().interrupt();
            }
        }
    }
}
private synchronized void checkConnection() {
    if (this.isConnected()) {
        return;
    }
    // Retry until connected, up to this.tries attempts (or indefinitely if NO_TRIES_LIMIT).
    for (int attempt = 0;
         (this.tries == NO_TRIES_LIMIT || attempt < this.tries) && !this.isConnected();
         ++attempt) {
        if (this.verbose) {
            Logger.getLogger(RabbitMQAutoConnection.class.getName())
                    .info("Attempting to " + ((this.connection != null) ? "re" : "")
                            + "connect to the rabbitmq server.");
        }
        // Pick one of the configured hosts at random for this attempt.
        this.factory.setHost(this.hosts.get(this.random.nextInt(this.hosts.size())));
        try {
            this.connection = this.factory.newConnection();
            // TODO: call newConnection with null, Address[] ?
        } catch (IOException ignored) {
            // Connection failed; wait and try another host.
        }
        try {
            Thread.sleep(this.interval);
        } catch (InterruptedException e) {
            // Stop retrying when interrupted and preserve the interrupt flag.
            Thread.currentThread().interrupt();
            break;
        }
    }
    if (this.isConnected()) {
        if (this.verbose) {
            Logger.getLogger(RabbitMQAutoConnection.class.getName())
                    .info("Connected to the rabbitmq server.");
        }
    } else {
        throw new NoRabbitMQConnectionException();
    }
}
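// The TODO above mentions the newConnection(Address[]) overload of the RabbitMQ Java client.
// Below is a minimal, self-contained sketch of that overload; the host names, the port, and
// the class name are illustrative assumptions, not values from this project. Instead of
// picking a single random host per attempt, the client is handed the whole host list and
// tries the addresses until one broker accepts the connection.
import com.rabbitmq.client.Address;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;

import java.util.Arrays;
import java.util.List;

public class MultiHostConnectSketch {
    public static void main(String[] args) throws Exception {
        List<String> hosts = Arrays.asList("rabbit-1.example.com", "rabbit-2.example.com");
        Address[] addresses = hosts.stream()
                .map(host -> new Address(host, 5672))
                .toArray(Address[]::new);
        ConnectionFactory factory = new ConnectionFactory();
        // newConnection(Address[]) walks the array until a connection succeeds.
        try (Connection connection = factory.newConnection(addresses)) {
            System.out.println("connected to " + connection.getAddress());
        }
    }
}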
@Override
public void close() {
    if (closed) {
        return;
    }
    logger.info("closing rabbitmq river");
    closed = true;
    thread.interrupt();
}
@Override
public void start() {
    connectionFactory = new ConnectionFactory();
    connectionFactory.setUsername(rabbitUser);
    connectionFactory.setPassword(rabbitPassword);
    connectionFactory.setVirtualHost(rabbitVhost);
    connectionFactory.setRequestedHeartbeat((int) rabbitHeartbeat.getSeconds());
    logger.info("creating rabbitmq river, addresses [{}], user [{}], vhost [{}]",
            rabbitAddresses, connectionFactory.getUsername(), connectionFactory.getVirtualHost());
    thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "rabbitmq_river")
            .newThread(new Consumer());
    thread.start();
}
@Override
public void run() {
    while (true) {
        if (closed) {
            break;
        }
        try {
            connection = connectionFactory.newConnection(rabbitAddresses);
            channel = connection.createChannel();
        } catch (Exception e) {
            if (!closed) {
                logger.warn("failed to create a connection / channel", e);
            } else {
                continue;
            }
            cleanup(0, "failed to connect");
            try {
                Thread.sleep(5000);
            } catch (InterruptedException e1) {
                // ignore, if we are closing, we will exit later
            }
            // retry the connection instead of declaring the queue on a dead channel
            continue;
        }

        QueueingConsumer consumer = null;
        // define the queue
        try {
            if (rabbitQueueDeclare) {
                // only declare the queue if we should
                channel.queueDeclare(rabbitQueue /*queue*/,
                        rabbitQueueDurable /*durable*/,
                        false /*exclusive*/,
                        rabbitQueueAutoDelete /*autoDelete*/,
                        rabbitQueueArgs /*extra args*/);
            }
            if (rabbitExchangeDeclare) {
                // only declare the exchange if we should
                channel.exchangeDeclare(rabbitExchange /*exchange*/,
                        rabbitExchangeType /*type*/,
                        rabbitExchangeDurable);
            }
            channel.basicQos(rabbitQosPrefetchSize /*qos_prefetch_size*/,
                    rabbitQosPrefetchCount /*qos_prefetch_count*/,
                    false);
            if (rabbitQueueBind) {
                // only bind queue if we should
                channel.queueBind(rabbitQueue /*queue*/,
                        rabbitExchange /*exchange*/,
                        rabbitRoutingKey /*routingKey*/);
            }
            consumer = new QueueingConsumer(channel);
            channel.basicConsume(rabbitQueue /*queue*/, false /*noAck*/, consumer);
        } catch (Exception e) {
            if (!closed) {
                logger.warn("failed to create queue [{}]", e, rabbitQueue);
            }
            cleanup(0, "failed to create queue");
            continue;
        }

        // now use the queue to listen for messages
        while (true) {
            if (closed) {
                break;
            }
            QueueingConsumer.Delivery task;
            try {
                task = consumer.nextDelivery();
            } catch (Exception e) {
                if (!closed) {
                    logger.error("failed to get next message, reconnecting...", e);
                }
                cleanup(0, "failed to get message");
                break;
            }

            if (task != null && task.getBody() != null) {
                final List<Long> deliveryTags = Lists.newArrayList();

                BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
                try {
                    processBody(task, bulkRequestBuilder);
                } catch (Exception e) {
                    logger.warn("failed to parse request for delivery tag [{}], ack'ing...",
                            e, task.getEnvelope().getDeliveryTag());
                    try {
                        channel.basicAck(task.getEnvelope().getDeliveryTag(), false);
                    } catch (IOException e1) {
                        logger.warn("failed to ack [{}]", e1, task.getEnvelope().getDeliveryTag());
                    }
                    continue;
                }

                deliveryTags.add(task.getEnvelope().getDeliveryTag());

                if (bulkRequestBuilder.numberOfActions() < bulkSize) {
                    // try and spin some more of those without timeout, so we have a bigger bulk
                    // (bounded by the bulk size)
                    try {
                        while ((task = consumer.nextDelivery(bulkTimeout.millis())) != null) {
                            try {
                                processBody(task, bulkRequestBuilder);
                                deliveryTags.add(task.getEnvelope().getDeliveryTag());
                            } catch (Throwable e) {
                                logger.warn("failed to parse request for delivery tag [{}], ack'ing...",
                                        e, task.getEnvelope().getDeliveryTag());
                                try {
                                    channel.basicAck(task.getEnvelope().getDeliveryTag(), false);
                                } catch (Exception e1) {
                                    logger.warn("failed to ack on failure [{}]",
                                            e1, task.getEnvelope().getDeliveryTag());
                                }
                            }
                            if (bulkRequestBuilder.numberOfActions() >= bulkSize) {
                                break;
                            }
                        }
                    } catch (InterruptedException e) {
                        if (closed) {
                            break;
                        }
                    } catch (ShutdownSignalException sse) {
                        logger.warn("Received a shutdown signal! initiatedByApplication: [{}], hard error: [{}]",
                                sse, sse.isInitiatedByApplication(), sse.isHardError());
                        if (!closed && sse.isInitiatedByApplication()) {
                            logger.error("failed to get next message, reconnecting...", sse);
                        }
                        cleanup(0, "failed to get message");
                        break;
                    }
                }

                if (logger.isTraceEnabled()) {
                    logger.trace("executing bulk with [{}] actions", bulkRequestBuilder.numberOfActions());
                }

                // if we have no bulk actions we might have processed custom commands, so ack them
                if (ordered || bulkRequestBuilder.numberOfActions() == 0) {
                    try {
                        if (bulkRequestBuilder.numberOfActions() > 0) {
                            BulkResponse response = bulkRequestBuilder.execute().actionGet();
                            if (response.hasFailures()) {
                                // TODO write to exception queue?
                                logger.warn("failed to execute: " + response.buildFailureMessage());
                            }
                        }
                    } catch (Exception e) {
                        logger.warn("failed to execute bulk", e);
                    }
                    for (Long deliveryTag : deliveryTags) {
                        try {
                            channel.basicAck(deliveryTag, false);
                        } catch (Exception e1) {
                            logger.warn("failed to ack [{}]", e1, deliveryTag);
                        }
                    }
                } else {
                    if (bulkRequestBuilder.numberOfActions() > 0) {
                        bulkRequestBuilder.execute(new ActionListener<BulkResponse>() {
                            @Override
                            public void onResponse(BulkResponse response) {
                                if (response.hasFailures()) {
                                    // TODO write to exception queue?
                                    logger.warn("failed to execute: " + response.buildFailureMessage());
                                }
                                for (Long deliveryTag : deliveryTags) {
                                    try {
                                        channel.basicAck(deliveryTag, false);
                                    } catch (Exception e1) {
                                        logger.warn("failed to ack [{}]", e1, deliveryTag);
                                    }
                                }
                            }

                            @Override
                            public void onFailure(Throwable e) {
                                logger.warn("failed to execute bulk for delivery tags [{}], not ack'ing",
                                        e, deliveryTags);
                            }
                        });
                    }
                }
            }
        }
    }
    cleanup(0, "closing river");
}
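// A minimal, standalone sketch of the batch-drain pattern used in run() above: block for the
// first delivery, then drain further deliveries with a short per-message timeout until the
// batch is full, and finally ack every collected delivery tag. It assumes the older
// amqp-client API in which QueueingConsumer is still available; the queue name, prefetch
// count, batch size, and timeout below are illustrative assumptions.
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.QueueingConsumer;

import java.util.ArrayList;
import java.util.List;

public class BatchDrainSketch {
    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("localhost"); // assumption: a local broker
        try (Connection connection = factory.newConnection()) {
            Channel channel = connection.createChannel();
            channel.queueDeclare("demo_queue", true, false, false, null);
            channel.basicQos(100); // cap the number of unacked deliveries

            QueueingConsumer consumer = new QueueingConsumer(channel);
            channel.basicConsume("demo_queue", false /* manual ack */, consumer);

            List<Long> deliveryTags = new ArrayList<>();
            // Block until the first message of the batch arrives.
            QueueingConsumer.Delivery delivery = consumer.nextDelivery();
            deliveryTags.add(delivery.getEnvelope().getDeliveryTag());
            // Drain more messages, waiting at most 10 ms for each, bounded by a batch of 100.
            while (deliveryTags.size() < 100
                    && (delivery = consumer.nextDelivery(10)) != null) {
                deliveryTags.add(delivery.getEnvelope().getDeliveryTag());
            }
            // Process the batch here, then ack every collected tag.
            for (Long tag : deliveryTags) {
                channel.basicAck(tag, false);
            }
        }
    }
}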
@Override
public synchronized void start() {
    setDaemon(true);
    super.start();
}