@Test
public void testMeterDisabled() {
  // Create a meter with id "testMeterDisabled", register it with the metric registry,
  // and bump its count to 1000.
  Meter m = MetricsConfigurator.createMeter(metrics, "testMeterDisabled", PIPELINE_NAME, REVISION);
  m.mark(1000);

  MetricsRuleDefinition metricsRuleDefinition =
      new MetricsRuleDefinition(
          "testMeterDisabled",
          "testMeterDisabled",
          "testMeterDisabled",
          MetricType.METER,
          MetricElement.METER_COUNT,
          "${value()>100}",
          false,
          false);
  MetricRuleEvaluator metricRuleEvaluator =
      new MetricRuleEvaluator(
          metricsRuleDefinition,
          metrics,
          new AlertManager(PIPELINE_NAME, REVISION, null, metrics, runtimeInfo, new EventListenerManager()),
          Collections.<String>emptyList());
  metricRuleEvaluator.checkForAlerts();

  // The rule is disabled, so no alert gauge should have been registered.
  Gauge<Object> gauge =
      MetricsConfigurator.getGauge(metrics, AlertsUtil.getAlertGaugeName(metricsRuleDefinition.getId()));
  Assert.assertNull(gauge);
}
private void markMeterForStatusCode(int status) {
  final Meter metric = metersByStatusCode.get(status);
  if (metric != null) {
    metric.mark();
  } else {
    otherMeter.mark();
  }
}
@Override
public String call() {
  Meter jobProcessRate = metrics.getMetrics().meter(Constants.JOBS_PROCESS_RATE);
  JedisPool pool = null;
  Jedis jedis = null;
  try {
    pool = redis.getRedisConnectionPool();
    jedis = pool.getResource();
    // Block until a job is available on the queue (timeout 0 = wait forever).
    List<String> payload = jedis.brpop(0, Constants.JOBS_QUEUE);
    // TODO perform some processing on the payload here

    // Mark processing of the job in the metrics library;
    // these metrics are aggregated and sent to graphite.
    jobProcessRate.mark();
  } catch (JedisConnectionException e) {
    // The connection is in an unrecoverable state; jedis.close() in the finally
    // block below returns the (possibly broken) resource to the pool.
  } finally {
    if (null != jedis) {
      jedis.close();
    }
  }
  return "Done processing Callable Task ";
}
private void assertMetricCount(String sourceId, String metricName, int expectedCount) {
  String key = sourceId + "." + metricName;
  SourceMetric sourceMetric = sourceMetrics.metrics.get(key);
  if (sourceMetric.isHistogram()) {
    Histogram histogram = (Histogram) sourceMetric.getMetric();
    assertThat(histogram.getCount(), is((long) expectedCount));
  } else {
    Meter meter = (Meter) sourceMetric.getMetric();
    assertThat(meter.getCount(), is((long) expectedCount));
  }
}
private void updateFailedMeter(String topic) {
  failedMeterGlobal.mark();
  if (!failedMeterByTopic.containsKey(topic)) {
    Meter failedMeter =
        HermesMetricsRegistry.getMetricRegistry()
            .meter(
                MetricRegistry.name(
                    SubscriptionPushService.class, "SubscriptionPushService", topic, "Failed"));
    failedMeterByTopic.put(topic, failedMeter);
  }
  failedMeterByTopic.get(topic).mark();
}
private void updateRequestMeter(String topic) {
  requestMeterGlobal.mark();
  if (!requestMeterByTopic.containsKey(topic)) {
    Meter requestMeter =
        HermesMetricsRegistry.getMetricRegistry()
            .meter(
                MetricRegistry.name(
                    SubscriptionPushService.class, "SubscriptionPushService", topic, "Request"));
    requestMeterByTopic.put(topic, requestMeter);
  }
  requestMeterByTopic.get(topic).mark();
}
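The two updaters above share a check-then-put pattern that races under concurrent calls (two threads can both miss the containsKey check). A minimal thread-safe sketch of the same idea, consolidating both methods; the metersByTopic field and markTopicMeter helper are hypothetical names, not from the source, and MetricRegistry.meter(...) is itself idempotent, so the only state that needs protecting is the local map:

// Hypothetical sketch; requires java.util.concurrent.{ConcurrentMap, ConcurrentHashMap}.
private final ConcurrentMap<String, Meter> metersByTopic = new ConcurrentHashMap<>();

private void markTopicMeter(String topic, String suffix) {
  metersByTopic
      .computeIfAbsent(
          topic + "/" + suffix, // cache key combining topic and meter kind
          k ->
              HermesMetricsRegistry.getMetricRegistry()
                  .meter(
                      MetricRegistry.name(
                          SubscriptionPushService.class, "SubscriptionPushService", topic, suffix)))
      .mark();
}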
@Override
public Long call() throws Exception {
  GraphManager gm = factory.createEdgeManager(scope);

  while (true) {
    // Do a read to eventually trigger our group compaction. Take 2 pages of columns.
    final long returnedEdgeCount =
        generator
            .doSearch(gm)
            .doOnNext(edge -> readMeter.mark())
            .countLong()
            .toBlocking()
            .last();

    logger.info("Completed reading {} edges", returnedEdgeCount);

    if (writeCount != returnedEdgeCount) {
      logger.warn(
          "Unexpected edge count returned!!! Expected {} but was {}", writeCount, returnedEdgeCount);
    }

    assertEquals("Expected to read same edge count", writeCount, returnedEdgeCount);
  }
}
S3ObjectSummary findAndQueueObjects(AmazonS3Source.S3Offset s3offset, boolean checkCurrent)
    throws AmazonClientException {
  List<S3ObjectSummary> s3ObjectSummaries;
  ObjectOrdering objectOrdering = s3ConfigBean.s3FileConfig.objectOrdering;
  switch (objectOrdering) {
    case TIMESTAMP:
      s3ObjectSummaries =
          AmazonS3Util.listObjectsChronologically(
              s3Client, s3ConfigBean, pathMatcher, s3offset, objectQueue.remainingCapacity());
      break;
    case LEXICOGRAPHICAL:
      s3ObjectSummaries =
          AmazonS3Util.listObjectsLexicographically(
              s3Client, s3ConfigBean, pathMatcher, s3offset, objectQueue.remainingCapacity());
      break;
    default:
      throw new IllegalArgumentException("Unknown ordering: " + objectOrdering.getLabel());
  }
  for (S3ObjectSummary objectSummary : s3ObjectSummaries) {
    addObjectToQueue(objectSummary, checkCurrent);
  }
  spoolQueueMeter.mark(objectQueue.size());
  LOG.debug("Found '{}' files", objectQueue.size());
  return s3ObjectSummaries.isEmpty() ? null : s3ObjectSummaries.get(s3ObjectSummaries.size() - 1);
}
public static void updateDataRuleMeter(
    MetricRegistry metrics,
    DataRuleDefinition dataRuleDefinition,
    long matchingCount,
    String pipelineName,
    String revision) {
  if (dataRuleDefinition.isMeterEnabled() && matchingCount > 0) {
    Meter meter = MetricsConfigurator.getMeter(metrics, USER_PREFIX + dataRuleDefinition.getId());
    if (meter == null) {
      meter =
          MetricsConfigurator.createMeter(
              metrics, USER_PREFIX + dataRuleDefinition.getId(), pipelineName, revision);
    }
    meter.mark(matchingCount);
  }
}
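With a stock Dropwizard MetricRegistry, without the MetricsConfigurator wrapper used above, the get-or-create dance collapses into one call, because MetricRegistry.meter(name) atomically returns the existing meter or registers a new one. A minimal sketch under that assumption:

// Sketch: equivalent get-or-create using plain Dropwizard Metrics.
public static void updateDataRuleMeter(
    MetricRegistry metrics, DataRuleDefinition dataRuleDefinition, long matchingCount) {
  if (dataRuleDefinition.isMeterEnabled() && matchingCount > 0) {
    // meter(name) is idempotent: it returns the registered meter or creates one.
    metrics.meter(USER_PREFIX + dataRuleDefinition.getId()).mark(matchingCount);
  }
}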
@Test
public void testMeter() {
  final Meter meter = registry.meter(name("foo", "bar"));
  meter.mark(10);
  meter.mark(20);
  reportAndRefresh();

  SearchResponse searchResponse =
      client().prepareSearch(indexWithDate).setTypes("meter").execute().actionGet();
  org.assertj.core.api.Assertions.assertThat(searchResponse.getHits().totalHits()).isEqualTo(1L);

  Map<String, Object> hit = searchResponse.getHits().getAt(0).sourceAsMap();
  assertTimestamp(hit);
  assertKey(hit, "name", prefix + ".foo.bar");
  assertKey(hit, "count", 30);
  assertKey(hit, "host", "localhost");
}
protected Map<String, Object> buildMeterMap(Meter m) {
  Map<String, Object> metrics = Maps.newHashMap();
  if (m == null) {
    return metrics;
  }

  Map<String, Object> rate = Maps.newHashMap();
  rate.put("one_minute", m.getOneMinuteRate());
  rate.put("five_minute", m.getFiveMinuteRate());
  rate.put("fifteen_minute", m.getFifteenMinuteRate());
  rate.put("total", m.getCount());
  rate.put("mean", m.getMeanRate());

  metrics.put("rate_unit", "events/second");
  metrics.put("rate", rate);
  return metrics;
}
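A quick usage sketch of buildMeterMap; the registry variable and the "requests" metric name are illustrative only:

// Illustrative call showing the shape of the returned map.
MetricRegistry registry = new MetricRegistry();
Meter requests = registry.meter("requests");
requests.mark(5);

Map<String, Object> meterMap = buildMeterMap(requests);
// meterMap contains "rate_unit" -> "events/second" and a nested "rate" map
// whose "total" entry is 5 (the decaying rates depend on timing, so they are
// not asserted here).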
/** {@inheritDoc} */
@Override
protected final void handleMessages(List<T> messages) {
  long start = System.nanoTime();

  // Create a batch of the messages that we want to write.
  List<RegularStatement> statements = new ArrayList<>();
  for (T t : messages) {
    try {
      handleMessage(statements, t);
    } catch (RuntimeException e) {
      LOG.warn("Failed to write message: " + t, e); // Just in case we cannot process a message
    }
  }

  // Try writing the batch
  try {
    Batch batch = QueryBuilder.batch(statements.toArray(new RegularStatement[statements.size()]));
    long beforeSend = System.nanoTime();
    ResultSetFuture f = connection.getSession().executeAsync(batch);
    f.getUninterruptibly(); // throws QueryValidationException etc.
    long total = System.nanoTime();

    // Is this an abnormally slow batch?
    boolean isSlow =
        TimeUnit.MILLISECONDS.convert(total - start, TimeUnit.NANOSECONDS) > 200
            || messages.size() >= getBatchSize();
    if (isSlow || lastSlowBatch > 0) {
      LOG.info(
          "Total time: "
              + DurationFormatter.DEFAULT.formatNanos(total - start)
              + ", prepping="
              + DurationFormatter.DEFAULT.formatNanos(beforeSend - start)
              + ", sending="
              + DurationFormatter.DEFAULT.formatNanos(total - beforeSend)
              + ", size="
              + messages.size());
      // Make sure we write 10 info statements after the last slow batch we insert.
      lastSlowBatch = isSlow ? 10 : lastSlowBatch - 1;
    }
    persistedCount.mark(messages.size());
    // sink.onSucces(messages);
  } catch (QueryValidationException e) {
    LOG.error("Could not execute query, this is an internal error", e);
  } catch (Exception e) {
    onFailure(messages, e);
    try {
      sleepUntilShutdown(2, TimeUnit.SECONDS);
    } catch (InterruptedException ignore) {
      Thread.interrupted();
    }
  }
}
/** Tests GetAllMetrics method. */
@Test
public void testGetAllMetrics() {
  Counter onosCounter = new Counter();
  onosCounter.inc();

  Meter onosMeter = new Meter();
  onosMeter.mark();

  Timer onosTimer = new Timer();
  onosTimer.update(1, TimeUnit.MILLISECONDS);

  ImmutableMap<String, Metric> metrics =
      new ImmutableMap.Builder<String, Metric>()
          .put("onosCounter", onosCounter)
          .put("onosMeter", onosMeter)
          .put("onosTimer", onosTimer)
          .build();

  expect(mockMetricsService.getMetrics()).andReturn(metrics).anyTimes();
  replay(mockMetricsService);

  WebTarget wt = target();
  String response = wt.path("metrics").request().get(String.class);
  assertThat(response, containsString("{\"metrics\":["));

  JsonObject result = Json.parse(response).asObject();
  assertThat(result, notNullValue());

  JsonArray jsonMetrics = result.get("metrics").asArray();
  assertThat(jsonMetrics, notNullValue());
  assertThat(jsonMetrics.size(), is(3));

  assertTrue(matchesMetric(metrics.get("onosCounter")).matchesSafely(jsonMetrics.get(0).asObject()));
  assertTrue(matchesMetric(metrics.get("onosMeter")).matchesSafely(jsonMetrics.get(1).asObject()));
  assertTrue(matchesMetric(metrics.get("onosTimer")).matchesSafely(jsonMetrics.get(2).asObject()));
}
@Test
public void testMeter() {
  System.out.println("******************************* METER *******************************");
  meter = registry.meter("meter");
  try {
    for (int i = 0; i < ITER_COUNT; i++) {
      meter.mark();
      Thread.sleep(SLEEP_MS);
    }
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
  }
}
private Status processOperation(Tx tx, Op op) {
  Timer.Context timer = operationProcessingTimer.time();
  Status status = Status.OK;
  try {
    encodeAndSend(tx, op);
  } catch (RuntimeException re) {
    operationProcessingErrorMeter.mark();
    log.error("Error processing operation: " + op.toString(), re);
    status = Status.ABEND;
  }
  timer.stop();
  return status;
}
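The timer above stops only on the normal path or when a RuntimeException is caught; anything else thrown by encodeAndSend would skip timer.stop(). A minimal variant, same names as above, that always stops the timer via try/finally:

// Sketch: guarantee the timer context is closed on every exit path.
private Status processOperation(Tx tx, Op op) {
  final Timer.Context timer = operationProcessingTimer.time();
  try {
    encodeAndSend(tx, op);
    return Status.OK;
  } catch (RuntimeException re) {
    operationProcessingErrorMeter.mark();
    log.error("Error processing operation: " + op, re);
    return Status.ABEND;
  } finally {
    timer.stop(); // runs even if encodeAndSend throws an Error or checked Throwable
  }
}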
/**
 * Check to see if a plaintext input matches a hash.
 *
 * @param input the input
 * @param hashed the hash
 * @return <code>true</code> if it matches, <code>false</code> if not
 */
public static boolean matches(final String input, final String hashed) {
  checkNotNull(hashed, "Cannot compare NULL");
  LOOKUPS.mark();
  final Timer.Context context = GETS.time();
  try {
    boolean result = false;
    try {
      result = CACHE.get(new TwoTuple<>(input, hashed));
    } catch (ExecutionException e) {
      LOGGER.error("Failed to hash input password", e);
    }
    return result;
  } finally {
    context.stop();
  }
}
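One plausible way the LOOKUPS meter and GETS timer used above could be declared; the registry field and the metric names are assumptions, not taken from the source:

// Hypothetical metric declarations for the matches(...) helper (sketch only).
private static final MetricRegistry REGISTRY = new MetricRegistry();
private static final Meter LOOKUPS =
    REGISTRY.meter(MetricRegistry.name("passwords", "lookups"));
private static final Timer GETS =
    REGISTRY.timer(MetricRegistry.name("passwords", "gets"));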
@Override
public ThrowingConsumer<Context> select(final Context ctx) throws OpProcessorException {
  final RequestMessage message = ctx.getRequestMessage();
  if (logger.isDebugEnabled()) {
    logger.debug("Selecting processor for RequestMessage {}", message);
  }

  final ThrowingConsumer<Context> op;
  switch (message.getOp()) {
    case Tokens.OPS_VERSION:
      op = ControlOps::versionOp;
      break;
    case Tokens.OPS_IMPORT:
      op = validateImportMessage(message).orElse(ControlOps::importOp);
      break;
    case Tokens.OPS_RESET:
      op = ControlOps::resetOp;
      break;
    case Tokens.OPS_SHOW:
      op = validateShowMessage(message).orElse(ControlOps::showOp);
      break;
    case Tokens.OPS_USE:
      op = validateUseMessage(message).orElse(ControlOps::useOp);
      break;
    case Tokens.OPS_INVALID:
      final String msgInvalid =
          String.format("Message could not be parsed. Check the format of the request. [%s]", message);
      throw new OpProcessorException(
          msgInvalid,
          ResponseMessage.build(message)
              .code(ResultCode.REQUEST_ERROR_MALFORMED_REQUEST)
              .result(msgInvalid)
              .create());
    default:
      final String msgDefault =
          String.format("Message with op code [%s] is not recognized.", message.getOp());
      throw new OpProcessorException(
          msgDefault,
          ResponseMessage.build(message)
              .code(ResultCode.REQUEST_ERROR_MALFORMED_REQUEST)
              .result(msgDefault)
              .create());
  }

  controlOpMeter.mark();
  return op;
}
@Override
public void handle(
    String path, Request request, HttpServletRequest httpRequest, HttpServletResponse httpResponse)
    throws IOException, ServletException {
  activeDispatches.inc();

  final long start;
  final HttpChannelState state = request.getHttpChannelState();
  if (state.isInitial()) {
    // new request
    activeRequests.inc();
    start = request.getTimeStamp();
  } else {
    // resumed request
    start = System.currentTimeMillis();
    activeSuspended.dec();
    if (state.getState() == State.DISPATCHED) {
      asyncDispatches.mark();
    }
  }

  try {
    super.handle(path, request, httpRequest, httpResponse);
  } finally {
    final long now = System.currentTimeMillis();
    final long dispatched = now - start;

    activeDispatches.dec();
    dispatches.update(dispatched, TimeUnit.MILLISECONDS);

    if (state.isSuspended()) {
      if (state.isInitial()) {
        state.addListener(listener);
      }
      activeSuspended.inc();
    } else if (state.isInitial()) {
      requests.update(dispatched, TimeUnit.MILLISECONDS);
      updateResponses(request);
    }
    // else onCompletion will handle it.
  }
}
void addObjectToQueue(S3ObjectSummary objectSummary, boolean checkCurrent) {
  Preconditions.checkNotNull(objectSummary, "file cannot be null");
  if (checkCurrent) {
    Preconditions.checkState(
        currentObject == null
            || currentObject.getLastModified().compareTo(objectSummary.getLastModified()) < 0);
  }
  if (!objectQueue.contains(objectSummary)) {
    if (objectQueue.size() >= MAX_SPOOL_SIZE) {
      LOG.warn("Exceeded the max spool size, '{}' files currently queued", objectQueue.size());
    }
    objectQueue.add(objectSummary);
    spoolQueueMeter.mark(objectQueue.size());
  } else {
    LOG.warn("Object '{}' already in queue, ignoring", objectSummary.getKey());
  }
}
@Override
public ServiceResults postCollection(ServiceContext context) throws Exception {
  logger.info("NotificationService: start request.");
  Timer.Context timer = postTimer.time();
  postMeter.mark();
  try {
    validate(null, context.getPayload());
    Notification.PathTokens pathTokens = getPathTokens(context.getRequest().getOriginalParameters());
    context.getProperties().put("state", Notification.State.CREATED);
    context.getProperties().put("pathQuery", pathTokens);
    context.setOwner(sm.getApplication());

    ServiceResults results = super.postCollection(context);
    Notification notification = (Notification) results.getEntity();

    // update Notification properties
    if (notification.getStarted() == null || notification.getStarted() == 0) {
      long now = System.currentTimeMillis();
      notification.setStarted(System.currentTimeMillis());
      Map<String, Object> properties = new HashMap<String, Object>(2);
      properties.put("started", notification.getStarted());
      properties.put("state", notification.getState());
      notification.addProperties(properties);
      logger.info(
          "ApplicationQueueMessage: notification {} properties updated in duration {} ms",
          notification.getUuid(),
          System.currentTimeMillis() - now);
    }

    long now = System.currentTimeMillis();
    notificationQueueManager.queueNotification(notification, null);
    logger.info(
        "NotificationService: notification {} post queue duration {} ms ",
        notification.getUuid(),
        System.currentTimeMillis() - now);

    // future: somehow return 202?
    return results;
  } catch (Exception e) {
    logger.error("serialization failed", e);
    throw e;
  } finally {
    timer.stop();
  }
}
private void collectMeterReports(List<DBObject> docs, SortedMap<String, Meter> meters, Date timestamp) {
  if (meters.isEmpty()) {
    return;
  }
  for (Map.Entry<String, Meter> entry : meters.entrySet()) {
    final BasicDBObject report = getBasicDBObject(timestamp, entry.getKey(), "meter");
    final Meter v = entry.getValue();
    report.put("count", v.getCount());
    report.put("1-minute-rate", v.getOneMinuteRate());
    report.put("5-minute-rate", v.getFiveMinuteRate());
    report.put("15-minute-rate", v.getFifteenMinuteRate());
    report.put("mean-rate", v.getMeanRate());
    docs.add(report);
  }
}
@Override
public void append(Event event) throws IOException {
  meter.mark();
  subSink.append(event);
}
@Override
protected void peek(T tuple) {
  meter.mark();
}
@SuppressWarnings("unused") public void timeMark(int reps) { for (int i = 0; i < reps; i++) { meter.mark(i); } }
@Override
public boolean matchesSafely(JsonObject jsonObject) {
  JsonObject jsonMetric = jsonObject.get("metric").asObject();
  JsonObject jsonCounter;
  JsonObject jsonMeter;
  JsonObject jsonTimer;
  Counter counter;
  Meter meter;
  Timer timer;

  // check counter metric
  if (jsonMetric.get("counter") != null) {
    jsonCounter = jsonMetric.get("counter").asObject();
    counter = (Counter) metric;
    if (jsonCounter.get("counter").asLong() != counter.getCount()) {
      reason = "counter " + counter.getCount();
      return false;
    }
  }

  // check meter metric
  if (jsonMetric.get("meter") != null) {
    jsonMeter = jsonMetric.get("meter").asObject();
    meter = (Meter) metric;
    if (jsonMeter.get("counter").asLong() != meter.getCount()) {
      reason = "counter " + meter.getCount();
      return false;
    }
    if (jsonMeter.get("1_min_rate").asDouble() != meter.getOneMinuteRate()) {
      reason = "1 minute rate " + meter.getOneMinuteRate();
      return false;
    }
    if (jsonMeter.get("5_min_rate").asDouble() != meter.getFiveMinuteRate()) {
      reason = "5 minute rate " + meter.getFiveMinuteRate();
      return false;
    }
    if (jsonMeter.get("15_min_rate").asDouble() != meter.getFifteenMinuteRate()) {
      reason = "15 minute rate " + meter.getFifteenMinuteRate();
      return false;
    }
  }

  // check timer metric
  if (jsonMetric.get("timer") != null) {
    jsonTimer = jsonMetric.get("timer").asObject();
    timer = (Timer) metric;
    if (jsonTimer.get("counter").asLong() != timer.getCount()) {
      reason = "counter " + timer.getCount();
      return false;
    }
    if (jsonTimer.get("1_min_rate").asDouble() != timer.getOneMinuteRate()) {
      reason = "1 minute rate " + timer.getOneMinuteRate();
      return false;
    }
    if (jsonTimer.get("5_min_rate").asDouble() != timer.getFiveMinuteRate()) {
      reason = "5 minute rate " + timer.getFiveMinuteRate();
      return false;
    }
    if (jsonTimer.get("15_min_rate").asDouble() != timer.getFifteenMinuteRate()) {
      reason = "15 minute rate " + timer.getFifteenMinuteRate();
      return false;
    }
    if (jsonTimer.get("mean").asDouble() != nanoToMs(timer.getSnapshot().getMean())) {
      reason = "mean " + timer.getSnapshot().getMean();
      return false;
    }
    if (jsonTimer.get("min").asDouble() != nanoToMs(timer.getSnapshot().getMin())) {
      reason = "min " + timer.getSnapshot().getMin();
      return false;
    }
    if (jsonTimer.get("max").asDouble() != nanoToMs(timer.getSnapshot().getMax())) {
      reason = "max " + timer.getSnapshot().getMax();
      return false;
    }
    if (jsonTimer.get("stddev").asDouble() != nanoToMs(timer.getSnapshot().getStdDev())) {
      reason = "stddev " + timer.getSnapshot().getStdDev();
      return false;
    }
  }
  return true;
}
@Override
public void recordConnectionTimeout() {
  connectionTimeoutMeter.mark();
}
public Map<String, Object> toElasticSearchObject(@Nonnull final Meter invalidTimestampMeter) {
  final Map<String, Object> obj =
      Maps.newHashMapWithExpectedSize(REQUIRED_FIELDS.size() + fields.size());

  for (Map.Entry<String, Object> entry : fields.entrySet()) {
    final String key = entry.getKey();

    // Elasticsearch does not allow "." characters in keys since version 2.0.
    // See: https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking_20_mapping_changes.html#_field_names_may_not_contain_dots
    if (key != null && key.contains(".")) {
      final String newKey = key.replace('.', KEY_REPLACEMENT_CHAR);

      // If the message already contains the transformed key, we skip the field and emit a
      // warning. This is still not optimal but better than implementing expensive logic
      // with multiple replacement character options. Conflicts should be rare...
      if (!obj.containsKey(newKey)) {
        obj.put(newKey, entry.getValue());
      } else {
        LOG.warn(
            "Keys must not contain a \".\" character! Ignoring field \"{}\"=\"{}\" in message [{}] - Unable to replace \".\" with a \"{}\" because of key conflict: \"{}\"=\"{}\"",
            key, entry.getValue(), getId(), KEY_REPLACEMENT_CHAR, newKey, obj.get(newKey));
        LOG.debug("Full message with \".\" in message key: {}", this);
      }
    } else {
      if (key != null && obj.containsKey(key)) {
        final String newKey = key.replace(KEY_REPLACEMENT_CHAR, '.');
        // Deliberate warning duplicates because the key with the "." might be transformed
        // before reaching the duplicate original key with a "_". Otherwise we would silently
        // overwrite the transformed key.
        LOG.warn(
            "Keys must not contain a \".\" character! Ignoring field \"{}\"=\"{}\" in message [{}] - Unable to replace \".\" with a \"{}\" because of key conflict: \"{}\"=\"{}\"",
            newKey, fields.get(newKey), getId(), KEY_REPLACEMENT_CHAR, key, entry.getValue());
        LOG.debug("Full message with \".\" in message key: {}", this);
      }
      obj.put(key, entry.getValue());
    }
  }

  obj.put(FIELD_MESSAGE, getMessage());
  obj.put(FIELD_SOURCE, getSource());

  final Object timestampValue = getField(FIELD_TIMESTAMP);
  DateTime dateTime;
  if (timestampValue instanceof Date) {
    dateTime = new DateTime(timestampValue);
  } else if (timestampValue instanceof DateTime) {
    dateTime = (DateTime) timestampValue;
  } else if (timestampValue instanceof String) {
    // If the timestamp value is a string, we try to parse it in the correct format.
    // We fall back to "now"; this avoids losing messages which happen to have the wrong
    // timestamp format.
    try {
      dateTime = ES_DATE_FORMAT_FORMATTER.parseDateTime((String) timestampValue);
    } catch (IllegalArgumentException e) {
      LOG.trace(
          "Invalid format for field timestamp '{}' in message {}, forcing to current time.",
          timestampValue, getId());
      invalidTimestampMeter.mark();
      dateTime = Tools.nowUTC();
    }
  } else {
    // Don't allow any other types for timestamp; force to "now".
    LOG.trace(
        "Invalid type for field timestamp '{}' in message {}, forcing to current time.",
        timestampValue.getClass().getSimpleName(), getId());
    invalidTimestampMeter.mark();
    dateTime = Tools.nowUTC();
  }
  if (dateTime != null) {
    obj.put(FIELD_TIMESTAMP, buildElasticSearchTimeFormat(dateTime.withZone(UTC)));
  }

  // Manually converting stream ID to string - caused strange problems without it.
  if (getStreams().isEmpty()) {
    obj.put(FIELD_STREAMS, Collections.emptyList());
  } else {
    final List<String> streamIds = Lists.newArrayListWithCapacity(streams.size());
    for (Stream stream : streams) {
      streamIds.add(stream.getId());
    }
    obj.put(FIELD_STREAMS, streamIds);
  }
  return obj;
}