/**
 * Callback for a new cluster view from zu: logs the view and resets the
 * node-count metric to the number of distinct node addresses.
 *
 * @param clusterView partition id -> node addresses serving that partition
 */
@Override
public void clusterChanged(Map<Integer, List<InetSocketAddress>> clusterView) {
  logger.info("clusterChanged(): Received new clusterView from zu " + clusterView);
  final Set<InetSocketAddress> addresses = getNodesAddresses(clusterView);
  // clear+inc is not atomic on its own; the class-level lock keeps
  // concurrent view changes from interleaving the two steps.
  synchronized (SenseiBroker.class) {
    numberOfNodesInTheCluster.clear();
    numberOfNodesInTheCluster.inc(addresses.size());
  }
}
public void handleClusterConnected(Set<Node> nodes) { // _loadBalancer = _loadBalancerFactory.newLoadBalancer(nodes); _partitions = getPartitions(nodes); numberOfNodesInTheCluster.clear(); numberOfNodesInTheCluster.inc(getNumberOfNodes()); logger.info( "handleClusterConnected(): Received the list of nodes from norbert " + nodes.toString()); logger.info( "handleClusterConnected(): Received the list of partitions from router " + _partitions.toString()); }
/**
 * Flushes the pending kafka events of every persistent cache, then refreshes
 * the batch-count metric with the new total and records the elapsed time.
 *
 * <p>NOTE(review): "Penging" mirrors the {@code PersistentCache} API spelling;
 * renaming would break callers, so it is kept.
 */
public void commitPengingEvents() {
  log.info("Flushing pending kafka events to the persistent cache");
  final long startedAt = System.currentTimeMillis();
  int totalBatches = 0;
  for (final PersistentCache cache : persistentCaches.values()) {
    cache.commitPengingEvents();
    totalBatches += cache.numberOfAvailableBatches();
  }
  numberOfBatchesCounter.clear();
  numberOfBatchesCounter.inc(totalBatches);
  timer.update(System.currentTimeMillis() - startedAt, TimeUnit.MILLISECONDS);
}
/**
 * Deletes documents from the activity engine.
 *
 * <p>Each uid is processed under the global write lock; uids equal to
 * {@code Long.MIN_VALUE} (the sentinel) or not currently indexed are skipped.
 * A single flush is triggered at the end if any delete filled a pending batch.
 *
 * @param uids document uids to delete; may be empty
 */
public void delete(long... uids) {
  boolean needToFlush = false;
  if (uids.length == 0) {
    return;
  }
  for (long uid : uids) {
    if (uid == Long.MIN_VALUE) {
      continue;
    }
    Lock writeLock = globalLock.writeLock();
    // Acquire BEFORE the try block so finally never unlocks a lock we failed
    // to obtain (which would throw IllegalMonitorStateException).
    writeLock.lock();
    try {
      if (!uidToArrayIndex.containsKey(uid)) {
        continue;
      }
      deletedDocumentsCounter.inc();
      int index = uidToArrayIndex.remove(uid);
      for (ActivityValues activityIntValues : valuesMap.values()) {
        activityIntValues.delete(index);
      }
      // Non-short-circuit '|' on purpose: addFieldUpdate must always run.
      needToFlush = needToFlush | pendingDeletes.addFieldUpdate(new Update(index, Long.MIN_VALUE));
    } finally {
      writeLock.unlock();
    }
  }
  if (needToFlush) {
    flush();
  }
}
/**
 * Clears every resettable metric in the registry: counters, timers,
 * histograms, and anything implementing {@code Clearable}.
 *
 * <p>The checks are deliberately independent (not else-if): a metric
 * implementing several of these interfaces is cleared through each one,
 * matching the original behavior.
 */
@Override
public void reset() {
  // The unused 'name' local (entry.getKey()) from the original was removed.
  for (final Entry<MetricName, Metric> entry : registry.allMetrics().entrySet()) {
    final Metric metric = entry.getValue();
    if (metric instanceof Counter) {
      ((Counter) metric).clear();
    }
    if (metric instanceof Timer) {
      ((Timer) metric).clear();
    }
    if (metric instanceof Histogram) {
      ((Histogram) metric).clear();
    }
    if (metric instanceof Clearable) {
      ((Clearable) metric).clear();
    }
  }
}
/**
 * Records metrics when a connection closes: marks a disconnect, updates the
 * connection-duration timer with the connection's lifetime, and decrements
 * the open-connections gauge.
 *
 * @param connection the connection that was closed
 */
@Override
protected void connectionClosed(final Connection connection) {
  super.connectionClosed(connection);
  disconnects.mark();
  // Renamed from 'duration' to stop shadowing the 'duration' timer field.
  final long elapsedMillis = System.currentTimeMillis() - connection.getTimeStamp();
  this.duration.update(elapsedMillis, TimeUnit.MILLISECONDS);
  connections.dec();
}
/**
 * Applies an activity update for {@code uid} at {@code version}.
 *
 * <p>Updates older than the last applied version are rejected (counted in
 * {@code versionRejectionCounter}). New uids get an array index, reusing a
 * freed slot from {@code deletedIndexes} when available, otherwise growing
 * via {@code indexSize}.
 *
 * @param uid     document uid to update
 * @param version event version, ordered by {@code versionComparator}
 * @param map     field -&gt; value updates; an empty map only advances
 *                {@code lastVersion}
 * @return the internal array index touched, or -1 if nothing was applied
 */
public int update(long uid, final String version, Map<String, Object> map) {
  if (valuesMap.isEmpty()) {
    return -1;
  }
  // Out-of-order event: strictly older than the last applied version.
  if (versionComparator.compare(lastVersion, version) > 0) {
    versionRejectionCounter.inc();
    return -1;
  }
  if (map.isEmpty()) {
    // Nothing to write, but still advance the version high-water mark.
    lastVersion = version;
    return -1;
  }
  int index = -1;
  Lock writeLock = globalLock.writeLock();
  boolean needToFlush = false;
  try {
    writeLock.lock();
    totalUpdatesCounter.inc();
    if (uidToArrayIndex.containsKey(uid)) {
      index = uidToArrayIndex.get(uid);
    } else {
      // New document: prefer recycling the most recently freed slot.
      insertedDocumentsCounter.inc();
      synchronized (deletedIndexes) {
        if (deletedIndexes.size() > 0) {
          index = deletedIndexes.removeInt(deletedIndexes.size() - 1);
        } else {
          index = indexSize.getAndIncrement();
        }
      }
      uidToArrayIndex.put(uid, index);
      recentlyAddedUids.add(uid);
      needToFlush = updateBatch.addFieldUpdate(new Update(index, uid));
    }
    boolean currentUpdate = updateActivities(map, index);
    needToFlush = needToFlush || currentUpdate;
    lastVersion = version;
  } finally {
    writeLock.unlock();
  }
  // Flush runs after the write lock has been released.
  if (needToFlush) {
    flush();
  }
  return index;
}
// One-time metric seeding at class-load: record the benchmark's elapsed
// time and payload size into their counters.
// NOTE(review): 'time' and 'size' are presumably set by earlier static
// initialization not visible in this chunk — confirm before relying on order.
static {
  benchTime.inc(time);
  benchSize.inc(size);
}
/**
 * Forwards the counter's current count to the push sink for the given epoch.
 *
 * @param name    metric name to report under
 * @param counter counter whose current count is pushed
 * @param epoch   timestamp associated with the sample
 * @throws Exception if the underlying push fails
 */
public void processCounter(MetricName name, Counter counter, Long epoch) throws Exception {
  final long currentCount = counter.count();
  pushCounter(name, currentCount, epoch);
}
/**
 * Publishes the counter's current value as an event: service derived from
 * the metric name, metric set to the count, timestamped with the epoch.
 *
 * @param name    metric name, mapped to a service string
 * @param counter counter whose current count is emitted
 * @param epoch   timestamp associated with the sample
 */
@Override
public void processCounter(MetricName name, Counter counter, Long epoch) {
  final long count = counter.count();
  newEvent()
      .service(service(name))
      .metric(count)
      .time(epoch)
      .send();
}
/**
 * Records metrics when a connection opens: increments the open-connections
 * gauge before delegating to the superclass, then marks a connect event.
 *
 * @param connection the connection that was opened
 */
@Override
protected void connectionOpened(final Connection connection) {
  connections.inc();
  super.connectionOpened(connection);
  connects.mark();
}
/**
 * Counts the disk-version update and propagates the new version to the
 * persistent cache.
 *
 * @param version the new on-disk version string
 */
@Override
public void handleUpdatedDiskVersion(String version) {
  versionUpdateCount.inc();
  persistentCache.updateDiskVersion(version);
}
/**
 * Writes the counter's current count to the console report stream using the
 * configured locale for number formatting.
 *
 * @param name    metric name (unused here; the caller prints the header)
 * @param counter counter whose count is printed
 * @param stream  destination stream for the report line
 */
@Override
public void processCounter(MetricName name, Counter counter, PrintStream stream) {
  stream.printf(locale, " count = %d\n", counter.getCount());
}
/**
 * Sends the counter's current count to the stats backend as a gauge under
 * "&lt;sanitized-name&gt;.count".
 *
 * @param name    metric name; sanitized before use as the key prefix
 * @param counter counter whose current count is sent
 * @param epoch   timestamp associated with the sample (unused by sendInt)
 * @throws Exception if the send fails
 */
@Override
public void processCounter(MetricName name, Counter counter, Long epoch) throws Exception {
  final String metricPath = sanitizeName(name) + ".count";
  sendInt(metricPath, StatType.GAUGE, counter.count());
}