@Override
public void writeExternal(ObjectOutput out) throws IOException {
   String threadName = Thread.currentThread().getName();
   if (!holdUp.get()) {
      log.debug("In streaming...");
      holdUp.compareAndSet(false, true);
      log.debug("Holding up...");
      TestingUtil.sleepThread(1000); // Sleep for 1 second to hold up state transfer
   }
   out.writeInt(value);
}
@Override
public boolean equals(Object obj) {
   if (obj == null) {
      log.debug("null -> false");
      return false;
   }
   log.debug(obj.getClass());
   if (getClass() != obj.getClass()) {
      log.debug("class not same -> false");
      return false;
   }
   final CustomPojo other = (CustomPojo) obj;
   return this.name.equals(other.name);
}
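Since the equals() above compares only the name field, hash-based collections also need a consistent hashCode(); a minimal companion sketch, assuming the same name field (this method is not part of the original snippet):

// Hypothetical companion to the equals() above: the Object contract requires that
// equal objects produce equal hash codes, so hash only the same `name` field.
@Override
public int hashCode() {
   return name == null ? 0 : name.hashCode();
}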
private void shutDownGracefully() {
   if (log.isDebugEnabled())
      log.debugf("Wait for on-going transactions to finish for %s.",
            Util.prettyPrintTime(configuration.transaction().cacheStopTimeout(), TimeUnit.MILLISECONDS));
   long failTime = currentMillisFromNanotime() + configuration.transaction().cacheStopTimeout();
   boolean txsOnGoing = areTxsOnGoing();
   while (txsOnGoing && currentMillisFromNanotime() < failTime) {
      try {
         Thread.sleep(30);
         txsOnGoing = areTxsOnGoing();
      } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         if (clustered) {
            log.debugf("Interrupted waiting for on-going transactions to finish. " +
                  "%s local transactions and %s remote transactions",
                  localTransactions.size(), remoteTransactions.size());
         } else {
            log.debugf("Interrupted waiting for %s on-going transactions to finish.",
                  localTransactions.size());
         }
      }
   }

   if (txsOnGoing) {
      log.unfinishedTransactionsRemain(localTransactions == null ? 0 : localTransactions.size(),
            remoteTransactions == null ? 0 : remoteTransactions.size());
   } else {
      log.debug("All transactions terminated");
   }
}
private Configuration configureTransactionManager(Configuration regionOverrides,
      String templateCacheName, Properties properties) {
   // Get existing configuration to verify whether a tm was configured or not.
   Configuration templateConfig = manager.defineConfiguration(templateCacheName, new Configuration());
   String ispnTmLookupClassName = templateConfig.getTransactionManagerLookupClass();
   String hbTmLookupClassName = org.hibernate.cache.infinispan.tm.HibernateTransactionManagerLookup.class.getName();
   if (ispnTmLookupClassName != null && !ispnTmLookupClassName.equals(hbTmLookupClassName)) {
      log.debug("Infinispan is configured [" + ispnTmLookupClassName +
            "] with a different transaction manager lookup class than Hibernate [" + hbTmLookupClassName + "]");
   } else {
      regionOverrides.setTransactionManagerLookup(transactionManagerlookup);
   }

   String useSyncProp = ConfigurationHelper.extractPropertyValue(INFINISPAN_USE_SYNCHRONIZATION_PROP, properties);
   boolean useSync = useSyncProp == null ? DEF_USE_SYNCHRONIZATION : Boolean.parseBoolean(useSyncProp);
   regionOverrides.fluent().transaction().useSynchronization(useSync);

   return regionOverrides;
}
private Cache getCache(String regionName, String typeKey, Properties properties) {
   TypeOverrides regionOverride = typeOverrides.get(regionName);
   if (!definedConfigurations.contains(regionName)) {
      String templateCacheName = null;
      Configuration regionCacheCfg = null;
      if (regionOverride != null) {
         if (log.isDebugEnabled())
            log.debug("Cache region specific configuration exists: " + regionOverride);
         regionOverride = overrideStatisticsIfPresent(regionOverride, properties);
         regionCacheCfg = regionOverride.createInfinispanConfiguration();
         String cacheName = regionOverride.getCacheName();
         if (cacheName != null) {
            // Region specific override with a given cache name
            templateCacheName = cacheName;
         } else {
            // Region specific override without cache name, so template cache name is generic for data type.
            templateCacheName = typeOverrides.get(typeKey).getCacheName();
         }
      } else {
         // No region specific overrides, template cache name is generic for data type.
         templateCacheName = typeOverrides.get(typeKey).getCacheName();
         regionCacheCfg = typeOverrides.get(typeKey).createInfinispanConfiguration();
      }
      // Configure transaction manager
      regionCacheCfg = configureTransactionManager(regionCacheCfg, templateCacheName, properties);
      // Apply overrides
      manager.defineConfiguration(regionName, templateCacheName, regionCacheCfg);
      definedConfigurations.add(regionName);
   }
   Cache cache = manager.getCache(regionName);
   if (!cache.getStatus().allowInvocations()) {
      cache.start();
   }
   return createCacheWrapper(cache.getAdvancedCache());
}
protected Object invalidateAcrossCluster(boolean synchronous, InvocationContext ctx, Object[] keys,
      boolean useFuture, final Object retvalForFuture) throws Throwable {
   if (!isLocalModeForced(ctx)) {
      // increment invalidations counter if statistics maintained
      incrementInvalidations();
      final InvalidateCommand command = commandsFactory.buildInvalidateCommand(keys);
      if (log.isDebugEnabled())
         log.debug("Cache [" + rpcManager.getTransport().getAddress() + "] replicating " + command);
      // voila, invalidated!
      if (useFuture) {
         NotifyingNotifiableFuture<Object> future = new NotifyingFutureImpl(retvalForFuture);
         rpcManager.broadcastRpcCommandInFuture(command, future);
         return future;
      } else {
         rpcManager.broadcastRpcCommand(command, synchronous, false);
      }
   }
   return retvalForFuture;
}
private void invalidateAcrossCluster(boolean synchronous, Object[] keys, InvocationContext ctx)
      throws Throwable {
   // increment invalidations counter if statistics maintained
   incrementInvalidations();
   final InvalidateCommand invalidateCommand =
         commandsFactory.buildInvalidateCommand(InfinispanCollections.<Flag>emptySet(), keys);
   if (log.isDebugEnabled())
      log.debug("Cache [" + rpcManager.getAddress() + "] replicating " + invalidateCommand);

   ReplicableCommand command = invalidateCommand;
   if (ctx.isInTxScope()) {
      TxInvocationContext txCtx = (TxInvocationContext) ctx;
      // A Prepare command containing the invalidation command in its 'modifications' list is sent
      // to the remote nodes so that the invalidation is executed in the same transaction and locks
      // can be acquired and released properly.
      // This is 1PC on purpose, as an optimisation, even if the current TX is 2PC.
      // If the cache uses 2PC it's possible that the remotes will commit the invalidation and the
      // originator rolls back, but this does not impact consistency and the speed benefit is worth it.
      command = commandsFactory.buildPrepareCommand(txCtx.getGlobalTransaction(),
            Collections.<WriteCommand>singletonList(invalidateCommand), true);
   }
   rpcManager.invokeRemotely(null, command, rpcManager.getDefaultRpcOptions(synchronous));
}
public void updateAccountBalance(Integer id, Integer newBalance) throws Exception {
   log.debug("Updating account " + id + " to balance " + newBalance);
   tm.begin();
   try {
      Session session = sessionFactory.getCurrentSession();
      Object account = session.get(acctClass, id);
      setBalance.invoke(account, newBalance);
      session.update(account);
      tm.commit();
   } catch (Exception e) {
      log.error("rolling back", e);
      tm.rollback();
      throw e;
   }
   log.debug("Updated account " + id + " to balance " + newBalance);
}
public void testChangesOnAtomicMapNoLocks() {
   AtomicMap<String, String> map = AtomicMapLookup.getAtomicMap(cache, "key");
   assert map.isEmpty();
   // InvocationContextContainer icc = TestingUtil.extractComponent(cache, InvocationContextContainer.class);
   // InvocationContext ic = icc.createInvocationContext(false, -1);
   // ic.setFlags(SKIP_LOCKING);
   log.debug("Doing a put");
   // assert icc.getInvocationContext(true).hasFlag(SKIP_LOCKING);
   map.put("a", "b");
   log.debug("Put complete");
   assert map.get("a").equals("b");

   // now re-retrieve the map and make sure we see the diffs
   assert AtomicMapLookup.getAtomicMap(cache, "key").get("a").equals("b");
}
@CacheEntryRemoved
@CacheEntryModified
public void callback(TransactionalEvent e) {
   System.out.println("Callback got event " + e);
   log.debug("Callback got event " + e);
   assertFalse("entry was removed on remote cache so isLocal should be false", e.isOriginLocal());
}
@Override
public Object call() throws Exception {
   log.debug("active status modifier started");
   scl.activeStatusChanged(true);
   scl.pushStateFuture.get();
   return null;
}
public void updateAccountBranch(Integer id, String branch) throws Exception {
   log.debug("Updating account " + id + " to branch " + branch);
   tm.begin();
   try {
      Session session = sessionFactory.getCurrentSession();
      Object account = session.get(acctClass, id);
      log.debug("Set branch " + branch);
      setBranch.invoke(account, branch);
      session.update(account);
      tm.commit();
   } catch (Exception e) {
      log.error("rolling back", e);
      tm.rollback();
      throw e;
   }
   log.debug("Updated account " + id + " to branch " + branch);
}
public void waitForChannelToConnect() {
   if (channel == null)
      return;
   log.debug("Waiting on view being accepted");
   try {
      channelConnectedLatch.await();
   } catch (InterruptedException e) {
      log.interruptedWaitingForCoordinator(e);
   }
}
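The latch above is presumably counted down once JGroups delivers the first view to the channel; a minimal sketch of that counterpart, assuming a JGroups ReceiverAdapter-based listener (the class and field names here are illustrative, not from the original snippet):

import java.util.concurrent.CountDownLatch;
import org.jgroups.ReceiverAdapter;
import org.jgroups.View;

// Hypothetical counterpart: count the latch down when the first view arrives,
// unblocking any thread parked in waitForChannelToConnect().
class ViewLatchReceiver extends ReceiverAdapter {
   final CountDownLatch channelConnectedLatch = new CountDownLatch(1);

   @Override
   public void viewAccepted(View newView) {
      channelConnectedLatch.countDown();
   }
}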
/** {@inheritDoc} */
public TimestampsRegion buildTimestampsRegion(String regionName, Properties properties)
      throws CacheException {
   if (log.isDebugEnabled())
      log.debug("Building timestamps cache region [" + regionName + "]");
   Cache cache = getCache(regionName, TIMESTAMPS_KEY, properties);
   CacheAdapter cacheAdapter = CacheAdapterImpl.newInstance(cache);
   TimestampsRegionImpl region = createTimestampsRegion(cacheAdapter, regionName);
   region.start();
   return region;
}
private void safeRollback() {
   try {
      transactionManager.rollback();
   } catch (Exception e) {
      // ignored!
      if (debug) {
         log.debug("Error rolling back transaction.", e);
      }
   }
}
private Object realRemoteGet(InvocationContext ctx, Object key, boolean storeInL1, boolean isWrite)
      throws Throwable {
   if (trace)
      log.tracef("Doing a remote get for key %s", key);

   boolean acquireRemoteLock = false;
   if (ctx.isInTxScope()) {
      TxInvocationContext txContext = (TxInvocationContext) ctx;
      acquireRemoteLock = isWrite && isPessimisticCache && !txContext.getAffectedKeys().contains(key);
   }
   // attempt a remote lookup
   InternalCacheEntry ice = dm.retrieveFromRemoteSource(key, ctx, acquireRemoteLock);

   if (acquireRemoteLock) {
      ((TxInvocationContext) ctx).addAffectedKey(key);
   }

   if (ice != null) {
      if (storeInL1) {
         if (isL1CacheEnabled) {
            if (trace)
               log.tracef("Caching remotely retrieved entry for key %s in L1", key);
            // This should be fail-safe
            try {
               long lifespan = ice.getLifespan() < 0 ? configuration.getL1Lifespan()
                     : Math.min(ice.getLifespan(), configuration.getL1Lifespan());
               PutKeyValueCommand put = cf.buildPutKeyValueCommand(ice.getKey(), ice.getValue(),
                     lifespan, -1, ctx.getFlags());
               lockAndWrap(ctx, key, ice);
               invokeNextInterceptor(ctx, put);
            } catch (Exception e) {
               // Couldn't store in L1 for some reason. But don't fail the transaction!
               log.infof("Unable to store entry %s in L1 cache", key);
               log.debug("Inability to store in L1 caused by", e);
            }
         } else {
            CacheEntry ce = ctx.lookupEntry(key);
            if (ce == null || ce.isNull() || ce.isLockPlaceholder() || ce.getValue() == null) {
               if (ce != null && ce.isChanged()) {
                  ce.setValue(ice.getValue());
               } else {
                  if (isWrite)
                     lockAndWrap(ctx, key, ice);
                  else
                     ctx.putLookedUpEntry(key, ice);
               }
            }
         }
      } else {
         if (trace)
            log.tracef("Not caching remotely retrieved entry for key %s in L1", key);
      }
      return ice.getValue();
   }
   return null;
}
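The L1 lifespan computation above caps a remotely retrieved entry's lifespan at the configured L1 lifespan, treating a negative lifespan as "immortal". A standalone restatement of just that rule, with a hypothetical helper name (not part of the original code):

// Hypothetical restatement of the lifespan rule used above: a negative entry lifespan
// means the entry never expires, so only the configured L1 lifespan applies; otherwise
// the L1 copy lives for the shorter of the two limits.
final class L1LifespanRule {
   static long l1Lifespan(long entryLifespan, long configuredL1Lifespan) {
      return entryLifespan < 0 ? configuredL1Lifespan : Math.min(entryLifespan, configuredL1Lifespan);
   }
}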
@Override
public void stop() {
   try {
      DataSources.destroy(pooledDataSource);
      if (log.isDebugEnabled()) {
         log.debug("Successfully stopped PooledConnectionFactory.");
      }
   } catch (SQLException sqle) {
      log.warn("Could not destroy C3P0 connection pool: " + pooledDataSource, sqle);
   }
}
/** {@inheritDoc} */
public EntityRegion buildEntityRegion(String regionName, Properties properties, CacheDataDescription metadata)
      throws CacheException {
   if (log.isDebugEnabled())
      log.debug("Building entity cache region [" + regionName + "]");
   Cache cache = getCache(regionName, ENTITY_KEY, properties);
   CacheAdapter cacheAdapter = CacheAdapterImpl.newInstance(cache);
   EntityRegionImpl region = new EntityRegionImpl(cacheAdapter, regionName, metadata, transactionManager, this);
   region.start();
   return region;
}
public boolean waitForRehashToComplete(int viewId) throws InterruptedException, TimeoutException {
   long endTime = System.currentTimeMillis() + configuration.getRehashRpcTimeout();
   synchronized (rehashInProgressMonitor) {
      while (!receivedRehashCompletedNotification && lastViewId == viewId
            && System.currentTimeMillis() < endTime) {
         rehashInProgressMonitor.wait(configuration.getRehashRpcTimeout());
      }
   }
   if (!receivedRehashCompletedNotification) {
      if (lastViewId != viewId) {
         log.debug("Received a new view while waiting for cluster-wide rehash to finish");
         return false;
      } else {
         throw new TimeoutException("Timeout waiting for cluster-wide rehash to finish");
      }
   } else {
      log.debug("Cluster-wide rehash finished successfully.");
   }
   return true;
}
public void createAccount(Object holder, Integer id, Integer openingBalance, String branch)
      throws Exception {
   log.debug("Creating account " + id);
   tm.begin();
   try {
      Object account = acctClass.newInstance();
      setId.invoke(account, id);
      setHolder.invoke(account, holder);
      setBalance.invoke(account, openingBalance);
      log.debug("Set branch " + branch);
      setBranch.invoke(account, branch);
      sessionFactory.getCurrentSession().persist(account);
      tm.commit();
   } catch (Exception e) {
      log.error("rolling back", e);
      tm.rollback();
      throw e;
   }
   log.debug("Created account " + id);
}
final void putForExternalRead(K key, V value, EnumSet<Flag> explicitFlags, ClassLoader explicitClassLoader) {
   Transaction ongoingTransaction = null;
   try {
      ongoingTransaction = getOngoingTransaction();
      if (ongoingTransaction != null)
         transactionManager.suspend();

      EnumSet<Flag> flags = EnumSet.of(FAIL_SILENTLY, FORCE_ASYNCHRONOUS,
            ZERO_LOCK_ACQUISITION_TIMEOUT, PUT_FOR_EXTERNAL_READ);
      if (explicitFlags != null && !explicitFlags.isEmpty()) {
         flags.addAll(explicitFlags);
      }

      // if the entry exists then this should be a no-op.
      putIfAbsent(key, value, defaultLifespan, TimeUnit.MILLISECONDS,
            defaultMaxIdleTime, TimeUnit.MILLISECONDS, flags, explicitClassLoader);
   } catch (Exception e) {
      if (log.isDebugEnabled())
         log.debug("Caught exception while doing putForExternalRead()", e);
   } finally {
      try {
         if (ongoingTransaction != null)
            transactionManager.resume(ongoingTransaction);
      } catch (Exception e) {
         if (log.isDebugEnabled())
            log.debug("Had problems trying to resume a transaction after putForExternalRead()", e);
      }
   }
}
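Callers typically reach this internal method through the public Cache API. A minimal caller-side sketch (the cache contents and key names are illustrative), relying on putForExternalRead() being a best-effort, fail-fast put:

import org.infinispan.Cache;
import org.infinispan.manager.DefaultCacheManager;

// Hypothetical usage sketch: putForExternalRead() caches state loaded from an external
// source (e.g. a database) and, thanks to FAIL_SILENTLY and ZERO_LOCK_ACQUISITION_TIMEOUT
// above, never fails the caller on lock contention.
public class PutForExternalReadExample {
   public static void main(String[] args) {
      DefaultCacheManager cacheManager = new DefaultCacheManager();
      try {
         Cache<String, String> cache = cacheManager.getCache();
         String value = "row loaded from the database"; // stand-in for externally read data
         cache.putForExternalRead("account:42", value); // no-op if the key already exists
      } finally {
         cacheManager.stop();
      }
   }
}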
private void broadcastInvalidateForPrepare(List<WriteCommand> modifications, InvocationContext ctx)
      throws Throwable {
   // A prepare does not carry flags, so skip checking whether it is local or not
   if (ctx.isInTxScope()) {
      if (modifications.isEmpty())
         return;
      InvalidationFilterVisitor filterVisitor = new InvalidationFilterVisitor(modifications.size());
      filterVisitor.visitCollection(null, modifications);

      if (filterVisitor.containsPutForExternalRead) {
         log.debug("Modification list contains a putForExternalRead operation. Not invalidating.");
      } else if (filterVisitor.containsLocalModeFlag) {
         log.debug("Modification list contains a local mode flagged operation. Not invalidating.");
      } else {
         try {
            invalidateAcrossCluster(defaultSynchronous, filterVisitor.result.toArray(), ctx);
         } catch (Throwable t) {
            log.unableToRollbackEvictionsDuringPrepare(t);
            if (t instanceof RuntimeException)
               throw (RuntimeException) t;
            else
               throw new RuntimeException("Unable to broadcast invalidation messages", t);
         }
      }
   }
}
public Account getAccount(Integer id) throws Exception {
   log.debug("Getting account " + id);
   tm.begin();
   try {
      Session session = sessionFactory.getCurrentSession();
      Account acct = (Account) session.get(acctClass, id);
      tm.commit();
      return acct;
   } catch (Exception e) {
      log.error("rolling back", e);
      tm.rollback();
      throw e;
   }
}
private void broadcastInvalidateForPrepare(List<WriteCommand> modifications, Transaction tx,
      InvocationContext ctx) throws Throwable {
   if (ctx.isInTxScope() && !isLocalModeForced(ctx)) {
      if (modifications == null || modifications.isEmpty())
         return;
      InvalidationFilterVisitor filterVisitor = new InvalidationFilterVisitor(modifications.size());
      filterVisitor.visitCollection(null, modifications);

      if (filterVisitor.containsPutForExternalRead) {
         log.debug("Modification list contains a putForExternalRead operation. Not invalidating.");
      } else if (filterVisitor.containsLocalModeFlag) {
         log.debug("Modification list contains a local mode flagged operation. Not invalidating.");
      } else {
         try {
            invalidateAcrossCluster(defaultSynchronous, ctx, filterVisitor.result.toArray(), false, null);
         } catch (Throwable t) {
            log.warn("Unable to broadcast evicts as a part of the prepare phase. Rolling back.", t);
            if (t instanceof RuntimeException)
               throw (RuntimeException) t;
            else
               throw new RuntimeException("Unable to broadcast invalidation messages", t);
         }
      }
   }
}
/** {@inheritDoc} */
public QueryResultsRegion buildQueryResultsRegion(String regionName, Properties properties)
      throws CacheException {
   if (log.isDebugEnabled())
      log.debug("Building query results cache region [" + regionName + "]");
   String cacheName = typeOverrides.get(QUERY_KEY).getCacheName();
   // If region name is not default one, lookup a cache for that region name
   if (!regionName.equals("org.hibernate.cache.internal.StandardQueryCache"))
      cacheName = regionName;

   Cache cache = getCache(cacheName, QUERY_KEY, properties);
   CacheAdapter cacheAdapter = CacheAdapterImpl.newInstance(cache);
   QueryResultsRegionImpl region =
         new QueryResultsRegionImpl(cacheAdapter, regionName, properties, transactionManager, this);
   region.start();
   return region;
}
/**
 * Calculates the list of accessed keys to request from each member, for data placement optimization.
 */
public final synchronized void calculateAccesses() {
   if (hasAccessesCalculated) {
      return;
   }
   hasAccessesCalculated = true;

   if (log.isTraceEnabled()) {
      log.trace("Calculating accessed keys for data placement optimization");
   }

   RemoteTopKeyRequest request = new RemoteTopKeyRequest(streamLibContainer.getCapacity() * 2);

   request.merge(streamLibContainer.getTopKFrom(REMOTE_PUT, maxNumberOfKeysToRequest), 2);
   request.merge(streamLibContainer.getTopKFrom(REMOTE_GET, maxNumberOfKeysToRequest), 1);

   sortObjectsByPrimaryOwner(request.toRequestMap(maxNumberOfKeysToRequest), true);
   request.clear();

   LocalTopKeyRequest localTopKeyRequest = new LocalTopKeyRequest();

   localTopKeyRequest.merge(streamLibContainer.getTopKFrom(LOCAL_PUT), 2);
   localTopKeyRequest.merge(streamLibContainer.getTopKFrom(LOCAL_GET), 1);

   sortObjectsByPrimaryOwner(localTopKeyRequest.toRequestMap(), false);
   localTopKeyRequest.clear(); // clear the local request; the remote one was already cleared above

   if (log.isTraceEnabled()) {
      StringBuilder stringBuilder = new StringBuilder("Accesses:\n");
      for (int i = 0; i < accessesByPrimaryOwner.length; ++i) {
         stringBuilder.append(clusterSnapshot.get(i)).append(" ==> ")
               .append(accessesByPrimaryOwner[i]).append("\n");
      }
      log.trace(stringBuilder);
   }

   streamLibContainer.resetStat(REMOTE_GET);
   streamLibContainer.resetStat(LOCAL_GET);
   streamLibContainer.resetStat(REMOTE_PUT);
   streamLibContainer.resetStat(LOCAL_PUT);
}
/** {@inheritDoc} */
public void start(Settings settings, Properties properties) throws CacheException {
   log.debug("Starting Infinispan region factory");
   try {
      transactionManagerlookup = createTransactionManagerLookup(settings, properties);
      transactionManager = transactionManagerlookup.getTransactionManager();
      manager = createCacheManager(properties);
      initGenericDataTypeOverrides();
      Enumeration keys = properties.propertyNames();
      while (keys.hasMoreElements()) {
         String key = (String) keys.nextElement();
         int prefixLoc = -1;
         if ((prefixLoc = key.indexOf(PREFIX)) != -1) {
            dissectProperty(prefixLoc, key, properties);
         }
      }
      defineGenericDataTypeCacheConfigurations(settings, properties);
   } catch (CacheException ce) {
      throw ce;
   } catch (Throwable t) {
      throw new CacheException("Unable to start region factory", t);
   }
}
/**
 * Helper method that binds a non-serializable object to the JNDI tree.
 *
 * @param jndiName Name under which the object must be bound
 * @param who Object to bind in JNDI
 * @param classType Class type under which the bound object should appear
 * @param ctx Naming context under which we bind the object
 * @throws Exception Thrown if a naming exception occurs during binding
 */
private void bind(String jndiName, Object who, Class<?> classType, Context ctx) throws Exception {
   // Ah! This service isn't serializable, so we use a helper class
   NonSerializableFactory.bind(jndiName, who);
   Name n = ctx.getNameParser("").parse(jndiName);
   while (n.size() > 1) {
      String ctxName = n.get(0);
      try {
         ctx = (Context) ctx.lookup(ctxName);
      } catch (NameNotFoundException e) {
         log.debug("creating Subcontext " + ctxName);
         ctx = ctx.createSubcontext(ctxName);
      }
      n = n.getSuffix(1);
   }
   // The helper class NonSerializableFactory uses address type nns, so we go on to
   // use the helper class to bind the service object in JNDI
   StringRefAddr addr = new StringRefAddr("nns", jndiName);
   Reference ref = new Reference(classType.getName(), addr, NonSerializableFactory.class.getName(), null);
   ctx.rebind(n.get(0), ref);
}
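A hypothetical call site for the helper above, using javax.naming.InitialContext; the JNDI name, the service object, and its class are illustrative only, not from the original code:

// Hypothetical usage: bind a non-serializable service under a nested JNDI name.
// bind() creates the intermediate subcontexts (e.g. "infinispan") on demand.
Context rootCtx = new InitialContext();
bind("java:/infinispan/CacheManager", cacheManager, EmbeddedCacheManager.class, rootCtx);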
private static void populateMetricsAndOperations(List<Class<?>> classes, Props props, boolean withNamePrefix)
      throws Exception {
   props.setHasOperations(true);
   props.setHasMetrics(true);
   for (Class<?> clazz : classes) {
      MBean mbean = clazz.getAnnotation(MBean.class);
      String prefix = withNamePrefix ? mbean.objectName() + '.' : "";
      CtClass ctClass = classPool.get(clazz.getName());

      CtMethod[] ctMethods = ctClass.getMethods();
      for (CtMethod ctMethod : ctMethods) {
         ManagedAttribute managedAttr = (ManagedAttribute) ctMethod.getAnnotation(ManagedAttribute.class);
         ManagedOperation managedOp = (ManagedOperation) ctMethod.getAnnotation(ManagedOperation.class);

         Metric rhqMetric = (Metric) ctMethod.getAnnotation(Metric.class);
         if (rhqMetric != null) {
            debug("Metric annotation found " + rhqMetric);
            // Property and description resolution are the reason why annotation scanning is done here.
            // These two fields are calculated from either the method name or the Managed* annotations,
            // and so, only the infinispan side knows about that.
            String property = prefix + getPropertyFromBeanConvention(ctMethod);
            if (!rhqMetric.property().isEmpty()) {
               property = prefix + rhqMetric.property();
            }
            MetricProps metric = new MetricProps(property);
            String displayName = withNamePrefix
                  ? "[" + mbean.objectName() + "] " + rhqMetric.displayName()
                  : rhqMetric.displayName();
            metric.setDisplayName(displayName);
            metric.setDisplayType(rhqMetric.displayType());
            metric.setDataType(rhqMetric.dataType());
            metric.setUnits(rhqMetric.units());
            if (managedAttr != null) {
               debug("Metric has ManagedAttribute annotation " + managedAttr);
               metric.setDescription(managedAttr.description());
            } else if (managedOp != null) {
               debug("Metric has ManagedOperation annotation " + managedOp);
               metric.setDescription(managedOp.description());
            } else {
               log.debug("Metric has no managed annotations, so take the description from the display name.");
               metric.setDescription(rhqMetric.displayName());
            }
            props.getMetrics().add(metric);
         }

         Operation rhqOperation = (Operation) ctMethod.getAnnotation(Operation.class);
         if (rhqOperation != null) {
            debug("Operation annotation found " + rhqOperation);
            String name;
            if (!rhqOperation.name().isEmpty()) {
               name = prefix + rhqOperation.name();
            } else {
               name = prefix + ctMethod.getName();
            }
            OperationProps operation = new OperationProps(name);
            String displayName = withNamePrefix
                  ? "[" + mbean.objectName() + "] " + rhqOperation.displayName()
                  : rhqOperation.displayName();
            operation.setDisplayName(displayName);
            if (managedAttr != null) {
               debug("Operation has ManagedAttribute annotation " + managedAttr);
               operation.setDescription(managedAttr.description());
            } else if (managedOp != null) {
               debug("Operation has ManagedOperation annotation " + managedOp);
               operation.setDescription(managedOp.description());
            } else {
               debug("Operation has no managed annotations, so take the description from the display name.");
               operation.setDescription(rhqOperation.displayName());
            }

            Object[][] paramAnnotations = ctMethod.getParameterAnnotations();
            int i = 0;
            for (Object[] paramAnnotationsInEach : paramAnnotations) {
               boolean hadParameter = false;
               for (Object annot : paramAnnotationsInEach) {
                  debug("Parameter annotation " + annot);
                  if (annot instanceof Parameter) {
                     Parameter param = (Parameter) annot;
                     SimpleProperty prop = new SimpleProperty(param.name());
                     prop.setDescription(param.description());
                     operation.getParams().add(prop);
                     hadParameter = true;
                  }
               }
               if (!hadParameter) {
                  operation.getParams().add(new SimpleProperty("p" + i++));
               }
            }
            CtClass returnType = ctMethod.getReturnType();
            if (!returnType.equals(CtClass.voidType)) {
               if (!returnType.equals(Void.TYPE)) {
                  SimpleProperty prop = new SimpleProperty("operationResult");
                  operation.setResult(prop);
               }
            }
            props.getOperations().add(operation);
         }
      }

      CtField[] ctFields = ctClass.getDeclaredFields();
      for (CtField ctField : ctFields) {
         debug("Inspecting field " + ctField);

         Metric rhqMetric = (Metric) ctField.getAnnotation(Metric.class);
         if (rhqMetric != null) {
            debug("Field " + ctField + " contains Metric annotation " + rhqMetric);
            String property;
            if (!rhqMetric.property().isEmpty()) {
               property = prefix + rhqMetric.property();
            } else {
               property = prefix + getPropertyFromBeanConvention(ctField);
            }
            MetricProps metric = new MetricProps(property);
            String displayName = withNamePrefix
                  ? "[" + mbean.objectName() + "] " + rhqMetric.displayName()
                  : rhqMetric.displayName();
            metric.setDisplayName(displayName);
            metric.setDisplayType(rhqMetric.displayType());
            metric.setDataType(rhqMetric.dataType());
            metric.setUnits(rhqMetric.units());
            ManagedAttribute managedAttr = (ManagedAttribute) ctField.getAnnotation(ManagedAttribute.class);
            if (managedAttr != null) {
               debug("Metric has ManagedAttribute annotation " + managedAttr);
               metric.setDescription(managedAttr.description());
            } else {
               log.debug("Metric has no managed annotations, so take the description from the display name.");
               metric.setDescription(rhqMetric.displayName());
            }
            props.getMetrics().add(metric);
         }
      }
   }
}
private void updateClusterTable(List<Address> members) {
   log.debug("Updating cluster table with new member list " + members);
   clusterTableModel.setMembers(members);
   updateTitleBar();
}