public class DownloadProducerFromCloudsoftRepo implements Function<DownloadRequirement, DownloadTargets> {

    @SuppressWarnings("unused")
    private static final Logger LOG = LoggerFactory.getLogger(DownloadProducerFromCloudsoftRepo.class);

    public static final ConfigKey<String> CLOUDSOFT_REPO_URL = BasicConfigKey.builder(String.class)
            .name(DownloadProducerFromProperties.DOWNLOAD_CONF_PREFIX + "repo.cloudsoft.url")
            .description("The URL of the cloudsoft repo to use for downloading entities, during installs")
            .defaultValue("http://downloads.cloudsoftcorp.com/brooklyn/repository")
            .build();

    public static final ConfigKey<Boolean> CLOUDSOFT_REPO_ENABLED = BasicConfigKey.builder(Boolean.class)
            .name(DownloadProducerFromProperties.DOWNLOAD_CONF_PREFIX + "repo.cloudsoft.enabled")
            .description("Whether to use the cloudsoft repo for downloading entities, during installs")
            .defaultValue(true)
            .build();

    /** Freemarker template for the download URL; "%s" is substituted with the configured base URL. */
    public static final String CLOUDSOFT_REPO_URL_PATTERN = "%s/"
            + "${simpletype}/${version}/"
            + "<#if filename??>"
                + "${filename}"
            + "<#else>"
                + "<#if addon??>"
                    + "${simpletype?lower_case}-${addon?lower_case}-${addonversion?lower_case}.${fileSuffix!\"tar.gz\"}"
                + "<#else>"
                    + "${simpletype?lower_case}-${version?lower_case}.${fileSuffix!\"tar.gz\"}"
                + "</#if>"
            + "</#if>";

    private final StringConfigMap config;

    public DownloadProducerFromCloudsoftRepo(StringConfigMap config) {
        this.config = config;
    }

    @Override
    public DownloadTargets apply(DownloadRequirement req) {
        Boolean enabled = config.getConfig(CLOUDSOFT_REPO_ENABLED);
        String baseUrl = config.getConfig(CLOUDSOFT_REPO_URL);
        String url = String.format(CLOUDSOFT_REPO_URL_PATTERN, baseUrl);

        if (enabled) {
            Map<String, ?> subs = DownloadSubstituters.getBasicSubstitutions(req);
            String result = DownloadSubstituters.substitute(url, subs);
            return BasicDownloadTargets.builder().addPrimary(result).build();
        } else {
            return BasicDownloadTargets.empty();
        }
    }
}
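/*
 * Illustrative sketch only (not part of the producer above): shows how a Freemarker pattern of the
 * CLOUDSOFT_REPO_URL_PATTERN form expands for a typical requirement. It uses plain Freemarker in
 * place of Brooklyn's DownloadSubstituters helper, trims the "addon" branch of the pattern for
 * brevity, and the entity type/version values are made up for the example.
 */
class CloudsoftRepoUrlPatternExample {
    public static void main(String[] args) throws Exception {
        String pattern = String.format(
                "%s/${simpletype}/${version}/"
                        + "<#if filename??>${filename}"
                        + "<#else>${simpletype?lower_case}-${version?lower_case}.${fileSuffix!\"tar.gz\"}</#if>",
                "http://downloads.cloudsoftcorp.com/brooklyn/repository");

        // Hypothetical substitutions; DownloadSubstituters.getBasicSubstitutions(req) would derive
        // these from the entity and its driver.
        java.util.Map<String, Object> subs = new java.util.HashMap<String, Object>();
        subs.put("simpletype", "MySqlNode");
        subs.put("version", "5.5.33");

        freemarker.template.Configuration cfg = new freemarker.template.Configuration();
        freemarker.template.Template template =
                new freemarker.template.Template("url", new java.io.StringReader(pattern), cfg);
        java.io.StringWriter out = new java.io.StringWriter();
        template.process(subs, out);

        // Prints: http://downloads.cloudsoftcorp.com/brooklyn/repository/MySqlNode/5.5.33/mysqlnode-5.5.33.tar.gz
        System.out.println(out);
    }
}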
// @Catalog(name="Service Failure Detector", description="HA policy for detecting failure of a service")
public class ServiceFailureDetector extends ServiceStateLogic.ComputeServiceState {

    // TODO Remove duplication between this and MemberFailureDetectionPolicy.
    // The latter could be re-written to use this. Or could even be deprecated
    // in favour of this.

    public enum LastPublished {
        NONE, FAILED, RECOVERED;
    }

    private static final Logger LOG = LoggerFactory.getLogger(ServiceFailureDetector.class);

    private static final long MIN_PERIOD_BETWEEN_EXECS_MILLIS = 100;

    public static final BasicNotificationSensor<FailureDescriptor> ENTITY_FAILED = HASensors.ENTITY_FAILED;

    @SetFromFlag("onlyReportIfPreviouslyUp")
    public static final ConfigKey<Boolean> ENTITY_FAILED_ONLY_IF_PREVIOUSLY_UP = ConfigKeys.newBooleanConfigKey(
            "onlyReportIfPreviouslyUp",
            "Prevents the policy from emitting ENTITY_FAILED if the entity fails on startup (i.e. has never been up)",
            true);

    public static final ConfigKey<Boolean> MONITOR_SERVICE_PROBLEMS = ConfigKeys.newBooleanConfigKey(
            "monitorServiceProblems",
            "Whether to monitor service problems, and emit on failures there (if set to false, this monitors only service up)",
            true);

    @SetFromFlag("serviceOnFireStabilizationDelay")
    public static final ConfigKey<Duration> SERVICE_ON_FIRE_STABILIZATION_DELAY = BasicConfigKey.builder(Duration.class)
            .name("serviceOnFire.stabilizationDelay")
            .description("Time period for which the service must be consistently down (e.g. doesn't report down-up-down) before concluding ON_FIRE")
            .defaultValue(Duration.ZERO)
            .build();

    @SetFromFlag("entityFailedStabilizationDelay")
    public static final ConfigKey<Duration> ENTITY_FAILED_STABILIZATION_DELAY = BasicConfigKey.builder(Duration.class)
            .name("entityFailed.stabilizationDelay")
            .description("Time period for which the service must be consistently down (e.g. doesn't report down-up-down) before emitting ENTITY_FAILED")
            .defaultValue(Duration.ZERO)
            .build();

    @SetFromFlag("entityRecoveredStabilizationDelay")
    public static final ConfigKey<Duration> ENTITY_RECOVERED_STABILIZATION_DELAY = BasicConfigKey.builder(Duration.class)
            .name("entityRecovered.stabilizationDelay")
            .description("For a failed entity, time period for which the service must be consistently up (e.g. doesn't report up-down-up) before emitting ENTITY_RECOVERED")
            .defaultValue(Duration.ZERO)
            .build();

    protected Long firstUpTime;

    protected Long currentFailureStartTime = null;
    protected Long currentRecoveryStartTime = null;

    protected Long publishEntityFailedTime = null;
    protected Long publishEntityRecoveredTime = null;
    protected Long setEntityOnFireTime = null;

    protected LastPublished lastPublished = LastPublished.NONE;

    private final AtomicBoolean executorQueued = new AtomicBoolean(false);
    private volatile long executorTime = 0;

    /**
     * TODO Really don't want this mutex! ServiceStateLogic.setExpectedState() will call into
     * {@code onEvent(null)}, so could get concurrent calls. How to handle that? I don't think
     * {@code ServiceStateLogic.setExpectedState} should be making the call, but presumably that is
     * there to remove a race condition so the state is set before the method returns. Callers
     * shouldn't rely on that though. e.g. see
     * {@code ServiceFailureDetectorTest.testNotifiedOfFailureOnStateOnFire}, where we get two
     * notifications.
     */
    private final Object mutex = new Object();

    public ServiceFailureDetector() {
        this(new ConfigBag());
    }

    public ServiceFailureDetector(Map<String, ?> flags) {
        this(new ConfigBag().putAll(flags));
    }

    public ServiceFailureDetector(ConfigBag configBag) {
        // TODO hierarchy should use ConfigBag, and not change flags
        super(configBag.getAllConfigMutable());
    }

    @Override
    public void onEvent(SensorEvent<Object> event) {
        if (firstUpTime == null) {
            if (event != null && Attributes.SERVICE_UP.equals(event.getSensor()) && Boolean.TRUE.equals(event.getValue())) {
                firstUpTime = event.getTimestamp();
            } else if (event == null && Boolean.TRUE.equals(entity.getAttribute(Attributes.SERVICE_UP))) {
                // If this enricher is registered after the entity is up, then we'll get a "synthetic" onEvent(null)
                firstUpTime = System.currentTimeMillis();
            }
        }

        super.onEvent(event);
    }

    @Override
    protected void setActualState(Lifecycle state) {
        long now = System.currentTimeMillis();

        synchronized (mutex) {
            if (state == Lifecycle.ON_FIRE) {
                if (lastPublished == LastPublished.FAILED) {
                    if (currentRecoveryStartTime != null) {
                        if (LOG.isDebugEnabled())
                            LOG.debug("{} health-check for {}, component was recovering, now failing: {}",
                                    new Object[] {this, entity, getExplanation(state)});
                        currentRecoveryStartTime = null;
                        publishEntityRecoveredTime = null;
                    } else {
                        if (LOG.isTraceEnabled())
                            LOG.trace("{} health-check for {}, component still failed: {}",
                                    new Object[] {this, entity, getExplanation(state)});
                    }
                } else {
                    if (firstUpTime == null && getConfig(ENTITY_FAILED_ONLY_IF_PREVIOUSLY_UP)) {
                        // suppress; won't publish
                    } else if (currentFailureStartTime == null) {
                        if (LOG.isDebugEnabled())
                            LOG.debug("{} health-check for {}, component now failing: {}",
                                    new Object[] {this, entity, getExplanation(state)});
                        currentFailureStartTime = now;
                        publishEntityFailedTime = currentFailureStartTime + getConfig(ENTITY_FAILED_STABILIZATION_DELAY).toMilliseconds();
                    } else {
                        if (LOG.isTraceEnabled())
                            LOG.trace("{} health-check for {}, component continuing failing: {}",
                                    new Object[] {this, entity, getExplanation(state)});
                    }
                }
                if (setEntityOnFireTime == null) {
                    setEntityOnFireTime = now + getConfig(SERVICE_ON_FIRE_STABILIZATION_DELAY).toMilliseconds();
                }
                currentRecoveryStartTime = null;
                publishEntityRecoveredTime = null;

            } else if (state == Lifecycle.RUNNING) {
                if (lastPublished == LastPublished.FAILED) {
                    if (currentRecoveryStartTime == null) {
                        if (LOG.isDebugEnabled())
                            LOG.debug("{} health-check for {}, component now recovering: {}",
                                    new Object[] {this, entity, getExplanation(state)});
                        currentRecoveryStartTime = now;
                        publishEntityRecoveredTime = currentRecoveryStartTime + getConfig(ENTITY_RECOVERED_STABILIZATION_DELAY).toMilliseconds();
                    } else {
                        if (LOG.isTraceEnabled())
                            LOG.trace("{} health-check for {}, component continuing recovering: {}",
                                    new Object[] {this, entity, getExplanation(state)});
                    }
                } else {
                    if (currentFailureStartTime != null) {
                        if (LOG.isDebugEnabled())
                            LOG.debug("{} health-check for {}, component was failing, now healthy: {}",
                                    new Object[] {this, entity, getExplanation(state)});
                    } else {
                        if (LOG.isTraceEnabled())
                            LOG.trace("{} health-check for {}, component still healthy: {}",
                                    new Object[] {this, entity, getExplanation(state)});
                    }
                }
                currentFailureStartTime = null;
                publishEntityFailedTime = null;
                setEntityOnFireTime = null;

            } else {
                if (LOG.isTraceEnabled())
                    LOG.trace("{} health-check for {}, in unconfirmed state: {}",
                            new Object[] {this, entity, getExplanation(state)});
            }

            long recomputeIn = Long.MAX_VALUE; // i.e. whether to call recomputeAfterDelay

            if (publishEntityFailedTime != null) {
                long delayBeforeCheck = publishEntityFailedTime - now;
                if (delayBeforeCheck <= 0) {
                    if (LOG.isDebugEnabled())
                        LOG.debug("{} publishing failed (state={}; currentFailureStartTime={}; now={})",
                                new Object[] {this, state, Time.makeDateString(currentFailureStartTime), Time.makeDateString(now)});
                    publishEntityFailedTime = null;
                    lastPublished = LastPublished.FAILED;
                    entity.emit(HASensors.ENTITY_FAILED, new HASensors.FailureDescriptor(entity, getFailureDescription(now)));
                } else {
                    recomputeIn = Math.min(recomputeIn, delayBeforeCheck);
                }
            } else if (publishEntityRecoveredTime != null) {
                long delayBeforeCheck = publishEntityRecoveredTime - now;
                if (delayBeforeCheck <= 0) {
                    if (LOG.isDebugEnabled())
                        LOG.debug("{} publishing recovered (state={}; currentRecoveryStartTime={}; now={})",
                                new Object[] {this, state, Time.makeDateString(currentRecoveryStartTime), Time.makeDateString(now)});
                    publishEntityRecoveredTime = null;
                    lastPublished = LastPublished.RECOVERED;
                    entity.emit(HASensors.ENTITY_RECOVERED, new HASensors.FailureDescriptor(entity, null));
                } else {
                    recomputeIn = Math.min(recomputeIn, delayBeforeCheck);
                }
            }

            if (setEntityOnFireTime != null) {
                long delayBeforeCheck = setEntityOnFireTime - now;
                if (delayBeforeCheck <= 0) {
                    if (LOG.isDebugEnabled())
                        LOG.debug("{} setting on-fire, now that deferred period has passed (state={})",
                                new Object[] {this, state});
                    setEntityOnFireTime = null;
                    super.setActualState(state);
                } else {
                    recomputeIn = Math.min(recomputeIn, delayBeforeCheck);
                }
            } else {
                super.setActualState(state);
            }

            if (recomputeIn < Long.MAX_VALUE) {
                recomputeAfterDelay(recomputeIn);
            }
        }
    }

    protected String getExplanation(Lifecycle state) {
        Duration serviceFailedStabilizationDelay = getConfig(ENTITY_FAILED_STABILIZATION_DELAY);
        Duration serviceRecoveredStabilizationDelay = getConfig(ENTITY_RECOVERED_STABILIZATION_DELAY);

        return String.format("location=%s; status=%s; lastPublished=%s; timeNow=%s; "
                        + "currentFailurePeriod=%s; currentRecoveryPeriod=%s",
                entity.getLocations(),
                (state != null ? state : "<unreported>"),
                lastPublished,
                Time.makeDateString(System.currentTimeMillis()),
                (currentFailureStartTime != null ? getTimeStringSince(currentFailureStartTime) : "<none>")
                        + " (stabilization " + Time.makeTimeStringRounded(serviceFailedStabilizationDelay) + ")",
                (currentRecoveryStartTime != null ? getTimeStringSince(currentRecoveryStartTime) : "<none>")
                        + " (stabilization " + Time.makeTimeStringRounded(serviceRecoveredStabilizationDelay) + ")");
    }

    private String getFailureDescription(long now) {
        String description = null;
        Map<String, Object> serviceProblems = entity.getAttribute(Attributes.SERVICE_PROBLEMS);
        if (serviceProblems != null && !serviceProblems.isEmpty()) {
            Entry<String, Object> problem = serviceProblems.entrySet().iterator().next();
            description = problem.getKey() + ": " + problem.getValue();
            if (serviceProblems.size() > 1) {
                description = serviceProblems.size() + " service problems, including " + description;
            } else {
                description = "service problem: " + description;
            }
        } else if (Boolean.FALSE.equals(entity.getAttribute(Attributes.SERVICE_UP))) {
            description = "service not up";
        } else {
            description = "service failure detected";
        }
        if (publishEntityFailedTime != null && currentFailureStartTime != null && publishEntityFailedTime > currentFailureStartTime)
            description = description + " (stabilized for " + Duration.of(now - currentFailureStartTime, TimeUnit.MILLISECONDS) + ")";
        return description;
    }

    @SuppressWarnings({"unchecked", "rawtypes"})
    protected void recomputeAfterDelay(long delay) {
        if (isRunning() && executorQueued.compareAndSet(false, true)) {
            long now = System.currentTimeMillis();
            delay = Math.max(0, Math.max(delay, (executorTime + MIN_PERIOD_BETWEEN_EXECS_MILLIS) - now));
            if (LOG.isTraceEnabled()) LOG.trace("{} scheduling publish in {}ms", this, delay);

            Runnable job = new Runnable() {
                @Override
                public void run() {
                    try {
                        executorTime = System.currentTimeMillis();
                        executorQueued.set(false);
                        onEvent(null);
                    } catch (Exception e) {
                        if (isRunning()) {
                            LOG.error("Error in enricher " + this + ": " + e, e);
                        } else {
                            if (LOG.isDebugEnabled())
                                LOG.debug("Error in enricher " + this + " (but no longer running): " + e, e);
                        }
                    } catch (Throwable t) {
                        LOG.error("Error in enricher " + this + ": " + t, t);
                        throw Exceptions.propagate(t);
                    }
                }
            };

            ScheduledTask task = new ScheduledTask(MutableMap.of("delay", Duration.of(delay, TimeUnit.MILLISECONDS)), new BasicTask(job));
            ((EntityInternal) entity).getExecutionContext().submit(task);
        }
    }

    private String getTimeStringSince(Long time) {
        return time == null ? null : Time.makeTimeStringRounded(System.currentTimeMillis() - time);
    }
}
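/*
 * Illustrative sketch only: attaching ServiceFailureDetector to an entity with non-zero
 * stabilization delays, so that brief down-up-down flaps do not immediately emit ENTITY_FAILED.
 * Imports are omitted and the wiring point (a helper method taking the entity) is an assumption
 * for the example; EnricherSpec/addEnricher are the usual Brooklyn idioms for this.
 */
class ServiceFailureDetectorUsageExample {
    static void addFailureDetector(EntityLocal entity) {
        entity.addEnricher(EnricherSpec.create(ServiceFailureDetector.class)
                // require 10s of consistent "down" before emitting ENTITY_FAILED
                .configure(ServiceFailureDetector.ENTITY_FAILED_STABILIZATION_DELAY, Duration.seconds(10))
                // require 10s of consistent "up" before emitting ENTITY_RECOVERED
                .configure(ServiceFailureDetector.ENTITY_RECOVERED_STABILIZATION_DELAY, Duration.seconds(10)));
    }
}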
/**
 * Monitors a given {@link HostAndPort}, to emit HASensors.CONNECTION_FAILED and
 * HASensors.CONNECTION_RECOVERED if the connection is lost/restored.
 */
@Catalog(
        name = "Connection Failure Detector",
        description = "HA policy for monitoring a host:port, emitting an event if the connection is lost/restored")
public class ConnectionFailureDetector extends AbstractPolicy {

    // TODO Remove duplication from ServiceFailureDetector, particularly for the stabilisation delays.

    public enum LastPublished {
        NONE, FAILED, RECOVERED;
    }

    private static final Logger LOG = LoggerFactory.getLogger(ConnectionFailureDetector.class);

    private static final long MIN_PERIOD_BETWEEN_EXECS_MILLIS = 100;

    public static final ConfigKey<HostAndPort> ENDPOINT =
            ConfigKeys.newConfigKey(HostAndPort.class, "connectionFailureDetector.endpoint");

    public static final ConfigKey<Duration> POLL_PERIOD = ConfigKeys.newConfigKey(
            Duration.class, "connectionFailureDetector.pollPeriod",
            "Period between connectivity checks of the endpoint", Duration.ONE_SECOND);

    public static final BasicNotificationSensor<FailureDescriptor> CONNECTION_FAILED = HASensors.CONNECTION_FAILED;

    public static final BasicNotificationSensor<FailureDescriptor> CONNECTION_RECOVERED = HASensors.CONNECTION_RECOVERED;

    @SetFromFlag("connectionFailedStabilizationDelay")
    public static final ConfigKey<Duration> CONNECTION_FAILED_STABILIZATION_DELAY = BasicConfigKey.builder(Duration.class)
            .name("connectionFailureDetector.serviceFailedStabilizationDelay")
            .description("Time period for which the connection must be consistently down "
                    + "(e.g. doesn't report down-up-down) before concluding failure. "
                    + "Note that long TCP timeouts mean there can be long (e.g. 70 second) "
                    + "delays in noticing a connection refused condition.")
            .defaultValue(Duration.ZERO)
            .build();

    @SetFromFlag("connectionRecoveredStabilizationDelay")
    public static final ConfigKey<Duration> CONNECTION_RECOVERED_STABILIZATION_DELAY = BasicConfigKey.builder(Duration.class)
            .name("connectionFailureDetector.serviceRecoveredStabilizationDelay")
            .description("For a failed connection, time period for which the connection must be consistently up "
                    + "(e.g. doesn't report up-down-up) before concluding recovered")
            .defaultValue(Duration.ZERO)
            .build();

    protected final AtomicReference<Long> connectionLastUp = new AtomicReference<Long>();
    protected final AtomicReference<Long> connectionLastDown = new AtomicReference<Long>();

    protected Long currentFailureStartTime = null;
    protected Long currentRecoveryStartTime = null;

    protected LastPublished lastPublished = LastPublished.NONE;

    private final AtomicBoolean executorQueued = new AtomicBoolean(false);
    private volatile long executorTime = 0;

    private Callable<Task<?>> pollingTaskFactory;

    private Task<?> scheduledTask;

    public ConnectionFailureDetector() {
    }

    @Override
    public void init() {
        getRequiredConfig(ENDPOINT); // just to confirm it's set, failing fast

        pollingTaskFactory = new Callable<Task<?>>() {
            @Override
            public Task<?> call() {
                BasicTask<Void> task = new BasicTask<Void>(new Runnable() {
                    @Override
                    public void run() {
                        checkHealth();
                    }
                });
                BrooklynTaskTags.setTransient(task);
                return task;
            }
        };
    }

    @Override
    public void setEntity(EntityLocal entity) {
        super.setEntity(entity);
        if (isRunning()) {
            doStartPolling();
        }
    }

    @Override
    public void suspend() {
        if (scheduledTask != null) scheduledTask.cancel(true);
        super.suspend();
    }

    @Override
    public void resume() {
        currentFailureStartTime = null;
        currentRecoveryStartTime = null;
        lastPublished = LastPublished.NONE;
        executorQueued.set(false);
        executorTime = 0;

        super.resume();
        doStartPolling();
    }

    protected void doStartPolling() {
        if (scheduledTask == null || scheduledTask.isDone()) {
            ScheduledTask task = new ScheduledTask(MutableMap.of("period", getConfig(POLL_PERIOD)), pollingTaskFactory);
            scheduledTask = ((EntityInternal) entity).getExecutionContext().submit(task);
        }
    }

    private Duration getConnectionFailedStabilizationDelay() {
        return getConfig(CONNECTION_FAILED_STABILIZATION_DELAY);
    }

    private Duration getConnectionRecoveredStabilizationDelay() {
        return getConfig(CONNECTION_RECOVERED_STABILIZATION_DELAY);
    }

    private synchronized void checkHealth() {
        CalculatedStatus status = calculateStatus();
        boolean connected = status.connected;
        long now = System.currentTimeMillis();

        if (connected) {
            connectionLastUp.set(now);
            if (lastPublished == LastPublished.FAILED) {
                if (currentRecoveryStartTime == null) {
                    LOG.info("{} connectivity-check for {}, now recovering: {}",
                            new Object[] {this, entity, status.getDescription()});
                    currentRecoveryStartTime = now;
                    schedulePublish();
                } else {
                    if (LOG.isTraceEnabled())
                        LOG.trace("{} connectivity-check for {}, continuing recovering: {}",
                                new Object[] {this, entity, status.getDescription()});
                }
            } else {
                if (currentFailureStartTime != null) {
                    LOG.info("{} connectivity-check for {}, now healthy: {}",
                            new Object[] {this, entity, status.getDescription()});
                    currentFailureStartTime = null;
                } else {
                    if (LOG.isTraceEnabled())
                        LOG.trace("{} connectivity-check for {}, still healthy: {}",
                                new Object[] {this, entity, status.getDescription()});
                }
            }
        } else {
            connectionLastDown.set(now);
            if (lastPublished != LastPublished.FAILED) {
                if (currentFailureStartTime == null) {
                    LOG.info("{} connectivity-check for {}, now failing: {}",
                            new Object[] {this, entity, status.getDescription()});
                    currentFailureStartTime = now;
                    schedulePublish();
                } else {
                    if (LOG.isTraceEnabled())
                        LOG.trace("{} connectivity-check for {}, continuing failing: {}",
                                new Object[] {this, entity, status.getDescription()});
                }
            } else {
                if (currentRecoveryStartTime != null) {
                    LOG.info("{} connectivity-check for {}, was recovering, now failing again: {}",
                            new Object[] {this, entity, status.getDescription()});
                    currentRecoveryStartTime = null;
                } else {
                    if (LOG.isTraceEnabled())
                        LOG.trace("{} connectivity-check for {}, still failed: {}",
                                new Object[] {this, entity, status.getDescription()});
                }
            }
        }
    }

    protected CalculatedStatus calculateStatus() {
        return new CalculatedStatus();
    }

    protected void schedulePublish() {
        schedulePublish(0);
    }

    protected void schedulePublish(long delay) {
        if (isRunning() && executorQueued.compareAndSet(false, true)) {
            long now = System.currentTimeMillis();
            delay = Math.max(0, Math.max(delay, (executorTime + MIN_PERIOD_BETWEEN_EXECS_MILLIS) - now));
            if (LOG.isTraceEnabled()) LOG.trace("{} scheduling publish in {}ms", this, delay);

            Runnable job = new Runnable() {
                @Override
                public void run() {
                    try {
                        executorTime = System.currentTimeMillis();
                        executorQueued.set(false);
                        publishNow();
                    } catch (Exception e) {
                        if (isRunning()) {
                            LOG.error("Problem in connection-failure-detector: " + e, e);
                        } else {
                            if (LOG.isDebugEnabled())
                                LOG.debug("Problem in connection-failure-detector, but no longer running: " + e, e);
                        }
                    } catch (Throwable t) {
                        LOG.error("Problem in connection-failure-detector: " + t, t);
                        throw Exceptions.propagate(t);
                    }
                }
            };

            ScheduledTask task = new ScheduledTask(MutableMap.of("delay", Duration.of(delay, TimeUnit.MILLISECONDS)), new BasicTask(job));
            ((EntityInternal) entity).getExecutionContext().submit(task);
        }
    }

    private synchronized void publishNow() {
        if (!isRunning()) return;

        CalculatedStatus calculatedStatus = calculateStatus();
        boolean connected = calculatedStatus.connected;

        Long lastUpTime = connectionLastUp.get();
        Long lastDownTime = connectionLastDown.get();
        long serviceFailedStabilizationDelay = getConnectionFailedStabilizationDelay().toMilliseconds();
        long serviceRecoveredStabilizationDelay = getConnectionRecoveredStabilizationDelay().toMilliseconds();
        long now = System.currentTimeMillis();

        if (connected) {
            if (lastPublished == LastPublished.FAILED) {
                // only publish if consistently up for serviceRecoveredStabilizationDelay
                long currentRecoveryPeriod = getTimeDiff(now, currentRecoveryStartTime);
                long sinceLastDownPeriod = getTimeDiff(now, lastDownTime);
                if (currentRecoveryPeriod > serviceRecoveredStabilizationDelay
                        && sinceLastDownPeriod > serviceRecoveredStabilizationDelay) {
                    String description = calculatedStatus.getDescription();
                    LOG.warn("{} connectivity-check for {}, publishing recovered: {}",
                            new Object[] {this, entity, description});
                    entity.emit(CONNECTION_RECOVERED, new HASensors.FailureDescriptor(entity, description));
                    lastPublished = LastPublished.RECOVERED;
                    currentFailureStartTime = null;
                } else {
                    long nextAttemptTime = Math.max(
                            serviceRecoveredStabilizationDelay - currentRecoveryPeriod,
                            serviceRecoveredStabilizationDelay - sinceLastDownPeriod);
                    schedulePublish(nextAttemptTime);
                }
            }
        } else {
            if (lastPublished != LastPublished.FAILED) {
                // only publish if consistently down for serviceFailedStabilizationDelay
                long currentFailurePeriod = getTimeDiff(now, currentFailureStartTime);
                long sinceLastUpPeriod = getTimeDiff(now, lastUpTime);
                if (currentFailurePeriod > serviceFailedStabilizationDelay
                        && sinceLastUpPeriod > serviceFailedStabilizationDelay) {
                    String description = calculatedStatus.getDescription();
                    LOG.warn("{} connectivity-check for {}, publishing failed: {}",
                            new Object[] {this, entity, description});
                    entity.emit(CONNECTION_FAILED, new HASensors.FailureDescriptor(entity, description));
                    lastPublished = LastPublished.FAILED;
                    currentRecoveryStartTime = null;
                } else {
                    long nextAttemptTime = Math.max(
                            serviceFailedStabilizationDelay - currentFailurePeriod,
                            serviceFailedStabilizationDelay - sinceLastUpPeriod);
                    schedulePublish(nextAttemptTime);
                }
            }
        }
    }

    public class CalculatedStatus {
        public final boolean connected;

        public CalculatedStatus() {
            HostAndPort endpoint = getConfig(ENDPOINT);
            connected = Networking.isReachable(endpoint);
        }

        public String getDescription() {
            Long lastUpTime = connectionLastUp.get();
            Long lastDownTime = connectionLastDown.get();
            Duration serviceFailedStabilizationDelay = getConnectionFailedStabilizationDelay();
            Duration serviceRecoveredStabilizationDelay = getConnectionRecoveredStabilizationDelay();

            return String.format("endpoint=%s; connected=%s; timeNow=%s; lastUp=%s; lastDown=%s; lastPublished=%s; "
                            + "currentFailurePeriod=%s; currentRecoveryPeriod=%s",
                    getConfig(ENDPOINT),
                    connected,
                    Time.makeDateString(System.currentTimeMillis()),
                    (lastUpTime != null ? Time.makeDateString(lastUpTime) : "<never>"),
                    (lastDownTime != null ? Time.makeDateString(lastDownTime) : "<never>"),
                    lastPublished,
                    (currentFailureStartTime != null ? getTimeStringSince(currentFailureStartTime) : "<none>")
                            + " (stabilization " + Time.makeTimeStringRounded(serviceFailedStabilizationDelay) + ")",
                    (currentRecoveryStartTime != null ? getTimeStringSince(currentRecoveryStartTime) : "<none>")
                            + " (stabilization " + Time.makeTimeStringRounded(serviceRecoveredStabilizationDelay) + ")");
        }
    }

    private long getTimeDiff(Long recent, Long previous) {
        return (previous == null) ? recent : (recent - previous);
    }

    private String getTimeStringSince(Long time) {
        return time == null ? null : Time.makeTimeStringRounded(System.currentTimeMillis() - time);
    }
}
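/*
 * Illustrative sketch only: attaching ConnectionFailureDetector to an entity so that it watches a
 * given host:port. The endpoint and delay values are made-up examples, imports are omitted, and
 * PolicySpec/addPolicy are the usual Brooklyn idioms for this wiring (exact signatures vary a
 * little across Brooklyn versions).
 */
class ConnectionFailureDetectorUsageExample {
    static void watchEndpoint(EntityLocal entity) {
        entity.addPolicy(PolicySpec.create(ConnectionFailureDetector.class)
                // hypothetical endpoint to probe
                .configure(ConnectionFailureDetector.ENDPOINT, HostAndPort.fromParts("db.example.com", 3306))
                // probe every 5 seconds rather than the default of one second
                .configure(ConnectionFailureDetector.POLL_PERIOD, Duration.seconds(5))
                // require 30s of consistent "down" before emitting CONNECTION_FAILED
                .configure(ConnectionFailureDetector.CONNECTION_FAILED_STABILIZATION_DELAY, Duration.seconds(30)));
    }
}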