@Override
    protected boolean doProcess(Record record) {
      Timer.Context timerContext = elapsedTime.time();

      try {
        XContentBuilder documentBuilder = jsonBuilder().startObject();
        Map<String, Collection<Object>> map = record.getFields().asMap();
        for (Map.Entry<String, Collection<Object>> entry : map.entrySet()) {
          String key = entry.getKey();
          for (Object value : entry.getValue()) {
            documentBuilder.field(key, value);
          }
        }
        documentBuilder.endObject();
        loader.addDocument(documentBuilder.bytes(), indexName, indexType, ttl);
      } catch (Exception e) {
        throw new MorphlineRuntimeException(e);
      } finally {
        timerContext.stop();
      }

      // pass record to next command in chain:
      return super.doProcess(record);
    }
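
The snippet above shows the pattern used throughout these examples: obtain a Timer.Context from timer.time() and stop it in a finally block. Timer.Context also implements Closeable, with close() delegating to stop(), so on Java 7+ the same timing can be written with try-with-resources. A minimal sketch, with all names assumed:

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

class TryWithResourcesTiming {
  private static final MetricRegistry registry = new MetricRegistry();
  private static final Timer requests = registry.timer("requests");

  // Context.close() delegates to stop(), so the sample is recorded
  // even if work.run() throws.
  static void timedWork(Runnable work) {
    try (Timer.Context ignored = requests.time()) {
      work.run();
    }
  }
}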
 @Override
 public void run() {
   Timer.Context rollupTimerContext = rollupTimer.time();
   try {
     Rollup.Type rollupComputer =
         RollupRunnable.getRollupComputer(RollupType.BF_BASIC, Granularity.FULL);
     Rollup rollup = rollupComputer.compute(points);
     writer.enqueueRollupForWrite(
         new SingleRollupWriteContext(
             rollup, loc, Granularity.MIN_5, CassandraModel.CF_METRICS_5M, range.getStart()));
     log.info("Calculated and queued rollup for " + loc + " within range " + range);
   } catch (Exception e) {
     // I want to be very harsh with exceptions encountered while validating and computing rollups.
     // Just stop everything.
     log.error("Error encountered while validating and calculating rollups", e);
     rollupValidationAndComputeFailed.inc();
     RollupGenerator.rollupExecutors.shutdownNow();
     OutOFBandRollup.getRollupGeneratorThread().interrupt();
     // Stop the monitoring thread
     OutOFBandRollup.getMonitoringThread().interrupt();
     // Stop the file handler thread pool from sending data to buildstore
     FileHandler.handlerThreadPool.shutdownNow();
     throw new RuntimeException(e);
   } finally {
     rollupTimerContext.stop();
   }
 }
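
A hedged sketch of how the metrics used above might be declared; the registry and metric names are assumptions, not taken from the original class:

import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

// Hypothetical wiring for the fields the snippet relies on.
class RollupMetrics {
  static final MetricRegistry registry = new MetricRegistry();
  static final Timer rollupTimer =
      registry.timer(MetricRegistry.name("rollup", "execute", "duration"));
  static final Counter rollupValidationAndComputeFailed =
      registry.counter(MetricRegistry.name("rollup", "validation-and-compute", "failed"));
}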
  /** Offer the data and wait for the response */
  public QueueEvent offer(byte[] data, long timeout) throws KeeperException, InterruptedException {
    Timer.Context time = stats.time(dir + "_offer");
    try {
      // Create and watch the response node before creating the request node;
      // otherwise we may miss the response.
      String watchID = createResponseNode();

      Object lock = new Object();
      LatchWatcher watcher = new LatchWatcher(lock);
      Stat stat = zookeeper.exists(watchID, watcher, true);

      // create the request node
      createRequestNode(data, watchID);

      synchronized (lock) {
        if (stat != null && watcher.getWatchedEvent() == null) {
          watcher.await(timeout);
        }
      }
      byte[] bytes = zookeeper.getData(watchID, null, null, true);
      // create the event before deleting the node, otherwise we can get the deleted
      // event from the watcher.
      QueueEvent event = new QueueEvent(watchID, bytes, watcher.getWatchedEvent());
      zookeeper.delete(watchID, -1, true);
      return event;
    } finally {
      time.stop();
    }
  }
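
offer() times itself through a stats helper rather than a raw Timer, with one timer per operation name. A minimal sketch of such a helper backed by a MetricRegistry (an illustration, not the actual Stats class used here):

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

// Hypothetical helper: lazily registers one Timer per operation name and
// hands out contexts for the caller to stop.
class Stats {
  private final MetricRegistry registry = new MetricRegistry();

  Timer.Context time(String operation) {
    return registry.timer(operation).time(); // timer(...) is get-or-create
  }
}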
 @Override
 public Allocation allocate(
     ApplicationAttemptId attemptId,
     List<ResourceRequest> resourceRequests,
     List<ContainerId> containerIds,
     List<String> strings,
     List<String> strings2) {
   if (metricsON) {
     final Timer.Context context = schedulerAllocateTimer.time();
     Allocation allocation = null;
     try {
       allocation =
           scheduler.allocate(attemptId, resourceRequests, containerIds, strings, strings2);
       return allocation;
     } finally {
       context.stop();
       schedulerAllocateCounter.inc();
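        // Note: allocation is still null here if scheduler.allocate(...) threw,
        // so updateQueueWithAllocateRequest below must tolerate a null allocation.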
       try {
         updateQueueWithAllocateRequest(allocation, attemptId, resourceRequests, containerIds);
       } catch (IOException e) {
         e.printStackTrace();
       }
     }
   } else {
     return scheduler.allocate(attemptId, resourceRequests, containerIds, strings, strings2);
   }
 }
  private void sendMessage(byte[] encodedMessage) {
    Timer.Context sendMessageTimer = messageSendingTimer.time();
    try {
      messageProducer.produce(encodedMessage);
    } finally {
      // stop the timer even if produce() throws
      sendMessageTimer.stop();
    }

    if (log.isDebugEnabled()) {
      log.debug("Completed send of message: " + new String(encodedMessage));
    }
  }
 @Override
 public void filter(
     ContainerRequestContext requestContext, ContainerResponseContext responseContext)
     throws IOException {
   Timer.Context context = (Timer.Context) requestContext.getProperty("metrics.timeContext");
   if (context != null) {
     context.stop();
   }
 }
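
This response filter assumes a matching request filter that started the timer and stored its context under the "metrics.timeContext" property. A minimal sketch of that request-side filter; the class name and the injected timer are assumptions:

import java.io.IOException;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import com.codahale.metrics.Timer;

// Hypothetical request-side filter paired with the response filter above.
public class MetricsRequestFilter implements ContainerRequestFilter {
  private final Timer requestTimer;

  public MetricsRequestFilter(Timer requestTimer) {
    this.requestTimer = requestTimer;
  }

  @Override
  public void filter(ContainerRequestContext requestContext) throws IOException {
    // Start timing and stash the context; the response filter stops it.
    requestContext.setProperty("metrics.timeContext", requestTimer.time());
  }
}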
  private byte[] encodeMessage(Tx tx, Op op) {
    Timer.Context encodingTimer = messageEncodingTimer.time();
    final byte[] encodedMessage;
    try {
      encodedMessage = messageEncoder.encode(tx, op);
    } finally {
      // stop the timer even if encode() throws
      encodingTimer.stop();
    }

    if (log.isTraceEnabled()) {
      log.trace("Result of message encoding is = " + new String(encodedMessage));
    }

    return encodedMessage;
  }
  /** Take messages from SQS */
  private List<QueueMessage> take() {

    final Timer.Context timer = this.readTimer.time();

    try {
      return queue.getMessages(MAX_TAKE, AsyncEvent.class);
    } finally {
      // stop our timer
      timer.stop();
    }
  }
  private void offerBatch(final List operations) {
    final Timer.Context timer = this.writeTimer.time();

    try {
      // signal to SQS
      this.queue.sendMessages(operations);
    } catch (IOException e) {
      throw new RuntimeException("Unable to queue message", e);
    } finally {
      timer.stop();
    }
  }
  private void offerTopic(final Serializable operation) {
    final Timer.Context timer = this.writeTimer.time();

    try {
      // signal to SQS
      this.queue.sendMessageToTopic(operation);
    } catch (IOException e) {
      throw new RuntimeException("Unable to queue message", e);
    } finally {
      timer.stop();
    }
  }
 public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
     throws IOException, ServletException {
   final StatusExposingServletResponse wrappedResponse =
       new StatusExposingServletResponse((HttpServletResponse) response);
   activeRequests.inc();
   final Timer.Context context = requestTimer.time();
   try {
     chain.doFilter(request, wrappedResponse);
   } finally {
     context.stop();
     activeRequests.dec();
     markMeterForStatusCode(wrappedResponse.getStatus());
   }
 }
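
markMeterForStatusCode is not shown; a hedged sketch of one plausible implementation, with the set of tracked status codes assumed:

import java.util.HashMap;
import java.util.Map;
import com.codahale.metrics.Meter;

// Hypothetical status-code meters: one per tracked code, plus a catch-all.
class StatusCodeMeters {
  private final Map<Integer, Meter> metersByStatusCode = new HashMap<>();
  private final Meter otherMeter = new Meter();

  void markMeterForStatusCode(int status) {
    Meter metric = metersByStatusCode.get(status);
    if (metric != null) {
      metric.mark();
    } else {
      otherMeter.mark();
    }
  }
}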
  private Status processOperation(Tx tx, Op op) {
    Timer.Context timer = operationProcessingTimer.time();
    Status status = Status.OK;

    try {
      encodeAndSend(tx, op);
    } catch (RuntimeException re) {
      operationProcessingErrorMeter.mark();
      log.error("Error processing operation: " + op.toString(), re);
      status = Status.ABEND;
    } finally {
      // stop the timer even if something other than RuntimeException propagates
      timer.stop();
    }

    return status;
  }
  @Test
  public void testTimer() {
    System.out.println("******************************* TIMER *******************************");
    timer = registry.timer("timer");
    try {
      for (int i = 0; i < ITER_COUNT; i++) {
        final Timer.Context context = timer.time();
        Thread.sleep(SLEEP_MS);
        context.stop();
      }

    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }
  }
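
The test above only drives the timer. A hedged follow-up that also asserts on what was recorded, assuming the loop ran to completion and JUnit assertions are in scope:

    // Hypothetical assertions appended after the loop.
    assertEquals(ITER_COUNT, timer.getCount());
    // Each sample slept at least SLEEP_MS, so the mean, reported in
    // nanoseconds, should be at least that long.
    assertTrue(timer.getSnapshot().getMean() >= SLEEP_MS * 1_000_000.0);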
 /**
  * Check to see if a plaintext input matches a hash
  *
  * @param input the input
  * @param hashed the hash
  * @return <code>true</code> if it matches, <code>false</code> if not
  */
 public static boolean matches(final String input, final String hashed) {
   checkNotNull(hashed, "Cannot compare NULL");
   LOOKUPS.mark();
   final Timer.Context context = GETS.time();
   try {
     boolean result = false;
     try {
       result = CACHE.get(new TwoTuple<>(input, hashed));
     } catch (ExecutionException e) {
       LOGGER.error("Failed to hash input password", e);
     }
     return result;
   } finally {
     context.stop();
   }
 }
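
CACHE, GETS, and LOOKUPS are fields of the enclosing class. A minimal sketch of how the cache might be wired with Guava; the maximum size, the checkMatch helper, and the TwoTuple accessors are all assumptions:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

// Hypothetical cache wiring; first()/second() accessors are assumed for TwoTuple.
private static final LoadingCache<TwoTuple<String, String>, Boolean> CACHE =
    CacheBuilder.newBuilder()
        .maximumSize(10_000)
        .build(
            new CacheLoader<TwoTuple<String, String>, Boolean>() {
              @Override
              public Boolean load(TwoTuple<String, String> key) {
                // The expensive comparison (e.g. bcrypt) runs only on a cache miss.
                return checkMatch(key.first(), key.second());
              }
            });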
  /** Ack messages in SQS */
  public void ack(final List<QueueMessage> messages) {

    final Timer.Context timer = this.ackTimer.time();

    try {
      queue.commitMessages(messages);

      // decrement our in-flight counter
      inFlight.decrementAndGet();

    } catch (Exception e) {
      throw new RuntimeException("Unable to ack messages", e);
    } finally {
      timer.stop();
    }
  }
 public void printMetrics() {
   logger.info("Metrics");
   Metrics metrics = session.getCluster().getMetrics();
   Gauge<Integer> gauge = metrics.getConnectedToHosts();
   Integer numberOfHosts = gauge.getValue();
   logger.info("Number of hosts: " + numberOfHosts);
   Metrics.Errors errors = metrics.getErrorMetrics();
   Counter counter = errors.getReadTimeouts();
   logger.info("Number of read timeouts:" + counter.getCount());
   com.codahale.metrics.Timer timer = metrics.getRequestsTimer();
   Timer.Context context = timer.time();
   try {
     long numberUserRequests = timer.getCount();
     logger.info("Number of user requests:" + numberUserRequests);
   } finally {
     context.stop();
   }
 }
  @Override
  public ServiceResults postCollection(ServiceContext context) throws Exception {
    logger.info("NotificationService: start request.");
    Timer.Context timer = postTimer.time();
    postMeter.mark();
    try {
      validate(null, context.getPayload());
      Notification.PathTokens pathTokens =
          getPathTokens(context.getRequest().getOriginalParameters());
      context.getProperties().put("state", Notification.State.CREATED);
      context.getProperties().put("pathQuery", pathTokens);
      context.setOwner(sm.getApplication());
      ServiceResults results = super.postCollection(context);
      Notification notification = (Notification) results.getEntity();

      // update Notification properties
      if (notification.getStarted() == null || notification.getStarted() == 0) {
        long now = System.currentTimeMillis();
        notification.setStarted(now);
        Map<String, Object> properties = new HashMap<String, Object>(2);
        properties.put("started", notification.getStarted());
        properties.put("state", notification.getState());
        notification.addProperties(properties);
        logger.info(
            "ApplicationQueueMessage: notification {} properties updated in duration {} ms",
            notification.getUuid(),
            System.currentTimeMillis() - now);
      }

      long now = System.currentTimeMillis();
      notificationQueueManager.queueNotification(notification, null);
      logger.info(
          "NotificationService: notification {} post queue duration {} ms ",
          notification.getUuid(),
          System.currentTimeMillis() - now);
      // future: somehow return 202?
      return results;
    } catch (Exception e) {
      logger.error("serialization failed", e);
      throw e;
    } finally {
      timer.stop();
    }
  }
  @Test
  @SuppressWarnings("squid:S2925")
  public void testTimer() throws Exception {
    final Timer timer = registry.timer(name("foo", "bar"));
    final Timer.Context timerContext = timer.time();
    Thread.sleep(200);
    timerContext.stop();
    reportAndRefresh();

    SearchResponse searchResponse =
        client().prepareSearch(indexWithDate).setTypes("timer").execute().actionGet();
    org.assertj.core.api.Assertions.assertThat(searchResponse.getHits().totalHits()).isEqualTo(1L);

    Map<String, Object> hit = searchResponse.getHits().getAt(0).sourceAsMap();
    assertTimestamp(hit);
    assertKey(hit, "name", prefix + ".foo.bar");
    assertKey(hit, "count", 1);
    assertKey(hit, "host", "localhost");
  }
  protected ExecuteResult flushBufferedDocs(DocBuffer b) {
    int numDocsInBatch = b.buffer.size();
    if (numDocsInBatch == 0) {
      b.reset();
      return ExecuteResult.ACK;
    }

    Timer.Context timer = (sendBatchToSolr != null) ? sendBatchToSolr.time() : null;
    try {
      sendBatchToSolr(b);
    } finally {
      if (timer != null) timer.stop();

      if (indexedCounter != null) indexedCounter.inc(numDocsInBatch);

      b.reset();
    }

    return ExecuteResult.ACK;
  }
  public List<QueueEvent> peekTopN(int n, Predicate<String> excludeSet, long waitMillis)
      throws KeeperException, InterruptedException {
    ArrayList<QueueEvent> topN = new ArrayList<>();

    LOG.debug("Peeking for top {} elements. ExcludeSet: {}", n, excludeSet);
    Timer.Context time;
    if (waitMillis == Long.MAX_VALUE) time = stats.time(dir + "_peekTopN_wait_forever");
    else time = stats.time(dir + "_peekTopN_wait" + waitMillis);

    try {
      for (Pair<String, byte[]> element :
          peekElements(n, waitMillis, child -> !excludeSet.test(dir + "/" + child))) {
        topN.add(new QueueEvent(dir + "/" + element.first(), element.second(), null));
      }
      printQueueEventsListElementIds(topN);
      return topN;
    } finally {
      time.stop();
    }
  }
 /** Remove the event and save the response into the other path. */
 public void remove(QueueEvent event) throws KeeperException, InterruptedException {
   Timer.Context time = stats.time(dir + "_remove_event");
   try {
     String path = event.getId();
     String responsePath = dir + "/" + response_prefix + path.substring(path.lastIndexOf("-") + 1);
     if (zookeeper.exists(responsePath, true)) {
       zookeeper.setData(responsePath, event.getBytes(), true);
     } else {
       LOG.info(
           "Response ZK path: "
               + responsePath
               + " doesn't exist."
               + "  Requestor may have disconnected from ZooKeeper");
     }
     try {
       zookeeper.delete(path, -1, true);
     } catch (KeeperException.NoNodeException ignored) {
     }
   } finally {
     time.stop();
   }
 }
 @Override
 public final AttributeSet resolve(ResolverContext context) throws Exception {
   checkArgument(context.getDescriptor().getId().equals(descriptor.getId()));
   if (log.isDebugEnabled()) {
     log.debug(
         "Retrieving attributes via resolver id=\"{}\" name=\"{}\"",
         descriptor.getId(),
         descriptor.getName());
   }
   Timer.Context timerCtx = timer.time();
   try {
     return AttributeSet.builder(descriptor)
         .attributes(doResolve(context))
         .ticker(context.getTicker())
         .build();
   } catch (Exception e) {
     if (log.isDebugEnabled()) {
       log.debug(e.getMessage(), e);
     }
     throw e;
   } finally {
     histogram.update(timerCtx.stop());
   }
 }
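
Unlike most snippets here, this one uses the return value of stop(): Timer.Context.stop() returns the elapsed time in nanoseconds, which is what lets it feed a Histogram directly. A hedged sketch of the assumed field wiring; names follow the snippet, the registry and metric names are assumptions:

import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

// Hypothetical metric fields for the resolver above.
private final MetricRegistry registry = new MetricRegistry();
private final Timer timer = registry.timer("resolver.duration");
private final Histogram histogram = registry.histogram("resolver.elapsed-nanos");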
  private ExtensionResponse tryExecuteGremlinScript(
      final RexsterResourceContext rexsterResourceContext,
      final Graph graph,
      final Vertex vertex,
      final Edge edge,
      final String script) {

    final MetricRegistry metricRegistry = rexsterResourceContext.getMetricRegistry();
    final Timer scriptTimer = metricRegistry.timer(MetricRegistry.name("http", "script-engine"));
    final Counter successfulExecutions =
        metricRegistry.counter(MetricRegistry.name("http", "script-engine", "success"));
    final Counter failedExecutions =
        metricRegistry.counter(MetricRegistry.name("http", "script-engine", "fail"));

    ExtensionResponse extensionResponse;

    final JSONObject requestObject = rexsterResourceContext.getRequestObject();

    // can't initialize this statically because the configure() method won't get called before it.
    // need to think a bit about how best to initialize the controller.
    final EngineController engineController = EngineController.getInstance();

    final boolean showTypes = RequestObjectHelper.getShowTypes(requestObject);
    final long offsetStart = RequestObjectHelper.getStartOffset(requestObject);
    final long offsetEnd = RequestObjectHelper.getEndOffset(requestObject);

    final GraphSONMode mode = showTypes ? GraphSONMode.EXTENDED : GraphSONMode.NORMAL;
    final Set<String> returnKeys = RequestObjectHelper.getReturnKeys(requestObject, WILDCARD);

    final String languageToExecuteWith = getLanguageToExecuteWith(requestObject);
    final EngineHolder engineHolder;
    final ScriptEngine scriptEngine;
    try {
      if (!engineController.isEngineAvailable(languageToExecuteWith)) {
        return ExtensionResponse.error("language requested is not available on the server");
      }

      engineHolder = engineController.getEngineByLanguageName(languageToExecuteWith);
      scriptEngine = engineHolder.getEngine();
    } catch (ScriptException se) {
      return ExtensionResponse.error("could not get request script engine");
    }

    final Bindings bindings = createBindings(graph, vertex, edge, scriptEngine);

    // add all keys not defined by this request as bindings to the script engine
    placeParametersOnBinding(requestObject, bindings, showTypes);

    // get the list of "stored procedures" to run
    final RexsterApplicationGraph rag = rexsterResourceContext.getRexsterApplicationGraph();

    final ExtensionMethod extensionMethod = rexsterResourceContext.getExtensionMethod();
    Map configurationMap = null;

    Iterator<String> scriptsToRun = null;
    try {
      final ExtensionConfiguration extensionConfiguration =
          rag != null ? rag.findExtensionConfiguration(EXTENSION_NAMESPACE, EXTENSION_NAME) : null;
      if (extensionConfiguration != null) {
        configurationMap = extensionConfiguration.tryGetMapFromConfiguration();
        scriptsToRun = getScriptsToRun(requestObject, configurationMap);
      }
    } catch (IOException ioe) {
      return ExtensionResponse.error(
          ioe, generateErrorJson(extensionMethod.getExtensionApiAsJson()));
    }

    if ((script == null || script.isEmpty()) && scriptsToRun == null) {
      return ExtensionResponse.badRequest(
          "no scripts provided", generateErrorJson(extensionMethod.getExtensionApiAsJson()));
    }

    final Timer.Context context = scriptTimer.time();
    try {
      // result is either the ad-hoc script on the query string or the last "stored procedure"
      Object result = null;
      if (scriptsToRun != null) {
        while (scriptsToRun.hasNext()) {
          result = engineHolder.getEngine().eval(scriptsToRun.next(), bindings);
        }
      }

      if (isClientScriptAllowed(configurationMap) && script != null && !script.isEmpty()) {
        result = engineHolder.getEngine().eval(script, bindings);
      }

      final JSONArray results =
          new JSONResultConverter(mode, offsetStart, offsetEnd, returnKeys).convert(result);

      final HashMap<String, Object> resultMap = new HashMap<String, Object>();
      resultMap.put(Tokens.SUCCESS, true);
      resultMap.put(Tokens.RESULTS, results);

      final JSONObject resultObject = new JSONObject(resultMap);
      extensionResponse = ExtensionResponse.ok(resultObject);

      successfulExecutions.inc();

    } catch (Exception e) {
      logger.error(String.format("Gremlin Extension: %s", e.getMessage()), e);
      extensionResponse =
          ExtensionResponse.error(e, generateErrorJson(extensionMethod.getExtensionApiAsJson()));

      failedExecutions.inc();
    } finally {
      context.stop();
    }

    return extensionResponse;
  }
  @Override
  public void handle(SchedulerEvent schedulerEvent) {
    // metrics off
    if (!metricsON) {
      scheduler.handle(schedulerEvent);
      return;
    }
    if (!running) running = true;

    // metrics on
    Timer.Context handlerTimer = null;
    Timer.Context operationTimer = null;

    NodeUpdateSchedulerEventWrapper eventWrapper;
    try {
      if (schedulerEvent.getType() == SchedulerEventType.NODE_UPDATE
          && schedulerEvent instanceof NodeUpdateSchedulerEvent) {
        eventWrapper =
            new NodeUpdateSchedulerEventWrapper((NodeUpdateSchedulerEvent) schedulerEvent);
        schedulerEvent = eventWrapper;
        updateQueueWithNodeUpdate(eventWrapper);
      } else if (schedulerEvent.getType() == SchedulerEventType.APP_ATTEMPT_REMOVED
          && schedulerEvent instanceof AppAttemptRemovedSchedulerEvent) {
        // check if having AM Container, update resource usage information
        AppAttemptRemovedSchedulerEvent appRemoveEvent =
            (AppAttemptRemovedSchedulerEvent) schedulerEvent;
        ApplicationAttemptId appAttemptId = appRemoveEvent.getApplicationAttemptID();
        String queue = appQueueMap.get(appAttemptId.getApplicationId());
        SchedulerAppReport app = scheduler.getSchedulerAppInfo(appAttemptId);
        if (!app.getLiveContainers().isEmpty()) { // the app has 0 or 1 live containers
          // the one live container should be the AM container
          RMContainer rmc = app.getLiveContainers().iterator().next();
          updateQueueMetrics(
              queue,
              rmc.getContainer().getResource().getMemory(),
              rmc.getContainer().getResource().getVirtualCores());
        }
      }

      handlerTimer = schedulerHandleTimer.time();
      operationTimer = schedulerHandleTimerMap.get(schedulerEvent.getType()).time();

      scheduler.handle(schedulerEvent);
    } finally {
      if (handlerTimer != null) handlerTimer.stop();
      if (operationTimer != null) operationTimer.stop();
      schedulerHandleCounter.inc();
      schedulerHandleCounterMap.get(schedulerEvent.getType()).inc();

      if (schedulerEvent.getType() == SchedulerEventType.APP_REMOVED
          && schedulerEvent instanceof AppRemovedSchedulerEvent) {
        SLSRunner.decreaseRemainingApps();
        AppRemovedSchedulerEvent appRemoveEvent = (AppRemovedSchedulerEvent) schedulerEvent;
        appQueueMap.remove(appRemoveEvent.getApplicationID());
      } else if (schedulerEvent.getType() == SchedulerEventType.APP_ADDED
          && schedulerEvent instanceof AppAddedSchedulerEvent) {
        AppAddedSchedulerEvent appAddEvent = (AppAddedSchedulerEvent) schedulerEvent;
        String queueName = appAddEvent.getQueue();
        appQueueMap.put(appAddEvent.getApplicationId(), queueName);
      }
    }
  }
  @Override
  public void handle(ChannelHandlerContext ctx, HttpRequest request) {

    Tracker.getInstance().track(request);

    final String tenantId = request.getHeader("tenantId");

    if (!(request instanceof HTTPRequestWithDecodedQueryParams)) {
      sendResponse(
          ctx, request, "Missing query params: from, to, points", HttpResponseStatus.BAD_REQUEST);
      return;
    }

    final String body = request.getContent().toString(Constants.DEFAULT_CHARSET);

    if (body == null || body.isEmpty()) {
      sendResponse(
          ctx,
          request,
          "Invalid body. Expected JSON array of metrics.",
          HttpResponseStatus.BAD_REQUEST);
      return;
    }

    List<String> locators = new ArrayList<String>();
    try {
      locators.addAll(getLocatorsFromJSONBody(tenantId, body));
    } catch (Exception ex) {
      log.debug(ex.getMessage(), ex);
      sendResponse(ctx, request, ex.getMessage(), HttpResponseStatus.BAD_REQUEST);
      return;
    }

    if (locators.size() > maxMetricsPerRequest) {
      sendResponse(
          ctx,
          request,
          "Too many metrics fetch in a single call. Max limit is " + maxMetricsPerRequest + ".",
          HttpResponseStatus.BAD_REQUEST);
      return;
    }

    HTTPRequestWithDecodedQueryParams requestWithParams =
        (HTTPRequestWithDecodedQueryParams) request;
    final Timer.Context httpBatchMetricsFetchTimerContext = httpBatchMetricsFetchTimer.time();
    try {
      RollupsQueryParams params = PlotRequestParser.parseParams(requestWithParams.getQueryParams());
      Map<Locator, MetricData> results =
          getRollupByGranularity(
              tenantId,
              locators,
              params.getRange().getStart(),
              params.getRange().getStop(),
              params.getGranularity(tenantId));
      JSONObject metrics = serializer.transformRollupData(results, params.getStats());
      final JsonElement element = parser.parse(metrics.toString());
      final String jsonStringRep = gson.toJson(element);
      sendResponse(ctx, request, jsonStringRep, HttpResponseStatus.OK);
    } catch (InvalidRequestException e) {
      log.debug(e.getMessage());
      sendResponse(ctx, request, e.getMessage(), HttpResponseStatus.BAD_REQUEST);
    } catch (SerializationException e) {
      log.debug(e.getMessage(), e);
      sendResponse(ctx, request, e.getMessage(), HttpResponseStatus.INTERNAL_SERVER_ERROR);
    } catch (Exception e) {
      log.error(e.getMessage(), e);
      sendResponse(ctx, request, e.getMessage(), HttpResponseStatus.INTERNAL_SERVER_ERROR);
    } finally {
      httpBatchMetricsFetchTimerContext.stop();
    }
  }
  @Override
  public void run() {
    int operationIndex = -1;

    try (JBossASClient client = new JBossASClient(mccFactory.createClient())) {

      for (ModelNode operation : this.operations) {
        operationIndex++; // move to the one we are working on - this is important in the catch
                          // block

        // execute request
        final Timer.Context requestContext = diagnostics.getDMRRequestTimer().time();
        final ModelNode response = client.execute(operation);
        final long durationNanos = requestContext.stop();
        final long durationMs = TimeUnit.MILLISECONDS.convert(durationNanos, TimeUnit.NANOSECONDS);

        final AvailDMRTask task = (AvailDMRTask) group.getTask(operationIndex);

        if (JBossASClient.isSuccess(response)) {

          if (durationMs > group.getInterval().millis()) {
            diagnostics.getDMRDelayedRate().mark(1);
          }

          // deconstruct model node
          Avail avail;
          if (task.getAttribute() == null) {
            // step operation didn't read any attribute, it just read the resource to see if it
            // exists
            avail = Avail.UP;
          } else {
            // operation read attribute; need to see what avail that attrib value corresponds to
            final ModelNode result = JBossASClient.getResults(response);
            if (result.getType() != ModelType.UNDEFINED) {
              if (result.getType() == ModelType.LIST) {
                // an avail request that asked to aggregate avail across potentially
                // multiple resources
                LOG.tracef("Task [%s] resulted in aggregated avail: %s", task, result);
                Avail aggregate = null;
                List<ModelNode> listNodes = result.asList();
                for (ModelNode listNode : listNodes) {
                  if (JBossASClient.isSuccess(listNode)) {
                    avail = getAvailFromResponse(listNode, task);
                    // If we don't know the avail yet, set it to the first avail result we get.
                    // Otherwise, if the aggregate is down, it stays down (we don't have the
                    // concept of MIXED). The aggregate stays as it was unless the new avail
                    // is down in which case aggregate goes down.
                    if (aggregate == null) {
                      aggregate = avail;
                    } else {
                      aggregate = (avail == Avail.DOWN) ? Avail.DOWN : aggregate;
                    }
                  } else {
                    // a resource failed to report avail but keep going and aggregate the others
                    this.diagnostics.getDMRErrorRate().mark(1);
                    LOG.debugf(
                        "Failed to fully aggregate avail for task [%s]: %s ", task, listNode);
                  }
                }
                completionHandler.onCompleted(new AvailDataPoint(task, aggregate));
              } else {
                avail = getAvailFromResponse(response, task);
                completionHandler.onCompleted(new AvailDataPoint(task, avail));
              }
            }
          }

        } else {
          if (task.getAttribute() == null) {
            // operation didn't read any attribute, it just read the resource to see if it exists
            completionHandler.onCompleted(new AvailDataPoint(task, Avail.DOWN));
          } else {
            this.diagnostics.getDMRErrorRate().mark(1);
            String err = JBossASClient.getFailureDescription(response);
            completionHandler.onFailed(new RuntimeException(err));

            // we are going to artificially mark the availabilities UNKNOWN since we
            // really don't know
            completionHandler.onCompleted(new AvailDataPoint(task, Avail.UNKNOWN));
          }
        }
      }
    } catch (Throwable e) {
      this.diagnostics.getDMRErrorRate().mark(1);
      completionHandler.onFailed(e);
      // we are going to artificially mark the availabilities UNKNOWN since we really don't know;
      // only mark the ones we didn't get to yet and the one we are currently working on.
      // operationIndex is -1 if we failed before the first operation, so start at 0 in that case
      for (int i = Math.max(0, operationIndex); i < this.group.size(); i++) {
        Task task = this.group.getTask(i);
        completionHandler.onCompleted(new AvailDataPoint(task, Avail.UNKNOWN));
      }
    }
  }