Example #1
  @Override
  public Future<Long> write(ByteBuffer src, Callback<Long> callback) {
    long writeProcessingStartTime = System.currentTimeMillis();
    if (!responseMetadataWritten.get()) {
      maybeWriteResponseMetadata(responseMetadata, responseMetadataWriteListener);
    }
    Chunk chunk = new Chunk(src, callback);
    chunksToWriteCount.incrementAndGet();
    chunksToWrite.add(chunk);
    if (!isOpen()) {
      // The isOpen() check is done after adding to the queue because chunks need to be
      // acknowledged in the order they were received. If the chunk were not queued before
      // cleanup, chunks could be acknowledged out of order.
      logger.debug("Scheduling a chunk cleanup on channel {}", ctx.channel());
      writeFuture.addListener(cleanupCallback);
    } else {
      chunkedWriteHandler.resumeTransfer();
    }

    long writeProcessingTime = System.currentTimeMillis() - writeProcessingStartTime;
    nettyMetrics.writeProcessingTimeInMs.update(writeProcessingTime);
    if (request != null) {
      request
          .getMetricsTracker()
          .nioMetricsTracker
          .addToResponseProcessingTime(writeProcessingTime);
    }
    return chunk.future;
  }
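
The method above is the asynchronous write entry point: the caller hands over a ByteBuffer and is notified through both the returned Future and the supplied Callback once the chunk has been written out. A minimal caller-side sketch follows; it assumes Callback is a functional interface with a single onCompletion(result, exception) method, and responseChannel is an instance of the class above (both names are illustrative, not taken from the example).

  // Hypothetical caller of write(ByteBuffer, Callback<Long>); assumes Callback declares a
  // single onCompletion(Long result, Exception exception) method (not shown in the example).
  ByteBuffer payload = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
  Future<Long> ackFuture =
      responseChannel.write(payload, (bytesWritten, exception) -> {
        if (exception != null) {
          logger.error("Chunk write failed", exception);
        } else {
          logger.trace("Chunk of {} bytes acknowledged", bytesWritten);
        }
      });
  long totalBytes = ackFuture.get(); // block only if the caller needs the count synchronously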
Example #2
 /**
  * Completes the request by closing the request and, if {@code closeNetworkChannel} is {@code
  * true}, the network channel. The channel may also be closed if a close is being forced
  * internally (i.e. if {@link #close()} was called).
  *
  * @param closeNetworkChannel network channel is closed if {@code true}.
  */
 private void completeRequest(boolean closeNetworkChannel) {
   if ((closeNetworkChannel || forceClose) && ctx.channel().isOpen()) {
     writeFuture.addListener(ChannelFutureListener.CLOSE);
     logger.trace("Requested closing of channel {}", ctx.channel());
   }
   closeRequest();
   responseComplete = true;
 }
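
The close-after-flush idiom used here is standard Netty: registering ChannelFutureListener.CLOSE on the write future closes the channel only once the pending write has completed, instead of tearing it down immediately. A minimal sketch of the idiom (lastChunk is a placeholder payload):

  // Close the channel only after the final write has been flushed.
  ChannelFuture lastWrite = ctx.writeAndFlush(lastChunk);
  lastWrite.addListener(ChannelFutureListener.CLOSE);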
Example #3
 /**
  * Performs a query search and writes the results to a data channel. This function does not abort
  * execution of the query if the client channel gets closed.
  *
  * @param query A Query object that contains the path or paths of the root query.
  * @param result A DataChannelOutput to which the result will be written. In practice, this is the
  *     head QueryOpProcessor representing the first operator in the query; each processor sends
  *     its output to the next, and the last sends its output to a DataChannelOutput that returns
  *     bytes to meshy, usually defined on the MQSource side of the code.
  * @param queryPromise A wrapper around a boolean flag that is set to true by MQSource if the user
  *     cancels the query on the MQMaster side.
  */
 public void search(Query query, DataChannelOutput result, ChannelProgressivePromise queryPromise)
     throws QueryException {
   for (QueryElement[] path : query.getQueryPaths()) {
     if (!(queryPromise.isDone())) {
       search(path, result, queryPromise);
     }
   }
 }
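
Cancellation here is cooperative: the promise is completed (or cancelled) on one side and merely observed on the other, so an in-flight path search is allowed to finish while no new path is started. A rough sketch of that handshake, with illustrative names:

  // Illustrative cancellation handshake around a ChannelProgressivePromise.
  ChannelProgressivePromise queryPromise = channel.newProgressivePromise();

  // Cancelling side (e.g. the user aborts the query):
  queryPromise.cancel(false);

  // Worker side, before each unit of work:
  if (queryPromise.isDone()) {
    return; // skip remaining query paths; work already in flight is not interrupted
  }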
Example #4
 @Override
 public void onResponseComplete(Exception exception) {
   long responseCompleteStartTime = System.currentTimeMillis();
   try {
     if (responseCompleteCalled.compareAndSet(false, true)) {
       logger.trace("Finished responding to current request on channel {}", ctx.channel());
       nettyMetrics.requestCompletionRate.mark();
       if (exception == null) {
         if (!maybeWriteResponseMetadata(responseMetadata, responseMetadataWriteListener)) {
           // There were other writes. Let ChunkedWriteHandler finish if it has been kicked off.
           chunkedWriteHandler.resumeTransfer();
         }
       } else {
         log(exception);
         if (request != null) {
           request.getMetricsTracker().markFailure();
         }
         // need to set writeFuture as failed in case writes have started or chunks have been
         // queued.
         if (!writeFuture.isDone()) {
           writeFuture.setFailure(exception);
         }
         if (!maybeSendErrorResponse(exception)) {
           completeRequest(true);
         }
       }
     } else if (exception != null) {
       // this is probably an attempt to force close the channel *after* the response is already
       // complete.
       log(exception);
       if (!writeFuture.isDone()) {
         writeFuture.setFailure(exception);
       }
       completeRequest(true);
     }
     long responseFinishProcessingTime = System.currentTimeMillis() - responseCompleteStartTime;
     nettyMetrics.responseFinishProcessingTimeInMs.update(responseFinishProcessingTime);
     if (request != null) {
       request
           .getMetricsTracker()
           .nioMetricsTracker
           .addToResponseProcessingTime(responseFinishProcessingTime);
     }
   } catch (Exception e) {
     logger.error("Swallowing exception encountered during onResponseComplete tasks", e);
     nettyMetrics.responseCompleteTasksError.inc();
      if (!writeFuture.isDone()) {
        // Prefer the original exception if present; fall back to the one just caught so that
        // setFailure() never receives null.
        writeFuture.setFailure(exception != null ? exception : e);
      }
     completeRequest(true);
   }
 }
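
The compareAndSet guard at the top ensures the completion work runs at most once even if onResponseComplete is invoked from several places; later calls only reach the exception-handling branch. The guard pattern in isolation, as a minimal sketch:

  // Minimal sketch of the run-once guard used above.
  private final AtomicBoolean responseCompleteCalled = new AtomicBoolean(false);

  void onComplete() {
    if (responseCompleteCalled.compareAndSet(false, true)) {
      // First caller wins and performs the completion work exactly once.
    }
    // Subsequent callers fall through without repeating the work.
  }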
Example #5
  /** see above. */
  private void tableSearch(
      LinkedList<DataTreeNode> stack,
      FieldValueList prefix,
      QueryElement[] path,
      int pathIndex,
      DataChannelOutput sink,
      int collect,
      ChannelProgressivePromise queryPromise)
      throws QueryException {
    if (queryPromise.isDone()) {
      log.debug("Query promise completed during processing");
      if (queryPromise.isCancelled()) {
        throw (CancellationException) queryPromise.cause();
      }
      throw new QueryException("Query closed during processing");
    }

    DataTreeNode root = stack != null ? stack.peek() : null;
    if (log.isDebugEnabled()) {
      log.debug(
          "root={} pre={} path={} idx={} res={} coll={}",
          root,
          prefix,
          Arrays.toString(path),
          pathIndex,
          sink,
          collect);
    }

    if (Thread.currentThread().isInterrupted()) {
      QueryException exception = new QueryException("query interrupted");
      log.warn("Query closed due to thread interruption:\n", exception);
      throw exception;
    }
    if (pathIndex >= path.length) {
      log.debug("pathIndex>path.length, return root={}", root);
      if (!queryPromise.isDone()) {
        sink.send(prefix.createBundle(sink));
      }
      return;
    }
    QueryElement next = path[pathIndex];
    Iterator<DataTreeNode> iter =
        root != null
            ? next.matchNodes(tree, stack)
            : next.emptyok() ? Iterators.<DataTreeNode>emptyIterator() : null;
    if (iter == null) {
      return;
    }
    try {
      int skip = next.skip();
      int limit = next.limit();
      if (next.flatten()) {
        int count = 0;
        while (iter.hasNext() && (next.limit() == 0 || limit > 0)) {
          // Check for interruptions or cancellations
          if (Thread.currentThread().isInterrupted()) {
            QueryException exception = new QueryException("query interrupted");
            log.warn("Query closed due to thread interruption:\n", exception);
            throw exception;
          }
          if (queryPromise.isDone()) {
            if (iter instanceof ClosableIterator) {
              ((ClosableIterator<DataTreeNode>) iter).close();
            }

            log.debug("Query promise completed during processing. root={}", root);
            if (queryPromise.isCancelled()) {
              throw (CancellationException) queryPromise.cause();
            }
            throw new QueryException("Query closed during processing, root=" + root);
          }

          DataTreeNode tn = iter.next();
          if (tn == null && !next.emptyok()) {
            break;
          }
          if (skip > 0) {
            skip--;
            continue;
          }
          int updates = next.update(prefix, tn);
          if (updates > 0) {
            count += updates;
          }
          limit--;
        }
        if (!queryPromise.isDone()) {
          tableSearch(null, prefix, path, pathIndex + 1, sink, collect + count, queryPromise);
        }
        prefix.pop(count);
        return;
      }
      while (iter.hasNext() && (next.limit() == 0 || limit > 0)) {
        // Check for interruptions or cancellations
        if (Thread.currentThread().isInterrupted()) {
          QueryException exception = new QueryException("query interrupted");
          log.warn("Query closed due to thread interruption", exception);
          throw exception;
        }
        if (queryPromise.isDone()) {
          if (iter instanceof ClosableIterator) {
            ((ClosableIterator<DataTreeNode>) iter).close();
          }

          log.debug("Query promise completed during processing. root={}", root);
          if (queryPromise.isCancelled()) {
            throw (CancellationException) queryPromise.cause();
          }
          throw new QueryException("Query closed during processing, root=" + root);
        }

        DataTreeNode tn = iter.next();
        if (tn == null && !next.emptyok()) {
          return;
        }
        if (skip > 0) {
          skip--;
          continue;
        }
        int count = next.update(prefix, tn);
        if (count >= 0) {
          if (!queryPromise.isDone()) {
            tableSearch(
                stack, tn, prefix, path, pathIndex + 1, sink, collect + count, queryPromise);
          }
          prefix.pop(count);
          limit--;
        }
      }
    } finally {
      if (log.isDebugEnabled()) {
        log.debug(
            "CLOSING: root={} pre={} path={} idx={} res={} coll={}",
            root,
            prefix,
            Arrays.toString(path),
            pathIndex,
            sink,
            collect);
      }

      if (iter instanceof ClosableIterator) {
        ((ClosableIterator<DataTreeNode>) iter).close();
      }
    }
  }
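
Both loops above implement the same per-element skip/limit accounting over the node iterator, where a limit of 0 means unlimited. Stripped of the tree-specific logic, the pattern looks roughly like this (the generic element type and Consumer sink are placeholders, not types from the example; uses java.util.Iterator and java.util.function.Consumer):

  // Skip the first `skip` matches, then emit at most `limit` matches (limit == 0 means no limit).
  static <T> void traverse(Iterator<T> iter, int skip, int limit, Consumer<T> sink) {
    int remaining = limit;
    while (iter.hasNext() && (limit == 0 || remaining > 0)) {
      T node = iter.next();
      if (skip > 0) {
        skip--;
        continue;
      }
      sink.accept(node);
      remaining--;
    }
  }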