protected void checkExceptionHandling(
      final LinkedBlockingQueue<Throwable> unexpectedExceptions,
      ExecutorTaskAgent agent,
      Future<?> future,
      Class<? extends Exception> expectedKlass,
      boolean isExpected)
      throws InterruptedException {

    try {
      future.get();
    } catch (ExecutionException ce) {
      // The task failed; its cause should match the expected exception type.
      if (expectedKlass != null) {
        assertTrue(expectedKlass.isInstance(ce.getCause()));
      }
    }
    agent.awaitPendingTasks();

    if (expectedKlass == null || isExpected) {
      // Expected failures must not be reported as unexpected exceptions.
      assertTrue(unexpectedExceptions.isEmpty());
    } else {
      assertTrue(unexpectedExceptions.size() == 1);
      Throwable removed = unexpectedExceptions.remove();
      assertTrue(expectedKlass.isInstance(removed));
    }
  }
  @Test
  public void testScenarios_publications() throws Exception {

    InMemoryClient client = new InMemoryClient(new InMemoryRoutingContext(), RecipientKind.DM);
    LinkedBlockingQueue<Message> queue = new LinkedBlockingQueue<>();
    client.setMessageQueue(queue);

    MessagingContext ctx = new MessagingContext(RecipientKind.AGENTS, "domain", "app");

    // Not connected, publications cannot work
    Message msg = new MsgCmdAddInstance(new Instance(""));

    Assert.assertFalse(client.isConnected());
    Assert.assertEquals(0, queue.size());
    client.publish(ctx, msg);
    Assert.assertEquals(0, queue.size());

    // Connection
    client.openConnection();
    Assert.assertTrue(client.isConnected());
    client.publish(ctx, msg);
    Assert.assertEquals(0, queue.size());

    // We need to subscribe to this context to dispatch the message.
    // The messaging tests verify routing more precisely.
    client.subscribe(ctx);
    client.publish(ctx, msg);

    Assert.assertEquals(1, queue.size());
    Assert.assertEquals(msg, queue.element());
  }
 @Test
 public void testAddNew() throws Exception {
   RpcStub handler = makeHandler();
   MultiplexPoolHandler han = new MultiplexPoolHandler(Collections.singleton(handler), 2, 3);
   han.setWaitTimeout(1);
   travelPool(han);
   ArrayList<RpcStub> list = new ArrayList<RpcStub>();
   list.add(makeHandler());
   list.add(makeHandler());
   list.add(makeHandler());
   han.addNew(list);
   travelPool(han);
   ArrayList<RpcStub> backupHandlers = FieldUtil.getValue(han, "backupHandlers");
   assertEquals(4, backupHandlers.size());
   LinkedBlockingQueue<RpcStub> readyQueue = FieldUtil.getValue(han, "readyQueue");
   assertEquals(4, readyQueue.size());
   RpcStub h1 = han.take();
   RpcStub h2 = han.take();
   RpcStub h3 = han.take();
   RpcStub h4 = han.take();
   travelPool(han);
   han.offer(h1);
   han.offer(h2);
   han.offer(h3);
   han.offer(h4);
   travelPool(han);
   ArrayList<RpcStub> list2 = new ArrayList<RpcStub>();
   list2.add(makeHandler());
   han.addNew(list2);
   assertEquals(5, backupHandlers.size());
   assertEquals(10, readyQueue.size());
   han.destroy();
 }
  @Override
  public void tempFileAnalysis(
      LinkedBlockingQueue<String> urlQ, LinkedBlockingQueue<DataModle> dmQ) {
    logger.info("now useful URL queue size:" + urlQ.size());

    DataModle dm = null;
    String url = "";
    try {
      url = urlQ.take();
    } catch (InterruptedException e2) {
      logger.warn("Interrupted while waiting to take a URL from the queue");
      Thread.currentThread().interrupt(); // restore the interrupt flag
      return;
    }

    try {
      dm = elementFilter(url);
    } catch (IOException e1) {
      logger.error("Cannot generate a DataModle for URL: " + url, e1);
      return;
    }
    }

    try {
      dmQ.put(dm);
    } catch (InterruptedException e) {
      logger.warn("Interrupted while waiting to put a DataModle into the queue");
      Thread.currentThread().interrupt(); // restore the interrupt flag
    }
  }
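The method above is the standard blocking hand-off pattern: take() blocks until a URL is available, put() blocks until the target queue has room, and InterruptedException is the only way out of either call. A minimal standalone sketch of that pattern (class and queue names here are illustrative, not taken from the code above):

import java.util.concurrent.LinkedBlockingQueue;

public class HandOffDemo {
  public static void main(String[] args) throws InterruptedException {
    LinkedBlockingQueue<String> urls = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<String> results = new LinkedBlockingQueue<>(100); // bounded target queue

    Thread worker = new Thread(() -> {
      try {
        String url = urls.take();        // blocks until a URL is available
        results.put("processed:" + url); // blocks if the result queue is full
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the flag and exit
      }
    });

    worker.start();
    urls.put("http://example.org");
    System.out.println(results.take()); // processed:http://example.org
    worker.join();
  }
}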
  @Test
  @Transactional
  @Rollback
  public void verify() throws Exception {
    final LinkedBlockingQueue<HttpRequest> requests = new LinkedBlockingQueue<HttpRequest>();

    server.register(
        "/*",
        new HttpRequestHandler() {
          @Override
          public void handle(HttpRequest request, HttpResponse response, HttpContext context)
              throws HttpException, IOException {
            requests.add(request);

            response.setEntity(
                new StringEntity(
                    getQueryParamValue(request.getRequestLine().getUri(), "hub.challenge")));
          }
        });

    subscriber.verify(SubscriptionMode.SUBSCRIBE);

    Assert.assertEquals(1, requests.size());

    HttpRequest actualRequest = requests.poll();
    String requestUri = actualRequest.getRequestLine().getUri();
    Assert.assertEquals("subscribe", getQueryParamValue(requestUri, "hub.mode"));
    Assert.assertEquals(
        subscriber.getTopic().toString(),
        URLDecoder.decode(getQueryParamValue(requestUri, "hub.topic"), "UTF-8"));
    Assert.assertNotNull(getQueryParamValue(requestUri, "hub.challenge"));
    Assert.assertEquals("123", getQueryParamValue(requestUri, "hub.lease_seconds"));
    Assert.assertEquals(
        subscriber.getVerifyToken(), getQueryParamValue(requestUri, "hub.verify_token"));
  }
  @Override
  public String nextSentence() {
    if (buffer.size() < linesToFetch) {
      // prefetch
      if (currentReader != null) {
        fetchLines(linesToFetch);
      } else if (this.iterator.hasNext()) {
        currentReader = new BufferedReader(new InputStreamReader(iterator.nextDocument()));
        fetchLines(linesToFetch);
      }
    }

    // Either way the result is the same: poll() returns the next string,
    // or null if the buffer is still empty after the prefetch attempt.
    return buffer.poll();
  }
  /** @see de.willuhn.jameica.messaging.MessagingQueue#flush() */
  public void flush() {
    if (pool.isTerminated()) return;

    try {
      // Busy-wait until the worker thread has drained the queue.
      while (messages != null && messages.size() > 0) Thread.sleep(5);
    } catch (Exception e) {
      Logger.error("unable to flush queue", e);
    }
  }
  /**
   * Called in case of emergency after deserialization. Listeners are not saved during
   * serialization because they are transient: most of them are windows, which cannot be
   * serialized. After deserialization we therefore have to find the instances of classes
   * implementing IChangesListener and rebuild the listeners list so that everything works
   * smoothly again.
   */
  public synchronized void recoverChangesListeners() {
    if (listeners == null || listeners.size() < 2) {
      listeners = new java.util.concurrent.LinkedBlockingQueue<IChangesListener>();

      listeners.add(VehicleInspector.getInstance());
      listeners.add(BuildingInspector.getInstance());
      listeners.add(DrawPanel.getInstance());
    }
  }
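One common way to trigger this kind of recovery is from a readObject hook, so the transient listener collection is rebuilt as soon as the object is deserialized. A minimal, hypothetical sketch (class name and listener type are assumptions, not taken from the project above):

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.util.concurrent.LinkedBlockingQueue;

public class Model implements Serializable {

  private static final long serialVersionUID = 1L;

  // Listeners are typically UI components and cannot be serialized.
  private transient LinkedBlockingQueue<Runnable> listeners = new LinkedBlockingQueue<>();

  private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();
    // Transient fields come back as null; rebuild the queue before anyone uses it.
    listeners = new LinkedBlockingQueue<>();
  }
}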
 @Override
 public String toString() {
   StringBuilder sb = new StringBuilder();
   sb.append("LearnerHandler ").append(sock);
   sb.append(" tickOfLastAck:").append(tickOfLastAck());
   sb.append(" synced?:").append(synced());
   sb.append(" queuedPacketLength:").append(queuedPackets.size());
   return sb.toString();
 }
 public synchronized void debug() {
   // "EVENTI IN CODA" is Italian for "EVENTS IN QUEUE".
   GnuBackgammon.out.println("===> EVENTI IN CODA: " + queue.size());
   GnuBackgammon.out.print("   ===> ");
   Iterator<Evt> itr = queue.iterator();
   while (itr.hasNext()) {
     Evt element = itr.next();
     GnuBackgammon.out.print("  " + element.e);
   }
   GnuBackgammon.out.println(" ");
 }
  public synchronized void flushBuffer() {

    int size = sendingQueue.size();

    if (size > 0) {
      logger.info(
          "Found "
              + size
              + " messages waiting in the buffer for connection "
              + address.getIPv4Address());
      while (sendingQueue.size() > 0) {
        try {
          PCEPMessage message = sendingQueue.take();
          sendMessageToPeer(message, ModuleEnum.NETWORK_MODULE);
        } catch (InterruptedException e) {
          // Interrupted while waiting on the sending queue; report it and keep draining.
          e.printStackTrace();
        }
      }
    }
  }
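For reference, LinkedBlockingQueue also provides drainTo(), which moves everything currently queued into another collection in a single non-blocking call. A standalone sketch of a drain-based flush (the message type and send method are illustrative, not the API of the class above):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;

public class DrainDemo {
  private final LinkedBlockingQueue<String> sendingQueue = new LinkedBlockingQueue<>();

  public void flushBuffer() {
    List<String> pending = new ArrayList<>();
    sendingQueue.drainTo(pending); // removes all currently queued messages, never blocks
    for (String message : pending) {
      send(message);
    }
  }

  private void send(String message) {
    System.out.println("sent: " + message);
  }

  public static void main(String[] args) {
    DrainDemo demo = new DrainDemo();
    demo.sendingQueue.add("hello");
    demo.sendingQueue.add("world");
    demo.flushBuffer();
  }
}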
 @Test
 public void testAddMultiplex() throws Exception {
   RpcStub handler = makeHandler(0);
   MultiplexPoolHandler han = new MultiplexPoolHandler(Collections.singleton(handler), 2, 3);
   han.addMultiplex(); // 2
   han.addMultiplex(); // 3
   han.addMultiplex(); // 3, return immediately
   Integer currentMultiplex = FieldUtil.getValue(han, "currentMultiplex");
   assertEquals(3, currentMultiplex.intValue());
   LinkedBlockingQueue<RpcStub> readyQueue = FieldUtil.getValue(han, "readyQueue");
   assertEquals(1, readyQueue.size());
 }
  /**
   * Wait for a number of messages.
   *
   * @param eventQueue event queue
   * @param numMessages number of messages
   */
  private void waitFor(final LinkedBlockingQueue<Exchange> eventQueue, final int numMessages) {
    long start = System.currentTimeMillis();

    while (System.currentTimeMillis() < start + _waitTimeout) {
      if (eventQueue.size() >= numMessages) {
        return;
      }
      sleep();
    }

    TestCase.fail("Timed out waiting on event queue length to be " + numMessages + " or greater.");
  }
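LinkedBlockingQueue can also do the waiting itself: poll(timeout, unit) blocks until an element arrives or the deadline passes, which avoids the sleep-and-recheck loop. A minimal sketch of a timed wait for N elements (names are illustrative, and unlike the helper above this version consumes the elements it waits for):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class TimedWaitDemo {

  /** Waits until numMessages elements have been taken, or the timeout elapses. */
  static boolean waitFor(LinkedBlockingQueue<String> queue, int numMessages, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    for (int i = 0; i < numMessages; i++) {
      long remaining = deadline - System.currentTimeMillis();
      if (remaining <= 0 || queue.poll(remaining, TimeUnit.MILLISECONDS) == null) {
        return false; // timed out before enough messages arrived
      }
    }
    return true;
  }

  public static void main(String[] args) throws InterruptedException {
    LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>();
    queue.add("one");
    queue.add("two");
    System.out.println(waitFor(queue, 2, 500)); // true
    System.out.println(waitFor(queue, 1, 100)); // false, queue is now empty
  }
}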
 private void consumeRemaining() {
   int counter = 0;
   while (queue.size() != 0) {
     try {
       consumeInternal();
       counter++;
     } catch (InterruptedException e) {
       log.error("[{}] Error occurred", queueName, e);
     }
   }
   log.info("[{}] Finished processing of all {} remaining item(s)", queueName, counter);
 }
 @Override
 public void execute(Runnable command) {
   final int size = tasks.size();
   if (size == WARNING_THRESHOLD) {
     log.warn(
         "User thread has {} pending tasks, memory exhaustion may occur.\n"
             + "If you see this message, check your memory consumption and see if it's problematic or excessively spikey.\n"
             + "If it is, check for deadlocked or slow event handlers. If it isn't, try adjusting the constant \n"
             + "Threading.UserThread.WARNING_THRESHOLD upwards until it's a suitable level for your app, or Integer.MAX_VALUE to disable.",
         size);
   }
   Uninterruptibles.putUninterruptibly(tasks, command);
 }
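Uninterruptibles.putUninterruptibly comes from Guava: it keeps retrying the blocking put if the calling thread is interrupted and restores the interrupt flag before returning. A standalone sketch of the same call, assuming Guava is on the classpath (queue and task here are illustrative):

import com.google.common.util.concurrent.Uninterruptibles;
import java.util.concurrent.LinkedBlockingQueue;

public class PutUninterruptiblyDemo {
  public static void main(String[] args) {
    LinkedBlockingQueue<Runnable> tasks = new LinkedBlockingQueue<>();
    // Blocks until space is available; interrupts are swallowed during the put
    // and the thread's interrupt flag is re-set afterwards.
    Uninterruptibles.putUninterruptibly(tasks, () -> System.out.println("ran"));
    System.out.println(tasks.size()); // 1
  }
}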
  @Test
  public void testOffer() throws Exception {
    RpcStub handler1 = makeHandler(2);
    RpcStub handler2 = makeHandler(0);

    MultiplexPoolHandler han = new MultiplexPoolHandler(Collections.singleton(handler1), 2, 2);
    han.offer(handler1);
    han.offer(handler2);
    LinkedBlockingQueue<RpcStub> readyQueue = FieldUtil.getValue(han, "readyQueue");
    assertEquals(2, readyQueue.size());
    assertEquals(handler1, readyQueue.take());
    assertEquals(handler1, readyQueue.take());
  }
  /**
   * Generates the input stream and loads it into the stream buffer. Before loading the stream
   * tuples into the hash table, it checks the status of the queue: if the queue is already full,
   * it removes the oldest tuples from the hash table along with their pointer addresses in the
   * queue. Finally, it loads the newly scanned tuples into the hash table together with their
   * pointer addresses in the queue.
   */
  public void inputStream() {
    long startTime = 0, endTime = 0, CA_per_Iteration = 0;
    int[] w = new int[STREAM_BUFFER];

    if (round2) {
      removeExpireTuples();
    }
    // Busy-wait until the stream buffer holds enough tuples for one full iteration.
    while (streamBuffer.size() < STREAM_BUFFER) ;
    for (int i = 0; i < STREAM_BUFFER; i++) {
      startTime = System.nanoTime();
      set(w, streamBuffer.peek().attr1, i);
      mhm.put(Integer.valueOf(streamBuffer.peek().attr1), streamBuffer.poll());
      endTime = System.nanoTime();
      if (measurementStart) {
        CA_per_Iteration += endTime - startTime;
      }
    }
    System.out.println("Available stream size After: " + streamBuffer.size());
    abq.offer(w);
    if (measurementStart) {
      CA[CA_index++] = CA_per_Iteration / STREAM_BUFFER;
    }
  }
 @Override
 public Stream<V> stream() {
   return StreamSupport.stream(
       new Spliterators.AbstractSpliterator<V>(
           queue.size(), NONNULL | ORDERED | SIZED | SUBSIZED | DISTINCT | IMMUTABLE) {
         @Override
         public boolean tryAdvance(Consumer<? super V> action) {
           V v = read();
           if (v == null) {
             return false;
           }
           action.accept(v);
           return true;
         }
       },
       false);
 }
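For comparison, the stream() that LinkedBlockingQueue inherits from Collection is a weakly consistent traversal of the current contents and does not remove anything, whereas the override above appears to consume elements via read(). A quick standalone illustration of the inherited behaviour:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.stream.Collectors;

public class QueueStreamDemo {
  public static void main(String[] args) {
    LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>();
    queue.add("a");
    queue.add("b");

    // Collection.stream(): traverses the elements but leaves them in the queue.
    String joined = queue.stream().collect(Collectors.joining(", "));
    System.out.println(joined);       // a, b
    System.out.println(queue.size()); // still 2
  }
}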
  public void testExceptionHandling() throws Exception {
    final LinkedBlockingQueue<Throwable> unexpectedExceptions = new LinkedBlockingQueue<>();

    ExecutorTaskAgent agent =
        new ExecutorTaskAgent("testExceptionHandling") {
          @Override
          protected void handleUnexpectedException(Throwable throwable) {
            if (throwable != null) {
              unexpectedExceptions.add(throwable);
            }
          }
        };
    Future<?> future;

    Runnable npeRunnable =
        new Runnable() {
          @Override
          public void run() {
            throw new RuntimeException(); // a RuntimeException, representing an internal error
          }
        };
    Callable<String> normalTask =
        new Callable<String>() {
          @Override
          public String call() throws Exception {
            throw new IOException("Some expected exception");
          }
        };

    future = agent.submit(npeRunnable);

    try {
      future.cancel(true);
      future.get();
    } catch (CancellationException ce) {
      // ok
    }
    agent.awaitPendingTasks();
    assertTrue(unexpectedExceptions.size() == 0);

    checkExceptionHandling(
        unexpectedExceptions, agent, agent.submit(npeRunnable), RuntimeException.class, false);

    checkExceptionHandling(
        unexpectedExceptions, agent, agent.submit(normalTask), IOException.class, true);
  }
 @Test
 public void testDestroy() throws Exception {
   ArrayList<RpcStub> list = new ArrayList<RpcStub>();
   list.add(makeHandler2());
   list.add(makeHandler2());
   list.add(makeHandler2());
   MultiplexPoolHandler han = new MultiplexPoolHandler(list, 2, 3);
   han.setWaitTimeout(1);
   LinkedBlockingQueue<RpcStub> readyQueue = FieldUtil.getValue(han, "readyQueue");
   ArrayList<RpcStub> backupHandlers = FieldUtil.getValue(han, "backupHandlers");
   travelPool(han);
   han.destroy();
   travelPool(han);
   assertNull(han.take());
   assertNull(han.take());
   assertEquals(3, backupHandlers.size());
   assertEquals(0, readyQueue.size());
 }
  private void handleInterrupt() {
    writeLock.lock();
    try {
      // Stop accepting/adding new items to queue
      queueAdder = this::addAfterInterrupt;
    } finally {
      writeLock.unlock();
    }

    if (queue.isEmpty()) {
      log.info("[{}] Interrupted. Queue is empty. Stopping consumer", queueName);
    } else {
      log.warn(
          "[{}] Interrupted. There are still {} item(s) in queue. Will process these and THEN stop the consumer",
          queueName,
          queue.size());
      // Consume the items that are already in the queue
      consumeRemaining();
    }
  }
  @Override
  public synchronized ReportEvent getReport() {
    ReportEvent rpt = new ReportEvent(getName());

    // historical counts
    rpt.setLongMetric(A_IMPORTED, importedCount.get());
    rpt.setLongMetric(A_WRITING, writingCount.get());
    rpt.setLongMetric(A_LOGGED, loggedCount.get());
    rpt.setLongMetric(A_SENDING, sendingCount.get());
    rpt.setLongMetric(A_ERROR, errCount.get());
    rpt.setLongMetric(A_RECOVERED, recoverCount.get());

    // Waiting to send
    rpt.setLongMetric(A_IN_LOGGED, loggedQ.size());

    // message counts
    rpt.setLongMetric(A_MSG_WRITING, writingEvtCount.get());
    rpt.setLongMetric(A_MSG_READ, readEvtCount.get());
    return rpt;
  }
 @Override
 public int size() {
   return queue.size();
 }
 public int size() {
   return eventsQueue.size();
 }
 /** Returns the current number of elements in the queue. */
 public int size() {
   return queue.size();
 }
  /**
   * Sequentially reads a fixed number of tuples from disk using the disk buffer and probes them
   * against the hash table.
   *
   * @throws java.io.IOException if the disk buffer cannot be read
   * @throws InterruptedException if the thread is interrupted while waiting
   */
  public void lookupMasterData() throws java.io.IOException, InterruptedException {

    long startTime = 0, endTime = 0, CH_per_Iteration = 0;

    int matched = 0, diskInputs = 0;

    conn = connectDB();
    mhm.clear();
    int index = 0;
    try {
      stmt = conn.createStatement();
      stmt.setFetchSize(DISK_BUFFER);

    } catch (SQLException exp) {
      exp.printStackTrace();
    }

    stream.start();
    // Thread.sleep(LATE_START);
    for (int round = 1; round <= 2; round++) {
      startRead = 1;
      if (round == 2) {
        System.out.println("Second round started");
        round2 = true;
      }
      for (int tuple = 0; tuple < R_SIZE; tuple++) {

        measurementStart = false;
        if ((round2) && (tuple >= MEASUREMENT_START && tuple < MEASUREMENT_STOP)) {
          measurementStart = true;
        }
        if (tuple % DISK_BUFFER == 0) {
          readDiskTuplesIntoBufferB();
          inputStream();
          diskInputs++;
          if (measurementStart && matched != 0) {
            CH[CH_index++] = CH_per_Iteration / matched;
            CH_per_Iteration = 0;
            matched = 0;
          }

          index = 0;
        }

        if (mhm.containsKey(bufferB[index][0])) {
          startTime = System.nanoTime();
          list = (ArrayList<MeshJoinObject>) mhm.get(bufferB[index][0]);
          endTime = System.nanoTime();
          if (measurementStart) {
            CH_per_Iteration += endTime - startTime;
            matched++;
          }
        }
        index++;
      }
    }

    stream.stop();
    closeConnection(conn);
    System.out.println("Hash tuples:  " + mhm.size());
    System.out.println("Unmatched tuples: " + unmatched);
    System.out.println("Iterations required to bring R into Disk_buffer=" + diskInputs);
    System.out.println("Stream back log: " + streamBuffer.size());
  }
 public long getTotalBufferedCount() {
   return queue.size() + currentProcessingCount.get();
 }
 @Override
 public boolean isQueueSpaceAvailable() {
   return queue.size() < rejectionQueueSizeThreshold;
 }
 @Override
 public int getAvailable() {
   return _queue.size();
 }
  /**
   * Receive an incoming connection (built from a received SYN). Non-SYN packets with a zero
   * SendStreamID may also be queued here so that they don't get thrown away while the SYN packet
   * that precedes them is still being queued.
   *
   * @param timeoutMs max amount of time to wait for a connection (if less than 1ms, wait
   *     indefinitely)
   * @return connection received, or null if there was a timeout or the handler was shut down
   */
  public Connection accept(long timeoutMs) {
    if (_log.shouldLog(Log.DEBUG)) _log.debug("Accept(" + timeoutMs + ") called");

    long expiration = timeoutMs + _context.clock().now();
    while (true) {
      if ((timeoutMs > 0) && (expiration < _context.clock().now())) return null;
      if (!_active) {
        // fail all the ones we had queued up
        while (true) {
          Packet packet = _synQueue.poll(); // fails immediately if empty
          if (packet == null || packet.getOptionalDelay() == PoisonPacket.POISON_MAX_DELAY_REQUEST)
            break;
          sendReset(packet);
        }
        return null;
      }

      Packet syn = null;
      while (_active && syn == null) {
        if (_log.shouldLog(Log.DEBUG))
          _log.debug(
              "Accept(" + timeoutMs + "): active=" + _active + " queue: " + _synQueue.size());
        if (timeoutMs <= 0) {
          try {
            syn = _synQueue.take(); // waits forever
          } catch (InterruptedException ie) {
          } // { break;}
        } else {
          long remaining = expiration - _context.clock().now();
          // (don't think this applies anymore for LinkedBlockingQueue)
          // BUGFIX
          // The specified amount of real time has elapsed, more or less.
          // If timeout is zero, however, then real time is not taken into consideration
          // and the thread simply waits until notified.
          if (remaining < 1) break;
          try {
            syn = _synQueue.poll(remaining, TimeUnit.MILLISECONDS); // waits the specified time max
          } catch (InterruptedException ie) {
          }
          break;
        }
      }

      if (syn != null) {
        if (syn.getOptionalDelay() == PoisonPacket.POISON_MAX_DELAY_REQUEST) return null;

        // deal with forged / invalid syn packets in _manager.receiveConnection()

        // Handle both SYN and non-SYN packets in the queue
        if (syn.isFlagSet(Packet.FLAG_SYNCHRONIZE)) {
          // We are single-threaded here, so this is
          // a good place to check for dup SYNs and drop them
          Destination from = syn.getOptionalFrom();
          if (from == null) {
            if (_log.shouldLog(Log.WARN)) _log.warn("Dropping SYN packet with no FROM: " + syn);
            // drop it
            continue;
          }
          Connection oldcon = _manager.getConnectionByOutboundId(syn.getReceiveStreamId());
          if (oldcon != null) {
            // His ID not guaranteed to be unique to us, but probably is...
            // only drop it on a destination match too
            if (from.equals(oldcon.getRemotePeer())) {
              if (_log.shouldLog(Log.WARN)) _log.warn("Dropping dup SYN: " + syn);
              continue;
            }
          }
          Connection con = _manager.receiveConnection(syn);
          if (con != null) return con;
        } else {
          reReceivePacket(syn);
          // ... and keep looping
        }
      }
      // keep looping...
    }
  }