void checkEmpty(BlockingQueue q) {
    // Exercises every observer of an empty queue: size/peek/poll report
    // emptiness, toString/toArray/iterator are empty, and the element-expecting
    // accessors (element, iterator().next, remove) throw NoSuchElementException.
    try {
      assertTrue(q.isEmpty());
      assertEquals(0, q.size());
      assertNull(q.peek());
      assertNull(q.poll());
      assertNull(q.poll(0, MILLISECONDS));
      assertEquals("[]", q.toString());
      assertTrue(Arrays.equals(q.toArray(), new Object[0]));
      assertFalse(q.iterator().hasNext());

      try {
        q.element();
        shouldThrow();
      } catch (NoSuchElementException expected) {
        // element() must fail on an empty queue
      }
      try {
        q.iterator().next();
        shouldThrow();
      } catch (NoSuchElementException expected) {
        // an iterator over an empty queue must fail on next()
      }
      try {
        q.remove();
        shouldThrow();
      } catch (NoSuchElementException expected) {
        // remove() must fail on an empty queue
      }
    } catch (InterruptedException ie) {
      // poll(timeout) declares InterruptedException; treat it as a test failure.
      threadUnexpectedException(ie);
    }
  }
  /**
   * Renders the current contents of the compaction, split and region-merge
   * queues as a human-readable multi-line string, one queued task per line.
   *
   * <p>Only the small-compaction section is guarded against a {@code null}
   * executor; the other pools are assumed to be initialized.
   *
   * @return a dump of the large/small compaction, split and region-merge queues
   */
  public String dumpQueue() {
    // Method-local buffer: StringBuilder avoids StringBuffer's pointless locking.
    StringBuilder queueLists = new StringBuilder();
    queueLists.append("Compaction/Split Queue dump:\n");
    appendQueueDump(queueLists, "  LargeCompaction Queue:\n", longCompactions.getQueue());

    if (shortCompactions != null) {
      queueLists.append("\n");
      appendQueueDump(queueLists, "  SmallCompaction Queue:\n", shortCompactions.getQueue());
    }

    queueLists.append("\n");
    appendQueueDump(queueLists, "  Split Queue:\n", splits.getQueue());

    queueLists.append("\n");
    appendQueueDump(queueLists, "  Region Merge Queue:\n", mergePool.getQueue());

    return queueLists.toString();
  }

  /** Appends a section header followed by one indented line per queued task. */
  private static void appendQueueDump(
      StringBuilder out, String header, BlockingQueue<Runnable> queue) {
    out.append(header);
    for (Runnable task : queue) {
      out.append("    ").append(task).append("\n");
    }
  }
  /**
   * Closes every connection in the pool. Successfully closed connections are
   * removed from the pool; connections that fail to close are logged with the
   * underlying exception and left in place.
   */
  public void shutDown() {
    Iterator<Connection> iterator = connections.iterator();
    while (iterator.hasNext()) {
      Connection connection = iterator.next();
      try {
        connection.close();
        // Remove through the iterator to avoid ConcurrentModificationException.
        iterator.remove();
      } catch (SQLException e) {
        // Pass the exception itself so the full stack trace is preserved,
        // instead of only the (possibly empty) message text.
        logger.error("Couldn't close connection: " + e.getMessage(), e);
      }
    }
    logger.info("Connection pool is shut down");
  }
    /**
     * Asserts that the events queued for {@code type} are time-ordered by
     * comparing each adjacent pair. Note: this consumes (polls) the head
     * element of the queue to seed the comparison.
     *
     * @param type the client event type whose queue is inspected
     */
    public void expectOrderedEventQueue(ClientEvent.Type type) {
      BlockingQueue<CustomEvent> queue = queue(type);
      // Fewer than two events: nothing to compare.
      if (queue.size() < 2) return;

      try {
        // size() >= 2, so this poll should return immediately; the timeout is a guard.
        CustomEvent before = queue.poll(10, TimeUnit.SECONDS);
        for (CustomEvent after : queue) {
          expectTimeOrdered(before, after);
          before = after;
        }
      } catch (InterruptedException e) {
        // Restore the interrupt status before surfacing the failure.
        Thread.currentThread().interrupt();
        throw new AssertionError(e);
      }
    }
Example #5
0
 /**
  * {@inheritDoc}
  *
  * <p>Delegates directly to the backing {@code delegate}'s iterator.
  */
 @Override
 public Iterator<T> iterator() {
   return delegate.iterator();
 }
 /**
  * {@inheritDoc}
  *
  * <p>Delegates directly to {@code localInternalQueue}'s iterator.
  */
 @Override
 public Iterator<E> iterator() {
   return localInternalQueue.iterator();
 }
Example #7
0
  // Task allocation and result submission are handled by a single thread, so the
  // status pool itself needs no concurrency control here; the more expensive send
  // operations are handed off to the multi-threaded ServerConnector.
  /**
   * Collects up to {@code requestEvent.getRequestJobCount()} UNDO job tasks,
   * transitions them to DOING in the status pool, and echoes them back to the
   * requesting slave over the event's channel — either synchronously or on the
   * event thread pool, depending on configuration.
   *
   * @param requestEvent the slave's request, carrying an optional job name, a
   *     task count, the reply sequence, and the reply channel
   */
  @Override
  public void getUnDoJobTasks(GetTaskRequestEvent requestEvent) {

    String jobName = requestEvent.getJobName();
    int jobCount = requestEvent.getRequestJobCount();
    final List<JobTask> jobTasks = new ArrayList<JobTask>();

    // If shutting down, reply immediately with an empty JobTask list.
    if (this.stopped) {
      masterNode.echoGetJobTasks(requestEvent.getSequence(), jobTasks, requestEvent.getChannel());
      return;
    }
    // A specific job was requested: pull UNDO tasks from that job only.
    if (jobName != null && jobs.containsKey(jobName)) {
      Job job = jobs.get(jobName);

      List<JobTask> tasks = job.getJobTasks();

      for (JobTask jobTask : tasks) {
        if (jobTask.getStatus().equals(JobTaskStatus.UNDO)) {
          // CAS-style replace guards against another allocation of the same task.
          if (statusPool.replace(jobTask.getTaskId(), JobTaskStatus.UNDO, JobTaskStatus.DOING)) {
            this.allocateTask(jobTask);
            jobTasks.add(jobTask);

            if (jobTasks.size() == jobCount) break;
          }
        }
      }
    } else {
      // No (known) job specified: drain eligible tasks from the global undo queue.
      Iterator<JobTask> taskIter = undoTaskQueue.iterator();

      while (taskIter.hasNext()) {
        //                String taskId = taskIds.next();
        //                JobTask jobTask = jobTaskPool.get(taskId);
        JobTask jobTask = taskIter.next();
        // Drop stale entries: unknown task, task from an older job epoch, or a
        // job that has timed out.
        // NOTE(review): jobs.get(jobTask.getJobName()) would NPE if the job was
        // removed from `jobs` while its tasks remain queued — confirm upstream
        // guarantees this cannot happen.
        if (!jobTaskPool.keySet().contains(jobTask.getTaskId())
            || jobs.get(jobTask.getJobName()).getEpoch().get() > jobTask.getJobEpoch()
            || jobs.get(jobTask.getJobName()).getJobTimeOut().get()) {
          taskIter.remove();
          continue;
        }

        if (statusPool.get(jobTask.getTaskId()).equals(JobTaskStatus.UNDO)) {
          if (statusPool.replace(jobTask.getTaskId(), JobTaskStatus.UNDO, JobTaskStatus.DOING)) {
            this.allocateTask(jobTask);
            jobTasks.add(jobTask);
            taskIter.remove();

            if (jobTasks.size() >= jobCount) break;
          }
        } else taskIter.remove();
      }
    }

    // Optionally send the response asynchronously to reduce the latency of
    // jobManager event handling.
    if (config.isUseAsynModeToSendResponse()) {
      final String sequence = requestEvent.getSequence();
      final Object channel = requestEvent.getChannel();

      // The echo is relatively slow, so run it on the thread pool to keep the
      // single event-processing thread responsive.
      eventProcessThreadPool.execute(
          new Runnable() {
            public void run() {
              try {
                masterNode.echoGetJobTasks(sequence, jobTasks, channel);
              } catch (Throwable e) {
                logger.error(e);
              }
            }
          });
    } else
      masterNode.echoGetJobTasks(requestEvent.getSequence(), jobTasks, requestEvent.getChannel());
  }
 /** Returns an iterator over the backing {@code queue}. */
 public Iterator<T> iterator() {
   return queue.iterator();
 }
  /**
   * Stops the handler: interrupts and joins the event-handling thread, cancels
   * pending flush timers, drains and writes the events still in the queue,
   * force-closes writers for jobs that never completed (when signalled), and
   * finally closes every file handle.
   */
  @Override
  public void stop() {
    LOG.info(
        "Stopping JobHistoryEventHandler. "
            + "Size of the outstanding queue size is "
            + eventQueue.size());
    stopped = true;
    // do not interrupt while event handling is in progress
    synchronized (lock) {
      if (eventHandlingThread != null) eventHandlingThread.interrupt();
    }

    try {
      if (eventHandlingThread != null) eventHandlingThread.join();
    } catch (InterruptedException ie) {
      LOG.info("Interrupted Exception while stopping", ie);
      // Preserve the interrupt status for callers further up the stack.
      Thread.currentThread().interrupt();
    }

    // Cancel all timers - so that they aren't invoked during or after
    // the metaInfo object is wrapped up.
    for (MetaInfo mi : fileMap.values()) {
      try {
        mi.shutDownTimer();
      } catch (IOException e) {
        LOG.info(
            "Exception while cancelling delayed flush timer. "
                + "Likely caused by a failed flush "
                + e.getMessage());
      }
    }

    // write all the events remaining in queue
    Iterator<JobHistoryEvent> it = eventQueue.iterator();
    while (it.hasNext()) {
      JobHistoryEvent ev = it.next();
      LOG.info("In stop, writing event " + ev.getType());
      handleEvent(ev);
    }

    // Process JobUnsuccessfulCompletionEvent for jobIds which still haven't
    // closed their event writers
    Iterator<JobId> jobIt = fileMap.keySet().iterator();
    if (isSignalled) {
      while (jobIt.hasNext()) {
        JobId toClose = jobIt.next();
        MetaInfo mi = fileMap.get(toClose);
        if (mi != null && mi.isWriterActive()) {
          LOG.warn("Found jobId " + toClose + " to have not been closed. Will close");
          // Create a JobFinishEvent so that it is written to the job history
          JobUnsuccessfulCompletionEvent jucEvent =
              new JobUnsuccessfulCompletionEvent(
                  TypeConverter.fromYarn(toClose),
                  System.currentTimeMillis(),
                  context.getJob(toClose).getCompletedMaps(),
                  context.getJob(toClose).getCompletedReduces(),
                  JobState.KILLED.toString());
          JobHistoryEvent jfEvent = new JobHistoryEvent(toClose, jucEvent);
          // Bypass the queue mechanism which might wait. Call the method directly
          handleEvent(jfEvent);
        }
      }
    }

    // close all file handles
    for (MetaInfo mi : fileMap.values()) {
      try {
        mi.closeWriter();
      } catch (IOException e) {
        LOG.info("Exception while closing file " + e.getMessage());
      }
    }
    LOG.info("Stopped JobHistoryEventHandler. super.stop()");
    super.stop();
  }
 /** Returns an iterator over the transmissions still waiting to be sent. */
 public Iterator<Transmission> pendingTransmissionIterator() {
   return pendingTransmissions.iterator();
 }