  @Override
  public synchronized void stop() {
    try {
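      // Close the group of accepted channels and wait up to 10 seconds for the close to complete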
      accepted.close().awaitUninterruptibly(10, TimeUnit.SECONDS);
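      // Wrap the channel factory in a bootstrap so its external resources (Netty boss/worker pools) can be released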
      ServerBootstrap bootstrap = new ServerBootstrap(selector);
      bootstrap.releaseExternalResources();
      pipelineFact.destroy();

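      // Close the handle to the local filesystem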
      localFS.close();
    } catch (Throwable t) {
      LOG.error(t);
    } finally {
      super.stop();
    }
  }
Example #2
 @Override
 public synchronized void stop() {
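   // Stop the delegated container launcher service before stopping this service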
   ((Service) this.containerLauncher).stop();
   super.stop();
 }
 @Override
 public synchronized void stop() {
   // Interrupt the updater.
   this.isStopped = true;
   super.stop();
 }
  @Override
  public void stop() {
    LOG.info(
        "Stopping JobHistoryEventHandler. "
            + "Size of the outstanding event queue is "
            + eventQueue.size());
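    // Mark the handler as stopped so the event-handling loop will exit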
    stopped = true;
    // do not interrupt while event handling is in progress
    synchronized (lock) {
      if (eventHandlingThread != null) eventHandlingThread.interrupt();
    }

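    // Wait for the event handling thread to finish its current work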
    try {
      if (eventHandlingThread != null) eventHandlingThread.join();
    } catch (InterruptedException ie) {
      LOG.info("Interruped Exception while stopping", ie);
    }

    // Cancel all timers - so that they aren't invoked during or after
    // the metaInfo object is wrapped up.
    for (MetaInfo mi : fileMap.values()) {
      try {
        mi.shutDownTimer();
      } catch (IOException e) {
        LOG.info(
            "Exception while cancelling delayed flush timer. "
                + "Likely caused by a failed flush: "
                + e.getMessage());
      }
    }

    // write all the events remaining in queue
    Iterator<JobHistoryEvent> it = eventQueue.iterator();
    while (it.hasNext()) {
      JobHistoryEvent ev = it.next();
      LOG.info("In stop, writing event " + ev.getType());
      handleEvent(ev);
    }

    // Process JobUnsuccessfulCompletionEvent for jobIds which still haven't
    // closed their event writers
    Iterator<JobId> jobIt = fileMap.keySet().iterator();
    if (isSignalled) {
      while (jobIt.hasNext()) {
        JobId toClose = jobIt.next();
        MetaInfo mi = fileMap.get(toClose);
        if (mi != null && mi.isWriterActive()) {
          LOG.warn("Found jobId " + toClose + " to have not been closed. Will close");
          // Create a JobFinishEvent so that it is written to the job history
          JobUnsuccessfulCompletionEvent jucEvent =
              new JobUnsuccessfulCompletionEvent(
                  TypeConverter.fromYarn(toClose),
                  System.currentTimeMillis(),
                  context.getJob(toClose).getCompletedMaps(),
                  context.getJob(toClose).getCompletedReduces(),
                  JobState.KILLED.toString());
          JobHistoryEvent jfEvent = new JobHistoryEvent(toClose, jucEvent);
          // Bypass the queue mechanism which might wait. Call the method directly
          handleEvent(jfEvent);
        }
      }
    }

    // close all file handles
    for (MetaInfo mi : fileMap.values()) {
      try {
        mi.closeWriter();
      } catch (IOException e) {
        LOG.info("Exception while closing file " + e.getMessage());
      }
    }
    LOG.info("Stopped JobHistoryEventHandler. super.stop()");
    super.stop();
  }