@Test
  public void testDeleteOnExit() throws Exception {
    // Create deleteOnExit entries
    Path file1 = helper.getTestRootPath(fc, "file1");
    createFile(fc, file1, numBlocks, blockSize);
    fc.deleteOnExit(file1);
    checkDeleteOnExitData(1, fc, file1);

    // Ensure shutdown hook is added
    Assert.assertTrue(ShutdownHookManager.get().hasShutdownHook(FileContext.FINALIZER));

    Path file2 = helper.getTestRootPath(fc, "dir1/file2");
    createFile(fc, file2, numBlocks, blockSize);
    fc.deleteOnExit(file2);
    checkDeleteOnExitData(1, fc, file1, file2);

    Path dir = helper.getTestRootPath(fc, "dir3/dir4/dir5/dir6");
    createFile(fc, dir, numBlocks, blockSize);
    fc.deleteOnExit(dir);
    checkDeleteOnExitData(1, fc, file1, file2, dir);

    // trigger deleteOnExit and ensure the registered
    // paths are cleaned up
    FileContext.FINALIZER.run();
    checkDeleteOnExitData(0, fc, new Path[0]);
    Assert.assertFalse(exists(fc, file1));
    Assert.assertFalse(exists(fc, file2));
    Assert.assertFalse(exists(fc, dir));
  }
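The hasShutdownHook() assertion above exercises Hadoop's ShutdownHookManager, which keeps a registry of prioritized Runnables and runs them in priority order when the JVM shuts down. A minimal standalone sketch of that registry API, assuming only hadoop-common on the classpath (the priority value is arbitrary):

import org.apache.hadoop.util.ShutdownHookManager;

public class ShutdownHookSketch {
  public static void main(String[] args) {
    // Arbitrary illustrative priority; higher-priority hooks run first.
    final int PRIORITY = 30;
    Runnable cleanup = () -> System.out.println("deleting registered paths");

    // Register the hook, then verify registration the same way the test
    // above does for FileContext.FINALIZER.
    ShutdownHookManager.get().addShutdownHook(cleanup, PRIORITY);
    if (!ShutdownHookManager.get().hasShutdownHook(cleanup)) {
      throw new AssertionError("hook was not registered");
    }

    // A hook can be removed again if the cleanup has already been done.
    ShutdownHookManager.get().removeShutdownHook(cleanup);
  }
}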
  @SuppressWarnings("unchecked")
  protected void dispatch(Event event) {
    // all events go through this loop
    if (LOG.isDebugEnabled()) {
      LOG.debug("Dispatching the event " + event.getClass().getName() + "." + event.toString());
    }

    Class<? extends Enum> type = event.getType().getDeclaringClass();

    try {
      EventHandler handler = eventDispatchers.get(type);
      if (handler != null) {
        handler.handle(event);
      } else {
        throw new Exception("No handler for registered for " + type);
      }
    } catch (Throwable t) {
      // TODO Maybe log the state of the queue
      LOG.fatal("Error in dispatcher thread", t);
      // If serviceStop is called, we should exit this thread gracefully.
      if (exitOnDispatchException
          && !ShutdownHookManager.get().isShutdownInProgress()
          && !stopped) {
        Thread shutDownThread = new Thread(createShutDownThread());
        shutDownThread.setName("AsyncDispatcher ShutDown handler");
        shutDownThread.start();
      }
    }
  }
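createShutDownThread() is not part of this snippet. A hedged sketch of what it plausibly returns, assuming the code mirrors YARN's AsyncDispatcher, where the Runnable only logs and calls System.exit() and the actual cleanup is left to the hooks registered with ShutdownHookManager:

  // Sketch only: the returned Runnable just terminates the JVM;
  // ShutdownHookManager then runs the registered cleanup hooks.
  Runnable createShutDownThread() {
    return new Runnable() {
      @Override
      public void run() {
        LOG.info("Dispatcher error, exiting"); // assumes the enclosing class's LOG
        System.exit(-1);
      }
    };
  }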
  public static void main(String[] args) {
    try {
      Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
      String containerIdStr = System.getenv(Environment.CONTAINER_ID.name());
      String nodeHostString = System.getenv(Environment.NM_HOST.name());
      String nodePortString = System.getenv(Environment.NM_PORT.name());
      String nodeHttpPortString = System.getenv(Environment.NM_HTTP_PORT.name());
      String appSubmitTimeStr = System.getenv(ApplicationConstants.APP_SUBMIT_TIME_ENV);

      validateInputParam(appSubmitTimeStr, ApplicationConstants.APP_SUBMIT_TIME_ENV);

      ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
      ApplicationAttemptId applicationAttemptId = containerId.getApplicationAttemptId();

      long appSubmitTime = Long.parseLong(appSubmitTimeStr);

      Configuration conf = new Configuration(new YarnConfiguration());
      TezUtils.addUserSpecifiedTezConfiguration(conf);

      String jobUserName = System.getenv(ApplicationConstants.Environment.USER.name());

      // Do not automatically close FileSystem objects, so that on SIGTERM
      // there is still a chance to write out the job history; the objects
      // are closed explicitly during shutdown.
      conf.setBoolean("fs.automatic.close", false);

      // Command line options
      Options opts = new Options();
      opts.addOption(
          TezConstants.TEZ_SESSION_MODE_CLI_OPTION,
          false,
          "Run Tez Application Master in Session mode");

      CommandLine cliParser = new GnuParser().parse(opts, args);

      DAGAppMaster appMaster =
          new DAGAppMaster(
              applicationAttemptId,
              containerId,
              nodeHostString,
              Integer.parseInt(nodePortString),
              Integer.parseInt(nodeHttpPortString),
              appSubmitTime,
              cliParser.hasOption(TezConstants.TEZ_SESSION_MODE_CLI_OPTION));
      ShutdownHookManager.get()
          .addShutdownHook(new DAGAppMasterShutdownHook(appMaster), SHUTDOWN_HOOK_PRIORITY);

      initAndStartAppMaster(appMaster, conf, jobUserName);

    } catch (Throwable t) {
      LOG.fatal("Error starting DAGAppMaster", t);
      System.exit(1);
    }
  }
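DAGAppMasterShutdownHook is referenced but not defined in this snippet. A plausible sketch, assuming DAGAppMaster is a Hadoop Service so that stop() cascades serviceStop() through its child services; the real Tez hook also coordinates with in-flight DAG state:

  // Hypothetical sketch; the name mirrors the snippet, the body is assumed.
  static class DAGAppMasterShutdownHook implements Runnable {
    private final DAGAppMaster appMaster;

    DAGAppMasterShutdownHook(DAGAppMaster appMaster) {
      this.appMaster = appMaster;
    }

    @Override
    public void run() {
      // Stopping the service gives serviceStop() a chance to flush
      // job-history data (see the fs.automatic.close note above).
      appMaster.stop();
    }
  }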
  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
    // set scheduler
    Class<? extends ResourceScheduler> klass =
        conf.getClass(SLSConfiguration.RM_SCHEDULER, null, ResourceScheduler.class);

    scheduler = ReflectionUtils.newInstance(klass, conf);
    // start metrics
    metricsON = conf.getBoolean(SLSConfiguration.METRICS_SWITCH, true);
    if (metricsON) {
      try {
        initMetrics();
      } catch (Exception e) {
        e.printStackTrace();
      }
    }

    ShutdownHookManager.get()
        .addShutdownHook(
            new Runnable() {
              @Override
              public void run() {
                try {
                  if (metricsLogBW != null) {
                    metricsLogBW.write("]");
                    metricsLogBW.close();
                  }
                  if (web != null) {
                    web.stop();
                  }
                  tearDown();
                } catch (Exception e) {
                  e.printStackTrace();
                }
              }
            },
            SHUTDOWN_HOOK_PRIORITY);
  }
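The hook above finishes metricsLogBW with a closing "]", which implies the metrics log is written as one growing JSON array. A hypothetical sketch of how initMetrics() might open that writer; the file name and sample record are illustrative, not the actual SLS output format:

  // Hypothetical fragment from initMetrics(); metricsLogBW is the
  // java.io.BufferedWriter field that the shutdown hook closes.
  metricsLogBW = new BufferedWriter(new FileWriter("metrics-log.json"));
  metricsLogBW.write("[");  // opened here, closed with "]" by the hook
  metricsLogBW.write("{\"time\": 0, \"jvm.free.memory\": 1024}");
  // Each later sample appends ",{...}" so the final file parses as JSON.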
  /**
   * Create a block pool slice.
   *
   * @param bpid Block pool Id
   * @param volume {@link FsVolumeImpl} to which this BlockPoolSlice belongs
   * @param bpDir directory corresponding to the BlockPool
   * @param conf configuration
   * @throws IOException if the required directories cannot be created
   */
  BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir, Configuration conf)
      throws IOException {
    this.bpid = bpid;
    this.volume = volume;
    this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
    this.finalizedDir = new File(currentDir, DataStorage.STORAGE_DIR_FINALIZED);
    this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
    if (!this.finalizedDir.exists()) {
      if (!this.finalizedDir.mkdirs()) {
        throw new IOException("Failed to mkdirs " + this.finalizedDir);
      }
    }

    this.deleteDuplicateReplicas =
        conf.getBoolean(
            DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
            DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

    // Files that were being written when the datanode was last shut down
    // are simply discarded here: the tmp directory is deleted outright.
    // In the future we might want some sort of datanode-local recovery
    // for these blocks, for example crc validation.
    this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
    if (tmpDir.exists()) {
      FileUtil.fullyDelete(tmpDir);
    }
    this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
    final boolean supportAppends =
        conf.getBoolean(
            DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
    if (rbwDir.exists() && !supportAppends) {
      FileUtil.fullyDelete(rbwDir);
    }
    if (!rbwDir.mkdirs()) { // create the rbw directory if it does not exist
      if (!rbwDir.isDirectory()) {
        throw new IOException("Mkdirs failed to create " + rbwDir.toString());
      }
    }
    if (!tmpDir.mkdirs()) {
      if (!tmpDir.isDirectory()) {
        throw new IOException("Mkdirs failed to create " + tmpDir.toString());
      }
    }
    // Use the cached value initially if available; otherwise the following
    // call blocks until the initial du command completes.
    this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
    this.dfsUsage.start();

    // Save the dfs usage value during shutdown.
    ShutdownHookManager.get()
        .addShutdownHook(
            new Runnable() {
              @Override
              public void run() {
                if (!dfsUsedSaved) {
                  saveDfsUsed();
                }
              }
            },
            SHUTDOWN_HOOK_PRIORITY);
  }
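loadDfsUsed() and saveDfsUsed() are not shown. A hedged sketch of the save side, assuming the cached value is persisted as "<bytes> <timestamp>" in a file under currentDir so that a restart can judge whether the figure is still fresh; the file name is an assumption:

  // Hypothetical sketch of saveDfsUsed(); "dfsUsed" is an assumed file name.
  void saveDfsUsed() {
    File outFile = new File(currentDir, "dfsUsed");
    try (Writer out =
        new OutputStreamWriter(new FileOutputStream(outFile), "UTF-8")) {
      // Pair the value with a timestamp so loadDfsUsed() can reject a
      // stale figure and fall back to a fresh du scan.
      out.write(dfsUsage.getUsed() + " " + System.currentTimeMillis());
    } catch (IOException ioe) {
      // Best effort: losing the cache only costs one du scan at restart.
    }
  }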