Code Example #1
  public void testLoggingWithNullParameters() {
    Log log = this.getLogObject();

    assertNotNull(log);

    log.debug(null);

    log.debug(null, null);

    log.debug(log.getClass().getName() + ": debug statement");

    log.debug(
        log.getClass().getName() + ": debug statement w/ null exception", new RuntimeException());

    log.error(null);

    log.error(null, null);

    log.error(log.getClass().getName() + ": error statement");

    log.error(
        log.getClass().getName() + ": error statement w/ null exception", new RuntimeException());

    log.fatal(null);

    log.fatal(null, null);

    log.fatal(log.getClass().getName() + ": fatal statement");

    log.fatal(
        log.getClass().getName() + ": fatal statement w/ null exception", new RuntimeException());

    log.info(null);

    log.info(null, null);

    log.info(log.getClass().getName() + ": info statement");

    log.info(
        log.getClass().getName() + ": info statement w/ null exception", new RuntimeException());

    log.trace(null);

    log.trace(null, null);

    log.trace(log.getClass().getName() + ": trace statement");

    log.trace(
        log.getClass().getName() + ": trace statement w/ null exception", new RuntimeException());

    log.warn(null);

    log.warn(null, null);

    log.warn(log.getClass().getName() + ": warn statement");

    log.warn(
        log.getClass().getName() + ": warn statement w/ null exception", new RuntimeException());
  }
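A hypothetical, more compact equivalent of the checks above: assuming commons-logging's Log interface (which declares an (Object, Throwable) overload for each level), the same null-safety exercise can be driven through a loop. NullLoggingCheck and everything inside it are illustrative names, not part of the original suite.

import java.util.Arrays;
import java.util.List;
import java.util.function.BiConsumer;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

// Hypothetical compact rewrite: exercise every log level with null arguments.
// A well-behaved Log implementation should accept nulls without throwing.
public class NullLoggingCheck {
  public static void main(String[] args) {
    Log log = LogFactory.getLog(NullLoggingCheck.class);
    List<BiConsumer<Object, Throwable>> levels =
        Arrays.asList(log::trace, log::debug, log::info, log::warn, log::error, log::fatal);
    for (BiConsumer<Object, Throwable> level : levels) {
      level.accept(null, null); // null message and null throwable
      level.accept("statement w/ exception", new RuntimeException());
    }
  }
}

The single-argument overloads (log.debug(null) and the like) could be covered the same way with a list of Consumer<Object>.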
Code Example #2
 private void logException(String message, Exception e) {
   if (e instanceof FailureException) {
     throw (FailureException) e;
   }
   // TODO: use record exception stuff below
   log.fatal(message, e);
 }
Code Example #3
  // Checks if the cache has already been localized and is fresh
  private static boolean ifExistsAndFresh(
      Configuration conf,
      FileSystem fs,
      URI cache,
      long confFileStamp,
      CacheStatus lcacheStatus,
      FileStatus fileStatus)
      throws IOException {
    // check for existence of the cache
    long dfsFileStamp;
    if (fileStatus != null) {
      dfsFileStamp = fileStatus.getModificationTime();
    } else {
      dfsFileStamp = getTimestamp(conf, cache);
    }

    // ensure that the file on hdfs hasn't been modified since the job started
    if (dfsFileStamp != confFileStamp) {
      LOG.fatal("File: " + cache + " has changed on HDFS since job started");
      throw new IOException("File: " + cache + " has changed on HDFS since job started");
    }

    // fresh only if the localized copy's mtime matches the current HDFS timestamp;
    // otherwise it needs refreshing
    return dfsFileStamp == lcacheStatus.mtime;
  }
Code Example #4
File: TestIPC.java  Project: Ronald33/hadoop-0.21
 public void run() {
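   // Round-trip `count` random longs through the server: each call should echo
   // its parameter back; a mismatch or an exception marks this thread as failed.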
   for (int i = 0; i < count; i++) {
     try {
       LongWritable param = new LongWritable(RANDOM.nextLong());
       LongWritable value = (LongWritable) client.call(param, server, null, null);
       if (!param.equals(value)) {
         LOG.fatal("Call failed!");
         failed = true;
         break;
       }
     } catch (Exception e) {
       LOG.fatal("Caught: " + StringUtils.stringifyException(e));
       failed = true;
     }
   }
 }
Code Example #5
File: TestIPC.java  Project: Ronald33/hadoop-0.21
 public void run() {
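   // Same echo check as the previous example, but batched: one parallel call
   // per server address, each with its own random parameter.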
   for (int i = 0; i < count; i++) {
     try {
       Writable[] params = new Writable[addresses.length];
       for (int j = 0; j < addresses.length; j++)
         params[j] = new LongWritable(RANDOM.nextLong());
       Writable[] values = client.call(params, addresses, null, null);
       for (int j = 0; j < addresses.length; j++) {
         if (!params[j].equals(values[j])) {
           LOG.fatal("Call failed!");
           failed = true;
           break;
         }
       }
     } catch (Exception e) {
       LOG.fatal("Caught: " + StringUtils.stringifyException(e));
       failed = true;
     }
   }
 }
Code Example #6
 private void addPrebuiltJsp(String path, String className) {
   try {
     Class<?> clazz =
         Class.forName(
             className); // ttt2 see if possible to not use this, preferably without doing
                         // redirections like RedirectServlet
     Object obj = clazz.newInstance();
     addServlet(new ServletHolder((Servlet) obj), path);
     LOG.info("Added prebuilt JSP: " + obj.toString());
   } catch (Exception e) {
     LOG.fatal(String.format("Failed to load prebuilt JSP for %s and %s", path, className), e);
   }
 }
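Class.newInstance() (used above) has been deprecated since Java 9 because it lets checked exceptions thrown by the constructor escape unchecked. A hypothetical variant of the same method using the replacement, getDeclaredConstructor().newInstance(); addPrebuiltJspSafely is an invented name, while addServlet, ServletHolder, and LOG are the members already used above:

 // Hypothetical sketch: same lookup, non-deprecated reflective instantiation.
 private void addPrebuiltJspSafely(String path, String className) {
   try {
     Class<?> clazz = Class.forName(className);
     // Constructor failures now surface as InvocationTargetException (a
     // ReflectiveOperationException) instead of leaking unchecked.
     Servlet servlet = (Servlet) clazz.getDeclaredConstructor().newInstance();
     addServlet(new ServletHolder(servlet), path);
     LOG.info("Added prebuilt JSP: " + servlet);
   } catch (ReflectiveOperationException | ClassCastException e) {
     LOG.fatal(String.format("Failed to load prebuilt JSP for %s and %s", path, className), e);
   }
 }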
Code Example #7
  @Override
  public boolean perform(AbstractBuild build, Launcher launcher, BuildListener listener) {
    // This method deserves a refactor and cleanup.
    boolean success = true;
    Log log = new Log(listener);
    if (Result.FAILURE.equals(build.getResult())) {
      log.info("Not deploying due to job being in FAILED state.");
      return success;
    }

    logStartHeader(log);
    // todo: getting from descriptor is ugly. refactor?
    getDescriptorImpl().setGlobalConfiguration();
    OctopusApi api = getDescriptorImpl().api;

    VariableResolver resolver = build.getBuildVariableResolver();
    EnvVars envVars;
    try {
      envVars = build.getEnvironment(listener);
    } catch (Exception ex) {
      log.fatal(
          String.format(
              "Failed to retrieve environment variables for this build - '%s'", ex.getMessage()));
      return false;
    }
    EnvironmentVariableValueInjector envInjector =
        new EnvironmentVariableValueInjector(resolver, envVars);
    // NOTE: hiding the member variables of the same name with their env-injected equivalents
    String project = envInjector.injectEnvironmentVariableValues(this.project);
    String releaseVersion = envInjector.injectEnvironmentVariableValues(this.releaseVersion);
    String environment = envInjector.injectEnvironmentVariableValues(this.environment);
    String variables = envInjector.injectEnvironmentVariableValues(this.variables);

    com.octopusdeploy.api.Project p = null;
    try {
      p = api.getProjectByName(project);
    } catch (Exception ex) {
      log.fatal(
          String.format(
              "Retrieving project name '%s' failed with message '%s'", project, ex.getMessage()));
      success = false;
    }
    com.octopusdeploy.api.Environment env = null;
    try {
      env = api.getEnvironmentByName(environment);
    } catch (Exception ex) {
      log.fatal(
          String.format(
              "Retrieving environment name '%s' failed with message '%s'",
              environment, ex.getMessage()));
      success = false;
    }
    if (p == null) {
      log.fatal("Project was not found.");
      success = false;
    }
    if (env == null) {
      log.fatal("Environment was not found.");
      success = false;
    }
    if (!success) // Early exit
    {
      return success;
    }
    Set<com.octopusdeploy.api.Release> releases = null;
    try {
      releases = api.getReleasesForProject(p.getId());
    } catch (Exception ex) {
      log.fatal(
          String.format(
              "Retrieving releases for project '%s' failed with message '%s'",
              project, ex.getMessage()));
      success = false;
    }
    if (releases == null) {
      log.fatal("Releases was not found.");
      return false;
    }
    Release releaseToDeploy = null;
    for (Release r : releases) {
      if (releaseVersion.equals(r.getVersion())) {
        releaseToDeploy = r;
        break;
      }
    }
    if (releaseToDeploy == null) // early exit
    {
      log.fatal(
          String.format(
              "Unable to find release version %s for project %s", releaseVersion, project));
      return false;
    }
    Properties properties = new Properties();
    try {
      properties.load(new StringReader(variables));
    } catch (Exception ex) {
      log.fatal(
          String.format(
              "Unable to load entry variables failed with message '%s'", ex.getMessage()));
      success = false;
    }

    // TODO: Can we tell if we need to call? For now, always fetch the variables
    // and use them if found.
    Set<com.octopusdeploy.api.Variable> variablesForDeploy = null;

    try {
      String releaseId = releaseToDeploy.getId();
      String environmentId = env.getId();
      variablesForDeploy =
          api.getVariablesByReleaseAndEnvironment(releaseId, environmentId, properties);
    } catch (Exception ex) {
      log.fatal(
          String.format(
              "Retrieving variables for release '%s' to environment '%s' failed with message '%s'",
              releaseToDeploy.getId(), env.getName(), ex.getMessage()));
      success = false;
    }
    try {
      String results =
          api.executeDeployment(releaseToDeploy.getId(), env.getId(), variablesForDeploy);
      if (isTaskJson(results)) {
        JSON resultJson = JSONSerializer.toJSON(results);
        String urlSuffix = ((JSONObject) resultJson).getJSONObject("Links").getString("Web");
        String url = getDescriptorImpl().octopusHost;
        if (url.endsWith("/")) {
          url = url.substring(0, url.length() - 1); // trim only the trailing slash
        }
        log.info("Deployment executed: \n\t" + url + urlSuffix);
        build.addAction(
            new BuildInfoSummary(
                BuildInfoSummary.OctopusDeployEventType.Deployment, url + urlSuffix));
        if (waitForDeployment) {

          log.info("Waiting for deployment to complete.");
          String resultState = waitForDeploymentCompletion(resultJson, api, log);
          if (resultState == null) {
            log.info("Marking build failed due to failure in waiting for deployment to complete.");
            success = false;
          }

          if ("Failed".equals(resultState)) {
            log.info("Marking build failed due to deployment task status.");
            success = false;
          }
        }
      }
    } catch (IOException ex) {
      log.fatal("Failed to deploy: " + ex.getMessage());
      success = false;
    }

    return success;
  }
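Each API call in perform repeats the same try/log.fatal/success = false scaffolding. A hypothetical way to factor that out, assuming Java 8+ and the plugin's own Log wrapper used above (callOrNull is an invented name):

  // Hypothetical helper: run one API call, report a failure through the
  // plugin's Log, and return null so the caller's existing null checks drive
  // the success/early-exit logic.
  private static <T> T callOrNull(java.util.concurrent.Callable<T> call, String description, Log log) {
    try {
      return call.call();
    } catch (Exception ex) {
      log.fatal(String.format("%s failed with message '%s'", description, ex.getMessage()));
      return null;
    }
  }

  // Usage sketch:
  //   com.octopusdeploy.api.Project p =
  //       callOrNull(() -> api.getProjectByName(project),
  //           "Retrieving project name '" + project + "'", log);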
Code Example #8
File: TaskRunner.java  Project: LefKok/upright
  @Override
  public final void run() {
    try {

      // before preparing the job localize
      // all the archives
      TaskAttemptID taskid = t.getTaskID();
      LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
      File jobCacheDir = null;
      if (conf.getJar() != null) {
        jobCacheDir = new File(new Path(conf.getJar()).getParent().toString());
      }
      File workDir =
          new File(
              lDirAlloc
                  .getLocalPathToRead(
                      TaskTracker.getJobCacheSubdir()
                          + Path.SEPARATOR
                          + t.getJobID()
                          + Path.SEPARATOR
                          + t.getTaskID()
                          + Path.SEPARATOR
                          + MRConstants.WORKDIR,
                      conf)
                  .toString());

      URI[] archives = DistributedCache.getCacheArchives(conf);
      URI[] files = DistributedCache.getCacheFiles(conf);
      FileStatus fileStatus;
      FileSystem fileSystem;
      Path localPath;
      String baseDir;

      if ((archives != null) || (files != null)) {
        if (archives != null) {
          String[] archivesTimestamps = DistributedCache.getArchiveTimestamps(conf);
          Path[] p = new Path[archives.length];
          for (int i = 0; i < archives.length; i++) {
            fileSystem = FileSystem.get(archives[i], conf);
            fileStatus = fileSystem.getFileStatus(new Path(archives[i].getPath()));
            String cacheId = DistributedCache.makeRelative(archives[i], conf);
            String cachePath = TaskTracker.getCacheSubdir() + Path.SEPARATOR + cacheId;
            if (lDirAlloc.ifExists(cachePath, conf)) {
              localPath = lDirAlloc.getLocalPathToRead(cachePath, conf);
            } else {
              localPath = lDirAlloc.getLocalPathForWrite(cachePath, fileStatus.getLen(), conf);
            }
            baseDir = localPath.toString().replace(cacheId, "");
            p[i] =
                DistributedCache.getLocalCache(
                    archives[i],
                    conf,
                    new Path(baseDir),
                    fileStatus,
                    true,
                    Long.parseLong(archivesTimestamps[i]),
                    new Path(workDir.getAbsolutePath()),
                    false);
          }
          DistributedCache.setLocalArchives(conf, stringifyPathArray(p));
        }
        if ((files != null)) {
          String[] fileTimestamps = DistributedCache.getFileTimestamps(conf);
          Path[] p = new Path[files.length];
          for (int i = 0; i < files.length; i++) {
            fileSystem = FileSystem.get(files[i], conf);
            fileStatus = fileSystem.getFileStatus(new Path(files[i].getPath()));
            String cacheId = DistributedCache.makeRelative(files[i], conf);
            String cachePath = TaskTracker.getCacheSubdir() + Path.SEPARATOR + cacheId;
            if (lDirAlloc.ifExists(cachePath, conf)) {
              localPath = lDirAlloc.getLocalPathToRead(cachePath, conf);
            } else {
              localPath = lDirAlloc.getLocalPathForWrite(cachePath, fileStatus.getLen(), conf);
            }
            baseDir = localPath.toString().replace(cacheId, "");
            p[i] =
                DistributedCache.getLocalCache(
                    files[i],
                    conf,
                    new Path(baseDir),
                    fileStatus,
                    false,
                    Long.parseLong(fileTimestamps[i]),
                    new Path(workDir.getAbsolutePath()),
                    false);
          }
          DistributedCache.setLocalFiles(conf, stringifyPathArray(p));
        }
        Path localTaskFile = new Path(t.getJobFile());
        FileSystem localFs = FileSystem.getLocal(conf);
        localFs.delete(localTaskFile, true);
        OutputStream out = localFs.create(localTaskFile);
        try {
          conf.writeXml(out);
        } finally {
          out.close();
        }
      }

      if (!prepare()) {
        return;
      }

      String sep = System.getProperty("path.separator");
      StringBuffer classPath = new StringBuffer();
      // start with same classpath as parent process
      classPath.append(System.getProperty("java.class.path"));
      classPath.append(sep);
      if (!workDir.mkdirs()) {
        if (!workDir.isDirectory()) {
          LOG.fatal("Mkdirs failed to create " + workDir.toString());
        }
      }

      String jar = conf.getJar();
      if (jar != null) {
        // if a job jar exists, add its lib/, classes/, and root dir to the classpath
        File[] libs = new File(jobCacheDir, "lib").listFiles();
        if (libs != null) {
          for (int i = 0; i < libs.length; i++) {
            classPath.append(sep); // add libs from jar to classpath
            classPath.append(libs[i]);
          }
        }
        classPath.append(sep);
        classPath.append(new File(jobCacheDir, "classes"));
        classPath.append(sep);
        classPath.append(jobCacheDir);
      }

      // include the user specified classpath

      // archive paths
      Path[] archiveClasspaths = DistributedCache.getArchiveClassPaths(conf);
      if (archiveClasspaths != null && archives != null) {
        Path[] localArchives = DistributedCache.getLocalCacheArchives(conf);
        if (localArchives != null) {
          for (int i = 0; i < archives.length; i++) {
            for (int j = 0; j < archiveClasspaths.length; j++) {
              if (archives[i].getPath().equals(archiveClasspaths[j].toString())) {
                classPath.append(sep);
                classPath.append(localArchives[i].toString());
              }
            }
          }
        }
      }
      // file paths
      Path[] fileClasspaths = DistributedCache.getFileClassPaths(conf);
      if (fileClasspaths != null && files != null) {
        Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
        if (localFiles != null) {
          for (int i = 0; i < files.length; i++) {
            for (int j = 0; j < fileClasspaths.length; j++) {
              if (files[i].getPath().equals(fileClasspaths[j].toString())) {
                classPath.append(sep);
                classPath.append(localFiles[i].toString());
              }
            }
          }
        }
      }

      classPath.append(sep);
      classPath.append(workDir);
      //  Build exec child JVM args.
      Vector<String> vargs = new Vector<String>(8);
      File jvm = // use same jvm as parent
          new File(new File(System.getProperty("java.home"), "bin"), "java");

      vargs.add(jvm.toString());

      // Add child (task) java-vm options.
      //
      // The following symbols if present in mapred.child.java.opts value are
      // replaced:
      // + @taskid@ is interpolated with value of TaskID.
      // Other occurrences of @ will not be altered.
      //
      // Example with multiple arguments and substitutions, showing
      // jvm GC logging, and start of a passwordless JVM JMX agent so can
      // connect with jconsole and the likes to watch child memory, threads
      // and get thread dumps.
      //
      //  <property>
      //    <name>mapred.child.java.opts</name>
      //    <value>-verbose:gc -Xloggc:/tmp/@taskid@.gc \
      //           -Dcom.sun.management.jmxremote.authenticate=false \
      //           -Dcom.sun.management.jmxremote.ssl=false \
      //    </value>
      //  </property>
      //
      String javaOpts = conf.get("mapred.child.java.opts", "-Xmx200m");
      javaOpts = javaOpts.replace("@taskid@", taskid.toString());
      String[] javaOptsSplit = javaOpts.split(" ");

      // Add java.library.path; necessary for loading native libraries.
      //
      // 1. To support native-hadoop library i.e. libhadoop.so, we add the
      //    parent processes' java.library.path to the child.
      // 2. We also add the 'cwd' of the task to its java.library.path to help
      //    users distribute native libraries via the DistributedCache.
      // 3. The user can also specify extra paths to be added to the
      //    java.library.path via mapred.child.java.opts.
      //
      String libraryPath = System.getProperty("java.library.path");
      if (libraryPath == null) {
        libraryPath = workDir.getAbsolutePath();
      } else {
        libraryPath += sep + workDir;
      }
      boolean hasUserLDPath = false;
      for (int i = 0; i < javaOptsSplit.length; i++) {
        if (javaOptsSplit[i].startsWith("-Djava.library.path=")) {
          javaOptsSplit[i] += sep + libraryPath;
          hasUserLDPath = true;
          break;
        }
      }
      if (!hasUserLDPath) {
        vargs.add("-Djava.library.path=" + libraryPath);
      }
      for (int i = 0; i < javaOptsSplit.length; i++) {
        vargs.add(javaOptsSplit[i]);
      }

      // add java.io.tmpdir given by mapred.child.tmp
      String tmp = conf.get("mapred.child.tmp", "./tmp");
      Path tmpDir = new Path(tmp);

      // if temp directory path is not absolute
      // prepend it with workDir.
      if (!tmpDir.isAbsolute()) {
        tmpDir = new Path(workDir.toString(), tmp);
      }
      FileSystem localFs = FileSystem.getLocal(conf);
      if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()) {
        throw new IOException("Mkdirs failed to create " + tmpDir.toString());
      }
      vargs.add("-Djava.io.tmpdir=" + tmpDir.toString());

      // Add classpath.
      vargs.add("-classpath");
      vargs.add(classPath.toString());

      // Setup the log4j prop
      long logSize = TaskLog.getTaskLogLength(conf);
      vargs.add(
          "-Dhadoop.log.dir=" + new File(System.getProperty("hadoop.log.dir")).getAbsolutePath());
      vargs.add("-Dhadoop.root.logger=INFO,TLA");
      vargs.add("-Dhadoop.tasklog.taskid=" + taskid);
      vargs.add("-Dhadoop.tasklog.totalLogFileSize=" + logSize);

      if (conf.getProfileEnabled()) {
        if (conf.getProfileTaskRange(t.isMapTask()).isIncluded(t.getPartition())) {
          File prof = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.PROFILE);
          vargs.add(String.format(conf.getProfileParams(), prof.toString()));
        }
      }

      // Add main class and its arguments
      vargs.add(Child.class.getName()); // main of Child
      // pass umbilical address
      InetSocketAddress address = tracker.getTaskTrackerReportAddress();
      vargs.add(address.getAddress().getHostAddress());
      vargs.add(Integer.toString(address.getPort()));
      vargs.add(taskid.toString()); // pass task identifier

      String pidFile = null;
      if (tracker.isTaskMemoryManagerEnabled()) {
        pidFile =
            lDirAlloc
                .getLocalPathForWrite(
                    (TaskTracker.getPidFilesSubdir() + Path.SEPARATOR + taskid), this.conf)
                .toString();
      }

      // set memory limit using ulimit if feasible and necessary ...
      String[] ulimitCmd = Shell.getUlimitMemoryCommand(conf);
      List<String> setup = null;
      if (ulimitCmd != null) {
        setup = new ArrayList<String>();
        for (String arg : ulimitCmd) {
          setup.add(arg);
        }
      }

      // Set up the redirection of the task's stdout and stderr streams
      File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
      File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
      stdout.getParentFile().mkdirs();
      tracker.getTaskTrackerInstrumentation().reportTaskLaunch(taskid, stdout, stderr);

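      // Build the child's environment: the task's work dir goes first on
      // LD_LIBRARY_PATH, followed by the parent's existing value, if any.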
      Map<String, String> env = new HashMap<String, String>();
      StringBuffer ldLibraryPath = new StringBuffer();
      ldLibraryPath.append(workDir.toString());
      String oldLdLibraryPath = null;
      oldLdLibraryPath = System.getenv("LD_LIBRARY_PATH");
      if (oldLdLibraryPath != null) {
        ldLibraryPath.append(sep);
        ldLibraryPath.append(oldLdLibraryPath);
      }
      env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());
      jvmManager.launchJvm(
          this,
          jvmManager.constructJvmEnv(
              setup, vargs, stdout, stderr, logSize, workDir, env, pidFile, conf));
      synchronized (lock) {
        while (!done) {
          lock.wait();
        }
      }
      tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID());
      if (exitCodeSet) {
        if (!killed && exitCode != 0) {
          if (exitCode == 65) {
            tracker.getTaskTrackerInstrumentation().taskFailedPing(t.getTaskID());
          }
          throw new IOException("Task process exit with nonzero status of " + exitCode + ".");
        }
      }
    } catch (FSError e) {
      LOG.fatal("FSError", e);
      try {
        tracker.fsError(t.getTaskID(), e.getMessage());
      } catch (IOException ie) {
        LOG.fatal(t.getTaskID() + " reporting FSError", ie);
      }
    } catch (Throwable throwable) {
      LOG.warn(t.getTaskID() + " Child Error", throwable);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      throwable.printStackTrace(new PrintStream(baos));
      try {
        tracker.reportDiagnosticInfo(t.getTaskID(), baos.toString());
      } catch (IOException e) {
        LOG.warn(t.getTaskID() + " Reporting Diagnostics", e);
      }
    } finally {
      try {
        URI[] archives = DistributedCache.getCacheArchives(conf);
        URI[] files = DistributedCache.getCacheFiles(conf);
        if (archives != null) {
          for (int i = 0; i < archives.length; i++) {
            DistributedCache.releaseCache(archives[i], conf);
          }
        }
        if (files != null) {
          for (int i = 0; i < files.length; i++) {
            DistributedCache.releaseCache(files[i], conf);
          }
        }
      } catch (IOException ie) {
        LOG.warn("Error releasing caches : Cache files might not have been cleaned up");
      }
      tracker.reportTaskFinished(t.getTaskID(), false);
      if (t.isMapTask()) {
        tracker.addFreeMapSlot();
      } else {
        tracker.addFreeReduceSlot();
      }
    }
  }