Example #1
 /**
  * Fatal errors are errors that cannot be recovered from by retrying; what counts as fatal is
  * application dependent. For example, if the small table of a map-side join is too large to be
  * handled by a single mapper, the job should fail and the user should be warned to use a
  * regular join rather than a map-side join. Fatal errors are indicated by counters that are set
  * at execution time: if the counter is non-zero, a fatal error occurred, and its value
  * indicates the error type.
  *
  * @return true if fatal errors happened during job execution, false otherwise.
  */
 @Override
 public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
   Counters.Counter cntr =
       ctrs.findCounter(
           HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP), Operator.HIVECOUNTERFATAL);
   return cntr != null && cntr.getValue() > 0;
 }
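
A minimal sketch of a hypothetical call site, assuming a runningJob handle and a console logger are in scope (neither is part of the example above): once the job completes, its counters are fetched and checked before declaring success.

  // Hypothetical caller (not from the Hive source): inspect the finished
  // job's counters and fail fast when a fatal-error counter is non-zero.
  Counters ctrs = runningJob.getCounters();
  StringBuilder errMsg = new StringBuilder();
  if (checkFatalErrors(ctrs, errMsg)) {
    // e.g. the small table of a map-side join was too large for one mapper
    console.printError("Job failed with fatal errors" + errMsg);
  }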
Example #2
  public void finish(RunningJob runningJob) throws IOException {
    super.finish(runningJob);

    // Snapshot each per-enum counter from the finished job, defaulting to 0
    // when a counter was never incremented.
    unforwardedCounts = new HashMap<Unforwarded, Long>();
    for (Unforwarded u : Unforwarded.values()) {
      Counters.Counter counter = runningJob.getCounters().findCounter(u);
      unforwardedCounts.put(u, counter != null ? counter.getCounter() : 0L);
    }

    saveUnforwardedCounts();
  }
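
For context, a minimal sketch of the producing side, assuming Unforwarded is an ordinary enum (its real constants are not shown in this example): Hadoop keys one counter per enum constant, so a task can increment them through the old-API Reporter, and finish() above reads them back via findCounter(u).

  // Illustrative enum constants; the real Unforwarded values are assumptions.
  public enum Unforwarded { MALFORMED_RECORD, FILTERED }

  // Inside a map or reduce task (org.apache.hadoop.mapred API), with a
  // Reporter in scope; each call bumps the counter that findCounter(u)
  // will locate once the job is done.
  reporter.incrCounter(Unforwarded.MALFORMED_RECORD, 1L);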
Example #3
  /**
   * Do some basic verification of the input received -- being defensive.
   *
   * @param compressedLength compressed length of the map output, as reported in the shuffle header
   * @param decompressedLength decompressed length of the map output, as reported in the header
   * @param forReduce the reduce partition this output claims to be destined for
   * @param remaining the set of map attempts still waiting to be fetched
   * @param mapId the map attempt that produced this output
   * @return true/false, based on whether the verification succeeded or not
   */
  private boolean verifySanity(
      long compressedLength,
      long decompressedLength,
      int forReduce,
      Set<TaskAttemptID> remaining,
      TaskAttemptID mapId) {
    if (compressedLength < 0 || decompressedLength < 0) {
      wrongLengthErrs.increment(1);
      LOG.warn(
          getName()
              + " invalid lengths in map output header: id: "
              + mapId
              + " len: "
              + compressedLength
              + ", decomp len: "
              + decompressedLength);
      return false;
    }

    if (forReduce != reduce) {
      wrongReduceErrs.increment(1);
      LOG.warn(
          getName()
              + " data for the wrong reduce map: "
              + mapId
              + " len: "
              + compressedLength
              + " decomp len: "
              + decompressedLength
              + " for reduce "
              + forReduce);
      return false;
    }

    // Sanity check
    if (!remaining.contains(mapId)) {
      wrongMapErrs.increment(1);
      LOG.warn("Invalid map-output! Received output for " + mapId);
      return false;
    }

    return true;
  }
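
Note the contract here: every failing branch increments a dedicated error counter (wrongLengthErrs, wrongReduceErrs, or wrongMapErrs) before returning false, so a false return always leaves an audit trail, and the call site in Example #4 then marks only the offending mapId as failed rather than the whole host.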
Example #4
  private TaskAttemptID[] copyMapOutput(
      MapHost host, DataInputStream input, Set<TaskAttemptID> remaining) {
    MapOutput<K, V> mapOutput = null;
    TaskAttemptID mapId = null;
    long decompressedLength = -1;
    long compressedLength = -1;

    try {
      long startTime = System.currentTimeMillis();
      int forReduce = -1;
      // Read the shuffle header
      try {
        ShuffleHeader header = new ShuffleHeader();
        header.readFields(input);
        mapId = TaskAttemptID.forName(header.mapId);
        compressedLength = header.compressedLength;
        decompressedLength = header.uncompressedLength;
        forReduce = header.forReduce;
      } catch (IllegalArgumentException e) {
        badIdErrs.increment(1);
        LOG.warn("Invalid map id ", e);
        // Don't know which one was bad, so consider all of them as bad
        return remaining.toArray(new TaskAttemptID[remaining.size()]);
      }

      InputStream is = input;
      is = CryptoUtils.wrapIfNecessary(jobConf, is, compressedLength);
      compressedLength -= CryptoUtils.cryptoPadding(jobConf);
      decompressedLength -= CryptoUtils.cryptoPadding(jobConf);

      // Do some basic sanity verification
      if (!verifySanity(compressedLength, decompressedLength, forReduce, remaining, mapId)) {
        return new TaskAttemptID[] {mapId};
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug(
            "header: "
                + mapId
                + ", len: "
                + compressedLength
                + ", decomp len: "
                + decompressedLength);
      }

      // Get the location for the map output - either in-memory or on-disk
      try {
        mapOutput = merger.reserve(mapId, decompressedLength, id);
      } catch (IOException ioe) {
        // kill this reduce attempt
        ioErrs.increment(1);
        scheduler.reportLocalError(ioe);
        return EMPTY_ATTEMPT_ID_ARRAY;
      }

      // Check if we can shuffle *now* ...
      if (mapOutput == null) {
        LOG.info("fetcher#" + id + " - MergeManager returned status WAIT ...");
        // Not an error but wait to process data.
        return EMPTY_ATTEMPT_ID_ARRAY;
      }

      // The codecs for lzo, lz4, snappy, bz2, etc. throw java.lang.InternalError
      // on decompression failures. Catch and re-throw as an IOException
      // to allow the fetch-failure logic to be processed.
      try {
        // Go!
        LOG.info(
            "fetcher#"
                + id
                + " about to shuffle output of map "
                + mapOutput.getMapId()
                + " decomp: "
                + decompressedLength
                + " len: "
                + compressedLength
                + " to "
                + mapOutput.getDescription());
        mapOutput.shuffle(host, is, compressedLength, decompressedLength, metrics, reporter);
      } catch (java.lang.InternalError e) {
        LOG.warn("Failed to shuffle for fetcher#" + id, e);
        throw new IOException(e);
      }

      // Inform the shuffle scheduler
      long endTime = System.currentTimeMillis();
      scheduler.copySucceeded(mapId, host, compressedLength, endTime - startTime, mapOutput);
      // Note successful shuffle
      remaining.remove(mapId);
      metrics.successFetch();
      return null;
    } catch (IOException ioe) {
      ioErrs.increment(1);
      if (mapId == null || mapOutput == null) {
        LOG.info(
            "fetcher#"
                + id
                + " failed to read map header"
                + mapId
                + " decomp: "
                + decompressedLength
                + ", "
                + compressedLength,
            ioe);
        if (mapId == null) {
          return remaining.toArray(new TaskAttemptID[remaining.size()]);
        } else {
          return new TaskAttemptID[] {mapId};
        }
      }

      LOG.warn("Failed to shuffle output of " + mapId + " from " + host.getHostName(), ioe);

      // Inform the shuffle-scheduler
      mapOutput.abort();
      metrics.failedFetch();
      return new TaskAttemptID[] {mapId};
    }
  }
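
The header parsed at the top of copyMapOutput carries four fields (mapId, compressedLength, uncompressedLength, forReduce). A rough sketch of what ShuffleHeader.readFields amounts to, assuming Hadoop's variable-length Writable encodings; the helper names and field order are best-effort assumptions, so consult the real class for the authoritative wire format.

  // Sketch of ShuffleHeader.readFields (assumed layout, not authoritative).
  public void readFields(DataInput in) throws IOException {
    mapId = WritableUtils.readStringSafely(in, MAX_ID_LENGTH); // map attempt id
    compressedLength = WritableUtils.readVLong(in);   // bytes on the wire
    uncompressedLength = WritableUtils.readVLong(in); // bytes after decompression
    forReduce = WritableUtils.readVInt(in);           // target reduce partition
  }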
Example #5
  /**
   * The crux of the matter: connect to the given host and copy every available map output from it.
   *
   * @param host {@link MapHost} from which we need to shuffle available map-outputs.
   */
  @VisibleForTesting
  protected void copyFromHost(MapHost host) throws IOException {
    // Get completed maps on 'host'
    List<TaskAttemptID> maps = scheduler.getMapsForHost(host);

    // Sanity check to catch hosts with only 'OBSOLETE' maps,
    // especially at the tail of large jobs
    if (maps.size() == 0) {
      return;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + maps);
    }

    // List of maps to be fetched yet
    Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps);

    // Construct the url and connect
    DataInputStream input = null;
    try {
      URL url = getMapOutputURL(host, maps);
      openConnection(url);
      if (stopped) {
        abortConnect(host, remaining);
        return;
      }

      // generate hash of the url
      String msgToEncode = SecureShuffleUtils.buildMsgFrom(url);
      String encHash = SecureShuffleUtils.hashFromString(msgToEncode, shuffleSecretKey);

      // put url hash into http header
      connection.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash);
      // set the read timeout
      connection.setReadTimeout(readTimeout);
      // put shuffle version into http header
      connection.addRequestProperty(
          ShuffleHeader.HTTP_HEADER_NAME, ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
      connection.addRequestProperty(
          ShuffleHeader.HTTP_HEADER_VERSION, ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
      connect(connection, connectionTimeout);
      // verify that the thread wasn't stopped during calls to connect
      if (stopped) {
        abortConnect(host, remaining);
        return;
      }
      input = new DataInputStream(connection.getInputStream());

      // Validate response code
      int rc = connection.getResponseCode();
      if (rc != HttpURLConnection.HTTP_OK) {
        throw new IOException(
            "Got invalid response code "
                + rc
                + " from "
                + url
                + ": "
                + connection.getResponseMessage());
      }
      // get the shuffle version
      if (!ShuffleHeader.DEFAULT_HTTP_HEADER_NAME.equals(
              connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
          || !ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION.equals(
              connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))) {
        throw new IOException("Incompatible shuffle response version");
      }
      // get the replyHash which is HMac of the encHash we sent to the server
      String replyHash = connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH);
      if (replyHash == null) {
        throw new IOException("security validation of TT Map output failed");
      }
      LOG.debug("url=" + msgToEncode + ";encHash=" + encHash + ";replyHash=" + replyHash);
      // verify that replyHash is HMac of encHash
      SecureShuffleUtils.verifyReply(replyHash, encHash, shuffleSecretKey);
      LOG.info("for url=" + msgToEncode + " sent hash and received reply");
    } catch (IOException ie) {
      boolean connectExcpt = ie instanceof ConnectException;
      ioErrs.increment(1);
      LOG.warn("Failed to connect to " + host + " with " + remaining.size() + " map outputs", ie);

      // If connect did not succeed, just mark all the maps as failed,
      // indirectly penalizing the host
      scheduler.hostFailed(host.getHostName());
      for (TaskAttemptID left : remaining) {
        scheduler.copyFailed(left, host, false, connectExcpt);
      }

      // Add back all the remaining maps, WITHOUT marking them as failed
      for (TaskAttemptID left : remaining) {
        scheduler.putBackKnownMapOutput(host, left);
      }

      return;
    }

    try {
      // Loop through available map-outputs and fetch them.
      // On any error, failedTasks is non-null and we exit
      // after putting the remaining maps back on the
      // yet_to_be_fetched list and marking the failed tasks.
      TaskAttemptID[] failedTasks = null;
      while (!remaining.isEmpty() && failedTasks == null) {
        failedTasks = copyMapOutput(host, input, remaining);
      }

      if (failedTasks != null && failedTasks.length > 0) {
        LOG.warn("copyMapOutput failed for tasks " + Arrays.toString(failedTasks));
        scheduler.hostFailed(host.getHostName());
        for (TaskAttemptID left : failedTasks) {
          scheduler.copyFailed(left, host, true, false);
        }
      }

      // Sanity check
      if (failedTasks == null && !remaining.isEmpty()) {
        throw new IOException(
            "server didn't return all expected map outputs: " + remaining.size() + " left.");
      }
      input.close();
      input = null;
    } finally {
      if (input != null) {
        IOUtils.cleanup(LOG, input);
        input = null;
      }
      for (TaskAttemptID left : remaining) {
        scheduler.putBackKnownMapOutput(host, left);
      }
    }
  }
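
The hash exchange in the middle of copyFromHost is a shared-secret challenge and response: the fetcher sends an HMAC of the request URL, and the server must reply with an HMAC of that hash, proving it holds the same job secret. A self-contained sketch of the idea, assuming HMAC-SHA1 over UTF-8 bytes (SecureShuffleUtils' real key handling and Base64 details may differ).

  import javax.crypto.Mac;
  import javax.crypto.spec.SecretKeySpec;
  import java.nio.charset.StandardCharsets;
  import java.util.Base64;

  // Sketch of the shuffle challenge/response; names and key setup are
  // illustrative, not the actual SecureShuffleUtils implementation.
  public class ShuffleHandshakeSketch {
    static String hmac(String msg, byte[] secret) throws Exception {
      Mac mac = Mac.getInstance("HmacSHA1");
      mac.init(new SecretKeySpec(secret, "HmacSHA1"));
      return Base64.getEncoder().encodeToString(
          mac.doFinal(msg.getBytes(StandardCharsets.UTF_8)));
    }

    public static void main(String[] args) throws Exception {
      byte[] secret = "shared-shuffle-secret".getBytes(StandardCharsets.UTF_8);
      String encHash = hmac("http://host:port/mapOutput?...", secret); // fetcher sends
      String replyHash = hmac(encHash, secret);                        // server answers
      // The fetcher recomputes hmac(encHash, secret) and compares it to the
      // reply, which mirrors what SecureShuffleUtils.verifyReply checks.
      System.out.println(replyHash.equals(hmac(encHash, secret)));     // true
    }
  }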