Example #1
 private void writeIndexRecord(
     FSDataOutputStream indexOut, FSDataOutputStream out, long start, Writer<K, V> writer)
     throws IOException {
   // when we write the offset/decompressed-length/compressed-length to
   // the final index file, we write longs for both compressed and
   // decompressed lengths. This helps us to reliably seek directly to
   // the offset/length for a partition when we start serving the
   // byte-ranges to the reduces. We probably waste some space in the
   // file by doing this as opposed to writing VLong but it helps us later on.
   // index record: <offset, raw-length, compressed-length>
   indexOut.writeLong(start);
   indexOut.writeLong(writer.getRawLength());
   long segmentLength = out.getPos() - start;
   indexOut.writeLong(segmentLength);
   LOG.info("Index: (" + start + ", " + writer.getRawLength() + ", " + segmentLength + ")");
 }
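Because each record is three fixed-width longs, a partition's entry always starts at byte partition * 24 of the index file, which is what makes the direct seek mentioned in the comment possible. Below is a minimal, hypothetical reader sketch (the helper name and the use of plain java.io are assumptions, and it ignores the checksum data the real index file carries); it only illustrates the fixed-width layout:

  // Hypothetical sketch: read the <offset, raw-length, compressed-length> triple
  // for one partition from a local copy of an index file. Assumes a plain file
  // with no checksum data appended, so it illustrates the record layout only.
  static long[] readIndexRecord(java.io.File indexFile, int partition) throws java.io.IOException {
    final int recordBytes = 3 * Long.BYTES; // three longs = 24 bytes per partition
    try (java.io.RandomAccessFile raf = new java.io.RandomAccessFile(indexFile, "r")) {
      raf.seek((long) partition * recordBytes); // jump straight to this partition's record
      return new long[] {raf.readLong(), raf.readLong(), raf.readLong()};
    }
  }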
Example #2
    @Override
    public void merge(List<CompressAwarePath> inputs) throws IOException {
      // sanity check
      if (inputs == null || inputs.isEmpty()) {
        LOG.info("No ondisk files to merge...");
        return;
      }

      long approxOutputSize = 0;
      int bytesPerSum = jobConf.getInt("io.bytes.per.checksum", 512);

      LOG.info(
          "OnDiskMerger: We have " + inputs.size() + " map outputs on disk. Triggering merge...");

      // 1. Prepare the list of files to be merged.
      for (CompressAwarePath file : inputs) {
        approxOutputSize += localFS.getFileStatus(file).getLen();
      }

      // add the checksum length
      approxOutputSize += ChecksumFileSystem.getChecksumLength(approxOutputSize, bytesPerSum);

      // 2. Start the on-disk merge process
      Path outputPath =
          localDirAllocator
              .getLocalPathForWrite(inputs.get(0).toString(), approxOutputSize, jobConf)
              .suffix(Task.MERGED_OUTPUT_PREFIX);
      Writer<K, V> writer =
          new Writer<K, V>(
              jobConf,
              rfs,
              outputPath,
              (Class<K>) jobConf.getMapOutputKeyClass(),
              (Class<V>) jobConf.getMapOutputValueClass(),
              codec,
              null);
      RawKeyValueIterator iter = null;
      CompressAwarePath compressAwarePath;
      Path tmpDir = new Path(reduceId.toString());
      try {
        iter =
            Merger.merge(
                jobConf,
                rfs,
                (Class<K>) jobConf.getMapOutputKeyClass(),
                (Class<V>) jobConf.getMapOutputValueClass(),
                codec,
                inputs.toArray(new Path[inputs.size()]),
                true,
                ioSortFactor,
                tmpDir,
                (RawComparator<K>) jobConf.getOutputKeyComparator(),
                reporter,
                spilledRecordsCounter,
                null,
                mergedMapOutputsCounter,
                null);

        Merger.writeFile(iter, writer, reporter, jobConf);
        writer.close();
        compressAwarePath =
            new CompressAwarePath(outputPath, writer.getRawLength(), writer.getCompressedLength());
      } catch (IOException e) {
        localFS.delete(outputPath, true);
        throw e;
      }

      closeOnDiskFile(compressAwarePath);

      LOG.info(
          reduceId
              + " Finished merging "
              + inputs.size()
              + " map output files on disk of total-size "
              + approxOutputSize
              + "."
              + " Local output file is "
              + outputPath
              + " of size "
              + localFS.getFileStatus(outputPath).getLen());
    }
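One detail worth calling out: the size handed to getLocalPathForWrite includes the checksum overhead returned by ChecksumFileSystem.getChecksumLength. The sketch below is a hypothetical helper showing the rough shape of that estimate, roughly one 4-byte CRC per io.bytes.per.checksum bytes of data; the real method may also account for a small per-file header:

  // Hypothetical approximation of the checksum overhead added to approxOutputSize
  // above: about one 4-byte CRC for every bytesPerSum bytes of merged output.
  // Treat this as an order-of-magnitude illustration, not the exact formula.
  static long approxChecksumOverhead(long dataSize, int bytesPerSum) {
    long chunks = (dataSize + bytesPerSum - 1) / bytesPerSum; // ceil(dataSize / bytesPerSum)
    return chunks * 4L;                                       // 4 bytes of CRC per chunk
  }

With the default of 512 bytes per checksum, this adds on the order of 0.8% (4/512) to the requested local-disk allocation.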
Example #3
  private RawKeyValueIterator finalMerge(
      JobConf job,
      FileSystem fs,
      List<InMemoryMapOutput<K, V>> inMemoryMapOutputs,
      List<CompressAwarePath> onDiskMapOutputs)
      throws IOException {
    LOG.info(
        "finalMerge called with "
            + inMemoryMapOutputs.size()
            + " in-memory map-outputs and "
            + onDiskMapOutputs.size()
            + " on-disk map-outputs");

    final float maxRedPer = job.getFloat(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT, 0f);
    if (maxRedPer > 1.0 || maxRedPer < 0.0) {
      throw new IOException(
          MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT + " out of range [0.0, 1.0]: " + maxRedPer);
    }
    int maxInMemReduce =
        (int) Math.min(Runtime.getRuntime().maxMemory() * maxRedPer, Integer.MAX_VALUE);

    // merge config params
    Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
    Class<V> valueClass = (Class<V>) job.getMapOutputValueClass();
    boolean keepInputs = job.getKeepFailedTaskFiles();
    final Path tmpDir = new Path(reduceId.toString());
    final RawComparator<K> comparator = (RawComparator<K>) job.getOutputKeyComparator();

    // segments required to vacate memory
    List<Segment<K, V>> memDiskSegments = new ArrayList<Segment<K, V>>();
    long inMemToDiskBytes = 0;
    boolean mergePhaseFinished = false;
    if (inMemoryMapOutputs.size() > 0) {
      TaskID mapId = inMemoryMapOutputs.get(0).getMapId().getTaskID();
      inMemToDiskBytes =
          createInMemorySegments(inMemoryMapOutputs, memDiskSegments, maxInMemReduce);
      final int numMemDiskSegments = memDiskSegments.size();
      if (numMemDiskSegments > 0 && ioSortFactor > onDiskMapOutputs.size()) {

        // If we reach here, we have fewer than io.sort.factor disk segments,
        // and that count will grow by 1 (the result of merging the memory
        // segments). Since the total is still <= io.sort.factor, no further
        // intermediate merges are needed; the merge of all these disk
        // segments can be fed directly to the reduce method.

        mergePhaseFinished = true;
        // must spill to disk, but can't retain in-mem for intermediate merge
        final Path outputPath =
            mapOutputFile
                .getInputFileForWrite(mapId, inMemToDiskBytes)
                .suffix(Task.MERGED_OUTPUT_PREFIX);
        final RawKeyValueIterator rIter =
            Merger.merge(
                job,
                fs,
                keyClass,
                valueClass,
                memDiskSegments,
                numMemDiskSegments,
                tmpDir,
                comparator,
                reporter,
                spilledRecordsCounter,
                null,
                mergePhase);
        Writer<K, V> writer =
            new Writer<K, V>(job, fs, outputPath, keyClass, valueClass, codec, null);
        try {
          Merger.writeFile(rIter, writer, reporter, job);
          writer.close();
          onDiskMapOutputs.add(
              new CompressAwarePath(
                  outputPath, writer.getRawLength(), writer.getCompressedLength()));
          writer = null;
          // add to list of final disk outputs.
        } catch (IOException e) {
          if (null != outputPath) {
            try {
              fs.delete(outputPath, true);
            } catch (IOException ie) {
              // NOTHING
            }
          }
          throw e;
        } finally {
          if (null != writer) {
            writer.close();
          }
        }
        LOG.info(
            "Merged "
                + numMemDiskSegments
                + " segments, "
                + inMemToDiskBytes
                + " bytes to disk to satisfy "
                + "reduce memory limit");
        inMemToDiskBytes = 0;
        memDiskSegments.clear();
      } else if (inMemToDiskBytes != 0) {
        LOG.info(
            "Keeping "
                + numMemDiskSegments
                + " segments, "
                + inMemToDiskBytes
                + " bytes in memory for "
                + "intermediate, on-disk merge");
      }
    }

    // segments on disk
    List<Segment<K, V>> diskSegments = new ArrayList<Segment<K, V>>();
    long onDiskBytes = inMemToDiskBytes;
    long rawBytes = inMemToDiskBytes;
    CompressAwarePath[] onDisk =
        onDiskMapOutputs.toArray(new CompressAwarePath[onDiskMapOutputs.size()]);
    for (CompressAwarePath file : onDisk) {
      long fileLength = fs.getFileStatus(file).getLen();
      onDiskBytes += fileLength;
      rawBytes += (file.getRawDataLength() > 0) ? file.getRawDataLength() : fileLength;

      LOG.debug("Disk file: " + file + " Length is " + fileLength);
      diskSegments.add(
          new Segment<K, V>(
              job,
              fs,
              file,
              codec,
              keepInputs,
              (file.toString().endsWith(Task.MERGED_OUTPUT_PREFIX)
                  ? null
                  : mergedMapOutputsCounter),
              file.getRawDataLength()));
    }
    LOG.info("Merging " + onDisk.length + " files, " + onDiskBytes + " bytes from disk");
    Collections.sort(
        diskSegments,
        new Comparator<Segment<K, V>>() {
          public int compare(Segment<K, V> o1, Segment<K, V> o2) {
            if (o1.getLength() == o2.getLength()) {
              return 0;
            }
            return o1.getLength() < o2.getLength() ? -1 : 1;
          }
        });

    // build the final list of segments: disk-backed segments plus remaining in-mem segments
    List<Segment<K, V>> finalSegments = new ArrayList<Segment<K, V>>();
    long inMemBytes = createInMemorySegments(inMemoryMapOutputs, finalSegments, 0);
    LOG.info(
        "Merging "
            + finalSegments.size()
            + " segments, "
            + inMemBytes
            + " bytes from memory into reduce");
    if (0 != onDiskBytes) {
      final int numInMemSegments = memDiskSegments.size();
      diskSegments.addAll(0, memDiskSegments);
      memDiskSegments.clear();
      // Pass mergePhase only if there are going to be intermediate
      // merges. See the comment where mergePhaseFinished is set.
      Progress thisPhase = (mergePhaseFinished) ? null : mergePhase;
      RawKeyValueIterator diskMerge =
          Merger.merge(
              job,
              fs,
              keyClass,
              valueClass,
              codec,
              diskSegments,
              ioSortFactor,
              numInMemSegments,
              tmpDir,
              comparator,
              reporter,
              false,
              spilledRecordsCounter,
              null,
              thisPhase);
      diskSegments.clear();
      if (0 == finalSegments.size()) {
        return diskMerge;
      }
      finalSegments.add(
          new Segment<K, V>(new RawKVIteratorReader(diskMerge, onDiskBytes), true, rawBytes));
    }
    return Merger.merge(
        job,
        fs,
        keyClass,
        valueClass,
        finalSegments,
        finalSegments.size(),
        tmpDir,
        comparator,
        reporter,
        spilledRecordsCounter,
        null,
        null);
  }
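The first thing finalMerge does is turn REDUCE_INPUT_BUFFER_PERCENT into an absolute byte budget for map output that may stay in memory while the reduce runs. A small standalone sketch of that calculation follows (the helper name and the explicit range check are assumptions, not part of the original method):

  // Hypothetical sketch of the memory-limit calculation at the top of finalMerge:
  // the configured fraction of the JVM's max heap, clamped to Integer.MAX_VALUE
  // because the limit is carried around as an int.
  static int maxInMemReduceBytes(float reduceInputBufferPercent) {
    if (reduceInputBufferPercent < 0.0f || reduceInputBufferPercent > 1.0f) {
      throw new IllegalArgumentException("out of range [0.0, 1.0]: " + reduceInputBufferPercent);
    }
    long maxHeap = Runtime.getRuntime().maxMemory(); // bytes available to this JVM
    return (int) Math.min(maxHeap * reduceInputBufferPercent, Integer.MAX_VALUE);
  }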
Example #4
    @Override
    public void merge(List<InMemoryMapOutput<K, V>> inputs) throws IOException {
      if (inputs == null || inputs.size() == 0) {
        return;
      }

      // Name this output file the same as the first file in the current list
      // of in-memory files (that name is guaranteed to be absent on disk right
      // now, so we don't overwrite a previously created spill). We also need to
      // create the output file now, since it is not guaranteed to still be
      // present once merge is called (we delete empty files as soon as we see
      // them in the merge method).

      // figure out the mapId
      TaskAttemptID mapId = inputs.get(0).getMapId();
      TaskID mapTaskId = mapId.getTaskID();

      List<Segment<K, V>> inMemorySegments = new ArrayList<Segment<K, V>>();
      long mergeOutputSize = createInMemorySegments(inputs, inMemorySegments, 0);
      int noInMemorySegments = inMemorySegments.size();

      Path outputPath =
          mapOutputFile
              .getInputFileForWrite(mapTaskId, mergeOutputSize)
              .suffix(Task.MERGED_OUTPUT_PREFIX);

      Writer<K, V> writer =
          new Writer<K, V>(
              jobConf,
              rfs,
              outputPath,
              (Class<K>) jobConf.getMapOutputKeyClass(),
              (Class<V>) jobConf.getMapOutputValueClass(),
              codec,
              null);

      RawKeyValueIterator rIter = null;
      CompressAwarePath compressAwarePath;
      try {
        LOG.info("Initiating in-memory merge with " + noInMemorySegments + " segments...");

        rIter =
            Merger.merge(
                jobConf,
                rfs,
                (Class<K>) jobConf.getMapOutputKeyClass(),
                (Class<V>) jobConf.getMapOutputValueClass(),
                inMemorySegments,
                inMemorySegments.size(),
                new Path(reduceId.toString()),
                (RawComparator<K>) jobConf.getOutputKeyComparator(),
                reporter,
                spilledRecordsCounter,
                null,
                null);

        if (null == combinerClass) {
          Merger.writeFile(rIter, writer, reporter, jobConf);
        } else {
          combineCollector.setWriter(writer);
          combineAndSpill(rIter, reduceCombineInputCounter);
        }
        writer.close();
        compressAwarePath =
            new CompressAwarePath(outputPath, writer.getRawLength(), writer.getCompressedLength());

        LOG.info(
            reduceId
                + " Merge of the "
                + noInMemorySegments
                + " files in-memory complete."
                + " Local file is "
                + outputPath
                + " of size "
                + localFS.getFileStatus(outputPath).getLen());
      } catch (IOException e) {
        // make sure that we delete the on-disk output file created above
        // for this merge if anything goes wrong
        localFS.delete(outputPath, true);
        throw e;
      }

      // Note the output of the merge
      closeOnDiskFile(compressAwarePath);
    }
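Both this merger and finalMerge lean on createInMemorySegments, which is not shown in these examples. From the way it is called (a list of buffered map outputs, an output list of segments, and a byte threshold of 0 here or maxInMemReduce in finalMerge), its contract appears to be: drain buffered outputs into merge segments until at most the threshold remains buffered, and return the number of bytes drained. The sketch below is purely hypothetical, with simplified stand-in types rather than the real Hadoop classes:

      // Hypothetical sketch of the createInMemorySegments(...) contract as these
      // examples use it. byte[] stands in for InMemoryMapOutput / Segment.
      static long drainIntoSegments(
          java.util.List<byte[]> inMemoryOutputs, java.util.List<byte[]> segments, long leaveBytes) {
        long buffered = 0;
        for (byte[] out : inMemoryOutputs) {
          buffered += out.length;               // total bytes currently held in memory
        }
        long drained = 0;
        java.util.Iterator<byte[]> it = inMemoryOutputs.iterator();
        while (it.hasNext() && buffered > leaveBytes) {
          byte[] out = it.next();
          segments.add(out);                    // each buffered output becomes one merge segment
          it.remove();                          // it is no longer held "in memory"
          buffered -= out.length;
          drained += out.length;
        }
        return drained;                         // callers use this to size the merged output file
      }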