  @Override
  public void insertAll(Iterator<Product2<K, V>> records) throws IOException {
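    // This method should only be called once, before any partition writers have been created.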
    assert (partitionWriters == null);
    if (!records.hasNext()) {
      return;
    }
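    // Open one DiskBlockObjectWriter per reduce partition, each writing to its own
    // temporary shuffle block file.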
    final SerializerInstance serInstance = serializer.newInstance();
    final long openStartTime = System.nanoTime();
    partitionWriters = new DiskBlockObjectWriter[numPartitions];
    for (int i = 0; i < numPartitions; i++) {
      final Tuple2<TempShuffleBlockId, File> tempShuffleBlockIdPlusFile =
          blockManager.diskBlockManager().createTempShuffleBlock();
      final File file = tempShuffleBlockIdPlusFile._2();
      final BlockId blockId = tempShuffleBlockIdPlusFile._1();
      partitionWriters[i] =
          blockManager
              .getDiskWriter(blockId, file, serInstance, fileBufferSize, writeMetrics)
              .open();
    }
    // Creating the file to write to and creating a disk writer both involve interacting with
    // the disk, and can take a long time in aggregate when we open many files, so should be
    // included in the shuffle write time.
    writeMetrics.incShuffleWriteTime(System.nanoTime() - openStartTime);

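    // Send each record to the writer for the partition chosen by the partitioner.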
    while (records.hasNext()) {
      final Product2<K, V> record = records.next();
      final K key = record._1();
      partitionWriters[partitioner.getPartition(key)].write(key, record._2());
    }

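    // Flush and commit each per-partition file so its contents are fully written to disk.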
    for (DiskBlockObjectWriter writer : partitionWriters) {
      writer.commitAndClose();
    }
  }
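
  // Clean up: revert any partial writes and delete the per-partition temporary files.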
  @Override
  public void stop() throws IOException {
    if (partitionWriters != null) {
      try {
        for (DiskBlockObjectWriter writer : partitionWriters) {
          // This method explicitly does _not_ throw exceptions:
          File file = writer.revertPartialWritesAndClose();
          if (!file.delete()) {
            logger.error("Error while deleting file {}", file.getAbsolutePath());
          }
        }
      } finally {
        partitionWriters = null;
      }
    }
  }