  @Override
  public void writeDictionaryPage(DictionaryPage dictionaryPage) throws IOException {
    if (this.dictionaryPage != null) {
      throw new ParquetEncodingException("Only one dictionary page is allowed");
    }
    BytesInput dictionaryBytes = dictionaryPage.getBytes();
    int uncompressedSize = (int) dictionaryBytes.size();
    BytesInput compressedBytes = compressor.compress(dictionaryBytes);
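    // Copy the compressed bytes: the compressor's output buffer may be reused before the
    // dictionary page is finally written out, so the stored page must own its own bytes.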
    this.dictionaryPage =
        new DictionaryPage(
            BytesInput.copy(compressedBytes),
            uncompressedSize,
            dictionaryPage.getDictionarySize(),
            dictionaryPage.getEncoding());
  }

  @Override
  public void writePage(
      BytesInput bytes,
      int valueCount,
      Statistics statistics,
      Encoding rlEncoding,
      Encoding dlEncoding,
      Encoding valuesEncoding)
      throws IOException {
    long uncompressedSize = bytes.size();
    if (uncompressedSize > Integer.MAX_VALUE) {
      throw new ParquetEncodingException(
          "Cannot write page larger than Integer.MAX_VALUE bytes: " + uncompressedSize);
    }
    BytesInput compressedBytes = compressor.compress(bytes);
    long compressedSize = compressedBytes.size();
    if (compressedSize > Integer.MAX_VALUE) {
      throw new ParquetEncodingException(
          "Cannot write compressed page larger than Integer.MAX_VALUE bytes: " + compressedSize);
    }
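    // Serialize the page header into the reusable temp buffer so that header and
    // compressed page body can be collected into the column buffer in a single pass.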
    tempOutputStream.reset();
    parquetMetadataConverter.writeDataPageHeader(
        (int) uncompressedSize,
        (int) compressedSize,
        valueCount,
        statistics,
        rlEncoding,
        dlEncoding,
        valuesEncoding,
        tempOutputStream);
    this.uncompressedLength += uncompressedSize;
    this.compressedLength += compressedSize;
    this.totalValueCount += valueCount;
    this.pageCount += 1;
    this.totalStatistics.mergeStatistics(statistics);
    // by concatenating before collecting instead of collecting twice,
    // we only allocate one buffer to copy into instead of multiple.
    buf.collect(BytesInput.concat(BytesInput.from(tempOutputStream), compressedBytes));
    rlEncodings.add(rlEncoding);
    dlEncodings.add(dlEncoding);
    dataEncodings.add(valuesEncoding);
  }

  @Override
  public void writePageV2(
      int rowCount,
      int nullCount,
      int valueCount,
      BytesInput repetitionLevels,
      BytesInput definitionLevels,
      Encoding dataEncoding,
      BytesInput data,
      Statistics<?> statistics)
      throws IOException {
    int rlByteLength = toIntWithCheck(repetitionLevels.size());
    int dlByteLength = toIntWithCheck(definitionLevels.size());
    int uncompressedSize =
        toIntWithCheck(data.size() + repetitionLevels.size() + definitionLevels.size());
    // TODO: decide if we compress
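    // Note that only the data section is compressed here; in the V2 page format the
    // repetition and definition levels are written uncompressed ahead of the data.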
    BytesInput compressedData = compressor.compress(data);
    int compressedSize =
        toIntWithCheck(compressedData.size() + repetitionLevels.size() + definitionLevels.size());
    tempOutputStream.reset();
    parquetMetadataConverter.writeDataPageV2Header(
        uncompressedSize,
        compressedSize,
        valueCount,
        nullCount,
        rowCount,
        statistics,
        dataEncoding,
        rlByteLength,
        dlByteLength,
        tempOutputStream);
    this.uncompressedLength += uncompressedSize;
    this.compressedLength += compressedSize;
    this.totalValueCount += valueCount;
    this.pageCount += 1;
    this.totalStatistics.mergeStatistics(statistics);

    // by concatenating before collecting instead of collecting twice,
    // we only allocate one buffer to copy into instead of multiple.
    buf.collect(
        BytesInput.concat(
            BytesInput.from(tempOutputStream),
            repetitionLevels,
            definitionLevels,
            compressedData));
    dataEncodings.add(dataEncoding);
  }
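
  // The size computations in writePageV2 rely on a toIntWithCheck helper that is not shown
  // in this excerpt. A minimal sketch, assuming it simply narrows a long to int and fails
  // fast on overflow (mirroring the inline checks in writePage); the actual helper may differ.
  private int toIntWithCheck(long size) {
    if (size > Integer.MAX_VALUE) {
      throw new ParquetEncodingException(
          "Cannot write page larger than " + Integer.MAX_VALUE + " bytes: " + size);
    }
    return (int) size;
  }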