  private void resetBatch() {
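    // Reset the per-pass value counters on the fixed-width column readers.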
    for (final ColumnReader<?> column : columnStatuses) {
      column.valuesReadInCurrentPass = 0;
    }
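    // Reset the per-pass counters on the variable-length column readers as well.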
    for (final VarLengthColumn<?> r : varLengthReader.columns) {
      r.valuesReadInCurrentPass = 0;
    }
 }
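
  /**
   * Releases the column readers and codec resources held by this reader and,
   * at trace level, logs a summary of the Parquet read statistics.
   */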
  @Override
  public void close() {
    logger.debug(
        "Read {} records out of row group({}) in file '{}'",
        totalRecordsRead,
        rowGroupIndex,
        hadoopPath.toUri().getPath());
    // Enable this assert for debugging when it is known that the whole file will be read;
    // a LIMIT kills upstream operators once it has enough records, so the assert would fail otherwise.
    //    assert totalRecordsRead == footer.getBlocks().get(rowGroupIndex).getRowCount();
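    // Clear each fixed-width column reader and drop the collection so its buffers can be reclaimed.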
    if (columnStatuses != null) {
      for (final ColumnReader<?> column : columnStatuses) {
        column.clear();
      }
      columnStatuses.clear();
      columnStatuses = null;
    }

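    // Release compressor/decompressor resources held by the codec factory.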
    codecFactory.release();

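    // Release the variable-length column readers in the same way.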
    if (varLengthReader != null) {
      for (final VarLengthColumn<?> r : varLengthReader.columns) {
        r.clear();
      }
      varLengthReader.columns.clear();
      varLengthReader = null;
    }

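    // Emit one comma-separated trace line summarizing page loads, decoding,
    // decompression and disk-scan timings for this row group.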
    if (parquetReaderStats != null) {
      logger.trace(
          "ParquetTrace,Summary,{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}",
          hadoopPath,
          parquetReaderStats.numDictPageLoads,
          parquetReaderStats.numDataPageLoads,
          parquetReaderStats.numDataPagesDecoded,
          parquetReaderStats.numDictPagesDecompressed,
          parquetReaderStats.numDataPagesDecompressed,
          parquetReaderStats.totalDictPageReadBytes,
          parquetReaderStats.totalDataPageReadBytes,
          parquetReaderStats.totalDictDecompressedBytes,
          parquetReaderStats.totalDataDecompressedBytes,
          parquetReaderStats.timeDictPageLoads,
          parquetReaderStats.timeDataPageLoads,
          parquetReaderStats.timeDataPageDecode,
          parquetReaderStats.timeDictPageDecode,
          parquetReaderStats.timeDictPagesDecompressed,
          parquetReaderStats.timeDataPagesDecompressed,
          parquetReaderStats.timeDiskScanWait,
          parquetReaderStats.timeDiskScan);
      parquetReaderStats = null;
    }
  }