protected void _handleFirstLine() throws IOException {
   _handleFirstLine = false;
   if (_schema.usesHeader()) {
     int count = _schema.size();
     if (count == 0) {
       _reportMappingError(
           "Schema specified that header line is to be written; but contains no column names");
     }
     for (CsvSchema.Column column : _schema) {
       _writer.writeColumnName(column.getName());
     }
     _writer.endRow();
   }
 }
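
// Hedged usage sketch (method and column names are illustrative, not taken from the
// code above): _handleFirstLine() only writes a header row when the schema was built
// with useHeader=true, as below.
static String writeCsvWithHeader() throws IOException {
  CsvSchema schema = CsvSchema.builder()
      .addColumn("name")
      .addColumn("age")
      .setUseHeader(true) // triggers the header-writing branch above
      .build();
  Map<String, String> row = new LinkedHashMap<>();
  row.put("name", "Bob");
  row.put("age", "42");
  // expected output (assuming default settings): "name,age\nBob,42\n"
  return new CsvMapper().writer(schema).writeValueAsString(row);
}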
  @Override
  public final void writeStartArray() throws IOException {
    _verifyValueWrite("start an array");
    /* Ok to create root-level array to contain Objects/Arrays, but
     * can not nest arrays in objects
     */
    if (_writeContext.inObject()) {
      if (!_skipValue) {
        // First: column may have its own separator
        int sep;

        if (_nextColumnByName >= 0) {
          CsvSchema.Column col = _schema.column(_nextColumnByName);
          sep = col.isArray() ? col.getArrayElementSeparator() : -1;
        } else {
          sep = -1;
        }
        if (sep <= 0) {
          if (!_schema.hasArrayElementSeparator()) {
            _reportMappingError(
                "CSV generator does not support Array values for properties without setting 'arrayElementSeparator' in schema");
          }
          sep = _schema.getArrayElementSeparator();
        }
        _arraySeparator = sep;
        if (_arrayContents == null) {
          _arrayContents = new StringBuilder();
        } else {
          _arrayContents.setLength(0);
        }
        _arrayElements = 0;
      }
    } else if (_arraySeparator >= 0) {
      // also: no nested arrays, yet
      _reportMappingError("CSV generator does not support nested Array values");
    }

    _writeContext = _writeContext.createChildArrayContext();
    // and that's about it, really
  }
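
// Hedged sketch (the Tagged POJO is hypothetical): writeStartArray() above is what
// serializes a Collection-valued property into a single cell, joining the elements
// with the array element separator (";" by default) when the column is declared
// with addArrayColumn().
@JsonPropertyOrder({"id", "tags"})
static class Tagged {
  public int id = 1;
  public List<String> tags = Arrays.asList("a", "b", "c");
}

static String writeArrayColumn() throws IOException {
  CsvSchema schema = CsvSchema.builder()
      .addColumn("id")
      .addArrayColumn("tags")
      .build();
  // expected output (assuming the default separator): "1,a;b;c\n"
  return new CsvMapper().writer(schema).writeValueAsString(new Tagged());
}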
 private final void _writeFieldName(String name) throws IOException {
   // just find the matching index -- must have schema for that
   if (_schema == null) {
     // not a low-level error, so:
     _reportMappingError("Unrecognized column '" + name + "', can not resolve without CsvSchema");
   }
    // note: we are likely to get the next column name, so pass its index as a hint
   CsvSchema.Column col = _schema.column(name, _nextColumnByName + 1);
   if (col == null) {
     if (isEnabled(JsonGenerator.Feature.IGNORE_UNKNOWN)) {
       _skipValue = true;
       _nextColumnByName = -1;
       return;
     }
     // not a low-level error, so:
     _reportMappingError(
         "Unrecognized column '" + name + "': known columns: " + _schema.getColumnDesc());
   }
   _skipValue = false;
   // and all we do is just note index to use for following value write
   _nextColumnByName = col.getIndex();
 }
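
// Hedged sketch: when JsonGenerator.Feature.IGNORE_UNKNOWN is enabled,
// _writeFieldName() above sets _skipValue instead of reporting a mapping error for
// a property with no matching column. The "nickname" entry below is an illustrative
// unmapped property, not something from the code above.
static String writeIgnoringUnknownColumns() throws IOException {
  CsvMapper mapper = new CsvMapper();
  mapper.configure(JsonGenerator.Feature.IGNORE_UNKNOWN, true);
  CsvSchema schema = CsvSchema.builder().addColumn("name").build();
  Map<String, String> row = new LinkedHashMap<>();
  row.put("name", "Bob");
  row.put("nickname", "Bobby"); // not in schema; silently skipped
  return mapper.writer(schema).writeValueAsString(row);
}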
 public <T> List<T> loadObjectList(final Class<T> type, final String fileName) {
   try {
     final CsvSchema bootstrapSchema = CsvSchema.emptySchema().withHeader();
     final CsvMapper mapper = new CsvMapper();
     final File file = new ClassPathResource(fileName).getFile();
     final MappingIterator<T> readValues =
         mapper.reader(type).with(bootstrapSchema).readValues(file);
     return readValues.readAll();
   } catch (final Exception e) {
     logger.error("Error ocurred while loading object list from file " + fileName, e);
     return Collections.emptyList();
   }
 }
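
// Hedged usage sketch for loadObjectList() above: the bootstrap schema treats the
// first line as a header naming the properties to bind. The Person type and the
// "persons.csv" classpath resource are hypothetical.
// List<Person> persons = loadObjectList(Person.class, "persons.csv");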
 public List<String[]> loadManyToManyRelationship(final String fileName) {
   try {
     final CsvMapper mapper = new CsvMapper();
     final CsvSchema bootstrapSchema = CsvSchema.emptySchema().withSkipFirstDataRow(true);
     mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
     final File file = new ClassPathResource(fileName).getFile();
     final MappingIterator<String[]> readValues =
         mapper.reader(String[].class).with(bootstrapSchema).readValues(file);
     return readValues.readAll();
   } catch (final Exception e) {
      logger.error(
          "Error occurred while loading many-to-many relationship from file " + fileName, e);
     return Collections.emptyList();
   }
 }
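
// Hedged usage sketch for loadManyToManyRelationship() above: with WRAP_AS_ARRAY
// enabled each CSV line binds to a String[], and skipping the first data row drops
// the header line. "user_roles.csv" is a hypothetical classpath resource.
// List<String[]> userRoles = loadManyToManyRelationship("user_roles.csv");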
 @Override
 public void writeOmittedField(String fieldName) throws IOException {
   // basically combination of "writeFieldName()" and "writeNull()"
   if (_writeContext.writeFieldName(fieldName) == JsonWriteContext.STATUS_EXPECT_VALUE) {
     _reportError("Can not skip a field, expecting a value");
   }
    // Should we require a match? Let's use this logic: if the column is found,
    // we must add a placeholder; if not, we can simply ignore the field
   CsvSchema.Column col = _schema.column(fieldName);
   if (col == null) {
     // assumed to have been removed from schema too
   } else {
     // and all we do is just note index to use for following value write
     _nextColumnByName = col.getIndex();
     // We can basically copy what 'writeNull()' does...
     _verifyValueWrite("skip positional value due to filtering");
     _writer.write(_columnIndex(), "");
   }
 }
  private <T> int readAll(File inputFile, Class<T> cls) throws IOException {
    System.out.print("Reading input as " + cls.getName() + " instances: ");

    int count = 0;
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = CsvSchema.builder().setUseHeader(true).build();

    MappingIterator<T> it = mapper.reader(cls).with(schema).readValues(inputFile);
    while (it.hasNext()) {
      @SuppressWarnings("unused")
      T row = it.nextValue();
      ++count;
      if ((count & 0x3FFF) == 0) {
        System.out.print('.');
      }
    }
    System.out.println();
    it.close();
    return count;
  }
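
// Hedged usage sketch for readAll() above: the bootstrap schema only declares that
// the first line is a header, so each data row binds to the target type by column
// name. The input file name is hypothetical.
// int rows = readAll(new File("input.csv"), Map.class);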
  @Override
  public void writeNull() throws IOException {
    _verifyValueWrite("write null value");

    if (!_skipValue) {
      if (_arraySeparator >= 0) {
        _addToArray(_schema.getNullValueOrEmpty());
      } else if (!_writeContext.inObject()) { // as per [#69]
        // note: 'root' not enough, for case of wrap-as array, or serialize List

        // Alternatively, to write an 'empty Object' (the common case), we would
        // write a single null and then finish the row, like so:
        /*
        _writer.writeNull(_columnIndex());
        finishRow();
        */
      } else {
        _writer.writeNull(_columnIndex());
      }
    }
  }
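
// Hedged sketch: writeNull() above emits the schema's configured null value for a
// null property; the "N/A" marker below is illustrative (the default is an empty
// string).
static String writeWithExplicitNullValue() throws IOException {
  CsvSchema schema = CsvSchema.builder()
      .addColumn("a")
      .addColumn("b")
      .setNullValue("N/A")
      .build();
  Map<String, String> row = new LinkedHashMap<>();
  row.put("a", "x");
  row.put("b", null);
  // expected output (assumed): "x,N/A\n"
  return new CsvMapper().writer(schema).writeValueAsString(row);
}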
public class BasicParserTest extends ModuleTestBase {
  @JsonPropertyOrder({"x", "y", "z"})
  public static class Point {
    public int x;
    public Integer y;
    public Integer z = 8;
  }

  static final CsvSchema SIMPLE_SCHEMA =
      CsvSchema.builder()
          .addColumn("firstName")
          .addColumn("lastName")
          .addColumn("gender")
          .addColumn("userImage")
          .addColumn("verified")
          .build();

  /*
  /**********************************************************
  /* Test methods
  /**********************************************************
   */

  final CsvMapper MAPPER = mapperForCsv();

  public void testSimpleExplicit() throws Exception {
    ObjectReader r = MAPPER.reader(SIMPLE_SCHEMA);
    _testSimpleExplicit(r, false);
    _testSimpleExplicit(r, true);
  }

  private void _testSimpleExplicit(ObjectReader r, boolean useBytes) throws Exception {
    r = r.forType(FiveMinuteUser.class);
    FiveMinuteUser user;
    final String INPUT = "Bob,Robertson,MALE,AQIDBAU=,false\n";
    if (useBytes) {
      user = r.readValue(INPUT.getBytes("UTF-8"));
    } else {
      user = r.readValue(INPUT);
    }
    assertEquals("Bob", user.firstName);
    assertEquals("Robertson", user.lastName);
    assertEquals(Gender.MALE, user.getGender());
    assertFalse(user.isVerified());
    assertArrayEquals(new byte[] {1, 2, 3, 4, 5}, user.getUserImage());
  }

  public void testSimpleExplicitWithBOM() throws Exception {
    ObjectReader r = MAPPER.reader(SIMPLE_SCHEMA);
    r = r.forType(FiveMinuteUser.class);
    FiveMinuteUser user;

    ByteArrayOutputStream b = new ByteArrayOutputStream();

    // first, UTF-8 BOM:
    b.write(new byte[] {(byte) 0xEF, (byte) 0xBB, (byte) 0xBF});
    b.write("Bob,Robertson,MALE,AQIDBAU=,false\n".getBytes("UTF-8"));
    b.close();

    user = r.readValue(b.toByteArray());
    String fn = user.firstName;

    if (!fn.equals("Bob")) {
      fail("Expected 'Bob' (3), got '" + fn + "' (" + fn.length() + ")");
    }
    assertEquals("Robertson", user.lastName);
    assertEquals(Gender.MALE, user.getGender());
    assertFalse(user.isVerified());
    assertArrayEquals(new byte[] {1, 2, 3, 4, 5}, user.getUserImage());
  }

  public void testSimpleWithAutoSchema() throws Exception {
    CsvSchema schema = MAPPER.schemaFor(FiveMinuteUser.class);
    // NOTE: order different from above test (as per POJO def!)
    FiveMinuteUser user =
        MAPPER
            .reader(schema)
            .forType(FiveMinuteUser.class)
            .readValue("Joe,Josephson,MALE,true,AwE=\n");
    assertEquals("Joe", user.firstName);
    assertEquals("Josephson", user.lastName);
    assertEquals(Gender.MALE, user.getGender());
    assertTrue(user.isVerified());
    assertArrayEquals(new byte[] {3, 1}, user.getUserImage());
  }

  /** Test to verify that we can mix "untyped" access as Maps with schema information... */
  public void testSimpleAsMaps() throws Exception {
    CsvSchema schema = MAPPER.schemaFor(FiveMinuteUser.class);
    MappingIterator<Map<?, ?>> it =
        MAPPER.reader(schema).forType(Map.class).readValues("Joe,Smith,MALE,false,");
    assertTrue(it.hasNext());
    Map<?, ?> result = it.nextValue();
    assertEquals(5, result.size());
    assertEquals("Joe", result.get("firstName"));
    assertEquals("Smith", result.get("lastName"));
    assertEquals("MALE", result.get("gender"));
    assertEquals("false", result.get("verified"));
    assertEquals("", result.get("userImage"));

    assertFalse(it.hasNextValue());
    it.close();
  }

  // Test for [Issue#10]
  public void testMapsWithLinefeeds() throws Exception {
    _testMapsWithLinefeeds(false);
    _testMapsWithLinefeeds(true);
  }

  private void _testMapsWithLinefeeds(boolean useBytes) throws Exception {
    String CSV =
        "A,B,C\n"
            + "data11,data12\n"
            + "data21,data22,data23\r\n"
            + "data31,\"data32 data32\ndata32 data32\",data33\n"
            + "data41,\"data42 data42\r\ndata42\",data43\n";

    CsvSchema cs = CsvSchema.emptySchema().withHeader();
    ObjectReader or = MAPPER.readerFor(HashMap.class).with(cs);

    MappingIterator<Map<String, String>> mi;

    if (useBytes) {
      mi = or.readValues(CSV.getBytes("UTF-8"));
    } else {
      mi = or.readValues(CSV);
    }

    assertTrue(mi.hasNext());
    Map<String, String> map = mi.nextValue();
    assertNotNull(map);
    assertEquals("data11", map.get("A"));
    assertEquals("data12", map.get("B"));
    assertEquals(2, map.size());

    assertTrue(mi.hasNext());
    map = mi.nextValue();
    assertNotNull(map);
    assertEquals(3, map.size());

    // then entries with linefeeds
    assertTrue(mi.hasNext());
    map = mi.nextValue();
    assertNotNull(map);
    assertEquals(3, map.size());
    assertEquals("data31", map.get("A"));
    assertEquals("data32 data32\ndata32 data32", map.get("B"));
    assertEquals("data33", map.get("C"));

    assertTrue(mi.hasNext());
    map = mi.nextValue();
    assertNotNull(map);
    assertEquals(3, map.size());
    assertEquals("data41", map.get("A"));
    assertEquals("data42 data42\r\ndata42", map.get("B"));
    assertEquals("data43", map.get("C"));

    assertFalse(mi.hasNext());
    mi.close();
  }

  // [Issue#12]
  public void testEmptyHandlingForInteger() throws Exception {
    CsvSchema schema = MAPPER.typedSchemaFor(Point.class).withoutHeader();

    // First: empty value, to be considered as null
    Point result = MAPPER.readerFor(Point.class).with(schema).readValue(",,\n");
    assertEquals(0, result.x);
    assertNull(result.y);
    assertNull(result.z);
  }

  public void testStringNullHandlingForInteger() throws Exception {
    CsvSchema schema = MAPPER.typedSchemaFor(Point.class).withoutHeader();

    // Here: literal "null" text, to be considered as null
    Point result = MAPPER.readerFor(Point.class).with(schema).readValue("null,null,null\n");
    assertEquals(0, result.x);
    assertNull(result.y);
    assertNull(result.z);
  }

  // [Issue#41]
  public void testIncorrectDups41() throws Exception {
    final String INPUT = "\"foo\",\"bar\",\"foo\"";
    CsvSchema schema =
        CsvSchema.builder().addColumn("Col1").addColumn("Col2").addColumn("Col3").build();

    MappingIterator<Object> iter = MAPPER.readerFor(Object.class).with(schema).readValues(INPUT);

    Map<?, ?> m = (Map<?, ?>) iter.next();
    assertFalse(iter.hasNextValue());
    iter.close();

    if (m.size() != 3) {
      fail("Should have 3 entries, but got: " + m);
    }
    assertEquals("foo", m.get("Col1"));
    assertEquals("bar", m.get("Col2"));
    assertEquals("foo", m.get("Col3"));
  }

  // for pull request 89
  public void testColumnReordering() throws IOException {
    CsvFactory factory = new CsvFactory();
    String CSV = "b,a,c\nvb,va,vc\n";

    /* First, test column reordering by declaring the schema
       columns in a different order from the one found in
       the CSV header
    */
    CsvSchema schemaWithReordering =
        CsvSchema.builder()
            .addColumn("a")
            .addColumn("b")
            .addColumn("c")
            .setLineSeparator('\n')
            .setUseHeader(true) // must be set for column reordering
            .setReorderColumns(true) // set column reordering
            .build();

    // Create a parser and ensure data is processed in the
    // right order, as per header
    CsvParser parser = factory.createParser(CSV);
    parser.setSchema(schemaWithReordering);
    assertEquals(JsonToken.START_OBJECT, parser.nextToken());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("b", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("vb", parser.getValueAsString());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("a", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("va", parser.getValueAsString());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("c", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("vc", parser.getValueAsString());
    assertEquals(JsonToken.END_OBJECT, parser.nextToken());

    /*
       Now make a copy of the schema but set the reordering
       flag to false. In this case the column values are
       reported in schema order, not header order.
    */
    CsvSchema schemaWithoutReordering = schemaWithReordering.withColumnReordering(false);
    parser = factory.createParser(CSV);
    parser.setSchema(schemaWithoutReordering);
    assertEquals(JsonToken.START_OBJECT, parser.nextToken());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("a", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("vb", parser.getValueAsString());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("b", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("va", parser.getValueAsString());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("c", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("vc", parser.getValueAsString());
    assertEquals(JsonToken.END_OBJECT, parser.nextToken());

    /*
       Starting from the schema with reordering, disabling the
       use-header flag has the same effect as the previous case.
    */
    CsvSchema schemaWithoutHeader =
        schemaWithReordering.withUseHeader(false).withSkipFirstDataRow(true);

    parser = factory.createParser(CSV);
    parser.setSchema(schemaWithoutHeader);
    assertEquals(JsonToken.START_OBJECT, parser.nextToken());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("a", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("vb", parser.getValueAsString());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("b", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("va", parser.getValueAsString());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("c", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("vc", parser.getValueAsString());
    assertEquals(JsonToken.END_OBJECT, parser.nextToken());

    /*
        Finally, test an empty schema, where the header is used to set
        the columns, regardless of the reordering flag.
    */
    CsvSchema emptySchema = CsvSchema.builder().setLineSeparator('\n').setUseHeader(true).build();

    parser = factory.createParser(CSV);
    parser.setSchema(emptySchema);
    assertEquals(JsonToken.START_OBJECT, parser.nextToken());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("b", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("vb", parser.getValueAsString());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("a", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("va", parser.getValueAsString());
    assertEquals(JsonToken.FIELD_NAME, parser.nextToken());
    assertEquals("c", parser.getCurrentName());
    assertEquals(JsonToken.VALUE_STRING, parser.nextToken());
    assertEquals("vc", parser.getValueAsString());
    assertEquals(JsonToken.END_OBJECT, parser.nextToken());
  }
}
 static {
   EMPTY_SCHEMA = CsvSchema.emptySchema();
 }