/**
 * Round-trips a pipeline through the default configuration directory and
 * verifies that deserialization yields a fresh but value-equal instance.
 */
@Test
public void writeAndReadConfiguration() {
    final Plumber plumber = Plumber.createWithDefaultPath();
    final Pipeline written = PipelineTest.getJDBCPipeline();

    plumber.intoConfiguration(written);
    final Pipeline readBack = plumber.fromConfiguration(written.getName());

    // A distinct instance must come back, not the same reference...
    assertNotSame(readBack, written);
    // ...yet it must compare equal to what was written out.
    assertThat(readBack, is(written));
}
/**
 * Verifies that a pipeline serialized to the default config path can be read
 * back via {@code fromInputStream(Reader)} by a path-less Plumber and that the
 * result is a fresh, value-equal instance.
 *
 * <p>Fix: the {@code Reader} was previously never closed, leaking the file
 * handle; it is now managed with try-with-resources (hence the widened
 * {@code throws} clause, which covers the {@code IOException} from close()).
 */
@Test
public void readConfigurationFromInputStream() throws Exception {
    Plumber output = Plumber.createWithDefaultPath();
    Pipeline origin = PipelineTest.getJDBCPipeline();
    output.intoConfiguration(origin);

    Plumber input = Plumber.createWithoutPath();
    // NOTE(review): FileReader reads with the platform default charset (pre-Java 18);
    // if the config XML is UTF-8, prefer new FileReader(file, StandardCharsets.UTF_8).
    try (Reader reader = new FileReader("./config/" + origin.getName() + ".xml")) {
        Pipeline deserialized = input.fromInputStream(reader);
        assertNotSame(deserialized, origin);
        assertThat(deserialized, is(origin));
    }
}
public static void addNextRow( final Supplier<Committer> committerSupplier, final Firehose firehose, final Plumber plumber, final boolean reportParseExceptions, final FireDepartmentMetrics metrics) { try { final InputRow inputRow = firehose.nextRow(); if (inputRow == null) { if (reportParseExceptions) { throw new ParseException("null input row"); } else { log.debug("Discarded null input row, considering unparseable."); metrics.incrementUnparseable(); return; } } // Included in ParseException try/catch, as additional parsing can be done during indexing. int numRows = plumber.add(inputRow, committerSupplier); if (numRows == -1) { metrics.incrementThrownAway(); log.debug("Discarded row[%s], considering thrownAway.", inputRow); return; } metrics.incrementProcessed(); } catch (ParseException e) { if (reportParseExceptions) { throw e; } else { log.debug(e, "Discarded row due to exception, considering unparseable."); metrics.incrementUnparseable(); } } catch (IndexSizeExceededException e) { // Shouldn't happen if this is only being called by a single thread. // plumber.add should be swapping out indexes before they fill up. throw new ISE(e, "WTF?! Index size exceeded, this shouldn't happen. Bad Plumber!"); } }