public int writeValues(byte[] output, Object[] values, int srcOffset) {
  int offset = srcOffset;
  // check if all values can be delta-ed or not
  boolean isDelta;
  if (deltaEnabled && rowDelta) {
    isDelta = isRowDelta(values);
    BitCodec.write(output, isDelta ? 1 : 0, offset++, 1);
  }
  for (int i = 0; i < writers.length; i++) {
    Writer writer = writers[i];
    Object value = values[i];
    if (deltaEnabled && !rowDelta && !writer.fixed) {
      isDelta = writer.isDelta(value);
      BitCodec.write(output, isDelta ? 1 : 0, offset++, 1);
    } else {
      isDelta = false;
    }
    offset += isDelta
        ? writer.writeDelta(output, value, offset)
        : writer.writeRaw(output, value, offset);
  }
  return offset - srcOffset;
}
@Override
public void run(String... arg0) throws Exception {
  dictionary.set("java", "object oriented");
  /*dictionary.set("linux", "rulez");
  dictionary.set("mac", "beutifull");
  dictionary.set("solaris", "old but gold");
  dictionary.set("red hat", "new and fit");
  dictionary.set("debian", "just old");*/
  Writer writer = new Writer(dictionary, "Mr. Writer");
  Reader reader1 = new Reader(dictionary, "Mrs Reader 1");
  Reader reader2 = new Reader(dictionary, "Mrs Reader 2");
  Reader reader3 = new Reader(dictionary, "Mrs Reader 3");
  Reader reader4 = new Reader(dictionary, "Mrs Reader 4");
  Reader reader5 = new Reader(dictionary, "Mrs Reader 5");
  log.info("Starting writer thread");
  writer.setPriority(Thread.MAX_PRIORITY);
  writer.start();
  log.info("Starting reader thread Mrs Reader 1");
  reader1.start();
  log.info("Starting reader thread Mrs Reader 2");
  reader2.start();
  log.info("Starting reader thread Mrs Reader 3");
  reader3.start();
  log.info("Starting reader thread Mrs Reader 4");
  reader4.start();
  log.info("Starting reader thread Mrs Reader 5");
  reader5.start();
}
/**
 * Enables the user to provide a custom configuration that can be used with this DrawStream.
 *
 * @param key Unique object used to identify the configuration.
 * @param prog Program to use in the configuration.
 * @param vertWriter BoWriter used to serialize data in the configuration.
 */
public void createCustomConfig(Object key, Program prog, BoWriter<? super DrawVert> vertWriter) {
  Writer writer = new Writer(prog, vertWriter);
  Writer prev = mWriters.put(key, writer);
  if (prev != null) {
    prev.deref();
  }
}
public <T extends RootEntity> void write(T entity, Callback cb) {
  if (entity == null)
    return;
  ModelType type = ModelType.forModelClass(entity.getClass());
  if (type == null || entity.getRefId() == null) {
    warn(cb, "no refId, or type is unknown", entity);
    return;
  }
  if (conf.hasVisited(type, entity.getId()))
    return;
  Writer<T> writer = getWriter(entity, conf);
  if (writer == null) {
    warn(cb, "no writer found for type " + type, entity);
    return;
  }
  try {
    conf.refFn = ref -> {
      write(ref, cb);
    };
    JsonObject obj = writer.write(entity);
    conf.store.put(type, obj);
    if (writer.isExportExternalFiles())
      writeExternalFiles(entity, type, cb);
    if (cb != null)
      cb.apply(Message.info("data set exported"), entity);
  } catch (Exception e) {
    e.printStackTrace();
    if (cb != null)
      cb.apply(Message.error("failed to export data set", e), entity);
  }
}
@Override
public boolean process(TProtocol in, TProtocol out) throws TException {
  TMessage msg = in.readMessageBegin();
  String name = msg.name;
  int idx = name.indexOf('.');
  if (idx != -1) {
    name = name.substring(idx + 1);
  }
  TDynamicFunction f = descManager.getFunction(name);
  if (f == null)
    throw new TException("unknown function '" + name + "'");
  Reader reader = new Reader(f.getManager());
  Xdom param = reader.readStruct(f.getParamStruct(), in);
  in.readMessageEnd();
  if (!f.isOneway()) {
    XdomMap rep;
    TDynamicField retf = f.getReturnStruct().getField("success");
    if (retf.getTypeValue() != TType.VOID) {
      Xdom ret = handle(name, param);
      rep = new XdomMap();
      rep.put("success", ret);
    } else {
      rep = new XdomMap();
    }
    TMessage repmsg = new TMessage(name, TMessageType.REPLY, msg.seqid);
    out.writeMessageBegin(repmsg);
    Writer writer = new Writer(f.getManager());
    writer.writeStruct(f.getReturnStruct(), rep, out);
    out.writeMessageEnd();
    out.getTransport().flush();
  }
  return true;
}
public void write(Command cmd) {
  if (writer == null || writer.hasFinished()) {
    writer = new Writer(cmd);
  } else {
    writer.write(cmd);
  }
}
/**
 * Disposes a previously created configuration.
 *
 * @param key Unique object used to identify the configuration.
 * @return <code>true</code> if a configuration was registered under the key and has been disposed.
 */
public boolean disposeCustomConfig(Object key) {
  Writer prev = mWriters.remove(key);
  if (prev != null) {
    prev.deref();
    return true;
  }
  return false;
}
protected boolean isRowDelta(Object[] values) {
  for (int i = 0; i < writers.length; i++) {
    Writer writer = writers[i];
    Object value = values[i];
    if (!writer.fixed && !writer.isDelta(value)) {
      return false;
    }
  }
  return true;
}
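// A minimal, self-contained sketch of the layout decision made by writeValues/isRowDelta above.
// Everything here is hypothetical (SimpleIntField, a whole byte per flag instead of the single
// bit written via BitCodec in the real code); it only illustrates the idea: if every non-fixed
// field can be delta-encoded, one row-level flag is written, otherwise raw values are used.
public class RowDeltaSketch {

  /** A toy field writer: remembers its previous value and encodes either a delta or the raw value. */
  static final class SimpleIntField {
    int previous;

    boolean isDelta(int value) {
      // e.g. only small changes are worth delta-encoding
      return Math.abs(value - previous) < 128;
    }

    int writeDelta(byte[] out, int value, int offset) {
      out[offset] = (byte) (value - previous); // 1-byte delta
      previous = value;
      return 1;
    }

    int writeRaw(byte[] out, int value, int offset) {
      for (int i = 0; i < 4; i++) {            // 4-byte raw int, big-endian
        out[offset + i] = (byte) (value >>> (24 - 8 * i));
      }
      previous = value;
      return 4;
    }
  }

  /** Row-level variant: one flag for the whole row decides delta vs raw for all fields. */
  static int writeRow(byte[] out, SimpleIntField[] fields, int[] values, int srcOffset) {
    int offset = srcOffset;
    boolean rowDelta = true;
    for (int i = 0; i < fields.length; i++) {
      rowDelta &= fields[i].isDelta(values[i]);
    }
    out[offset++] = (byte) (rowDelta ? 1 : 0);
    for (int i = 0; i < fields.length; i++) {
      offset += rowDelta
          ? fields[i].writeDelta(out, values[i], offset)
          : fields[i].writeRaw(out, values[i], offset);
    }
    return offset - srcOffset;
  }

  public static void main(String[] args) {
    SimpleIntField[] fields = {new SimpleIntField(), new SimpleIntField()};
    byte[] buffer = new byte[64];
    int first = writeRow(buffer, fields, new int[] {1000, 2000}, 0);      // deltas too large: raw
    int second = writeRow(buffer, fields, new int[] {1001, 2002}, first); // small changes: delta
    System.out.println("raw row bytes = " + first + ", delta row bytes = " + second);
  }
}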
private void testReadWrite(Random rng, Writer wr) throws InterruptedException {
  ArrayBlockingQueue<byte[]> q = newQueue();
  InputStream is = create(q);
  wr.init(q);
  wr.start();
  Reader rd = new Reader(rng, is);
  rd.run();
  wr.join();
  Assert.assertNull(wr.exn);
  Assert.assertNull(rd.exn);
  System.out.printf("sums %x -> %x\n", wr.getSum(), rd.getSum());
  Assert.assertEquals(wr.getSum(), rd.getSum());
}
/**
 * This is executed when the flusher is to write all of the data to the underlying socket. In this
 * situation the writes are attempted in a non blocking way; if the task does not complete then
 * this will simply enqueue the writing task for OP_WRITE and leave the method. The scheduler is
 * only signalled as ready once all of the buffers have been written.
 */
private synchronized void execute() throws IOException {
  boolean ready = writer.flush();
  if (!ready) {
    boolean block = writer.isBlocking();
    if (!block && !closed) {
      scheduler.release();
    }
    scheduler.repeat();
  } else {
    scheduler.ready();
  }
}
/**
 * Tries to write the content of this leaf to a file named as this leaf in the directory at the
 * location <code>parentLocation</code>.
 *
 * @param parentLocation The location. Not <code>null</code>.
 * @throws IllegalStateException If <code>!canWrite()</code>.
 * @throws NullPointerException If <code>parentLocation==null</code>.
 * @throws FileLeafException If something goes wrong before or while writing.
 * @see String#isEmpty()
 */
public void tryWrite(final String parentLocation) throws FileLeafException {
  /* Validate arguments. */
  if (parentLocation == null)
    throw new NullPointerException("parentLocation");
  if (!canWrite())
    throw new IllegalStateException("!canWrite()");

  /* Compute location. */
  final String location = parentLocation + File.separator + super.getName();

  /* Write. */
  final Writer writer = new Writer(new File(location));
  if (content instanceof File)
    writer.tryWrite((File) content);
  else if (content instanceof String)
    writer.tryWrite((String) content);
}
public static void main(String[] args) throws FileNotFoundException {
  if (args.length < 1) {
    System.err.println("error: missing parameter: input file");
    System.err.println("usage: launch.sh input.in");
    System.exit(1);
  }
  String inputFileName = args[0];
  System.out.println("Output File Name: " + getOutputFileName(inputFileName));
  Data d = Parser.parse(inputFileName);
  for (int i = 0; i < d.turns; i++) {
    d.currentTurn = i;
    System.out.println(" Turn #" + i);
    d.drones.forEach(dr -> dr.update(d));
    // System.out.println(d);
  }
  Writer.writeInFile(getOutputFileName(inputFileName));
}
/**
 * Writes a portion of an array of characters.
 *
 * <p>Ordinarily this method stores characters from the given array into this stream's buffer,
 * flushing the buffer to the underlying stream as needed. If the requested length is at least as
 * large as the buffer, however, then this method will flush the buffer and write the characters
 * directly to the underlying stream. Thus redundant <code>BufferedWriter</code>s will not copy
 * data unnecessarily.
 *
 * @param cbuf A character array
 * @param off Offset from which to start reading characters
 * @param len Number of characters to write
 * @exception IOException If an I/O error occurs
 */
public void write(char cbuf[], int off, int len) throws IOException {
  synchronized (lock) {
    ensureOpen();
    if ((off < 0) || (off > cbuf.length) || (len < 0)
        || ((off + len) > cbuf.length) || ((off + len) < 0)) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      return;
    }

    if (len >= nChars) {
      /* If the request length exceeds the size of the output buffer, flush the buffer and then
         write the data directly. In this way buffered streams will cascade harmlessly. */
      flushBuffer();
      out.write(cbuf, off, len);
      return;
    }

    int b = off, t = off + len;
    while (b < t) {
      int d = min(nChars - nextChar, t - b);
      System.arraycopy(cbuf, b, cb, nextChar, d);
      b += d;
      nextChar += d;
      if (nextChar >= nChars)
        flushBuffer();
    }
  }
}
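// A minimal usage sketch of the buffering behaviour described above, using the standard
// java.io.BufferedWriter. The buffer size (8) and the payloads are arbitrary: the small write is
// held in the buffer until the stream is flushed or closed, while the large write bypasses the
// buffer and goes straight through to the underlying StringWriter.
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.StringWriter;

public class BufferedWriteDemo {
  public static void main(String[] args) throws IOException {
    StringWriter sink = new StringWriter();
    BufferedWriter buffered = new BufferedWriter(sink, 8); // tiny buffer to make the effect visible

    char[] small = "abc".toCharArray();
    buffered.write(small, 0, small.length);
    System.out.println("after small write: \"" + sink + "\""); // still empty, data is buffered

    char[] large = "0123456789".toCharArray(); // len >= buffer size: flushed, then written through
    buffered.write(large, 0, large.length);
    System.out.println("after large write: \"" + sink + "\"");

    buffered.close(); // flushes any remaining buffered characters
    System.out.println("after close:       \"" + sink + "\"");
  }
}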
private static SinkEnd createPair() {
  Writer wrt = new ContinuousWriter(1);
  // {
  //   @Override
  //   public void updateState() {
  //     System.out.println("updating <"+getEnds().get(0).getName()+">");
  //     if (limit == 1)
  //       System.out.println("<"+getEnds().get(0).getName()+">");
  //     super.updateState();
  //   }
  // };
  // OwnerManager.register(wrt.getSink());
  writers[counter] = wrt.getSink();
  counter++;
  Reader rdr = new ContinuousReader(1) {
    @Override
    public void updateState() {
      // System.out.println("updating ["+getEnds().get(0).getName()+"]");
      // System.out.println("["+getEnds().get(0).getName()+"/"+limit+"(before update)]");
      if (limit == 1) {
        // System.out.println("["+getEnds().get(0).getName()+"]");
        PairwiseAsynchrounous.pending--;
        if (PairwiseAsynchrounous.pending == 0) {
          System.out.print((System.currentTimeMillis() - PairwiseAsynchrounous.start) + ", ");
          if (thread != null)
            PairwiseAsynchrounous.thread.interrupt();
          // OwnerManager.kill();
        }
      }
      super.updateState();
    }

    @Override
    public String toString() {
      return "CReader" + super.toString();
    }
  };
  Replicator r1 = new Replicator();
  wrt.getSink().connect(r1.getSource());
  r1.getSink1().connect(rdr.getSource());
  return r1.getSink2();
}
/**
 * This method tries to find an ancestor object of type FieldProvider.
 *
 * @return the FieldProvider if found; otherwise an exception is thrown.
 */
public FieldProvider findFieldProvider(boolean ex) throws JspTagException {
  FieldProvider fp =
      findParentTag(FieldProvider.class, (String) parentFieldId.getValue(this), ex);
  if (fp instanceof Writer) {
    ((Writer) fp).haveBody();
  }
  return fp;
}
/**
 * Write a new file as a zip entry to the output writer.
 *
 * @param name File name.
 * @param consumer Output writer consumer, producing file contents.
 * @throws IOException If an I/O error occurs.
 */
void writeFile(String name, ThrowingConsumer<Writer> consumer) throws IOException {
  synchronized (os) {
    os.putNextEntry(new ZipEntry(name));
    consumer.accept(writer);
    writer.flush();
    os.closeEntry();
  }
}
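// A self-contained sketch of the same pattern with plain JDK classes (ZipOutputStream plus a
// shared OutputStreamWriter). The helper and file names are made up; the point is the ordering
// used above: open an entry, write through the shared Writer, flush the Writer so its buffered
// characters land inside the current entry, then close the entry.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

public class ZipEntryWriteSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    ZipOutputStream zip = new ZipOutputStream(bytes);
    Writer writer = new OutputStreamWriter(zip, StandardCharsets.UTF_8);

    writeEntry(zip, writer, "hello.txt", "hello, zip\n");
    writeEntry(zip, writer, "numbers.csv", "1,2,3\n");

    zip.close(); // also finishes the archive
    System.out.println("archive size = " + bytes.size() + " bytes");
  }

  static void writeEntry(ZipOutputStream zip, Writer writer, String name, String content)
      throws IOException {
    zip.putNextEntry(new ZipEntry(name));
    writer.write(content);
    writer.flush();    // push buffered characters into the current entry before closing it
    zip.closeEntry();
  }
}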
public int setData(Writer w, int data_) {
  int tmp;
  try {
    System.out.println(
        w.getName() + " has entered with data = " + data + ". Setting data to " + data_);
    tmp = data;
    data = data_;
    try {
      Thread.sleep(500);
    } catch (InterruptedException ex) {
      Logger.getLogger(Datastore.class.getName()).log(Level.SEVERE, null, ex);
    }
    return tmp;
  } finally {
    System.out.println(w.getName() + " has exited with data = " + data + ".");
  }
}
/**
 * Flushes the output buffer to the underlying character stream, without flushing the stream
 * itself. This method is non-private only so that it may be invoked by PrintStream.
 */
void flushBuffer() throws IOException {
  synchronized (lock) {
    ensureOpen();
    if (nextChar == 0)
      return;
    out.write(cb, 0, nextChar);
    nextChar = 0;
  }
}
@Override
public void write(NullWritable nullWritable, OrcSerdeRow row) throws IOException {
  if (writer == null) {
    options.inspector(row.getInspector());
    writer = OrcFile.createWriter(path, options);
  }
  writer.addRow(row.getRow());
}
public void _testPut() throws Exception {
  // put any stuff
  List<NodeData> nodes = prepare();
  Set<Writer> writers = new HashSet<Writer>();
  try {
    // create writers
    for (int t = 1; t <= 100; t++) {
      NodeData[] ns = new NodeData[nodes.size()];
      nodes.toArray(ns);
      Writer r = new Writer(ns, "writer #" + t, 50);
      writers.add(r);
      r.start();
    }
    Thread.sleep(5 * 60 * 1000);
  } finally {
    // join
    for (Writer w : writers) {
      w.cancel();
      w.join();
    }
    // debug result
    for (Writer w : writers) {
      log.info(w.getName() + " " + (w.itemsProcessed));
    }
  }
}
@Override
public void write(Writable row) throws IOException {
  OrcSerdeRow serdeRow = (OrcSerdeRow) row;
  if (writer == null) {
    options.inspector(serdeRow.getInspector());
    writer = OrcFile.createWriter(path, options);
  }
  writer.addRow(serdeRow.getRow());
}
public void disconnect() {
  try {
    reader.destroy();
    writer.destroy();
    socket.close();
  } catch (Exception e) {
    // ignore errors during teardown
  }
}
@Test
public void testSaveGame() {
  initState();
  Writer.saveGame(fileFolder + "saveGameWriterTest.ses");
  State.resetAll();
  Parser.loadGame(fileFolder + "saveGameWriterTest.ses");
  assertState();
}
/**
 * This method schedules a flush for when the underlying writer is write ready. It allows the
 * writer thread to return without having to fully flush the content to the underlying transport.
 * If there are references queued this will block.
 */
public synchronized void flush() throws IOException {
  if (closed) {
    throw new TransportException("Flusher is closed");
  }
  boolean block = writer.isBlocking();
  if (!closed) {
    scheduler.schedule(block);
  }
}
/**
 * This is used to close the flusher ensuring that all of the data within the writer will be
 * flushed regardless of the amount of data within the writer that needs to be written. If the
 * writer does not block then this waits to be finished.
 */
public synchronized void close() throws IOException {
  boolean ready = writer.flush();
  if (!closed) {
    closed = true;
  }
  if (!ready) {
    scheduler.schedule(true);
  }
}
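// A purely illustrative, self-contained sketch of the flush/schedule pattern used by the flusher
// methods above. The types here (FrameWriter, WriteScheduler) are hypothetical stand-ins for the
// real writer and scheduler: a flush attempt either completes immediately or leaves residual
// data, in which case the task is rescheduled until everything has been written.
public class FlusherSketch {

  /** Buffers characters and pretends the socket accepts at most 4 characters per flush attempt. */
  static final class FrameWriter {
    private final StringBuilder pending = new StringBuilder();

    void write(String data) {
      pending.append(data);
    }

    /** @return true if everything was written, false if data is still pending */
    boolean flush() {
      int n = Math.min(4, pending.length()); // simulated non-blocking write capacity
      System.out.print(pending.substring(0, n));
      pending.delete(0, n);
      return pending.length() == 0;
    }
  }

  /** Repeats the flush task until the writer reports that it has drained. */
  static final class WriteScheduler {
    void schedule(FrameWriter writer) {
      // The real flusher would register interest in OP_WRITE and return immediately;
      // here each loop iteration stands in for another "write ready" event.
      while (!writer.flush()) {
        // keep going until the writer is empty
      }
    }
  }

  public static void main(String[] args) {
    FrameWriter writer = new FrameWriter();
    WriteScheduler scheduler = new WriteScheduler();
    writer.write("hello, non-blocking world\n");
    if (!writer.flush()) {        // first attempt: only part of the data fits
      scheduler.schedule(writer); // schedule the remainder for when the socket is write ready
    }
  }
}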
private void addEvent(
    int operation, long currentTransaction, long originalTransaction, long rowId, Object row)
    throws IOException {
  this.operation.set(operation);
  this.currentTransaction.set(currentTransaction);
  this.originalTransaction.set(originalTransaction);
  this.rowId.set(rowId);
  item.setFieldValue(OrcRecordUpdater.ROW, row);
  indexBuilder.addKey(operation, originalTransaction, bucket.get(), rowId);
  writer.addRow(item);
}
/**
 * Call this method to make changes done through {@code putCharacter(...)}, {@code putString(...)}
 * visible on the terminal. The screen will calculate the changes that are required and send the
 * necessary characters and control sequences to make it so.
 */
public void refresh() {
  if (!hasBeenActivated)
    return;

  synchronized (mutex) {
    // If any resize operations are in the queue, execute them
    resizeScreenIfNeeded();

    Map<TerminalPosition, ScreenCharacter> updateMap =
        new TreeMap<TerminalPosition, ScreenCharacter>(new ScreenPointComparator());

    for (int y = 0; y < terminalSize.getRows(); y++) {
      for (int x = 0; x < terminalSize.getColumns(); x++) {
        ScreenCharacter c = backbuffer[y][x];
        if (!c.equals(visibleScreen[y][x]) || wholeScreenInvalid) {
          visibleScreen[y][x] = c;
          // Remember, ScreenCharacter is immutable, we don't need to worry about it being modified
          updateMap.put(new TerminalPosition(x, y), c);
        }
      }
    }

    Writer terminalWriter = new Writer();
    terminalWriter.reset();
    TerminalPosition previousPoint = null;
    for (TerminalPosition nextUpdate : updateMap.keySet()) {
      if (previousPoint == null
          || previousPoint.getRow() != nextUpdate.getRow()
          || previousPoint.getColumn() + 1 != nextUpdate.getColumn()) {
        terminalWriter.setCursorPosition(nextUpdate.getColumn(), nextUpdate.getRow());
      }
      terminalWriter.writeCharacter(updateMap.get(nextUpdate));
      previousPoint = nextUpdate;
    }
    terminalWriter.setCursorPosition(getCursorPosition().getColumn(), getCursorPosition().getRow());
    wholeScreenInvalid = false;
  }
  terminal.flush();
}
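// A small, self-contained sketch of the back-buffer diffing idea used by refresh() above. The
// "terminal" here is just System.out and a cell is a plain char; the real code also handles
// colours, control sequences and resizing, but the core loop is the same: compare the back buffer
// with what is currently visible, and only reposition the cursor when the next dirty cell does
// not immediately follow the previous one on the same row.
public class ScreenDiffSketch {
  public static void main(String[] args) {
    char[][] visible = {"hello".toCharArray(), "world".toCharArray()};
    char[][] back    = {"hallo".toCharArray(), "would".toCharArray()};

    int prevRow = -1, prevCol = -2;
    for (int y = 0; y < back.length; y++) {
      for (int x = 0; x < back[y].length; x++) {
        if (back[y][x] == visible[y][x]) {
          continue; // cell unchanged, nothing to send
        }
        if (y != prevRow || x != prevCol + 1) {
          System.out.println("move cursor to (" + x + "," + y + ")");
        }
        System.out.println("write '" + back[y][x] + "'");
        visible[y][x] = back[y][x]; // remember what is now on screen
        prevRow = y;
        prevCol = x;
      }
    }
  }
}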
private void writeValue(Object value) throws IOException {
  if (value == null) {
    generator.writeNull();
    return;
  }
  Class<?> type = value.getClass();
  Writer writer = MAP.get(type);
  if (writer != null) {
    writer.write(generator, value);
  } else if (value instanceof Map) {
    writeMap((Map) value);
  } else if (value instanceof Path) {
    // Path implements Iterable<Path> and causes endless recursion and a StackOverflowError if
    // treated as an Iterable here
    generator.writeString(value.toString());
  } else if (value instanceof Iterable) {
    writeIterable((Iterable<?>) value);
  } else if (value instanceof Object[]) {
    writeObjectArray((Object[]) value);
  } else if (value instanceof Date) {
    generator.writeString(XContentBuilder.defaultDatePrinter.print(((Date) value).getTime()));
  } else if (value instanceof Calendar) {
    generator.writeString(
        XContentBuilder.defaultDatePrinter.print(((Calendar) value).getTimeInMillis()));
  } else if (value instanceof ReadableInstant) {
    generator.writeString(
        XContentBuilder.defaultDatePrinter.print(((ReadableInstant) value).getMillis()));
  } else if (value instanceof BytesReference) {
    writeBytesReference((BytesReference) value);
  } else if (value instanceof ToXContent) {
    ((ToXContent) value).toXContent(this, ToXContent.EMPTY_PARAMS);
  } else {
    // if this is a "value" object, like enum, DistanceUnit, ..., just toString it
    // yes, it can be misleading to toString a Java class, but really, jackson should be used in
    // that case
    generator.writeString(value.toString());
    // throw new ElasticsearchIllegalArgumentException("type not supported for generic value
    // conversion: " + type);
  }
}
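// A minimal sketch of the same dispatch strategy with hypothetical names: exact-type handlers are
// looked up in a map first (cheap and unambiguous), and only when that misses do we fall back to
// ordered instanceof checks for broader interfaces such as Iterable. This mirrors the structure
// of writeValue above without depending on the real generator or XContent types.
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class ValueDispatchSketch {
  private static final Map<Class<?>, Function<Object, String>> EXACT = new HashMap<>();
  static {
    EXACT.put(Integer.class, v -> "int:" + v);
    EXACT.put(String.class, v -> "str:\"" + v + "\"");
  }

  static String render(Object value) {
    if (value == null) {
      return "null";
    }
    Function<Object, String> exact = EXACT.get(value.getClass());
    if (exact != null) {
      return exact.apply(value);       // fast path: exact runtime type
    }
    if (value instanceof Iterable) {   // fallback: broader interface checks, in a fixed order
      StringBuilder sb = new StringBuilder("[");
      for (Object element : (Iterable<?>) value) {
        if (sb.length() > 1) sb.append(", ");
        sb.append(render(element));
      }
      return sb.append("]").toString();
    }
    return "raw:" + value;             // last resort: toString, as in the original
  }

  public static void main(String[] args) {
    System.out.println(render(42));
    System.out.println(render(List.of("a", 1, List.of(2, 3))));
  }
}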
public void testMultiThreaded() throws InterruptedException {
  int NUM_THREADS = 20;
  Writer[] w = new Writer[NUM_THREADS];
  CountDownLatch startLatch = new CountDownLatch(1);

  for (int i = 0; i < NUM_THREADS; i++)
    w[i] = new Writer(i, startLatch);

  for (Writer writer : w)
    writer.start();

  startLatch.countDown();

  Thread.sleep(250);

  // now stop writers
  for (Writer writer : w)
    writer.running = false;
  for (Writer writer : w)
    writer.join();

  // wait for the cache size to drop to CACHE_SIZE, up to a specified amount of time.
  long giveUpTime = System.currentTimeMillis() + (1000 * 10); // 10 sec
  while (cache.getAdvancedCache().getDataContainer().size() > 1
      && System.currentTimeMillis() < giveUpTime) {
    // System.out.println("Cache size is " + cache.size() + " and time diff is "
    //     + (giveUpTime - System.currentTimeMillis()));
    Thread.sleep(100);
  }

  assert cache.getAdvancedCache().getDataContainer().size() <= CACHE_SIZE
      : "Expected 1, was " + cache.size(); // this is what we expect the cache to be pruned to
}
@Override
public void flush() throws IOException {
  // We only support flushes on files with multiple transactions, because
  // flushes create significant overhead in HDFS. Record updaters with a
  // single transaction should be closed rather than flushed.
  if (flushLengths == null) {
    throw new IllegalStateException(
        "Attempting to flush a RecordUpdater on " + path + " with a single transaction.");
  }
  long len = writer.writeIntermediateFooter();
  flushLengths.writeLong(len);
  OrcInputFormat.SHIMS.hflush(flushLengths);
}