/** * Fills the current buffer with bytes from the specified array, starting at the specified offset. * * @param s source array * @param o offset from the beginning of the array * @return number of bytes written */ private int write(final byte[] s, final int o) { final Buffer bf = bm.current(); final int len = Math.min(IO.BLOCKSIZE, s.length - o); System.arraycopy(s, o, bf.data, 0, len); bf.dirty = true; return len; }
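A minimal sketch of how a caller might use the returned count to copy an arbitrarily long array block by block; nextBlock() is a hypothetical hook for moving to a fresh buffer and is not part of the original class.

// Hypothetical caller loop: copy the whole array one block at a time.
// nextBlock() stands in for whatever mechanism selects the next buffer (assumption).
private void writeAll(final byte[] s) {
  int o = 0;
  while (o < s.length) {
    o += write(s, o); // advance by the number of bytes actually written
    if (o < s.length) nextBlock();
  }
}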
@Override public void serialize(Buffer buf) { buf.writeString(name); buf.writeShort(worldX); buf.writeShort(worldY); buf.writeString(ownername); }
private static void _testMessage(Message msg) throws Exception { Buffer buf = Util.messageToByteBuffer(msg); Message msg2 = Util.byteBufferToMessage(buf.getBuf(), buf.getOffset(), buf.getLength()); Assert.assertEquals(msg.getSrc(), msg2.getSrc()); Assert.assertEquals(msg.getDest(), msg2.getDest()); Assert.assertEquals(msg.getLength(), msg2.getLength()); }
/** * @param cancel {@code True} to close with cancellation. * @throws GridException If failed. */ @Override public void close(boolean cancel) throws GridException { if (!closed.compareAndSet(false, true)) return; busyLock.block(); if (log.isDebugEnabled()) log.debug("Closing data loader [ldr=" + this + ", cancel=" + cancel + ']'); GridException e = null; try { // Assuming that no methods are called on this loader after this method is called. if (cancel) { cancelled = true; for (Buffer buf : bufMappings.values()) buf.cancelAll(); } else doFlush(); ctx.event().removeLocalEventListener(discoLsnr); ctx.io().removeMessageListener(topic); } catch (GridException e0) { e = e0; } fut.onDone(null, e); if (e != null) throw e; }
private boolean step() { if (next != NONE) return true; while (next == NONE) { if (buffer.isEmpty()) { if (completed) { return false; } else if (sourceIter.hasNext()) { Object iter = null; if (multi) iter = xf.applyTo(RT.cons(null, sourceIter.next())); else iter = xf.invoke(null, sourceIter.next()); if (RT.isReduced(iter)) { xf.invoke(null); completed = true; } } else { xf.invoke(null); completed = true; } } else { next = buffer.remove(); } } return true; }
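A minimal sketch, under assumptions, of how step() typically backs an Iterator: the field names next and NONE follow the snippet above, but the hasNext()/next() wiring shown here is illustrative rather than the original class.

// Illustrative Iterator wiring around step() (assumption, not the original source):
// hasNext() stages a value, next() hands it out and clears the staging slot.
public boolean hasNext() {
    return step();
}

public Object next() {
    if (!step())
        throw new java.util.NoSuchElementException();
    Object ret = next;
    next = NONE; // clear the staged value so step() pulls a fresh one
    return ret;
}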
@Override public void write1(final int pre, final int off, final int v) { final int o = off + cursor(pre); final Buffer bf = bm.current(); final byte[] b = bf.data; b[o] = (byte) v; bf.dirty = true; }
@Override protected void copy(final byte[] entries, final int pre, final int last) { for (int o = 0, i = pre; i < last; ++i, o += IO.NODESIZE) { final int off = cursor(i); final Buffer bf = bm.current(); System.arraycopy(entries, o, bf.data, off, IO.NODESIZE); bf.dirty = true; } }
@Override public void write4(final int pre, final int off, final int v) { final int o = off + cursor(pre); final Buffer bf = bm.current(); final byte[] b = bf.data; b[o] = (byte) (v >>> 24); b[o + 1] = (byte) (v >>> 16); b[o + 2] = (byte) (v >>> 8); b[o + 3] = (byte) v; bf.dirty = true; }
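For contrast, a hedged sketch of the inverse operation, reassembling the big-endian int that write4 lays out; the name read4 and its presence in the class are assumptions.

// Hypothetical counterpart to write4: rebuild the int from four big-endian bytes.
public int read4(final int pre, final int off) {
  final int o = off + cursor(pre);
  final byte[] b = bm.current().data;
  return ((b[o] & 0xFF) << 24) | ((b[o + 1] & 0xFF) << 16)
      | ((b[o + 2] & 0xFF) << 8) | (b[o + 3] & 0xFF);
}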
public void render(VisCanvas vc, VisLayer layer, VisCanvas.RenderInfo rinfo, GL gl) { synchronized (buffers) { for (Buffer b : buffers) { if (!b.isEnabled()) continue; synchronized (b) { for (VisObject vo : b.front) { if (vo != null) vo.render(vc, layer, rinfo, gl); } } } } }
/** * Flushes every internal buffer and records the flush time. * * <p>Does not wait for the result and does not fail on errors, since this method is expected to be called periodically. */ @Override public void tryFlush() throws GridInterruptedException { if (!busyLock.enterBusy()) return; try { for (Buffer buf : bufMappings.values()) buf.flush(); lastFlushTime = U.currentTimeMillis(); } finally { leaveBusy(); } }
public void captureImage() { String savepath = this.saveDirectory + "\\cam" + this.getDateFormatNow("yyyyMMdd_HHmmss-S") + ".jpg"; System.out.println("Capturing current image to " + savepath); // Grab a frame FrameGrabbingControl fgc = (FrameGrabbingControl) player.getControl("javax.media.control.FrameGrabbingControl"); buf = fgc.grabFrame(); // Convert it to an image btoi = new BufferToImage((VideoFormat) buf.getFormat()); img = btoi.createImage(buf); // save image saveJPG(img, savepath); // show the image // imgpanel.setImage(img); // images.add(img); images.add(savepath); if (images_lastadded.size() >= lastadded_max) { // Remove last images_lastadded.remove(images_lastadded.size() - 1); } images_lastadded.add(0, images.size() - 1); images_nevershown.add(0, images.size() - 1); forceNewImage(); }
@Override public void deserialize(Buffer buf) { name = buf.readString(); worldX = buf.readShort(); if (worldX < -255 || worldX > 255) throw new RuntimeException( "Forbidden value on worldX = " + worldX + ", it doesn't respect the following condition : worldX < -255 || worldX > 255"); worldY = buf.readShort(); if (worldY < -255 || worldY > 255) throw new RuntimeException( "Forbidden value on worldY = " + worldY + ", it doesn't respect the following condition : worldY < -255 || worldY > 255"); ownername = buf.readString(); }
/** * Reads a block from disk. * * @param b block to fetch */ private void readBlock(final int b) { if (!bm.cursor(b)) return; final Buffer bf = bm.current(); try { if (bf.dirty) writeBlock(bf); bf.pos = b; if (b >= blocks) { blocks = b + 1; } else { file.seek(bf.pos * IO.BLOCKSIZE); file.readFully(bf.data); } } catch (final IOException ex) { Util.stack(ex); } }
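readBlock flushes dirty buffers through writeBlock before repositioning; that counterpart is not included in this collection, so the following is a hedged sketch of what it plausibly does.

// Hypothetical counterpart to readBlock: write a dirty buffer back to its block on disk.
private void writeBlock(final Buffer bf) throws IOException {
  file.seek(bf.pos * IO.BLOCKSIZE);
  file.write(bf.data);
  bf.dirty = false;
}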
/** * Copies the content of the most recently received packet into <tt>data</tt>. * * @param buffer an optional <tt>Buffer</tt> instance associated with the specified <tt>data</tt>, * <tt>offset</tt> and <tt>length</tt> and provided to the method in case the implementation * would like to provide additional <tt>Buffer</tt> properties such as <tt>flags</tt> * @param data the <tt>byte[]</tt> that we'd like to copy the content of the packet to. * @param offset the position where we are supposed to start writing in <tt>data</tt>. * @param length the number of <tt>byte</tt>s available for writing in <tt>data</tt>. * @return the number of bytes read * @throws IOException if <tt>length</tt> is less than the size of the packet. */ protected int read(Buffer buffer, byte[] data, int offset, int length) throws IOException { if (data == null) throw new NullPointerException("data"); if (ioError) return -1; RawPacket pkt; synchronized (pktSyncRoot) { pkt = this.pkt; this.pkt = null; } int pktLength; if (pkt == null) { pktLength = 0; } else { // By default, pkt will be returned to the pool after it was read. boolean poolPkt = true; try { pktLength = pkt.getLength(); if (length < pktLength) { /* * If pkt is still the latest RawPacket made available to * reading, reinstate it for the next invocation of read; * otherwise, return it to the pool. */ poolPkt = false; throw new IOException("Input buffer not big enough for " + pktLength); } else { byte[] pktBuffer = pkt.getBuffer(); if (pktBuffer == null) { throw new NullPointerException( "pkt.buffer null, pkt.length " + pktLength + ", pkt.offset " + pkt.getOffset()); } else { System.arraycopy(pkt.getBuffer(), pkt.getOffset(), data, offset, pktLength); if (buffer != null) buffer.setFlags(pkt.getFlags()); } } } finally { if (!poolPkt) { synchronized (pktSyncRoot) { if (this.pkt == null) this.pkt = pkt; else poolPkt = true; } } if (poolPkt) { // Return pkt to the pool because it was successfully read. poolRawPacket(pkt); } } } return pktLength; }
@Override public void deserialize(Buffer buf) { houseId = buf.readInt(); if (houseId < 0) throw new RuntimeException( "Forbidden value on houseId = " + houseId + ", it doesn't respect the following condition : houseId < 0"); }
/** * Loads the specified buffer. The default implementation posts an I/O request to the I/O thread. * * @param view The view * @param buffer The buffer * @param path The path */ public boolean load(View view, Buffer buffer, String path) { if ((getCapabilities() & READ_CAP) == 0) { VFSManager.error(view, path, "vfs.not-supported.load", new String[] {name}); return false; } Object session = createVFSSession(path, view); if (session == null) return false; if ((getCapabilities() & WRITE_CAP) == 0) buffer.setReadOnly(true); BufferIORequest request = new BufferLoadRequest(view, buffer, session, this, path); if (buffer.isTemporary()) // this makes HyperSearch much faster request.run(); else VFSManager.runInWorkThread(request); return true; } // }}}
/** * Saves the specified buffer. The default implementation posts an I/O request to the I/O thread. * * @param view The view * @param buffer The buffer * @param path The path */ public boolean save(View view, Buffer buffer, String path) { if ((getCapabilities() & WRITE_CAP) == 0) { VFSManager.error(view, path, "vfs.not-supported.save", new String[] {name}); return false; } Object session = createVFSSession(path, view); if (session == null) return false; /* When doing a 'save as', the path to save to (path) * will not be the same as the buffer's previous path * (buffer.getPath()). In that case, we want to create * a backup of the new path, even if the old path was * backed up as well (BACKED_UP property set) */ if (!path.equals(buffer.getPath())) buffer.unsetProperty(Buffer.BACKED_UP); VFSManager.runInWorkThread(new BufferSaveRequest(view, buffer, session, this, path)); return true; } // }}}
@Override public void deserialize(Buffer buf) { objectUID = buf.readInt(); if (objectUID < 0) throw new RuntimeException( "Forbidden value on objectUID = " + objectUID + ", it doesn't respect the following condition : objectUID < 0"); foodUID = buf.readInt(); if (foodUID < 0) throw new RuntimeException( "Forbidden value on foodUID = " + foodUID + ", it doesn't respect the following condition : foodUID < 0"); foodQuantity = buf.readShort(); if (foodQuantity < 0) throw new RuntimeException( "Forbidden value on foodQuantity = " + foodQuantity + ", it doesn't respect the following condition : foodQuantity < 0"); }
public VideoTrack(PullSourceStream stream) throws ResourceUnavailableException { super(); this.stream = stream; // set format // read first frame to determine format final Buffer buffer = new Buffer(); readFrame(buffer); if (buffer.isDiscard() || buffer.isEOM()) throw new ResourceUnavailableException("Unable to read first frame"); // TODO: catch runtime exception too? // parse jpeg final java.awt.Image image; try { image = ImageIO.read( new ByteArrayInputStream( (byte[]) buffer.getData(), buffer.getOffset(), buffer.getLength())); } catch (IOException e) { logger.log(Level.WARNING, "" + e, e); throw new ResourceUnavailableException("Error reading image: " + e); } if (image == null) { logger.log(Level.WARNING, "Failed to read image (ImageIO.read returned null)."); throw new ResourceUnavailableException(); } if (frameContentType.equals("image/jpeg")) format = new JPEGFormat( new Dimension(image.getWidth(null), image.getHeight(null)), Format.NOT_SPECIFIED, Format.byteArray, -1.f, Format.NOT_SPECIFIED, Format.NOT_SPECIFIED); else if (frameContentType.equals("image/gif")) format = new GIFFormat( new Dimension(image.getWidth(null), image.getHeight(null)), Format.NOT_SPECIFIED, Format.byteArray, -1.f); else if (frameContentType.equals("image/png")) format = new PNGFormat( new Dimension(image.getWidth(null), image.getHeight(null)), Format.NOT_SPECIFIED, Format.byteArray, -1.f); else throw new ResourceUnavailableException( "Unsupported frame content type: " + frameContentType); // TODO: this discards first image. save and return first time // readFrame is called. }
@Override public void serialize(Buffer buf) { short flag1 = 0; flag1 = BooleanByteWrapper.setFlag(flag1, 0, isOnSale); flag1 = BooleanByteWrapper.setFlag(flag1, 1, isSaleLocked); buf.writeUByte(flag1); buf.writeInt(houseId); buf.writeUShort(doorsOnMap.length); for (int entry : doorsOnMap) { buf.writeInt(entry); } buf.writeString(ownerName); buf.writeShort(modelId); }
@Override public void deserialize(Buffer buf) { short flag1 = buf.readUByte(); isOnSale = BooleanByteWrapper.getFlag(flag1, 0); isSaleLocked = BooleanByteWrapper.getFlag(flag1, 1); houseId = buf.readInt(); if (houseId < 0) throw new RuntimeException( "Forbidden value on houseId = " + houseId + ", it doesn't respect the following condition : houseId < 0"); int limit = buf.readUShort(); doorsOnMap = new int[limit]; for (int i = 0; i < limit; i++) { doorsOnMap[i] = buf.readInt(); } ownerName = buf.readString(); modelId = buf.readShort(); if (modelId < 0) throw new RuntimeException( "Forbidden value on modelId = " + modelId + ", it doesn't respect the following condition : modelId < 0"); }
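The two methods above rely on BooleanByteWrapper to pack booleans into single bits of a flag byte; its source is not part of this collection, so the following is a hedged sketch of the bit manipulation it presumably performs.

// Hedged sketch of the flag helper used above (assumption, not the actual library source).
public final class BooleanByteWrapper {
  private BooleanByteWrapper() { }

  // Set or clear bit `offset` of the flag byte.
  public static short setFlag(final short flag, final int offset, final boolean value) {
    return value ? (short) (flag | (1 << offset)) : (short) (flag & ~(1 << offset));
  }

  // Read bit `offset` of the flag byte.
  public static boolean getFlag(final short flag, final int offset) {
    return (flag & (1 << offset)) != 0;
  }
}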
@Override public void readFrame(Buffer buffer) { // example data: // --ssBoundary8345 // Content-Type: image/jpeg // Content-Length: 114587 try { String line; // eat leading blank lines while (true) { line = readLine(MAX_LINE_LENGTH); if (line == null) { buffer.setEOM(true); buffer.setLength(0); return; } if (!line.trim().equals("")) break; // end of header } if (boundary == null) { boundary = line.trim(); // TODO: we should be able to get // this from the content type, but // the content type has this // stripped out. So we'll just take // the first nonblank line to be the // boundary. // System.out.println("boundary: " + boundary); } else { if (!line.trim().equals(boundary)) { // throw new IOException("Expected boundary: " + // toPrintable(line)); // TODO: why do we seem to get these when playing back // mmr files recorded using FmjTranscode? logger.warning("Expected boundary (frame " + framesRead + "): " + toPrintable(line)); // handle streams that are truncated in the middle of a // frame: final int eatResult = eatUntil(boundary); // TODO: no // need to // store the // data logger.info( "Ignored bytes (eom after=" + (eatResult < 0) + "): " + (eatResult < 0 ? (-1 * eatResult - 1) : eatResult)); if (eatResult < 0) { buffer.setEOM(true); buffer.setLength(0); return; } // now read boundary line = readLine(MAX_LINE_LENGTH); if (!line.trim().equals(boundary)) { throw new RuntimeException("No boundary found after eatUntil(boundary)"); // should // never // happen } } } final Properties properties = new Properties(); while (true) { line = readLine(MAX_LINE_LENGTH); if (line == null) { buffer.setEOM(true); buffer.setLength(0); return; } if (line.trim().equals("")) break; // end of header if (!parseProperty(line, properties)) throw new IOException("Expected property: " + toPrintable(line)); } final String contentType = properties.getProperty("Content-Type".toUpperCase()); if (contentType == null) { logger.warning("Header properties: " + properties); throw new IOException("Expected Content-Type in header"); } // check supported content types: if (!isSupportedFrameContentType(contentType)) { throw new IOException("Unsupported Content-Type: " + contentType); } if (frameContentType == null) { frameContentType = contentType; } else { if (!contentType.equals(frameContentType)) throw new IOException( "Content type changed during stream from " + frameContentType + " to " + contentType); } // TODO: check that size doesn't change throughout final byte[] data; final String contentLenStr = properties.getProperty("Content-Length".toUpperCase()); if (contentLenStr != null) { // if we know the content length, use it final int contentLen; try { contentLen = Integer.parseInt(contentLenStr); } catch (NumberFormatException e) { throw new IOException("Invalid content length: " + contentLenStr); } // now, read the content-length bytes data = readFully(contentLen); // TODO: don't realloc each // time } else { // if we don't know the content length, just read until we // find the boundary. 
// Some IP cameras don't specify it, like // http://webcam-1.duesseldorf.it-on.net/cgi-bin/nph-update.cgi data = readUntil(boundary); } // ext final String timestampStr = properties.getProperty(TIMESTAMP_KEY.toUpperCase()); if (timestampStr != null) { try { final long timestamp = Long.parseLong(timestampStr); buffer.setTimeStamp(timestamp); } catch (NumberFormatException e) { logger.log(Level.WARNING, "" + e, e); } } if (data == null) { buffer.setEOM(true); buffer.setLength(0); return; } buffer.setData(data); buffer.setOffset(0); buffer.setLength(data.length); ++framesRead; } catch (IOException e) { throw new RuntimeException(e); } }
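readFrame leans on a parseProperty helper that is not shown here; below is a hedged sketch of how it might split "Name: value" header lines, storing keys upper-cased so the later getProperty(...toUpperCase()) lookups succeed.

// Hypothetical header parser matching the lookups above; the real implementation may differ.
private boolean parseProperty(final String line, final Properties properties) {
    final int colon = line.indexOf(':');
    if (colon < 0)
        return false; // not a "Name: value" header line
    properties.setProperty(line.substring(0, colon).trim().toUpperCase(),
                           line.substring(colon + 1).trim());
    return true;
}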
public void prep(Buffer buf) { buf.put(proj2d, this); }
/** {@inheritDoc} */ @Override protected int doProcess(Buffer inBuffer, Buffer outBuffer) { byte[] inData = (byte[]) inBuffer.getData(); int inOffset = inBuffer.getOffset(); if (!VP8PayloadDescriptor.isValid(inData, inOffset)) { logger.warn("Invalid RTP/VP8 packet discarded."); outBuffer.setDiscard(true); return BUFFER_PROCESSED_FAILED; // XXX: FAILED or OK? } long inSeq = inBuffer.getSequenceNumber(); long inRtpTimestamp = inBuffer.getRtpTimeStamp(); int inPictureId = VP8PayloadDescriptor.getPictureId(inData, inOffset); boolean inMarker = (inBuffer.getFlags() & Buffer.FLAG_RTP_MARKER) != 0; boolean inIsStartOfFrame = VP8PayloadDescriptor.isStartOfFrame(inData, inOffset); int inLength = inBuffer.getLength(); int inPdSize = VP8PayloadDescriptor.getSize(inData, inOffset); int inPayloadLength = inLength - inPdSize; if (empty && lastSentSeq != -1 && seqNumComparator.compare(inSeq, lastSentSeq) != 1) { if (logger.isInfoEnabled()) logger.info("Discarding old packet (while empty) " + inSeq); outBuffer.setDiscard(true); return BUFFER_PROCESSED_OK; } if (!empty) { // if the incoming packet has a different PictureID or timestamp // than those of the current frame, then it belongs to a different // frame. if ((inPictureId != -1 && pictureId != -1 && inPictureId != pictureId) || (timestamp != -1 && inRtpTimestamp != -1 && inRtpTimestamp != timestamp)) { if (seqNumComparator.compare(inSeq, firstSeq) != 1) // inSeq <= firstSeq { // the packet belongs to a previous frame. discard it if (logger.isInfoEnabled()) logger.info("Discarding old packet " + inSeq); outBuffer.setDiscard(true); return BUFFER_PROCESSED_OK; } else // inSeq > firstSeq (and also presumably inSeq > lastSeq) { // the packet belongs to a subsequent frame (to the one // currently being held). Drop the current frame. if (logger.isInfoEnabled()) logger.info( "Discarding saved packets on arrival of" + " a packet for a subsequent frame: " + inSeq); // TODO: this would be the place to complain about the // not-well-received PictureID by sending a RTCP SLI or NACK. reinit(); } } } // a whole frame in a single packet. avoid the extra copy to // this.data and output it immediately. 
if (empty && inMarker && inIsStartOfFrame) { byte[] outData = validateByteArraySize(outBuffer, inPayloadLength, false); System.arraycopy(inData, inOffset + inPdSize, outData, 0, inPayloadLength); outBuffer.setOffset(0); outBuffer.setLength(inPayloadLength); outBuffer.setRtpTimeStamp(inBuffer.getRtpTimeStamp()); if (TRACE) logger.trace("Out PictureID=" + inPictureId); lastSentSeq = inSeq; return BUFFER_PROCESSED_OK; } // add to this.data Container container = free.poll(); if (container == null) container = new Container(); if (container.buf == null || container.buf.length < inPayloadLength) container.buf = new byte[inPayloadLength]; if (data.get(inSeq) != null) { if (logger.isInfoEnabled()) logger.info("(Probable) duplicate packet detected, discarding " + inSeq); outBuffer.setDiscard(true); return BUFFER_PROCESSED_OK; } System.arraycopy(inData, inOffset + inPdSize, container.buf, 0, inPayloadLength); container.len = inPayloadLength; data.put(inSeq, container); // update fields frameLength += inPayloadLength; if (firstSeq == -1 || (seqNumComparator.compare(firstSeq, inSeq) == 1)) firstSeq = inSeq; if (lastSeq == -1 || (seqNumComparator.compare(inSeq, lastSeq) == 1)) lastSeq = inSeq; if (empty) { // the first received packet for the current frame was just added empty = false; timestamp = inRtpTimestamp; pictureId = inPictureId; } if (inMarker) haveEnd = true; if (inIsStartOfFrame) haveStart = true; // check if we have a full frame if (frameComplete()) { byte[] outData = validateByteArraySize(outBuffer, frameLength, false); int ptr = 0; Container b; for (Map.Entry<Long, Container> entry : data.entrySet()) { b = entry.getValue(); System.arraycopy(b.buf, 0, outData, ptr, b.len); ptr += b.len; } outBuffer.setOffset(0); outBuffer.setLength(frameLength); outBuffer.setRtpTimeStamp(inBuffer.getRtpTimeStamp()); if (TRACE) logger.trace("Out PictureID=" + inPictureId); lastSentSeq = lastSeq; // prepare for the next frame reinit(); return BUFFER_PROCESSED_OK; } else { // frame not complete yet outBuffer.setDiscard(true); return OUTPUT_BUFFER_NOT_FILLED; } }
/** * Performs flush. * * @throws GridException If failed. */ private void doFlush() throws GridException { lastFlushTime = U.currentTimeMillis(); List<GridFuture> activeFuts0 = null; int doneCnt = 0; for (GridFuture<?> f : activeFuts) { if (!f.isDone()) { if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2)); activeFuts0.add(f); } else { f.get(); doneCnt++; } } if (activeFuts0 == null || activeFuts0.isEmpty()) return; while (true) { Queue<GridFuture<?>> q = null; for (Buffer buf : bufMappings.values()) { GridFuture<?> flushFut = buf.flush(); if (flushFut != null) { if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2); q.add(flushFut); } } if (q != null) { assert !q.isEmpty(); boolean err = false; for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) { try { fut.get(); } catch (GridException e) { if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e); err = true; } } if (err) // Remaps needed - flush buffers. continue; } doneCnt = 0; for (int i = 0; i < activeFuts0.size(); i++) { GridFuture f = activeFuts0.get(i); if (f == null) doneCnt++; else if (f.isDone()) { f.get(); doneCnt++; activeFuts0.set(i, null); } else break; } if (doneCnt == activeFuts0.size()) return; } }
/** * @param entries Entries. * @param resFut Result future. * @param activeKeys Active keys. * @param remaps Remaps count. */ private void load0( Collection<? extends Map.Entry<K, V>> entries, final GridFutureAdapter<Object> resFut, final Collection<K> activeKeys, final int remaps) { assert entries != null; if (remaps >= MAX_REMAP_CNT) { resFut.onDone(new GridException("Failed to finish operation (too many remaps): " + remaps)); return; } Map<GridNode, Collection<Map.Entry<K, V>>> mappings = new HashMap<>(); boolean initPda = ctx.deploy().enabled() && jobPda == null; for (Map.Entry<K, V> entry : entries) { GridNode node; try { K key = entry.getKey(); assert key != null; if (initPda) { jobPda = new DataLoaderPda(key, entry.getValue(), updater); initPda = false; } node = ctx.affinity().mapKeyToNode(cacheName, key); } catch (GridException e) { resFut.onDone(e); return; } if (node == null) { resFut.onDone( new GridTopologyException( "Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']')); return; } Collection<Map.Entry<K, V>> col = mappings.get(node); if (col == null) mappings.put(node, col = new ArrayList<>()); col.add(entry); } for (final Map.Entry<GridNode, Collection<Map.Entry<K, V>>> e : mappings.entrySet()) { final UUID nodeId = e.getKey().id(); Buffer buf = bufMappings.get(nodeId); if (buf == null) { Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey())); if (old != null) buf = old; } final Collection<Map.Entry<K, V>> entriesForNode = e.getValue(); GridInClosure<GridFuture<?>> lsnr = new GridInClosure<GridFuture<?>>() { @Override public void apply(GridFuture<?> t) { try { t.get(); for (Map.Entry<K, V> e : entriesForNode) activeKeys.remove(e.getKey()); if (activeKeys.isEmpty()) resFut.onDone(); } catch (GridException e1) { if (log.isDebugEnabled()) log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']'); if (cancelled) { resFut.onDone( new GridException( "Data loader has been cancelled: " + GridDataLoaderImpl.this, e1)); } else load0(entriesForNode, resFut, activeKeys, remaps + 1); } } }; GridFutureAdapter<?> f; try { f = buf.update(entriesForNode, lsnr); } catch (GridInterruptedException e1) { resFut.onDone(e1); return; } if (ctx.discovery().node(nodeId) == null) { if (bufMappings.remove(nodeId, buf)) buf.onNodeLeft(); if (f != null) f.onDone( new GridTopologyException( "Failed to wait for request completion " + "(node has left): " + nodeId)); } } }
public void write(Object obj, Buffer buf) { byte[] o = (byte[]) obj; int num = o.length; buf.writeI32(num); buf.writeBytes(o, 0, num); }
public Object read(Buffer buf) { int num = buf.readI32(); byte[] o = new byte[num]; buf.readBytes(o, 0, num); return o; }
public void scan(Buffer buf) { int num = buf.readI32(); buf.drop(num); }
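A hedged round-trip sketch tying the three codec methods together; newBuffer() and rewind() are assumptions, since this collection does not show how a Buffer is created or repositioned.

// Hypothetical usage of the byte[] codec above (newBuffer()/rewind() are assumed APIs).
void roundTripExample() {
  Buffer buf = newBuffer();
  byte[] payload = {1, 2, 3, 4};
  write(payload, buf);               // length prefix (writeI32) followed by the raw bytes
  buf.rewind();                      // reposition for reading
  byte[] copy = (byte[]) read(buf);  // reads the same framing back
  // scan(buf) would instead skip the record without allocating an array
}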
@Override public void serialize(Buffer buf) { buf.writeInt(houseId); }