private int send(FileDescriptor fd, ByteBuffer src, SocketAddress target)
    throws IOException
{
    if (src instanceof DirectBuffer)
        return sendFromNativeBuffer(fd, src, target);

    // Substitute a native buffer
    int pos = src.position();
    int lim = src.limit();
    assert (pos <= lim);
    int rem = (pos <= lim ? lim - pos : 0);

    ByteBuffer bb = Util.getTemporaryDirectBuffer(rem);
    try {
        bb.put(src);
        bb.flip();
        // Do not update src until we see how many bytes were written
        src.position(pos);

        int n = sendFromNativeBuffer(fd, bb, target);
        if (n > 0) {
            // now update src
            src.position(pos + n);
        }
        return n;
    } finally {
        Util.releaseTemporaryDirectBuffer(bb);
    }
}
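/*
 * A minimal, self-contained sketch of the same substitution pattern using only the
 * public java.nio API. The method above relies on internal helpers such as
 * Util.getTemporaryDirectBuffer and sendFromNativeBuffer; the "nativeSend" callback
 * below is a hypothetical stand-in for the native write. The key point is the same:
 * the source position is only advanced by the number of bytes actually written.
 */
import java.nio.ByteBuffer;
import java.util.function.ToIntFunction;

final class DirectCopySketch {
    static int sendViaDirectCopy(ByteBuffer src, ToIntFunction<ByteBuffer> nativeSend) {
        int pos = src.position();
        ByteBuffer direct = ByteBuffer.allocateDirect(src.remaining());
        direct.put(src);       // copy heap bytes into a direct buffer
        direct.flip();
        src.position(pos);     // do not touch src until we know how much was written
        int n = nativeSend.applyAsInt(direct);
        if (n > 0) src.position(pos + n);  // advance src only by what was consumed
        return n;
    }
}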
@Override
public boolean lock(final boolean lock) {
    try {
        if (lock) {
            if (exclusiveLock()) return true;
            if (sharedLock()) return false;
        } else {
            if (sharedLock()) return true;
        }
    } catch (final IOException ex) {
        Util.stack(ex);
    }
    throw Util.notExpected((lock ? "Exclusive" : "Shared") + " lock could not be acquired.");
}
public SocketAddress receive(ByteBuffer dst) throws IOException {
    // check for null before dereferencing the buffer
    if (dst == null)
        throw new NullPointerException();
    if (dst.isReadOnly())
        throw new IllegalArgumentException("Read-only buffer");

    synchronized (readLock) {
        ensureOpen();
        // Socket was not bound before attempting receive
        if (localAddress() == null)
            bind(null);
        int n = 0;
        ByteBuffer bb = null;
        try {
            begin();
            if (!isOpen())
                return null;
            SecurityManager security = System.getSecurityManager();
            readerThread = NativeThread.current();
            if (isConnected() || (security == null)) {
                do {
                    n = receive(fd, dst);
                } while ((n == IOStatus.INTERRUPTED) && isOpen());
                if (n == IOStatus.UNAVAILABLE)
                    return null;
            } else {
                bb = Util.getTemporaryDirectBuffer(dst.remaining());
                for (;;) {
                    do {
                        n = receive(fd, bb);
                    } while ((n == IOStatus.INTERRUPTED) && isOpen());
                    if (n == IOStatus.UNAVAILABLE)
                        return null;
                    InetSocketAddress isa = (InetSocketAddress) sender;
                    try {
                        security.checkAccept(isa.getAddress().getHostAddress(), isa.getPort());
                    } catch (SecurityException se) {
                        // Ignore packet
                        bb.clear();
                        n = 0;
                        continue;
                    }
                    bb.flip();
                    dst.put(bb);
                    break;
                }
            }
            return sender;
        } finally {
            if (bb != null)
                Util.releaseTemporaryDirectBuffer(bb);
            readerThread = 0;
            end((n > 0) || (n == IOStatus.UNAVAILABLE));
            assert IOStatus.check(n);
        }
    }
}
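/*
 * Caller-side view of the method above: a minimal usage example of the public
 * DatagramChannel.receive API. The port number and buffer size are arbitrary choices
 * for illustration; receive() returns the sender's address, or null in non-blocking
 * mode when no datagram is available.
 */
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.DatagramChannel;

final class ReceiveExample {
    public static void main(String[] args) throws Exception {
        try (DatagramChannel ch = DatagramChannel.open()) {
            ch.bind(new InetSocketAddress(4445));
            ByteBuffer buf = ByteBuffer.allocate(1024);  // heap buffer: exercises the substitution path
            SocketAddress sender = ch.receive(buf);      // blocks until a datagram arrives
            buf.flip();
            System.out.println("received " + buf.remaining() + " bytes from " + sender);
        }
    }
}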
void releaseBuffers() {
    for (int i = 0; i < numBufs; i++) {
        if (!(bufs[i] instanceof DirectBuffer)) {
            Util.releaseTemporaryDirectBuffer(shadow[i]);
        }
    }
}
/**
 * Invoked prior to write to prepare the WSABUF array. Where necessary, it substitutes
 * non-direct buffers with direct buffers.
 */
void prepareBuffers() {
    shadow = new ByteBuffer[numBufs];
    long address = writeBufferArray;
    for (int i = 0; i < numBufs; i++) {
        ByteBuffer src = bufs[i];
        int pos = src.position();
        int lim = src.limit();
        assert (pos <= lim);
        int rem = (pos <= lim ? lim - pos : 0);
        long a;
        if (!(src instanceof DirectBuffer)) {
            // substitute with direct buffer
            ByteBuffer bb = Util.getTemporaryDirectBuffer(rem);
            bb.put(src);
            bb.flip();
            src.position(pos);  // leave heap buffer untouched for now
            shadow[i] = bb;
            a = ((DirectBuffer) bb).address();
        } else {
            shadow[i] = src;
            a = ((DirectBuffer) src).address() + pos;
        }
        unsafe.putAddress(address + OFFSETOF_BUF, a);
        unsafe.putInt(address + OFFSETOF_LEN, rem);
        address += SIZEOF_WSABUF;
    }
}
public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
    if ((offset < 0) || (length < 0) || (offset > dsts.length - length))
        throw new IndexOutOfBoundsException();
    try {
        return read(Util.subsequence(dsts, offset, length));
    } catch (AsynchronousCloseException x) {
        close();
        throw x;
    }
}
private int receive(FileDescriptor fd, ByteBuffer dst) throws IOException {
    int pos = dst.position();
    int lim = dst.limit();
    assert (pos <= lim);
    int rem = (pos <= lim ? lim - pos : 0);
    if (dst instanceof DirectBuffer && rem > 0)
        return receiveIntoNativeBuffer(fd, dst, rem, pos);

    // Substitute a native buffer. If the supplied buffer is empty
    // we must instead use a nonempty buffer, otherwise the call
    // will not block waiting for a datagram on some platforms.
    int newSize = Math.max(rem, 1);
    ByteBuffer bb = Util.getTemporaryDirectBuffer(newSize);
    try {
        int n = receiveIntoNativeBuffer(fd, bb, newSize, 0);
        bb.flip();
        if (n > 0 && rem > 0)
            dst.put(bb);
        return n;
    } finally {
        Util.releaseTemporaryDirectBuffer(bb);
    }
}
/**
 * Reads a block from disk.
 * @param b block to fetch
 */
private void readBlock(final int b) {
    if (!bm.cursor(b)) return;

    final Buffer bf = bm.current();
    try {
        // flush the previously buffered block before reusing the buffer
        if (bf.dirty) writeBlock(bf);
        bf.pos = b;
        if (b >= blocks) {
            blocks = b + 1;
        } else {
            file.seek(bf.pos * IO.BLOCKSIZE);
            file.readFully(bf.data);
        }
    } catch (final IOException ex) {
        Util.stack(ex);
    }
}
/**
 * Searches for the block containing the entry for the specified pre value.
 * Reads the block and returns the entry's offset inside the block.
 * @param pre pre value of the entry to search for
 * @return offset of the entry in the block
 */
private int cursor(final int pre) {
    int fp = fpre;
    int np = npre;
    if (pre < fp || pre >= np) {
        final int last = used - 1;
        int l = 0;
        int h = last;
        int m = page;
        // binary search over the first pre values of the used blocks
        while (l <= h) {
            if (pre < fp) h = m - 1;
            else if (pre >= np) l = m + 1;
            else break;
            m = (h + l) >>> 1;
            fp = fpre(m);
            np = m == last ? meta.size : fpre(m + 1);
        }
        if (l > h)
            throw Util.notExpected(
                "Data Access out of bounds:" +
                "\n- pre value: " + pre +
                "\n- #used blocks: " + used +
                "\n- #total blocks: " + blocks +
                "\n- access: " + m + " (" + l + " > " + h + ']');
        readPage(m);
    }
    return (pre - fpre) << IO.NODEPOWER;
}
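/*
 * The search above is a plain binary search over the first pre values of the used
 * blocks. A standalone sketch of the same idea, using a hypothetical fpres[] directory:
 * it returns the index of the block whose range [fpres[i], fpres[i+1]) contains the
 * requested pre value.
 */
final class BlockSearchSketch {
    static int blockOf(int pre, int[] fpres, int used, int size) {
        int l = 0, h = used - 1;
        while (l <= h) {
            int m = (l + h) >>> 1;
            int first = fpres[m];                           // first pre value in block m
            int next = m + 1 < used ? fpres[m + 1] : size;  // first pre value of the next block
            if (pre < first) h = m - 1;
            else if (pre >= next) l = m + 1;
            else return m;                                  // pre lies inside block m
        }
        throw new IllegalStateException("pre value out of bounds: " + pre);
    }
}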
/**
 * Invoked prior to read to prepare the WSABUF array. Where necessary, it substitutes
 * non-direct buffers with direct buffers.
 */
void prepareBuffers() {
    shadow = new ByteBuffer[numBufs];
    long address = readBufferArray;
    for (int i = 0; i < numBufs; i++) {
        ByteBuffer dst = bufs[i];
        int pos = dst.position();
        int lim = dst.limit();
        assert (pos <= lim);
        int rem = (pos <= lim ? lim - pos : 0);
        long a;
        if (!(dst instanceof DirectBuffer)) {
            // substitute with direct buffer
            ByteBuffer bb = Util.getTemporaryDirectBuffer(rem);
            shadow[i] = bb;
            a = ((DirectBuffer) bb).address();
        } else {
            shadow[i] = dst;
            a = ((DirectBuffer) dst).address() + pos;
        }
        unsafe.putAddress(address + OFFSETOF_BUF, a);
        unsafe.putInt(address + OFFSETOF_LEN, rem);
        address += SIZEOF_WSABUF;
    }
}
public long write(ByteBuffer[] srcs, int offset, int length) throws IOException {
    if ((offset < 0) || (length < 0) || (offset > srcs.length - length))
        throw new IndexOutOfBoundsException();
    // ## Fix IOUtil.write so that we can avoid this array copy
    return write0(Util.subsequence(srcs, offset, length));
}
static {
    Util.load();
    nd = new FileDispatcherImpl();
}
public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
    if ((offset < 0) || (length < 0) || (offset > dsts.length - length))
        throw new IndexOutOfBoundsException();
    return read(Util.subsequence(dsts, offset, length));
}
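/*
 * Usage sketch for the scattering read above: the public ScatteringByteChannel API lets
 * a caller spread one read across several buffers (here a small header buffer followed
 * by a larger body buffer; the file name and sizes are arbitrary example values).
 */
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class ScatterReadExample {
    public static void main(String[] args) throws Exception {
        try (FileChannel ch = FileChannel.open(Path.of("data.bin"), StandardOpenOption.READ)) {
            ByteBuffer header = ByteBuffer.allocate(16);
            ByteBuffer body = ByteBuffer.allocate(4096);
            long n = ch.read(new ByteBuffer[] { header, body }, 0, 2);  // fills header first, then body
            System.out.println("read " + n + " bytes");
        }
    }
}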
@Override
public void insert(final int pre, final byte[] entries) {
    final int nnew = entries.length;
    if (nnew == 0) return;
    dirty();

    // number of records to be inserted
    final int nr = nnew >>> IO.NODEPOWER;

    int split = 0;
    if (used == 0) {
        // special case: insert new data into first block if database is empty
        readPage(0);
        usedPages.set(0);
        ++used;
    } else if (pre > 0) {
        // find the offset within the block where the new records will be inserted
        split = cursor(pre - 1) + IO.NODESIZE;
    } else {
        // all insert operations will add data after the first node,
        // i.e., there is no "insert before first document" statement
        throw Util.notExpected("Insertion at beginning of populated table.");
    }

    // number of bytes occupied by old records in the current block
    final int nold = (npre - fpre) << IO.NODEPOWER;
    // number of bytes occupied by old records which will be moved to the end
    final int moved = nold - split;

    // special case: all entries fit into the current block
    Buffer bf = bm.current();
    if (nold + nnew <= IO.BLOCKSIZE) {
        Array.move(bf.data, split, nnew, moved);
        System.arraycopy(entries, 0, bf.data, split, nnew);
        bf.dirty = true;

        // increment first pre values of blocks after the last modified block
        for (int i = page + 1; i < used; ++i) fpres[i] += nr;
        // update cached variables (fpre is not changed)
        npre += nr;
        meta.size += nr;
        return;
    }

    // append old entries at the end of the new entries
    final byte[] all = new byte[nnew + moved];
    System.arraycopy(entries, 0, all, 0, nnew);
    System.arraycopy(bf.data, split, all, nnew, moved);

    // fill the current block with new entries:
    // number of bytes which fit into the first block
    int nrem = IO.BLOCKSIZE - split;
    if (nrem > 0) {
        System.arraycopy(all, 0, bf.data, split, nrem);
        bf.dirty = true;
    }

    // number of new required blocks and remaining bytes
    final int req = all.length - nrem;
    int needed = req / IO.BLOCKSIZE;
    final int remain = req % IO.BLOCKSIZE;

    if (remain > 0) {
        // check if the last entries can fit into the block after the current one
        if (page + 1 < used) {
            final int o = occSpace(page + 1) << IO.NODEPOWER;
            if (remain <= IO.BLOCKSIZE - o) {
                // copy the last records
                readPage(page + 1);
                bf = bm.current();
                System.arraycopy(bf.data, 0, bf.data, remain, o);
                System.arraycopy(all, all.length - remain, bf.data, 0, remain);
                bf.dirty = true;
                // reduce the pre value, since it will be incremented by nr later
                fpres[page] -= remain >>> IO.NODEPOWER;
                // go back to the previous block
                readPage(page - 1);
            } else {
                // there is not enough space in the block: allocate a new one
                ++needed;
            }
        } else {
            // this is the last block: allocate a new one
            ++needed;
        }
    }

    // number of expected blocks: existing blocks + needed blocks - empty blocks
    final int exp = blocks + needed - (blocks - used);
    if (exp > fpres.length) {
        // resize directory arrays if the existing ones are too small
        final int ns = Math.max(fpres.length << 1, exp);
        fpres = Arrays.copyOf(fpres, ns);
        pages = Arrays.copyOf(pages, ns);
    }

    // make room for the blocks where the new entries will be written
    Array.move(fpres, page + 1, needed, used - page - 1);
    Array.move(pages, page + 1, needed, used - page - 1);

    // write all remaining entries
    while (needed-- > 0) {
        freeBlock();
        nrem += write(all, nrem);
        fpres[page] = fpres[page - 1] + IO.ENTRIES;
        pages[page] = (int) bm.current().pos;
    }

    // increment all fpre values after the last modified block
    for (int i = page + 1; i < used; ++i) fpres[i] += nr;
    meta.size += nr;

    // update cached variables
    fpre = fpres[page];
    npre = page + 1 < used && fpres[page + 1] < meta.size ? fpres[page + 1] : meta.size;
}
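/*
 * Worked example of the block arithmetic above, with assumed constants (4096-byte
 * blocks and 16-byte records; treat the concrete numbers as illustrative only).
 * Inserting 8192 bytes at offset 1024 of a full block spills exactly two new blocks.
 */
final class InsertArithmeticSketch {
    public static void main(String[] args) {
        final int BLOCKSIZE = 4096, NODESIZE = 16;  // assumed block and record sizes
        int split = 1024;                           // insertion offset inside the current block
        int nold = 4096;                            // bytes already used in the current block (full)
        int nnew = 512 * NODESIZE;                  // 512 new records = 8192 bytes
        int moved = nold - split;                   // 3072 trailing bytes shifted behind the new data
        int nrem = BLOCKSIZE - split;               // 3072 bytes still fit into the current block
        int req = (nnew + moved) - nrem;            // 8192 bytes left to place
        int needed = req / BLOCKSIZE;               // 2 completely new blocks
        int remain = req % BLOCKSIZE;               // 0 bytes for a partially filled block
        System.out.println(needed + " new blocks, " + remain + " spill-over bytes");
    }
}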
static {
    Util.load();
    initIDs();
}