/**
 * Optimizes descendant-or-self steps and static types.
 *
 * @param ctx query context
 */
void optSteps(final QueryContext ctx) {
  boolean opt = false;
  Expr[] st = steps;
  for (int l = 1; l < st.length; ++l) {
    if (!(st[l - 1] instanceof Step && st[l] instanceof Step)) continue;

    final Step prev = (Step) st[l - 1];
    final Step curr = (Step) st[l];
    if (!prev.simple(DESCORSELF, false)) continue;

    if (curr.axis == CHILD && !curr.has(Flag.FCS)) {
      // descendant-or-self::node()/child::X -> descendant::X
      final int sl = st.length;
      final Expr[] tmp = new Expr[sl - 1];
      System.arraycopy(st, 0, tmp, 0, l - 1);
      System.arraycopy(st, l, tmp, l - 1, sl - l);
      st = tmp;
      curr.axis = DESC;
      opt = true;
    } else if (curr.axis == ATTR && !curr.has(Flag.FCS)) {
      // descendant-or-self::node()/@X -> descendant-or-self::*/@X
      prev.test = new NameTest(false);
      opt = true;
    }
  }
  if (opt) ctx.compInfo(OPTDESC);

  // set atomic type for single attribute steps to speed up predicate tests
  if (root == null && st.length == 1 && st[0] instanceof Step) {
    final Step curr = (Step) st[0];
    if (curr.axis == ATTR && curr.test.mode == Mode.STD) curr.type = SeqType.NOD_ZO;
  }
  steps = st;
}
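// A minimal standalone sketch of the removal idiom used above: the element at
// a given index is dropped by copying the slots before and after it into an
// array one slot shorter (plain String[] instead of Expr[]; all names below
// are hypothetical and only illustrate the two arraycopy calls).
import java.util.Arrays;

public final class RemoveStep {
  /** Returns a copy of {@code src} without the element at {@code index}. */
  static String[] remove(final String[] src, final int index) {
    final String[] tmp = new String[src.length - 1];
    // copy everything before the removed slot ...
    System.arraycopy(src, 0, tmp, 0, index);
    // ... and everything after it, shifted one position to the left
    System.arraycopy(src, index + 1, tmp, index, src.length - index - 1);
    return tmp;
  }

  public static void main(final String[] args) {
    final String[] steps = { "descendant-or-self::node()", "child::X" };
    // dropping the first step mirrors the rewrite //X -> descendant::X
    System.out.println(Arrays.toString(remove(steps, 0)));
  }
}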
/**
 * Fills the current buffer with bytes from the specified array, starting at the
 * specified offset.
 *
 * @param s source array
 * @param o offset from the beginning of the array
 * @return number of written bytes
 */
private int write(final byte[] s, final int o) {
  final Buffer bf = bm.current();
  final int len = Math.min(IO.BLOCKSIZE, s.length - o);
  System.arraycopy(s, o, bf.data, 0, len);
  bf.dirty = true;
  return len;
}
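// The method above copies at most one block per call, so callers advance the
// offset by the returned length until the source array is exhausted. A minimal
// standalone sketch of that chunking pattern (hypothetical BLOCK_SIZE constant
// and a plain byte[] in place of the buffer manager):
public final class ChunkedWrite {
  private static final int BLOCK_SIZE = 4096;

  /** Copies at most one block from {@code src} into {@code block}, starting at {@code off}. */
  static int writeChunk(final byte[] src, final int off, final byte[] block) {
    final int len = Math.min(BLOCK_SIZE, src.length - off);
    System.arraycopy(src, off, block, 0, len);
    return len;
  }

  public static void main(final String[] args) {
    final byte[] data = new byte[10000];
    final byte[] block = new byte[BLOCK_SIZE];
    int off = 0, chunks = 0;
    while (off < data.length) {
      off += writeChunk(data, off, block);
      ++chunks;
    }
    // 10000 bytes are written in chunks of 4096, 4096 and 1808 bytes
    System.out.println(chunks + " block writes, " + off + " bytes");
  }
}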
@Override
protected void copy(final byte[] entries, final int pre, final int last) {
  for (int o = 0, i = pre; i < last; ++i, o += IO.NODESIZE) {
    final int off = cursor(i);
    final Buffer bf = bm.current();
    System.arraycopy(entries, o, bf.data, off, IO.NODESIZE);
    bf.dirty = true;
  }
}
/**
 * Sets the output text.
 *
 * @param out cached output
 */
public void setText(final ArrayOutput out) {
  final byte[] buf = out.buffer();
  final int size = (int) out.size();
  final byte[] chop = token(DOTS);
  if (out.finished() && size >= chop.length) {
    System.arraycopy(chop, 0, buf, size - chop.length, chop.length);
  }
  text.setText(buf, size);
  header.setText((out.finished() ? CHOPPED : "") + RESULT);
  home.setEnabled(gui.context.data() != null);
}
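// If the output was chopped, the last bytes of the buffer are overwritten in
// place with the dots token, so the displayed text ends with an ellipsis. A
// minimal standalone sketch of that in-place tail overwrite (plain ASCII
// strings; the marker and text are made up for illustration):
import java.nio.charset.StandardCharsets;

public final class ChopMarker {
  public static void main(final String[] args) {
    final byte[] buf = "This result was cut off somewhere".getBytes(StandardCharsets.US_ASCII);
    final byte[] dots = "...".getBytes(StandardCharsets.US_ASCII);
    // overwrite the last bytes of the buffer with the marker
    if (buf.length >= dots.length) {
      System.arraycopy(dots, 0, buf, buf.length - dots.length, dots.length);
    }
    // prints: This result was cut off somewh...
    System.out.println(new String(buf, StandardCharsets.US_ASCII));
  }
}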
/**
 * Indexes the specified name and its kind.
 *
 * @param nm name id
 * @param knd node kind
 * @param value value
 * @param meta meta data
 * @return node reference
 */
PathNode index(final int nm, final byte knd, final byte[] value, final MetaData meta) {
  for (final PathNode c : children) {
    if (c.kind == knd && c.name == nm) {
      if (value != null) c.stats.add(value, meta);
      c.stats.count++;
      return c;
    }
  }
  final PathNode node = new PathNode(nm, knd, this);
  if (value != null) node.stats.add(value, meta);

  final int cs = children.length;
  final PathNode[] nodes = new PathNode[cs + 1];
  System.arraycopy(children, 0, nodes, 0, cs);
  nodes[cs] = node;
  children = nodes;
  return node;
}
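// The children array is kept exactly as large as needed, so appending a node
// allocates a new array one slot larger and copies the old contents over. A
// minimal standalone sketch of that copy-and-grow append (int[] instead of
// PathNode[]; names are illustrative only):
import java.util.Arrays;

public final class GrowByOne {
  /** Returns a copy of {@code src} with {@code value} appended. */
  static int[] append(final int[] src, final int value) {
    final int[] tmp = new int[src.length + 1];
    System.arraycopy(src, 0, tmp, 0, src.length);
    tmp[src.length] = value;
    return tmp;
  }

  public static void main(final String[] args) {
    int[] children = {};
    for (int i = 1; i <= 3; i++) children = append(children, i);
    System.out.println(Arrays.toString(children));  // [1, 2, 3]
  }
}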
/**
 * Adds values to the index.
 *
 * @param key key to be indexed
 * @param vals sorted values
 */
void add(final byte[] key, final int... vals) {
  // token index: add values. otherwise, reference existing values
  final int id = type == IndexType.TOKEN ? values.put(key) : values.id(key), vl = vals.length;
  // updatable index: if required, resize existing arrays
  while (idsList.size() < id + 1) idsList.add(null);
  if (lenList.size() < id + 1) lenList.set(id, 0);

  final int len = lenList.get(id), size = len + vl;
  int[] ids = idsList.get(id);
  if (ids == null) {
    ids = vals;
  } else {
    if (ids.length < size) ids = Arrays.copyOf(ids, Array.newSize(size));
    System.arraycopy(vals, 0, ids, len, vl);
    if (ids[len - 1] > vals[0]) {
      if (reorder == null) reorder = new BoolList(values.size());
      reorder.set(id, true);
    }
  }
  idsList.set(id, ids);
  lenList.set(id, size);
}
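// When an id list already exists, the new values are appended behind the len
// entries in use; if the last existing value exceeds the first new one, the
// combined list is no longer sorted and is flagged for a later reorder pass.
// A minimal standalone sketch of that append-and-check step (plain arrays and
// a boolean flag instead of the reorder list; the numbers are made up):
import java.util.Arrays;

public final class AppendSorted {
  public static void main(final String[] args) {
    int[] ids = { 3, 7, 12, 0, 0 };    // 3 entries in use, capacity 5
    int len = 3;
    final int[] vals = { 5, 9 };       // new, already sorted values

    // grow the array if the entries in use plus the new values do not fit
    final int size = len + vals.length;
    if (ids.length < size) ids = Arrays.copyOf(ids, size);

    // append the new values behind the entries in use
    System.arraycopy(vals, 0, ids, len, vals.length);

    // appending 5 after 12 breaks the global order, so a reorder is required
    final boolean reorder = ids[len - 1] > vals[0];
    len = size;

    System.out.println(Arrays.toString(ids) + ", reorder: " + reorder + ", length: " + len);
  }
}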
/**
 * Convenience method for copying blocks.
 *
 * @param s source array
 * @param sp source position
 * @param d destination array
 * @param dp destination position
 * @param l source length
 */
private void copy(final byte[] s, final int sp, final byte[] d, final int dp, final int l) {
  System.arraycopy(s, sp << IO.NODEPOWER, d, dp << IO.NODEPOWER, l << IO.NODEPOWER);
  bm.current().dirty = true;
}
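// The left shifts convert record positions and counts into byte offsets, which
// works because the record size is a power of two and the shift amount is its
// base-2 logarithm. A minimal standalone sketch, assuming a hypothetical
// 16-byte record size:
public final class RecordOffsets {
  /** log2 of the record size: 1 << 4 = 16 bytes per record (hypothetical value). */
  private static final int NODE_POWER = 4;

  public static void main(final String[] args) {
    final int record = 5;                          // sixth record in a block
    final int count = 3;                           // copy three records
    final int byteOffset = record << NODE_POWER;   // 5 * 16 = 80
    final int byteLength = count << NODE_POWER;    // 3 * 16 = 48
    System.out.println("offset " + byteOffset + ", length " + byteLength);
  }
}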
@Override
public void insert(final int pre, final byte[] entries) {
  final int nnew = entries.length;
  if (nnew == 0) return;
  dirty();

  // number of records to be inserted
  final int nr = nnew >>> IO.NODEPOWER;

  int split = 0;
  if (used == 0) {
    // special case: insert new data into first block if database is empty
    readPage(0);
    usedPages.set(0);
    ++used;
  } else if (pre > 0) {
    // find the offset within the block where the new records will be inserted
    split = cursor(pre - 1) + IO.NODESIZE;
  } else {
    // all insert operations will add data after first node.
    // i.e., there is no "insert before first document" statement
    throw Util.notExpected("Insertion at beginning of populated table.");
  }

  // number of bytes occupied by old records in the current block
  final int nold = npre - fpre << IO.NODEPOWER;
  // number of bytes occupied by old records which will be moved to the end
  final int moved = nold - split;

  // special case: all entries fit in the current block
  Buffer bf = bm.current();
  if (nold + nnew <= IO.BLOCKSIZE) {
    Array.move(bf.data, split, nnew, moved);
    System.arraycopy(entries, 0, bf.data, split, nnew);
    bf.dirty = true;

    // increment first pre-values of blocks after the last modified block
    for (int i = page + 1; i < used; ++i) fpres[i] += nr;

    // update cached variables (fpre is not changed)
    npre += nr;
    meta.size += nr;
    return;
  }

  // append old entries at the end of the new entries
  final byte[] all = new byte[nnew + moved];
  System.arraycopy(entries, 0, all, 0, nnew);
  System.arraycopy(bf.data, split, all, nnew, moved);

  // fill the current block with new entries
  // number of bytes which fit in the first block
  int nrem = IO.BLOCKSIZE - split;
  if (nrem > 0) {
    System.arraycopy(all, 0, bf.data, split, nrem);
    bf.dirty = true;
  }

  // number of new required blocks and remaining bytes
  final int req = all.length - nrem;
  int needed = req / IO.BLOCKSIZE;
  final int remain = req % IO.BLOCKSIZE;

  if (remain > 0) {
    // check if the last entries can fit in the block after the current one
    if (page + 1 < used) {
      final int o = occSpace(page + 1) << IO.NODEPOWER;
      if (remain <= IO.BLOCKSIZE - o) {
        // copy the last records
        readPage(page + 1);
        bf = bm.current();
        System.arraycopy(bf.data, 0, bf.data, remain, o);
        System.arraycopy(all, all.length - remain, bf.data, 0, remain);
        bf.dirty = true;
        // reduce the pre value, since it will later be incremented with nr
        fpres[page] -= remain >>> IO.NODEPOWER;
        // go back to the previous block
        readPage(page - 1);
      } else {
        // there is not enough space in the block: allocate a new one
        ++needed;
      }
    } else {
      // this is the last block: allocate a new one
      ++needed;
    }
  }

  // number of expected blocks: existing blocks + needed blocks - empty blocks
  final int exp = blocks + needed - (blocks - used);
  if (exp > fpres.length) {
    // resize directory arrays if the existing ones are too small
    final int ns = Math.max(fpres.length << 1, exp);
    fpres = Arrays.copyOf(fpres, ns);
    pages = Arrays.copyOf(pages, ns);
  }

  // make room for the blocks where the new entries will be written
  Array.move(fpres, page + 1, needed, used - page - 1);
  Array.move(pages, page + 1, needed, used - page - 1);

  // write all remaining entries
  while (needed-- > 0) {
    freeBlock();
    nrem += write(all, nrem);
    fpres[page] = fpres[page - 1] + IO.ENTRIES;
    pages[page] = (int) bm.current().pos;
  }

  // increment all fpre values after the last modified block
  for (int i = page + 1; i < used; ++i) fpres[i] += nr;
  meta.size += nr;

  // update cached variables
  fpre = fpres[page];
  npre = page + 1 < used && fpres[page + 1] < meta.size ? fpres[page + 1] : meta.size;
}
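// The number of extra blocks follows from integer division: the overflow bytes
// are split into full blocks plus a remainder, and the remainder either fits
// into the free space of the following block or forces one more allocation. A
// minimal standalone sketch of that arithmetic (hypothetical 4096-byte blocks;
// the helper and its parameters are illustrative only):
public final class BlockDemand {
  private static final int BLOCK_SIZE = 4096;

  /** Returns the number of new blocks needed for {@code req} overflow bytes. */
  static int blocksNeeded(final int req, final int freeInNextBlock) {
    int needed = req / BLOCK_SIZE;
    final int remain = req % BLOCK_SIZE;
    // the remainder either fits into the next block or needs a block of its own
    if (remain > 0 && remain > freeInNextBlock) ++needed;
    return needed;
  }

  public static void main(final String[] args) {
    System.out.println(blocksNeeded(10000, 2000));  // 2 full blocks, 1808 bytes fit next door -> 2
    System.out.println(blocksNeeded(10000, 1000));  // 1808 bytes do not fit -> 3
  }
}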