/**
 * Default constructor, called from {@link Open#open}.
 *
 * @param meta meta data
 * @throws IOException I/O Exception
 */
public DiskData(final MetaData meta) throws IOException {
  super(meta);

  try (final DataInput in = new DataInput(meta.dbfile(DATAINF))) {
    meta.read(in);
    while (true) {
      final String k = string(in.readToken());
      if (k.isEmpty()) break;
      if (k.equals(DBTAGS)) elemNames = new Names(in, meta);
      else if (k.equals(DBATTS)) attrNames = new Names(in, meta);
      else if (k.equals(DBPATH)) paths = new PathSummary(this, in);
      else if (k.equals(DBNS)) nspaces = new Namespaces(in);
      else if (k.equals(DBDOCS)) resources.read(in);
    }
  }

  // open data and indexes
  init();
  if (meta.updindex) {
    idmap = new IdPreMap(meta.dbfile(DATAIDP));
    if (meta.textindex) textIndex = new UpdatableDiskValues(this, true);
    if (meta.attrindex) attrIndex = new UpdatableDiskValues(this, false);
  } else {
    if (meta.textindex) textIndex = new DiskValues(this, true);
    if (meta.attrindex) attrIndex = new DiskValues(this, false);
  }
  if (meta.ftxtindex) ftxtIndex = new FTIndex(this);
}
/**
 * Creates new node ids and recreates updatable index structures.
 *
 * @param data data
 * @throws IOException I/O Exception during index rebuild
 */
public static void ids(final Data data) throws IOException {
  final MetaData md = data.meta;
  final int size = md.size;
  for (int pre = 0; pre < size; ++pre) data.id(pre, pre);
  md.lastid = size - 1;
  md.dirty = true;

  if (data.meta.updindex) {
    data.idmap = new IdPreMap(md.lastid);
    if (data.meta.textindex) optimize(IndexType.TEXT, data, true, true, true, null);
    if (data.meta.attrindex) optimize(IndexType.ATTRIBUTE, data, true, true, true, null);
  }
}
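// Illustration (not part of the original source): after ids(data) completes, every node's id
// equals its pre value and lastid is size - 1. A minimal sketch of that invariant, assuming
// only the one-argument id(pre) accessor and the meta fields used above:
static boolean idsAreIdentity(final Data data) {
  final int size = data.meta.size;
  for (int pre = 0; pre < size; ++pre) {
    if (data.id(pre) != pre) return false;   // each id must equal its pre value
  }
  return data.meta.lastid == size - 1;       // highest id is the last pre value
}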
/**
 * Updates (renames) an element, attribute or pi name.
 *
 * @param pre pre value
 * @param kind node kind
 * @param name new element, attribute or pi name
 * @param uri uri
 */
public final void update(final int pre, final int kind, final byte[] name, final byte[] uri) {
  meta.update();
  if (kind == PI) {
    updateText(pre, trim(concat(name, SPACE, atom(pre))), kind);
  } else {
    // update/set namespace reference
    final int ouri = nspaces.uri(name, pre, this);
    final boolean ne = ouri == 0 && uri.length != 0;
    final int npre = kind == ATTR ? parent(pre, kind) : pre;
    final int nuri = ne ? nspaces.add(npre, npre, prefix(name), uri, this) :
      ouri != 0 && eq(nspaces.uri(ouri), uri) ? ouri : 0;

    // write namespace uri reference
    table.write1(pre, kind == ELEM ? 3 : 11, nuri);
    // write name reference
    table.write2(pre, 1, (nsFlag(pre) ? 1 << 15 : 0) |
      (kind == ELEM ? tagindex : atnindex).index(name, null, false));
    // write namespace flag
    table.write2(npre, 1, (ne || nsFlag(npre) ? 1 << 15 : 0) | name(npre));
  }
}
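// Illustration (not part of the original source): the write2 calls above pack the namespace
// flag and the name reference into a single 16-bit table field, bit 15 holding the flag and
// the lower 15 bits the index into the name dictionary. A standalone sketch of that layout:
final class NameField {
  static int pack(final boolean nsFlag, final int nameIndex) {
    return (nsFlag ? 1 << 15 : 0) | nameIndex;   // bit 15 = ns flag, bits 0-14 = name index
  }
  static boolean nsFlag(final int packed) { return (packed & 1 << 15) != 0; }
  static int nameIndex(final int packed)  { return packed & 0x7FFF; }
}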
/**
 * Updates (replaces) the value of a single document, text, comment, pi or attribute node.
 *
 * @param pre pre value of the node to be updated
 * @param kind node kind
 * @param value new value (tag name, text, comment, pi)
 */
public final void update(final int pre, final int kind, final byte[] value) {
  final byte[] v = kind == PI ? trim(concat(name(pre, kind), SPACE, value)) : value;
  if (eq(v, text(pre, kind != ATTR))) return;

  meta.update();
  updateText(pre, v, kind);
  if (kind == DOC) resources.rename(pre, value);
}
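// Illustration (not part of the original source): for processing instructions the stored text
// is the target and the new value joined by a single space and trimmed, mirroring
// trim(concat(name(pre, kind), SPACE, value)) above. A plain-Java sketch without the BaseX
// Token helpers:
static byte[] piText(final byte[] target, final byte[] value) {
  final String t = new String(target, java.nio.charset.StandardCharsets.UTF_8);
  final String v = new String(value, java.nio.charset.StandardCharsets.UTF_8);
  return (t + ' ' + v).trim().getBytes(java.nio.charset.StandardCharsets.UTF_8);
}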
/**
 * Lists all databases.
 *
 * @return success flag
 * @throws IOException I/O exception
 */
private boolean list() throws IOException {
  final Table table = new Table();
  table.description = DATABASES_X;

  final boolean create = context.user.has(Perm.CREATE);
  table.header.add(T_NAME);
  table.header.add(RESOURCES);
  table.header.add(SIZE);
  if (create) table.header.add(INPUT_PATH);

  for (final String name : context.databases.listDBs()) {
    String file = null;
    long size = 0;
    int docs = 0;
    final MetaData meta = new MetaData(name, context);
    try {
      meta.read();
      size = meta.dbsize();
      docs = meta.ndocs;
      if (context.perm(Perm.READ, meta)) file = meta.original;
    } catch (final IOException ex) {
      file = ERROR;
    }

    // count number of raw files
    final IOFile dir = new IOFile(mprop.dbpath(name), M_RAW);
    final int bin = dir.descendants().size();

    // create entry
    if (file != null) {
      final TokenList tl = new TokenList(4);
      tl.add(name);
      tl.add(docs + bin);
      tl.add(size);
      if (create) tl.add(file);
      table.contents.add(tl);
    }
  }
  out.println(table.sort().finish());
  return true;
}
/**
 * Checks if the table of the specified database is locked.
 *
 * @param db name of database
 * @param ctx database context
 * @return result of check
 */
public static boolean locked(final String db, final Context ctx) {
  final IOFile table = MetaData.file(ctx.globalopts.dbpath(db), DATATBL);
  if (!table.exists()) return false;

  try (final RandomAccessFile file = new RandomAccessFile(table.file(), "rw")) {
    return file.getChannel().tryLock() == null;
  } catch (final ClosedChannelException ex) {
    return false;
  } catch (final OverlappingFileLockException | IOException ex) {
    return true;
  }
}
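// Illustration (not part of the original source): the check relies on standard java.nio file
// locking, where FileChannel.tryLock() returns null if another process already holds a lock
// on the file. A self-contained sketch of that behavior for an arbitrary (hypothetical) path:
static boolean fileLockedByOtherProcess(final String path) {
  try (final java.io.RandomAccessFile raf = new java.io.RandomAccessFile(path, "rw")) {
    final java.nio.channels.FileLock lock = raf.getChannel().tryLock();
    if (lock != null) lock.release();   // lock acquired, so nobody else held it
    return lock == null;                // null means the file is locked elsewhere
  } catch (final java.io.IOException ex) {
    return true;                        // conservatively treat I/O problems as "locked"
  }
}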
/**
 * Deletes a node and its descendants.
 *
 * @param pre pre value of the node to delete
 */
public final void delete(final int pre) {
  meta.update();

  // size of the subtree to delete
  int k = kind(pre);
  final int s = size(pre, k);
  resources.delete(pre, s);

  if (meta.updindex) {
    // delete child records from indexes
    indexDelete(pre, s);
  }

  // explicitly delete text or attribute value
  if (k != DOC && k != ELEM) delete(pre, k != ATTR);

  // check if we are an attribute (different size counters)
  int par = pre;
  if (k == ATTR) {
    par = parent(par, ATTR);
    attSize(par, ELEM, attSize(par, ELEM) - 1);
    size(par, ELEM, size(par, ELEM) - 1);
    k = kind(par);
  }

  // reduce size of ancestors
  while (par > 0 && k != DOC) {
    par = parent(par, k);
    k = kind(par);
    size(par, k, size(par, k) - s);
  }

  // adjust document count if a document node is deleted
  if (kind(pre) == DOC) --meta.ndocs;

  if (meta.updindex) {
    // delete node and descendants from ID -> PRE map
    idmap.delete(pre, id(pre), -s);
  }

  // delete node from table structure and reduce document size
  table.delete(pre, s);
  if (!cache) updateDist(pre, -s);

  // propagate PRE value shifts to namespaces
  nspaces.delete(pre, s, this);
}
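// Illustration (not part of the original source): a toy model of the ancestor bookkeeping
// above. Deleting a subtree of size s shrinks the stored size of every ancestor by s
// (hypothetical parent/size arrays, not the real table encoding):
static void shrinkAncestors(final int[] parent, final int[] size, final int pre) {
  final int s = size[pre];                              // size of the deleted subtree
  for (int p = parent[pre]; p >= 0; p = parent[p]) {
    size[p] -= s;                                       // every ancestor loses s descendants
  }
}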
@Override
protected boolean run() {
  final Data data = context.data();
  final MetaData meta = data.meta;
  size = meta.size;

  if (!startUpdate()) return false;
  boolean ok = true;
  try {
    // reassign autooptimize flag
    final boolean autoopt = options.get(MainOptions.AUTOOPTIMIZE);
    if (autoopt != data.meta.autoopt) {
      data.meta.autoopt = autoopt;
      data.meta.dirty = true;
    }
    optimize(data, this);
    ok = info(DB_OPTIMIZED_X, meta.name, perf);
  } catch (final IOException ex) {
    ok = error(Util.message(ex));
  } finally {
    ok &= finishUpdate();
  }
  return ok;
}
@Override
protected boolean run() {
  final boolean create = context.user.has(Perm.CREATE);
  String path = MetaData.normPath(args[0]);
  if (path == null || path.endsWith(".")) return error(NAME_INVALID_X, args[0]);

  if (in == null) {
    final IO io = IO.get(args[1]);
    if (!io.exists() || io.isDir()) return error(RES_NOT_FOUND_X, create ? io : args[1]);
    in = io.inputSource();
    // set/add name of document
    if ((path.isEmpty() || path.endsWith("/")) && !(io instanceof IOContent)) path += io.name();
  }

  // ensure that the final name is not empty
  if (path.isEmpty()) return error(NAME_INVALID_X, path);

  final Data data = context.data();
  if (data.inMemory()) return error(NO_MAINMEM);

  // ensure that the name contains no trailing dots and does not address a directory
  final IOFile file = data.meta.binary(path);
  if (path.isEmpty() || path.endsWith(".") || file == null || file.isDir())
    return error(NAME_INVALID_X, create ? path : args[0]);

  // start update
  if (!data.startUpdate()) return error(DB_PINNED_X, data.meta.name);
  try {
    store(in, file);
    return info(QUERY_EXECUTED_X_X, "", perf);
  } catch (final IOException ex) {
    return error(FILE_NOT_STORED_X, Util.message(ex));
  } finally {
    data.finishUpdate();
  }
}
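// Illustration (not part of the original source): the target-path rules applied above,
// condensed into a hypothetical helper. Empty paths and paths with trailing dots are
// rejected; if the path is empty or ends with a slash, the source file name is appended:
static String targetPath(final String path, final String sourceName) {
  String p = path;
  if (p.isEmpty() || p.endsWith("/")) p += sourceName;  // derive the name from the source
  if (p.isEmpty() || p.endsWith(".")) return null;      // invalid target name
  return p;
}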
/**
 * Inserts a data instance at the specified pre value. Note that the specified data instance
 * must differ from this instance.
 *
 * @param ipre value at which to insert new data
 * @param ipar parent pre value of node
 * @param clip data clip
 */
public final void insert(final int ipre, final int ipar, final DataClip clip) {
  meta.update();

  // update value and document indexes
  if (meta.updindex) indexBegin();
  resources.insert(ipre, clip);

  final int dsize = clip.size();
  final int buf = Math.min(dsize, IO.BLOCKSIZE >> IO.NODEPOWER);
  // resize buffer to cache more entries
  buffer(buf);

  // find all namespaces in scope to avoid duplicate declarations
  final TokenMap nsScope = nspaces.scope(ipar, this);

  // loop through all entries
  final IntList preStack = new IntList();
  final NSNode nsRoot = nspaces.current();
  final HashSet<NSNode> newNodes = new HashSet<NSNode>();
  final IntList flagPres = new IntList();

  final Data data = clip.data;
  int c = 0;
  for (int dpre = clip.start; dpre < clip.end; ++dpre, ++c) {
    if (c != 0 && c % buf == 0) insert(ipre + c - buf);

    final int pre = ipre + c;
    final int dkind = data.kind(dpre);
    final int dpar = data.parent(dpre, dkind);
    // ipar < 0 if document nodes on top level are added
    final int dis = dpar >= 0 ? dpre - dpar : ipar >= 0 ? pre - ipar : 0;
    final int par = dis == 0 ? -1 : pre - dis;
    if (c == 0) nspaces.root(par, this);

    while (!preStack.isEmpty() && preStack.peek() > par) nspaces.close(preStack.pop());

    switch (dkind) {
      case DOC:
        // add document
        nspaces.prepare();
        final int s = data.size(dpre, dkind);
        doc(pre, s, data.text(dpre, true));
        meta.ndocs++;
        preStack.push(pre);
        break;
      case ELEM:
        // add element
        nspaces.prepare();
        boolean ne = false;
        if (data.nsFlag(dpre)) {
          final Atts at = data.ns(dpre);
          for (int a = 0; a < at.size(); ++a) {
            // see if prefix has been declared / is part of current ns scope
            final byte[] old = nsScope.get(at.name(a));
            if (old == null || !eq(old, at.value(a))) {
              // we have to keep track of all new NSNodes that are added
              // to the Namespace structure, as their pre values must not
              // be updated. I.e. if an NSNode N with pre value 3 existed
              // prior to inserting and two new nodes are inserted at
              // location pre == 3 we have to make sure N and only N gets
              // updated.
              newNodes.add(nspaces.add(at.name(a), at.value(a), pre));
              ne = true;
            }
          }
        }
        byte[] nm = data.name(dpre, dkind);
        elem(dis, tagindex.index(nm, null, false), data.attSize(dpre, dkind),
            data.size(dpre, dkind), nspaces.uri(nm, true), ne);
        preStack.push(pre);
        break;
      case TEXT:
      case COMM:
      case PI:
        // add text
        text(pre, dis, data.text(dpre, true), dkind);
        break;
      case ATTR:
        // add attribute
        nm = data.name(dpre, dkind);
        final byte[] attPref = prefix(nm);
        // check if the attribute prefix has already been declared; otherwise
        // add the declaration to the parent node
        if (data.nsFlag(dpre) && nsScope.get(attPref) == null) {
          nspaces.add(par, preStack.isEmpty() ? -1 : preStack.peek(), attPref,
              data.nspaces.uri(data.uri(dpre, dkind)), this);
          // save pre value to set the ns flag later for this node; can't be done
          // here as direct table access would interfere with the buffer
          flagPres.add(par);
        }
        attr(pre, dis, atnindex.index(nm, null, false), data.text(dpre, false),
            nspaces.uri(nm, false), false);
        break;
    }
  }

  // finalize and update namespace structure
  while (!preStack.isEmpty()) nspaces.close(preStack.pop());
  nspaces.root(nsRoot);

  if (bp != 0) insert(ipre + c - 1 - (c - 1) % buf);
  // reset buffer to old size
  buffer(1);

  // set ns flags
  for (int f = 0; f < flagPres.size(); f++) {
    final int fl = flagPres.get(f);
    table.write2(fl, 1, name(fl) | 1 << 15);
  }

  // increase size of ancestors
  int p = ipar;
  while (p >= 0) {
    final int k = kind(p);
    size(p, k, size(p, k) + dsize);
    p = parent(p, k);
  }

  if (meta.updindex) {
    // add the entries to the ID -> PRE mapping
    idmap.insert(ipre, id(ipre), dsize);
    indexEnd();
  }

  if (!cache) updateDist(ipre + dsize, dsize);

  // propagate PRE value shifts to namespaces
  if (ipar != -1) nspaces.insert(ipre, dsize, newNodes);
}
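// Illustration (not part of the original source): the buffered writes above flush a full block
// before every buf-th row (insert(ipre + c - buf)) and flush the trailing, possibly partial,
// block after the loop (insert(ipre + c - 1 - (c - 1) % buf)). A small sketch that reproduces
// those flush offsets for a clip of dsize rows, using only the arithmetic from the loop:
static java.util.List<Integer> flushOffsets(final int ipre, final int dsize, final int buf) {
  final java.util.List<Integer> offsets = new java.util.ArrayList<>();
  for (int c = buf; c < dsize; c += buf) offsets.add(ipre + c - buf);  // full blocks
  if (dsize > 0) offsets.add(ipre + dsize - 1 - (dsize - 1) % buf);    // remaining rows
  return offsets;
}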
/**
 * Replaces parts of the database with the specified data instance.
 *
 * @param rpre pre value to be replaced
 * @param clip data clip
 */
public final void replace(final int rpre, final DataClip clip) {
  meta.update();

  final int dsize = clip.size();
  final Data data = clip.data;

  final int rkind = kind(rpre);
  final int rsize = size(rpre, rkind);
  final int rpar = parent(rpre, rkind);
  final int diff = dsize - rsize;
  buffer(dsize);
  resources.replace(rpre, rsize, clip);

  if (meta.updindex) {
    // update index
    indexDelete(rpre, rsize);
    indexBegin();
  }

  for (int dpre = clip.start; dpre < clip.end; ++dpre) {
    final int dkind = data.kind(dpre);
    final int dpar = data.parent(dpre, dkind);
    final int pre = rpre + dpre - clip.start;
    final int dis = dpar >= 0 ? dpre - dpar : pre - rpar;

    switch (dkind) {
      case DOC:
        // add document
        doc(pre, data.size(dpre, dkind), data.text(dpre, true));
        meta.ndocs++;
        break;
      case ELEM:
        // add element
        byte[] nm = data.name(dpre, dkind);
        elem(dis, tagindex.index(nm, null, false), data.attSize(dpre, dkind),
            data.size(dpre, dkind), nspaces.uri(nm, true), false);
        break;
      case TEXT:
      case COMM:
      case PI:
        // add text
        text(pre, dis, data.text(dpre, true), dkind);
        break;
      case ATTR:
        // add attribute
        nm = data.name(dpre, dkind);
        attr(pre, dis, atnindex.index(nm, null, false), data.text(dpre, false),
            nspaces.uri(nm, false), false);
        break;
    }
  }

  if (meta.updindex) {
    indexEnd();
    // update ID -> PRE map
    idmap.delete(rpre, id(rpre), -rsize);
    idmap.insert(rpre, meta.lastid - dsize + 1, dsize);
  }

  // update table
  table.replace(rpre, buffer(), rsize);
  buffer(1);

  // no distance/size update if the two subtrees are of equal size
  if (diff == 0) return;

  // increase/decrease size of ancestors, adjust distances of siblings
  int p = rpar;
  while (p >= 0) {
    final int k = kind(p);
    size(p, k, size(p, k) + diff);
    p = parent(p, k);
  }
  if (!cache) updateDist(rpre + dsize, diff);

  // adjust attribute size of parent if attributes inserted. attribute size
  // of parent cannot be reduced via a replace expression.
  int dpre = clip.start;
  if (data.kind(dpre) == ATTR) {
    int d = 0;
    while (dpre < clip.end && data.kind(dpre++) == ATTR) d++;
    if (d > 1) attSize(rpar, kind(rpar), d + 1);
  }
}
/**
 * Optimizes the structures of a database.
 *
 * @param data data
 * @param enforceText enforce creation or deletion of text index
 * @param enforceAttr enforce creation or deletion of attribute index
 * @param enforceToken enforce creation or deletion of token index
 * @param enforceFt enforce creation or deletion of full-text index
 * @param cmd calling command instance (may be {@code null})
 * @throws IOException I/O Exception during index rebuild
 */
public static void optimize(final Data data, final boolean enforceText, final boolean enforceAttr,
    final boolean enforceToken, final boolean enforceFt, final Optimize cmd) throws IOException {

  // initialize structural indexes
  final MetaData md = data.meta;
  if (!md.uptodate) {
    data.paths.init();
    data.elemNames.init();
    data.attrNames.init();
    md.dirty = true;

    final IntList pars = new IntList(), elms = new IntList();
    int n = 0;
    for (int pre = 0; pre < md.size; ++pre) {
      final byte kind = (byte) data.kind(pre);
      final int par = data.parent(pre, kind);
      while (!pars.isEmpty() && pars.peek() > par) {
        pars.pop();
        elms.pop();
      }
      final int level = pars.size();
      if (kind == Data.DOC) {
        data.paths.put(0, Data.DOC, level);
        pars.push(pre);
        elms.push(0);
        ++n;
      } else if (kind == Data.ELEM) {
        final int id = data.nameId(pre);
        data.elemNames.index(data.elemNames.key(id), null, true);
        data.paths.put(id, Data.ELEM, level);
        pars.push(pre);
        elms.push(id);
      } else if (kind == Data.ATTR) {
        final int id = data.nameId(pre);
        final byte[] val = data.text(pre, false);
        data.attrNames.index(data.attrNames.key(id), val, true);
        data.paths.put(id, Data.ATTR, level, val, md);
      } else {
        final byte[] val = data.text(pre, true);
        if (kind == Data.TEXT && level > 1) data.elemNames.index(elms.peek(), val);
        data.paths.put(0, kind, level, val, md);
      }
      if (cmd != null) cmd.pre = pre;
    }
    md.ndocs = n;
    md.uptodate = true;
  }

  // rebuild value indexes
  optimize(IndexType.TEXT, data, md.createtext, md.textindex, enforceText, cmd);
  optimize(IndexType.ATTRIBUTE, data, md.createattr, md.attrindex, enforceAttr, cmd);
  optimize(IndexType.TOKEN, data, md.createtoken, md.tokenindex, enforceToken, cmd);
  optimize(IndexType.FULLTEXT, data, md.createft, md.ftindex, enforceFt, cmd);
}
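// Illustration (not part of the original source): a possible call to the method above, assumed
// to be made from the same class. Passing true for the first two flags forces a rebuild of the
// text and attribute indexes, the token and full-text indexes follow their stored flags, and
// null skips progress reporting:
static void rebuildValueIndexes(final Data data) throws IOException {
  optimize(data, true, true, false, false, null);
}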