@Override
public void execute(final GUI gui) {
  final DialogExport dialog = new DialogExport(gui);
  if (!dialog.ok()) return;

  final IOFile root = new IOFile(dialog.path());

  // check if existing files will be overwritten
  if (root.exists()) {
    IO file = null;
    boolean overwrite = false;
    final Data d = gui.context.data();
    final IntList il = d.resources.docs();
    final int is = il.size();
    for (int i = 0; i < is; i++) {
      file = root.merge(Token.string(d.text(il.get(i), true)));
      if (file.exists()) {
        if (overwrite) {
          // more than one file will be overwritten; skip the remaining checks
          file = null;
          break;
        }
        overwrite = true;
      }
    }
    if (overwrite) {
      // show message for overwriting files or directories
      final String msg = file == null ? FILES_REPLACE_X : FILE_EXISTS_X;
      if (file == null) file = root;
      if (!BaseXDialog.confirm(gui, Util.info(msg, file))) return;
    }
  }
  DialogProgress.execute(gui, new Export(root.path()));
}
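/* Usage sketch (hypothetical helper, not part of the original class): the same export can be
 * run without the GUI via the Export command. The database name, target directory and Context
 * handling are assumptions; only Export(path) and Command.execute(Context) are taken from the
 * code above and the public BaseX command API. */
private static void exportSketch(final Context context) throws BaseXException {
  new Open("mydb").execute(context);                  // assumed: database to export
  new Export("/tmp/export-target").execute(context);  // writes all resources to the directory
}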
/**
 * Creates new node ids and recreates updatable index structures.
 *
 * @param data data
 * @throws IOException I/O Exception during index rebuild
 */
public static void ids(final Data data) throws IOException {
  final MetaData md = data.meta;
  final int size = md.size;
  for (int pre = 0; pre < size; ++pre) data.id(pre, pre);
  md.lastid = size - 1;
  md.dirty = true;

  if (data.meta.updindex) {
    data.idmap = new IdPreMap(md.lastid);
    if (data.meta.textindex) optimize(IndexType.TEXT, data, true, true, true, null);
    if (data.meta.attrindex) optimize(IndexType.ATTRIBUTE, data, true, true, true, null);
  }
}
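/* Usage sketch (hypothetical caller, not part of the original class): rebuilding node ids after
 * a bulk update. How the Data instance is obtained and released is an assumption, borrowed from
 * the Open/Close calls used elsewhere in this code; the ids(Data) call itself matches the method
 * above. */
private static void rebuildIds(final Context context) throws IOException {
  final Data data = Open.open("mydb", context);  // assumed: database name
  try {
    ids(data);                                   // assign new ids, rebuild updatable indexes
  } finally {
    Close.close(data, context);
  }
}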
/**
 * Lists resources of the specified database.
 *
 * @return success flag
 * @throws IOException I/O exception
 */
private boolean listDB() throws IOException {
  final String db = args[0];
  final String path = args[1] != null ? args[1] : "";
  if (!Databases.validName(db)) return error(NAME_INVALID_X, db);

  final Table table = new Table();
  table.description = RESOURCES;
  table.header.add(INPUT_PATH);
  table.header.add(TYPE);
  table.header.add(MimeTypes.CONTENT_TYPE);
  table.header.add(SIZE);

  try {
    // add xml documents
    final Data data = Open.open(db, context);
    final Resources res = data.resources;
    final IntList il = res.docs(path);
    final int ds = il.size();
    for (int i = 0; i < ds; i++) {
      final int pre = il.get(i);
      final TokenList tl = new TokenList(3);
      final byte[] file = data.text(pre, true);
      tl.add(file);
      tl.add(DataText.M_XML);
      tl.add(MimeTypes.APP_XML);
      tl.add(data.size(pre, Data.DOC));
      table.contents.add(tl);
    }
    // add binary resources
    for (final byte[] file : res.binaries(path)) {
      final String f = string(file);
      final TokenList tl = new TokenList(3);
      tl.add(file);
      tl.add(DataText.M_RAW);
      tl.add(MimeTypes.get(f));
      tl.add(data.meta.binary(f).length());
      table.contents.add(tl);
    }
    Close.close(data, context);
  } catch (final IOException ex) {
    return error(Util.message(ex));
  }
  out.println(table.sort().finish());
  return true;
}
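/* Usage sketch (hypothetical client code): the table built above is what the LIST command
 * returns when a database name is given. Running it through the command API might look like
 * this, assuming the List command accepts a database name and an optional path, as suggested by
 * args[0]/args[1] above; the concrete names are placeholders. */
private static String listResources(final Context context) throws BaseXException {
  return new org.basex.core.cmd.List("mydb", "path/to/docs").execute(context);  // formatted table
}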
@Override
public void execute(final GUI gui) {
  // skip operation for root context
  final Context ctx = gui.context;
  if (ctx.root()) return;

  // check if all nodes are document nodes
  boolean doc = true;
  final Data data = ctx.data();
  for (final int pre : ctx.current().pres) doc &= data.kind(pre) == Data.DOC;

  if (doc) {
    // if yes, jump to database root
    ctx.update();
    gui.notify.context(ctx.current(), false, null);
  } else {
    // otherwise, jump to parent nodes
    gui.execute(new Cs(".."));
  }
}
@Override
public boolean indexAccessible(final IndexInfo ii) {
  /* If the following conditions yield true, the index is accessed:
   * - all query terms are statically available
   * - no FTTimes option is specified
   * - explicitly set case, diacritics and stemming match options do not
   *   conflict with index options. */
  data = ii.ic.data;
  final MetaData md = data.meta;
  final FTOpt fto = ftt.opt;

  /* Index will be applied if no explicit match options have been set
   * that conflict with the index options. As a consequence, though, index-
   * based querying might yield other results than sequential scanning. */
  if (occ != null ||
      fto.cs != null && md.casesens == (fto.cs == FTCase.INSENSITIVE) ||
      fto.isSet(DC) && md.diacritics != fto.is(DC) ||
      fto.isSet(ST) && md.stemming != fto.is(ST) ||
      fto.ln != null && !fto.ln.equals(md.language)) return false;

  // adopt database options to tokenizer
  fto.copy(md);

  // estimate costs if text is not known at compile time
  if (tokens == null) {
    ii.costs = Math.max(2, data.meta.size / 30);
    return true;
  }

  // summarize number of hits; break loop if no hits are expected
  final FTLexer ft = new FTLexer(fto);
  ii.costs = 0;
  for (byte[] t : tokens) {
    ft.init(t);
    while (ft.hasNext()) {
      final byte[] tok = ft.nextToken();
      if (fto.sw != null && fto.sw.contains(tok)) continue;

      if (fto.is(WC)) {
        // don't use index if one of the terms starts with a wildcard
        t = ft.get();
        if (t[0] == '.') return false;
        // don't use index if certain characters or more than 1 dot are found
        int d = 0;
        for (final byte w : t) {
          if (w == '{' || w == '\\' || w == '.' && ++d > 1) return false;
        }
      }
      // favor full-text index requests over exact queries
      final int costs = data.costs(ft);
      if (costs != 0) ii.costs += Math.max(2, costs / 100);
    }
  }
  return true;
}
/**
 * Inserts a data instance at the specified pre value. Note that the specified data instance
 * must differ from this instance.
 *
 * @param ipre value at which to insert new data
 * @param ipar parent pre value of node
 * @param clip data clip
 */
public final void insert(final int ipre, final int ipar, final DataClip clip) {
  meta.update();

  // update value and document indexes
  if (meta.updindex) indexBegin();
  resources.insert(ipre, clip);

  final int dsize = clip.size();
  final int buf = Math.min(dsize, IO.BLOCKSIZE >> IO.NODEPOWER);
  // resize buffer to cache more entries
  buffer(buf);
  // find all namespaces in scope to avoid duplicate declarations
  final TokenMap nsScope = nspaces.scope(ipar, this);

  // loop through all entries
  final IntList preStack = new IntList();
  final NSNode nsRoot = nspaces.current();
  final HashSet<NSNode> newNodes = new HashSet<NSNode>();
  final IntList flagPres = new IntList();

  // indicates if database only contains a dummy node
  final Data data = clip.data;
  int c = 0;
  for (int dpre = clip.start; dpre < clip.end; ++dpre, ++c) {
    if (c != 0 && c % buf == 0) insert(ipre + c - buf);

    final int pre = ipre + c;
    final int dkind = data.kind(dpre);
    final int dpar = data.parent(dpre, dkind);
    // ipar < 0 if document nodes on top level are added
    final int dis = dpar >= 0 ? dpre - dpar : ipar >= 0 ? pre - ipar : 0;
    final int par = dis == 0 ? -1 : pre - dis;
    if (c == 0) nspaces.root(par, this);
    while (!preStack.isEmpty() && preStack.peek() > par) nspaces.close(preStack.pop());

    switch (dkind) {
      case DOC:
        // add document
        nspaces.prepare();
        final int s = data.size(dpre, dkind);
        doc(pre, s, data.text(dpre, true));
        meta.ndocs++;
        preStack.push(pre);
        break;
      case ELEM:
        // add element
        nspaces.prepare();
        boolean ne = false;
        if (data.nsFlag(dpre)) {
          final Atts at = data.ns(dpre);
          for (int a = 0; a < at.size(); ++a) {
            // see if prefix has been declared/is part of current ns scope
            final byte[] old = nsScope.get(at.name(a));
            if (old == null || !eq(old, at.value(a))) {
              // we have to keep track of all new NSNodes that are added
              // to the Namespace structure, as their pre values must not
              // be updated. I.e. if an NSNode N with pre value 3 existed
              // prior to inserting and two new nodes are inserted at
              // location pre == 3 we have to make sure N and only N gets
              // updated.
              newNodes.add(nspaces.add(at.name(a), at.value(a), pre));
              ne = true;
            }
          }
        }
        byte[] nm = data.name(dpre, dkind);
        elem(dis, tagindex.index(nm, null, false), data.attSize(dpre, dkind),
            data.size(dpre, dkind), nspaces.uri(nm, true), ne);
        preStack.push(pre);
        break;
      case TEXT:
      case COMM:
      case PI:
        // add text
        text(pre, dis, data.text(dpre, true), dkind);
        break;
      case ATTR:
        // add attribute
        nm = data.name(dpre, dkind);
        // check if prefix already in nsScope or not
        final byte[] attPref = prefix(nm);
        // check if prefix of attribute has already been declared, otherwise
        // add declaration to parent node
        if (data.nsFlag(dpre) && nsScope.get(attPref) == null) {
          nspaces.add(par, preStack.isEmpty() ? -1 : preStack.peek(), attPref,
              data.nspaces.uri(data.uri(dpre, dkind)), this);
          // save pre value to set ns flag later for this node. can't be done
          // here as direct table access would interfere with the buffer
          flagPres.add(par);
        }
        attr(pre, dis, atnindex.index(nm, null, false), data.text(dpre, false),
            nspaces.uri(nm, false), false);
        break;
    }
  }
  // finalize and update namespace structure
  while (!preStack.isEmpty()) nspaces.close(preStack.pop());
  nspaces.root(nsRoot);

  if (bp != 0) insert(ipre + c - 1 - (c - 1) % buf);
  // reset buffer to old size
  buffer(1);

  // set ns flags
  for (int f = 0; f < flagPres.size(); f++) {
    final int fl = flagPres.get(f);
    table.write2(fl, 1, name(fl) | 1 << 15);
  }

  // increase size of ancestors
  int p = ipar;
  while (p >= 0) {
    final int k = kind(p);
    size(p, k, size(p, k) + dsize);
    p = parent(p, k);
  }

  if (meta.updindex) {
    // add the entries to the ID -> PRE mapping:
    idmap.insert(ipre, id(ipre), dsize);
    indexEnd();
  }

  if (!cache) updateDist(ipre + dsize, dsize);

  // propagate PRE value shifts to namespaces
  if (ipar != -1) nspaces.insert(ipre, dsize, newNodes);
}
/**
 * Replaces parts of the database with the specified data instance.
 *
 * @param rpre pre value to be replaced
 * @param clip data clip
 */
public final void replace(final int rpre, final DataClip clip) {
  meta.update();

  final int dsize = clip.size();
  final Data data = clip.data;

  final int rkind = kind(rpre);
  final int rsize = size(rpre, rkind);
  final int rpar = parent(rpre, rkind);
  final int diff = dsize - rsize;
  buffer(dsize);
  resources.replace(rpre, rsize, clip);

  if (meta.updindex) {
    // update index
    indexDelete(rpre, rsize);
    indexBegin();
  }

  for (int dpre = clip.start; dpre < clip.end; ++dpre) {
    final int dkind = data.kind(dpre);
    final int dpar = data.parent(dpre, dkind);
    final int pre = rpre + dpre - clip.start;
    final int dis = dpar >= 0 ? dpre - dpar : pre - rpar;

    switch (dkind) {
      case DOC:
        // add document
        doc(pre, data.size(dpre, dkind), data.text(dpre, true));
        meta.ndocs++;
        break;
      case ELEM:
        // add element
        byte[] nm = data.name(dpre, dkind);
        elem(dis, tagindex.index(nm, null, false), data.attSize(dpre, dkind),
            data.size(dpre, dkind), nspaces.uri(nm, true), false);
        break;
      case TEXT:
      case COMM:
      case PI:
        // add text
        text(pre, dis, data.text(dpre, true), dkind);
        break;
      case ATTR:
        // add attribute
        nm = data.name(dpre, dkind);
        attr(pre, dis, atnindex.index(nm, null, false), data.text(dpre, false),
            nspaces.uri(nm, false), false);
        break;
    }
  }

  if (meta.updindex) {
    indexEnd();
    // update ID -> PRE map:
    idmap.delete(rpre, id(rpre), -rsize);
    idmap.insert(rpre, meta.lastid - dsize + 1, dsize);
  }

  // update table:
  table.replace(rpre, buffer(), rsize);
  buffer(1);

  // no distance/size update if the two subtrees are of equal size
  if (diff == 0) return;

  // increase/decrease size of ancestors, adjust distances of siblings
  int p = rpar;
  while (p >= 0) {
    final int k = kind(p);
    size(p, k, size(p, k) + diff);
    p = parent(p, k);
  }
  if (!cache) updateDist(rpre + dsize, diff);

  // adjust attribute size of parent if attributes inserted. attribute size
  // of parent cannot be reduced via a replace expression.
  int dpre = clip.start;
  if (data.kind(dpre) == ATTR) {
    int d = 0;
    while (dpre < clip.end && data.kind(dpre++) == ATTR) d++;
    if (d > 1) attSize(rpar, kind(rpar), d + 1);
  }
}
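/* Standalone illustration (not BaseX code): the size bookkeeping performed at the end of
 * replace(). Swapping a subtree of rsize nodes for one with dsize nodes changes the stored size
 * of every ancestor by diff = dsize - rsize; all values here are made up. */
private static void sizeBookkeepingExample() {
  final int rsize = 3, dsize = 5;
  final int diff = dsize - rsize;       // +2: the new subtree has two more nodes
  final int[] ancestorSizes = {10, 7};  // hypothetical sizes of the replaced node's ancestors
  for (int a = 0; a < ancestorSizes.length; a++) ancestorSizes[a] += diff;  // now 12 and 9
}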
/**
 * Optimizes the structures of a database.
 *
 * @param data data
 * @param enforceText enforce creation or deletion of text index
 * @param enforceAttr enforce creation or deletion of attribute index
 * @param enforceToken enforce creation or deletion of token index
 * @param enforceFt enforce creation or deletion of full-text index
 * @param cmd calling command instance (may be {@code null})
 * @throws IOException I/O Exception during index rebuild
 */
public static void optimize(final Data data, final boolean enforceText,
    final boolean enforceAttr, final boolean enforceToken, final boolean enforceFt,
    final Optimize cmd) throws IOException {

  // initialize structural indexes
  final MetaData md = data.meta;
  if (!md.uptodate) {
    data.paths.init();
    data.elemNames.init();
    data.attrNames.init();
    md.dirty = true;

    final IntList pars = new IntList(), elms = new IntList();
    int n = 0;
    for (int pre = 0; pre < md.size; ++pre) {
      final byte kind = (byte) data.kind(pre);
      final int par = data.parent(pre, kind);
      while (!pars.isEmpty() && pars.peek() > par) {
        pars.pop();
        elms.pop();
      }
      final int level = pars.size();
      if (kind == Data.DOC) {
        data.paths.put(0, Data.DOC, level);
        pars.push(pre);
        elms.push(0);
        ++n;
      } else if (kind == Data.ELEM) {
        final int id = data.nameId(pre);
        data.elemNames.index(data.elemNames.key(id), null, true);
        data.paths.put(id, Data.ELEM, level);
        pars.push(pre);
        elms.push(id);
      } else if (kind == Data.ATTR) {
        final int id = data.nameId(pre);
        final byte[] val = data.text(pre, false);
        data.attrNames.index(data.attrNames.key(id), val, true);
        data.paths.put(id, Data.ATTR, level, val, md);
      } else {
        final byte[] val = data.text(pre, true);
        if (kind == Data.TEXT && level > 1) data.elemNames.index(elms.peek(), val);
        data.paths.put(0, kind, level, val, md);
      }
      if (cmd != null) cmd.pre = pre;
    }
    md.ndocs = n;
    md.uptodate = true;
  }

  // rebuild value indexes
  optimize(IndexType.TEXT, data, md.createtext, md.textindex, enforceText, cmd);
  optimize(IndexType.ATTRIBUTE, data, md.createattr, md.attrindex, enforceAttr, cmd);
  optimize(IndexType.TOKEN, data, md.createtoken, md.tokenindex, enforceToken, cmd);
  optimize(IndexType.FULLTEXT, data, md.createft, md.ftindex, enforceFt, cmd);
}
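/* Usage sketch (hypothetical caller, not part of the original class): enforcing a rebuild (or
 * deletion, depending on the createtext/createattr flags) of the text and attribute indexes
 * while leaving the token and full-text indexes untouched. The database name and the Open/Close
 * handling are assumptions; the optimize(...) call matches the signature defined above, with
 * cmd == null, i.e. no progress reporting. */
private static void rebuildValueIndexes(final Context context) throws IOException {
  final Data data = Open.open("mydb", context);  // assumed: database name
  try {
    optimize(data, true, true, false, false, null);
  } finally {
    Close.close(data, context);
  }
}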