Example #1
    @Override
    public void execute(final GUI gui) {
      final DialogExport dialog = new DialogExport(gui);
      if (!dialog.ok()) return;

      final IOFile root = new IOFile(dialog.path());

      // check if existing files will be overwritten
      if (root.exists()) {
        IO file = null;
        boolean overwrite = false;
        final Data d = gui.context.data();
        final IntList il = d.resources.docs();
        final int is = il.size();
        for (int i = 0; i < is; i++) {
          file = root.merge(Token.string(d.text(il.get(i), true)));
          if (file.exists()) {
            if (overwrite) {
              // more than one file will be overwritten; skip the remaining checks
              file = null;
              break;
            }
            overwrite = true;
          }
        }
        if (overwrite) {
          // show message for overwriting files or directories
          final String msg = file == null ? FILES_REPLACE_X : FILE_EXISTS_X;
          if (file == null) file = root;
          if (!BaseXDialog.confirm(gui, Util.info(msg, file))) return;
        }
      }
      DialogProgress.execute(gui, new Export(root.path()));
    }
Example #2
  /**
   * Initializes the input files, specified by the context nodes.
   *
   * @param nod input file nodes
   * @param var variable nodes
   * @param qp query processor
   * @param first first call flag
   * @return string with input files
   * @throws Exception exception
   */
  private byte[] file(
      final Nodes nod, final Nodes var, final QueryProcessor qp, final boolean first)
      throws Exception {

    final TokenBuilder tb = new TokenBuilder();
    for (int c = 0; c < nod.size(); ++c) {
      final byte[] nm = data.atom(nod.list[c]);
      String src = srcs.get(string(nm));
      if (tb.size() != 0) tb.add(", ");
      tb.add(nm);

      Expr expr = null;
      if (src == null) {
        // assign collection
        expr = coll(nm, qp);
      } else {
        // assign document
        final String dbname = new IOFile(src).dbname();
        Function def = Function.DOC;
        // updates: drop updated document or open updated database
        if (updating()) {
          if (first) {
            new DropDB(dbname).execute(context);
          } else {
            def = Function.OPEN;
            src = dbname;
          }
        }
        expr = def.get(null, Str.get(src));
      }
      if (var != null) qp.bind(string(data.atom(var.list[c])), expr);
    }
    return tb.finish();
  }
Example #3
 /**
  * Calculates the new distance value for the given node.
  *
  * @param preAfter the current PRE value of the node (after structural updates have been applied)
  * @return new distance for the given node
  */
 private int calculateNewDistance(final int preAfter) {
   final int kind = data.kind(preAfter);
   final int distanceBefore = data.dist(preAfter, kind);
   final int preBefore = calculatePreValue(preAfter, true);
   final int parentBefore = preBefore - distanceBefore;
   final int parentAfter = calculatePreValue(parentBefore, false);
   return preAfter - parentAfter;
 }
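The arithmetic above relies on the invariant that a node's parent PRE equals its own PRE minus its stored distance. Below is a minimal standalone sketch of how a distance is recomputed after PRE values have shifted; it uses plain local variables instead of the BaseX Data class, and all names and numbers are made up for illustration.

// Standalone illustration only; the values are hypothetical.
public final class DistanceSketch {
  public static void main(final String[] args) {
    // node at PRE 5 with distance 2 -> its parent sits at PRE 3
    final int preBefore = 5, distBefore = 2;
    final int parentBefore = preBefore - distBefore;   // = 3

    // assume an insert shifted the node by +4 and its parent by +1
    final int preAfter = preBefore + 4;                // = 9
    final int parentAfter = parentBefore + 1;          // = 4

    // the new distance is simply the difference of the shifted PRE values
    final int distAfter = preAfter - parentAfter;      // = 5
    System.out.println("new distance: " + distAfter);
  }
}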
Example #4
  /**
   * Performs the open-id and open-pre function.
   *
   * @param ctx query context
   * @param id id flag
   * @return result
   * @throws QueryException query exception
   */
  private DBNode open(final QueryContext ctx, final boolean id) throws QueryException {

    final Data data = data(0, ctx);
    final int v = (int) checkItr(expr[1], ctx);
    final int pre = id ? data.pre(v) : v;
    if (pre < 0 || pre >= data.meta.size) IDINVALID.thrw(input, this, v);
    return new DBNode(data, pre);
  }
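The id flag separates stable node IDs from PRE values, which shift whenever nodes are inserted or deleted before them. A small standalone sketch of the lookup performed above, with a HashMap standing in for the database's ID-to-PRE mapping; all names and values are hypothetical, not the BaseX API.

import java.util.HashMap;
import java.util.Map;

// Standalone illustration only: IDs stay stable, PRE values shift after updates.
public final class IdPreSketch {
  public static void main(final String[] args) {
    final Map<Integer, Integer> idToPre = new HashMap<>();
    idToPre.put(7, 3);            // the node with ID 7 currently sits at PRE 3

    final boolean id = true;      // corresponds to the id flag above
    final int v = 7;
    final int pre = id ? idToPre.getOrDefault(v, -1) : v;
    final int size = 10;          // stand-in for data.meta.size
    if (pre < 0 || pre >= size) throw new IllegalArgumentException("invalid id/pre: " + v);
    System.out.println("resolved PRE: " + pre);
  }
}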
Example #5
 @Override
 public Item item(final QueryContext qc, final InputInfo ii) throws QueryException {
   final Data data = checkData(qc);
   final String path = path(1, qc);
   if (data.inMemory()) return Bln.FALSE;
   final IOFile io = data.meta.binary(path);
   return Bln.get(io.exists() && !io.isDir());
 }
Example #6
 /**
  * Executes the updates. Resolving text node adjacency can be skipped if adjacent text nodes are
  * not to be expected.
  *
  * @param mergeTexts if {@code true}, adjacent text nodes are expected and will be merged
  */
 public void execute(final boolean mergeTexts) {
   check();
   optimize();
   applyValueUpdates();
   if (cacheDistanceUpdates) data.cache = true;
   applyStructuralUpdates();
   updateDistances();
   if (mergeTexts) resolveTextAdjacency();
   data.cache = false;
 }
Example #7
 /**
  * Returns the value for the specified pre value.
  *
  * @param pre pre value
  * @return item value
  */
 byte[] getValue(final int pre) {
   final Data data = plotData.context.data;
   final int limit = pre + data.size(pre, Data.ELEM);
   for (int p = pre; p < limit; ++p) {
     final int kind = data.kind(p);
     if ((kind == Data.ELEM && tag || kind == Data.ATTR && !tag) && attrID == data.name(p))
       return data.atom(p);
   }
   return EMPTY;
 }
Example #8
  /**
   * Assigns the nodes to the specified variables.
   *
   * @param nod nodes
   * @param var variables
   * @param qp query processor
   * @throws QueryException query exception
   */
  private void var(final Nodes nod, final Nodes var, final QueryProcessor qp)
      throws QueryException {

    for (int c = 0; c < nod.size(); ++c) {
      final byte[] nm = data.atom(nod.list[c]);
      final String src = srcs.get(string(nm));

      final Item it = src == null ? coll(nm, qp) : Str.get(src);
      qp.bind(string(data.atom(var.list[c])), it);
    }
  }
Example #9
  /**
   * Evaluates the input queries and assigns the results to the specified variables.
   *
   * @param nod query name nodes
   * @param var variable nodes
   * @param pth file path
   * @param qp query processor
   * @throws Exception exception
   */
  private void eval(final Nodes nod, final Nodes var, final String pth, final QueryProcessor qp)
      throws Exception {

    for (int c = 0; c < nod.size(); ++c) {
      final String file = pth + string(data.atom(nod.list[c])) + IO.XQSUFFIX;
      final String in = read(new IOFile(queries + file));
      final QueryProcessor xq = new QueryProcessor(in, context);
      final Value val = xq.value();
      qp.bind(string(data.atom(var.list[c])), val);
      xq.close();
    }
  }
Example #10
  /**
   * Creates an item iterator for the given XML fragment.
   *
   * @param xml fragment
   * @param frag fragment flag
   * @return iterator
   */
  private ItemCache toIter(final String xml, final boolean frag) {
    final ItemCache it = new ItemCache();
    try {
      String str = frag ? "<X>" + xml + "</X>" : xml;
      final Data d = CreateDB.xml(IO.get(str), context);

      for (int p = frag ? 2 : 0; p < d.meta.size; p += d.size(p, d.kind(p)))
        it.add(new DBNode(d, p));
    } catch (final IOException ex) {
      return new ItemCache(new Item[] {Str.get(Long.toString(System.nanoTime()))}, 1);
    }
    return it;
  }
Example #11
  /**
   * Creates new node ids and recreates updatable index structures.
   *
   * @param data data
   * @throws IOException I/O Exception during index rebuild
   */
  public static void ids(final Data data) throws IOException {
    final MetaData md = data.meta;
    final int size = md.size;
    for (int pre = 0; pre < size; ++pre) data.id(pre, pre);
    md.lastid = size - 1;
    md.dirty = true;

    if (data.meta.updindex) {
      data.idmap = new IdPreMap(md.lastid);
      if (data.meta.textindex) optimize(IndexType.TEXT, data, true, true, true, null);
      if (data.meta.attrindex) optimize(IndexType.ATTRIBUTE, data, true, true, true, null);
    }
  }
Example #12
File: List.java Project: JohnLeM/basex
  /**
   * Lists resources of the specified database.
   *
   * @return success flag
   * @throws IOException I/O exception
   */
  private boolean listDB() throws IOException {
    final String db = args[0];
    final String path = args[1] != null ? args[1] : "";
    if (!Databases.validName(db)) return error(NAME_INVALID_X, db);

    final Table table = new Table();
    table.description = RESOURCES;
    table.header.add(INPUT_PATH);
    table.header.add(TYPE);
    table.header.add(MimeTypes.CONTENT_TYPE);
    table.header.add(SIZE);

    try {
      // add xml documents
      final Data data = Open.open(db, context);
      final Resources res = data.resources;
      final IntList il = res.docs(path);
      final int ds = il.size();
      for (int i = 0; i < ds; i++) {
        final int pre = il.get(i);
        final TokenList tl = new TokenList(3);
        final byte[] file = data.text(pre, true);
        tl.add(file);
        tl.add(DataText.M_XML);
        tl.add(MimeTypes.APP_XML);
        tl.add(data.size(pre, Data.DOC));
        table.contents.add(tl);
      }
      // add binary resources
      for (final byte[] file : res.binaries(path)) {
        final String f = string(file);
        final TokenList tl = new TokenList(3);
        tl.add(file);
        tl.add(DataText.M_RAW);
        tl.add(MimeTypes.get(f));
        tl.add(data.meta.binary(f).length());
        table.contents.add(tl);
      }
      Close.close(data, context);
    } catch (final IOException ex) {
      return error(Util.message(ex));
    }
    out.println(table.sort().finish());
    return true;
  }
Example #13
 @Override
 public void execute(final GUI gui) {
   // skip operation for root context
   final Context ctx = gui.context;
   if (ctx.root()) return;
   // check if all nodes are document nodes
   boolean doc = true;
   final Data data = ctx.data();
   for (final int pre : ctx.current().pres) doc &= data.kind(pre) == Data.DOC;
   if (doc) {
     // if yes, jump to database root
     ctx.update();
     gui.notify.context(ctx.current(), false, null);
   } else {
     // otherwise, jump to parent nodes
     gui.execute(new Cs(".."));
   }
 }
Example #14
 /**
  * Returns the resulting query text (text node or attribute value).
  *
  * @param qu query
  * @param root root node
  * @return resulting query text
  * @throws Exception exception
  */
 protected String text(final String qu, final Nodes root) throws Exception {
   final Nodes n = nodes(qu, root);
   final TokenBuilder tb = new TokenBuilder();
   for (int i = 0; i < n.size(); ++i) {
     if (i != 0) tb.add('/');
     tb.add(data.atom(n.list[i]));
   }
   return tb.toString();
 }
Example #15
  /**
   * Performs the list function.
   *
   * @param ctx query context
   * @return iterator
   * @throws QueryException query exception
   */
  private Iter list(final QueryContext ctx) throws QueryException {
    final TokenList tl = new TokenList();
    final int el = expr.length;
    if (el == 0) {
      for (final String s : List.list(ctx.context)) tl.add(s);
    } else {
      final Data data = data(0, ctx);
      final String path = string(el == 1 ? EMPTY : checkStr(expr[1], ctx));
      // add xml resources
      final Resources res = data.resources;
      final IntList il = res.docs(path);
      final int is = il.size();
      for (int i = 0; i < is; i++) tl.add(data.text(il.get(i), true));
      // add binary resources
      for (final byte[] file : res.binaries(path)) tl.add(file);
    }
    tl.sort(!Prop.WIN);

    return new Iter() {
      int pos;

      @Override
      public Str get(final long i) {
        return Str.get(tl.get((int) i));
      }

      @Override
      public Str next() {
        return pos < size() ? get(pos++) : null;
      }

      @Override
      public boolean reset() {
        pos = 0;
        return true;
      }

      @Override
      public long size() {
        return tl.size();
      }
    };
  }
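The anonymous Iter at the end follows a simple positional contract: next() walks forward, get(i) allows random access, reset() rewinds, and size() reports the total. A standalone sketch of the same pattern over a plain string list; the class and interface names are hypothetical and not the BaseX Iter API.

import java.util.Arrays;
import java.util.List;

// Standalone sketch of the positional-iterator pattern used above.
public final class ListIterSketch {
  interface SimpleIter {
    String next();              // null signals the end
    String get(long i);
    boolean reset();
    long size();
  }

  static SimpleIter iter(final List<String> entries) {
    return new SimpleIter() {
      int pos;
      @Override public String get(final long i) { return entries.get((int) i); }
      @Override public String next() { return pos < size() ? get(pos++) : null; }
      @Override public boolean reset() { pos = 0; return true; }
      @Override public long size() { return entries.size(); }
    };
  }

  public static void main(final String[] args) {
    final SimpleIter it = iter(Arrays.asList("docs/a.xml", "docs/b.xml", "img/c.png"));
    for (String s; (s = it.next()) != null;) System.out.println(s);
  }
}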
Example #16
File: FTWords.java Project: jefferya/basex
  @Override
  public boolean indexAccessible(final IndexInfo ii) {
    /* If the following conditions yield true, the index is accessed:
     * - all query terms are statically available
     * - no FTTimes option is specified
     * - explicitly set case, diacritics and stemming match options do not
     *   conflict with index options. */
    data = ii.ic.data;
    final MetaData md = data.meta;
    final FTOpt fto = ftt.opt;

    /* Index will be applied if no explicit match options have been set
     * that conflict with the index options. As a consequence, though, index-
     * based querying might yield other results than sequential scanning. */
    if (occ != null
        || fto.cs != null && md.casesens == (fto.cs == FTCase.INSENSITIVE)
        || fto.isSet(DC) && md.diacritics != fto.is(DC)
        || fto.isSet(ST) && md.stemming != fto.is(ST)
        || fto.ln != null && !fto.ln.equals(md.language)) return false;

    // copy the database options to the tokenizer
    fto.copy(md);

    // estimate costs if text is not known at compile time
    if (tokens == null) {
      ii.costs = Math.max(2, data.meta.size / 30);
      return true;
    }

    // summarize number of hits; break loop if no hits are expected
    final FTLexer ft = new FTLexer(fto);
    ii.costs = 0;
    for (byte[] t : tokens) {
      ft.init(t);
      while (ft.hasNext()) {
        final byte[] tok = ft.nextToken();
        if (fto.sw != null && fto.sw.contains(tok)) continue;

        if (fto.is(WC)) {
          // don't use index if one of the terms starts with a wildcard
          t = ft.get();
          if (t[0] == '.') return false;
          // don't use index if certain characters or more than 1 dot are found
          int d = 0;
          for (final byte w : t) {
            if (w == '{' || w == '\\' || w == '.' && ++d > 1) return false;
          }
        }
        // favor full-text index requests over exact queries
        final int costs = data.costs(ft);
        if (costs != 0) ii.costs += Math.max(2, costs / 100);
      }
    }
    return true;
  }
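The cost heuristic in this method can be isolated: with tokens unknown at compile time the estimate is size/30 (at least 2); otherwise each token contributes costs/100 (at least 2), and a token with zero index hits contributes nothing. A standalone sketch of that arithmetic; the method name and the sample inputs are hypothetical, not the BaseX API.

// Standalone sketch of the cost estimate used above; dbSize and hits are made-up inputs.
public final class FtCostSketch {
  static int estimate(final int dbSize, final int[] hits) {
    if (hits == null) return Math.max(2, dbSize / 30);   // text unknown at compile time
    int costs = 0;
    for (final int h : hits) {
      if (h != 0) costs += Math.max(2, h / 100);         // favor index requests over exact counts
    }
    return costs;
  }

  public static void main(final String[] args) {
    System.out.println(estimate(3000, null));                      // 100
    System.out.println(estimate(3000, new int[] { 0, 50, 450 }));  // 0 + 2 + 4 = 6
  }
}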
Example #17
  /**
   * Updates distances to restore parent-child relationships that have been invalidated by
   * structural updates.
   *
   * <p>Each structural update (insert/delete) leads to a shift of higher PRE values. This
   * invalidates parent-child relationships. Distances are only updated after all structural updates
   * have been carried out to make sure each node (that has to be updated) is only touched once.
   */
  public void updateDistances() {
    accumulatePreValueShifts();
    final IntSet alreadyUpdatedNodes = new IntSet();

    for (final BasicUpdate update : updStructural) {
      int newPreOfAffectedNode = update.preOfAffectedNode + update.accumulatedShifts;

      /* Update distance for the affected node and all following siblings of nodes
       * on the ancestor-or-self axis. */
      while (newPreOfAffectedNode < data.meta.size) {
        if (alreadyUpdatedNodes.contains(newPreOfAffectedNode)) break;
        data.dist(
            newPreOfAffectedNode,
            data.kind(newPreOfAffectedNode),
            calculateNewDistance(newPreOfAffectedNode));
        alreadyUpdatedNodes.add(newPreOfAffectedNode);
        newPreOfAffectedNode += data.size(newPreOfAffectedNode, data.kind(newPreOfAffectedNode));
      }
    }
  }
Example #18
  @Override
  public BasicNodeIter iter(final QueryContext qc) {
    final boolean text = index.type() == IndexType.TEXT;
    final byte kind = text ? Data.TEXT : Data.ATTR;
    final Data data = ictx.data;
    final int ml = data.meta.maxlen;
    final IndexIterator ii =
        index.min.length <= ml
                && index.max.length <= ml
                && (text ? data.meta.textindex : data.meta.attrindex)
            ? data.iter(index)
            : scan();

    return new BasicNodeIter() {
      @Override
      public ANode next() {
        return ii.more() ? new DBNode(data, ii.pre(), kind) : null;
      }
    };
  }
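The decision whether to use the index comes down to a single predicate: both the minimum and maximum search strings must fit into the indexed length (maxlen), and the matching index must actually exist. A standalone sketch of that predicate with hypothetical parameter names, not the BaseX classes.

// Standalone sketch of the index-vs-scan decision above; all parameters are hypothetical.
public final class IndexChoiceSketch {
  static boolean useIndex(final int minLen, final int maxLen, final int indexedMaxLen,
      final boolean textQuery, final boolean textIndex, final boolean attrIndex) {
    return minLen <= indexedMaxLen && maxLen <= indexedMaxLen
        && (textQuery ? textIndex : attrIndex);
  }

  public static void main(final String[] args) {
    // short search strings and an existing text index: access the index
    System.out.println(useIndex(3, 8, 96, true, true, false));   // true
    // search string longer than the indexed length: fall back to a scan
    System.out.println(useIndex(3, 200, 96, true, true, false)); // false
  }
}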
Example #19
  /**
   * Removes superfluous update operations. If a node T is deleted or replaced, all updates on the
   * descendant axis of T can be left out as they won't affect the database after all.
   *
   * <p>Superfluous updates can have a minimum PRE value of pre(T)+1 and a maximum PRE value of
   * pre(T)+size(T).
   *
   * <p>An update with location pre(T)+size(T) can only be removed if the update is an atomic insert
   * and the inserted node is then part of the subtree of T.
   */
  public void optimize() {
    if (opt) return;

    check();
    // traverse from lowest to highest PRE value
    int i = updStructural.size() - 1;
    while (i >= 0) {
      final BasicUpdate u = updStructural.get(i);
      // If this update can lead to superfluous updates ...
      if (u.destructive()) {
        // we determine the lowest and highest PRE values of a superfluous update
        final int pre = u.location;
        final int fol = pre + data.size(pre, data.kind(pre));
        i--;
        // and have a look at the next candidate
        while (i >= 0) {
          final BasicUpdate desc = updStructural.get(i);
          final int descpre = desc.location;
          // if the candidate operates on the subtree of T and inserts a node ...
          if (descpre <= fol
              && (desc instanceof Insert || desc instanceof InsertAttr)
              && desc.parent() >= pre
              && desc.parent() < fol) {
            // it is removed.
            updStructural.remove(i--);

            // Other updates (not inserting a node) that operate on the subtree of T can
            // only have a PRE value that is smaller than the following PRE of T
          } else if (descpre < fol) {
            // these we delete.
            updStructural.remove(i--);

            // Else there's nothing to delete
          } else break;
        }
      } else i--;
    }
    opt = true;
  }
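The pruning rule can be stated independently of the update classes: once a destructive update at pre(T) with subtree size size(T) is found, any update whose target PRE lies strictly inside (pre(T), pre(T)+size(T)) is superfluous; the boundary case for inserts at pre(T)+size(T) is handled separately above. A compact standalone sketch over a plain list of update locations, deliberately ignoring that boundary case; all names are hypothetical.

import java.util.ArrayList;
import java.util.List;

// Standalone sketch of pruning updates inside a deleted subtree; ignores the
// special case for inserts at pre(T)+size(T). All names are hypothetical.
public final class PruneSketch {
  /** Removes update locations that fall inside the subtree rooted at delPre. */
  static List<Integer> prune(final List<Integer> locations, final int delPre, final int delSize) {
    final int fol = delPre + delSize;                 // first PRE following the subtree
    final List<Integer> kept = new ArrayList<>();
    for (final int loc : locations) {
      if (loc <= delPre || loc >= fol) kept.add(loc); // keep updates outside (delPre, fol)
    }
    return kept;
  }

  public static void main(final String[] args) {
    // the subtree at PRE 4 with size 3 covers the PRE values 4, 5 and 6
    System.out.println(prune(List.of(2, 5, 6, 9), 4, 3)); // [2, 9]
  }
}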
Example #20
File: NSNode.java Project: phspaelti/basex
 /**
  * Finds the closest namespace node for the specified pre value.
  *
  * @param pre pre value
  * @param data data reference
  * @return node
  */
 NSNode find(final int pre, final Data data) {
   final int s = find(pre);
   // no match found: return current node
   if (s == -1) return this;
   final NSNode ch = children[s];
   final int cp = ch.pr;
   // return exact hit
   if (cp == pre) return ch;
   // found node is preceding sibling
   if (cp + data.size(cp, Data.ELEM) <= pre) return this;
   // continue recursive search
   return children[s].find(pre, data);
 }
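The containment test in the middle is the key step: a candidate child at PRE cp covers the requested PRE only if pre < cp + size(cp); otherwise the candidate is merely a preceding sibling and the current node stays the closest match. A small standalone sketch of that check with made-up values, not the BaseX namespace structure.

// Standalone sketch of the subtree-containment check used above; the values are made up.
public final class ContainmentSketch {
  /** True if the node at candidatePre (with the given subtree size) contains pre. */
  static boolean contains(final int candidatePre, final int candidateSize, final int pre) {
    return pre >= candidatePre && pre < candidatePre + candidateSize;
  }

  public static void main(final String[] args) {
    System.out.println(contains(3, 4, 5));  // true: 5 lies within [3, 7)
    System.out.println(contains(3, 4, 8));  // false: 8 follows the subtree
  }
}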
Example #21
  @Override
  protected boolean run() {
    final boolean create = context.user.has(Perm.CREATE);
    String path = MetaData.normPath(args[0]);
    if (path == null || path.endsWith(".")) return error(NAME_INVALID_X, args[0]);

    if (in == null) {
      final IO io = IO.get(args[1]);
      if (!io.exists() || io.isDir()) return error(RES_NOT_FOUND_X, create ? io : args[1]);
      in = io.inputSource();
      // set/add name of document
      if ((path.isEmpty() || path.endsWith("/")) && !(io instanceof IOContent)) path += io.name();
    }

    // ensure that the final name is not empty
    if (path.isEmpty()) return error(NAME_INVALID_X, path);

    final Data data = context.data();
    if (data.inMemory()) return error(NO_MAINMEM);

    // ensure that the name is not empty and contains no trailing dots
    final IOFile file = data.meta.binary(path);
    if (path.isEmpty() || path.endsWith(".") || file == null || file.isDir())
      return error(NAME_INVALID_X, create ? path : args[0]);

    // start update
    if (!data.startUpdate()) return error(DB_PINNED_X, data.meta.name);

    try {
      store(in, file);
      return info(QUERY_EXECUTED_X_X, "", perf);
    } catch (final IOException ex) {
      return error(FILE_NOT_STORED_X, Util.message(ex));
    } finally {
      data.finishUpdate();
    }
  }
Example #22
  /**
   * Writes the entry for the specified pre value to the table.
   *
   * @param t table reference
   * @param data data reference
   * @param p node to be printed
   */
  private static void table(final Table t, final Data data, final int p) {
    final int k = data.kind(p);
    final TokenList tl = new TokenList();
    tl.add(p);
    tl.add(p - data.parent(p, k));
    tl.add(data.size(p, k));
    tl.add(data.attSize(p, k));
    final int u = data.uri(p, k);
    if (data.nsFlag(p)) tl.add("+" + u);
    else tl.add(u);
    tl.add(TABLEKINDS[k]);

    byte[] cont = null;
    if (k == Data.ELEM) {
      cont = data.name(p, k);
    } else if (k == Data.ATTR) {
      cont =
          new TokenBuilder(data.name(p, k)).add(ATT1).add(data.text(p, false)).add(ATT2).finish();
    } else {
      cont = data.text(p, true);
    }
    tl.add(replace(chop(cont, 64), '\n', ' '));
    t.contents.add(tl);
  }
Example #23
 /** Inserts an attribute with namespace. */
 @Test
 public void insertAttributeWithNs() {
   create(1);
   query("insert node attribute { QName('ns', 'pref:local') } { } into /*");
   final Data data = context.data();
   assertEquals(false, data.nsFlag(0));
   assertEquals(true, data.nsFlag(1));
   assertEquals(false, data.nsFlag(2));
   assertEquals(0, data.uriId(1, data.kind(1)));
   assertEquals(1, data.uriId(2, data.kind(2)));
   assertEquals("ns", string(data.nspaces.uri(1)));
 }
Example #24
  /**
   * Returns atomic text node merging operations if necessary for the given node PRE and its right
   * neighbor PRE+1.
   *
   * @param a node PRE value
   * @param d target data reference
   * @return list of text merging operations
   */
  private AtomicUpdateList necessaryMerges(final int a, final Data d) {
    final AtomicUpdateList mergeTwoNodes = new AtomicUpdateList(d);
    final int s = d.meta.size;
    final int b = a + 1;
    // don't leave table
    if (a >= s || b >= s || a < 0 || b < 0) return mergeTwoNodes;
    // only merge texts
    if (d.kind(a) != Data.TEXT || d.kind(b) != Data.TEXT) return mergeTwoNodes;
    // only merge neighboring texts
    if (d.parent(a, Data.TEXT) != d.parent(b, Data.TEXT)) return mergeTwoNodes;

    mergeTwoNodes.addDelete(b);
    mergeTwoNodes.addUpdateValue(a, Data.TEXT, Token.concat(d.text(a, true), d.text(b, true)));

    return mergeTwoNodes;
  }
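The same idea outside the storage layer: two entries are merged only if both are text nodes and share a parent, and the merge itself is a value update on the left node plus a delete of the right one. A standalone sketch over a tiny made-up node record; the types are hypothetical and not the BaseX AtomicUpdateList.

import java.util.ArrayList;
import java.util.List;

// Standalone sketch of merging two adjacent text siblings; Node is a made-up record.
public final class MergeSketch {
  record Node(String kind, int parent, String text) { }

  /** Merges nodes[i] and nodes[i + 1] if both are text siblings; returns true on success. */
  static boolean merge(final List<Node> nodes, final int i) {
    if (i < 0 || i + 1 >= nodes.size()) return false;                        // don't leave the table
    final Node a = nodes.get(i), b = nodes.get(i + 1);
    if (!"TEXT".equals(a.kind()) || !"TEXT".equals(b.kind())) return false;  // only merge texts
    if (a.parent() != b.parent()) return false;                              // only merge siblings
    nodes.set(i, new Node("TEXT", a.parent(), a.text() + b.text()));         // value update on the left
    nodes.remove(i + 1);                                                     // delete the right neighbor
    return true;
  }

  public static void main(final String[] args) {
    final List<Node> nodes = new ArrayList<>(List.of(
        new Node("TEXT", 0, "Hello "), new Node("TEXT", 0, "world")));
    merge(nodes, 0);
    System.out.println(nodes.get(0).text());  // Hello world
  }
}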
Example #25
File: Data.java Project: JosuaKrause/basex
  /**
   * Replaces parts of the database with the specified data instance.
   *
   * @param rpre pre value to be replaced
   * @param clip data clip
   */
  public final void replace(final int rpre, final DataClip clip) {
    meta.update();

    final int dsize = clip.size();
    final Data data = clip.data;

    final int rkind = kind(rpre);
    final int rsize = size(rpre, rkind);
    final int rpar = parent(rpre, rkind);
    final int diff = dsize - rsize;
    buffer(dsize);
    resources.replace(rpre, rsize, clip);

    if (meta.updindex) {
      // update index
      indexDelete(rpre, rsize);
      indexBegin();
    }

    for (int dpre = clip.start; dpre < clip.end; ++dpre) {
      final int dkind = data.kind(dpre);
      final int dpar = data.parent(dpre, dkind);
      final int pre = rpre + dpre - clip.start;
      final int dis = dpar >= 0 ? dpre - dpar : pre - rpar;

      switch (dkind) {
        case DOC:
          // add document
          doc(pre, data.size(dpre, dkind), data.text(dpre, true));
          meta.ndocs++;
          break;
        case ELEM:
          // add element
          byte[] nm = data.name(dpre, dkind);
          elem(
              dis,
              tagindex.index(nm, null, false),
              data.attSize(dpre, dkind),
              data.size(dpre, dkind),
              nspaces.uri(nm, true),
              false);
          break;
        case TEXT:
        case COMM:
        case PI:
          // add text
          text(pre, dis, data.text(dpre, true), dkind);
          break;
        case ATTR:
          // add attribute
          nm = data.name(dpre, dkind);
          attr(
              pre,
              dis,
              atnindex.index(nm, null, false),
              data.text(dpre, false),
              nspaces.uri(nm, false),
              false);
          break;
      }
    }

    if (meta.updindex) {
      indexEnd();
      // update ID -> PRE map:
      idmap.delete(rpre, id(rpre), -rsize);
      idmap.insert(rpre, meta.lastid - dsize + 1, dsize);
    }

    // update table:
    table.replace(rpre, buffer(), rsize);
    buffer(1);

    // no distance/size update if the two subtrees are of equal size
    if (diff == 0) return;

    // increase/decrease size of ancestors, adjust distances of siblings
    int p = rpar;
    while (p >= 0) {
      final int k = kind(p);
      size(p, k, size(p, k) + diff);
      p = parent(p, k);
    }

    if (!cache) updateDist(rpre + dsize, diff);

    // adjust attribute size of parent if attributes inserted. attribute size
    // of parent cannot be reduced via a replace expression.
    int dpre = clip.start;
    if (data.kind(dpre) == ATTR) {
      int d = 0;
      while (dpre < clip.end && data.kind(dpre++) == ATTR) d++;
      if (d > 1) attSize(rpar, kind(rpar), d + 1);
    }
  }
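The bookkeeping after the copy loop reduces to one rule: if the replacing subtree differs in size by diff, every ancestor of the replaced node grows (or shrinks) by exactly diff. A standalone sketch using plain parent and size arrays; the sample data is made up and does not use the BaseX table.

import java.util.Arrays;

// Standalone sketch of the ancestor-size adjustment above; the arrays are made-up sample data.
public final class AncestorSizeSketch {
  public static void main(final String[] args) {
    // parent[i] = PRE of the parent of node i (-1 for the root), size[i] = subtree size
    final int[] parent = { -1, 0, 1, 1, 0 };
    final int[] size   = {  5, 3, 1, 1, 1 };

    final int rpre = 2;        // node being replaced
    final int rsize = size[rpre];
    final int dsize = 3;       // size of the replacing subtree
    final int diff = dsize - rsize;

    // increase/decrease the size of all ancestors by diff
    for (int p = parent[rpre]; p >= 0; p = parent[p]) size[p] += diff;

    System.out.println(Arrays.toString(size));  // [7, 5, 1, 1, 1]
  }
}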
Example #26
  @Override
  public void paintComponent(final Graphics g) {
    super.paintComponent(g);

    // skip if view is unavailable
    if (tdata.rows == null) return;

    gui.painting = true;
    g.setFont(GUIConstants.font);

    final int w = getWidth() - scroll.getWidth();
    final int h = getHeight();
    final int fsz = gui.gprop.num(GUIProp.FONTSIZE);

    final Context context = tdata.context;
    final Data data = context.data();
    final int focus = gui.context.focused;
    final int rfocus = tdata.getRoot(data, focus);
    int mpos = 0;

    final int nCols = tdata.cols.length;
    final int nRows = tdata.rows.size();
    final int rowH = tdata.rowH;

    final TableIterator ti = new TableIterator(data, tdata);
    final TokenBuilder[] tb = new TokenBuilder[nCols];
    for (int i = 0; i < nCols; ++i) tb[i] = new TokenBuilder();

    focusedString = null;
    final Nodes marked = context.marked;
    int l = scroll.pos() / rowH - 1;
    int posY = -scroll.pos() + l * rowH;

    while (++l < nRows && marked != null) {
      // skip when all visible rows have been painted or if data has changed
      if (posY > h || l >= tdata.rows.size()) break;
      posY += rowH;

      final int pre = tdata.rows.get(l);
      while (mpos < marked.size() && marked.list[mpos] < pre) ++mpos;

      // draw line
      g.setColor(GUIConstants.color2);
      g.drawLine(0, posY + rowH - 1, w, posY + rowH - 1);
      g.setColor(Color.white);
      g.drawLine(0, posY + rowH, w, posY + rowH);

      // verify if current node is marked or focused
      final boolean rm = mpos < marked.size() && marked.list[mpos] == pre;
      final boolean rf = pre == rfocus;
      final int col = rm ? rf ? 5 : 4 : 3;
      if (rm || rf) {
        g.setColor(GUIConstants.color(col));
        g.fillRect(0, posY - 1, w, rowH);
        g.setColor(GUIConstants.color(col + 4));
        g.drawLine(0, posY - 1, w, posY - 1);
      }
      g.setColor(Color.black);

      // skip drawing of text during animation
      if (rowH < fsz) continue;

      // find all row contents
      ti.init(pre);
      int fcol = -1;
      while (ti.more()) {
        final int c = ti.col;
        if (ti.pre == focus || data.parent(ti.pre, data.kind(ti.pre)) == focus) fcol = c;

        // add content to column (skip too long contents)...
        if (tb[c].size() < 100) {
          if (tb[c].size() != 0) tb[c].add("; ");
          tb[c].add(data.text(ti.pre, ti.text));
        }
      }

      // add dots if content is too long
      for (final TokenBuilder t : tb) if (t.size() > 100) t.add(DOTS);

      // draw row contents
      byte[] focusStr = null;
      int fx = -1;
      double x = 1;
      for (int c = 0; c < nCols; ++c) {
        // draw single column
        final double cw = w * tdata.cols[c].width;
        final double ce = x + cw;

        if (ce != 0) {
          final byte[] str = tb[c].size() != 0 ? tb[c].finish() : null;
          if (str != null) {
            if (tdata.mouseX > x && tdata.mouseX < ce || fcol == c) {
              fx = (int) x;
              focusStr = str;
            }
            BaseXLayout.chopString(g, str, (int) x + 1, posY + 2, (int) cw - 4, fsz);
            tb[c].reset();
          }
        }
        x = ce;
      }

      // highlight focused entry
      if (rf || fcol != -1) {
        if (focusStr != null) {
          final int sw = BaseXLayout.width(g, focusStr) + 8;
          if (fx > w - sw - 2) fx = w - sw - 2;
          g.setColor(GUIConstants.color(col + 2));
          g.fillRect(fx - 2, posY, sw, rowH - 1);
          g.setColor(Color.black);
          BaseXLayout.chopString(g, focusStr, fx + 1, posY + 2, sw, fsz);

          // cache focused string
          focusedString = string(focusStr);
          final int i = focusedString.indexOf("; ");
          if (i != -1) focusedString = focusedString.substring(0, i);
        }
      }
    }
    gui.painting = false;
  }
Example #27
  /**
   * Parses the specified test case.
   *
   * @param root root node
   * @return {@code false} if the query specified by {@link #single} has been evaluated
   * @throws Exception exception
   */
  private boolean parse(final Nodes root) throws Exception {
    final String pth = text("@FilePath", root);
    final String outname = text("@name", root);
    if (single != null && !outname.startsWith(single)) return true;

    final Performance perf = new Performance();
    if (verbose) Util.out("- " + outname);

    boolean inspect = false;
    boolean correct = true;

    final Nodes nodes = states(root);
    for (int n = 0; n < nodes.size(); ++n) {
      final Nodes state = new Nodes(nodes.list[n], nodes.data);

      final String inname = text("*:query/@name", state);
      context.query = new IOFile(queries + pth + inname + IO.XQSUFFIX);
      final String in = read(context.query);
      String er = null;
      ItemCache iter = null;
      boolean doc = true;

      final Nodes cont = nodes("*:contextItem", state);
      Nodes curr = null;
      if (cont.size() != 0) {
        final Data d = Check.check(context, srcs.get(string(data.atom(cont.list[0]))));
        curr = new Nodes(d.doc(), d);
        curr.root = true;
      }

      context.prop.set(Prop.QUERYINFO, compile);
      final QueryProcessor xq = new QueryProcessor(in, curr, context);
      context.prop.set(Prop.QUERYINFO, false);

      // limit result sizes to 1MB
      final ArrayOutput ao = new ArrayOutput();
      final TokenBuilder files = new TokenBuilder();

      try {
        files.add(
            file(nodes("*:input-file", state), nodes("*:input-file/@variable", state), xq, n == 0));
        files.add(file(nodes("*:defaultCollection", state), null, xq, n == 0));

        var(nodes("*:input-URI", state), nodes("*:input-URI/@variable", state), xq);
        eval(nodes("*:input-query/@name", state), nodes("*:input-query/@variable", state), pth, xq);

        parse(xq, state);

        for (final int p : nodes("*:module", root).list) {
          final String uri = text("@namespace", new Nodes(p, data));
          final String file = mods.get(string(data.atom(p))) + IO.XQSUFFIX;
          xq.module(file, uri);
        }

        // evaluate and serialize query
        final SerializerProp sp = new SerializerProp();
        sp.set(SerializerProp.S_INDENT, context.prop.is(Prop.CHOP) ? DataText.YES : DataText.NO);
        final XMLSerializer xml = new XMLSerializer(ao, sp);

        iter = xq.value().cache();
        for (Item it; (it = iter.next()) != null; ) {
          doc &= it.type == NodeType.DOC;
          it.serialize(xml);
        }
        xml.close();
      } catch (final Exception ex) {
        if (!(ex instanceof QueryException || ex instanceof IOException)) {
          System.err.println("\n*** " + outname + " ***");
          System.err.println(in + "\n");
          ex.printStackTrace();
        }
        er = ex.getMessage();
        if (er.startsWith(STOPPED)) er = er.substring(er.indexOf('\n') + 1);
        if (er.startsWith("[")) er = er.replaceAll("\\[(.*?)\\] (.*)", "$1 $2");
        // unexpected error - dump stack trace
      }

      // print compilation steps
      if (compile) {
        Util.errln("---------------------------------------------------------");
        Util.err(xq.info());
        Util.errln(in);
      }

      final Nodes expOut = nodes("*:output-file/text()", state);
      final TokenList result = new TokenList();
      for (int o = 0; o < expOut.size(); ++o) {
        final String resFile = string(data.atom(expOut.list[o]));
        final IOFile exp = new IOFile(expected + pth + resFile);
        result.add(read(exp));
      }

      final Nodes cmpFiles = nodes("*:output-file/@compare", state);
      boolean xml = false;
      boolean frag = false;
      boolean ignore = false;
      for (int o = 0; o < cmpFiles.size(); ++o) {
        final byte[] type = data.atom(cmpFiles.list[o]);
        xml |= eq(type, XML);
        frag |= eq(type, FRAGMENT);
        ignore |= eq(type, IGNORE);
      }

      String expError = text("*:expected-error/text()", state);

      final StringBuilder log = new StringBuilder(pth + inname + IO.XQSUFFIX);
      if (files.size() != 0) {
        log.append(" [");
        log.append(files);
        log.append("]");
      }
      log.append(NL);

      // remove comments
      log.append(norm(in));
      log.append(NL);
      final String logStr = log.toString();
      // skip queries with variable results
      final boolean print = currTime || !logStr.contains("current-");

      boolean correctError = false;
      if (er != null && (expOut.size() == 0 || !expError.isEmpty())) {
        expError = error(pth + outname, expError);
        final String code = er.substring(0, Math.min(8, er.length()));
        for (final String e : SLASH.split(expError)) {
          if (code.equals(e)) {
            correctError = true;
            break;
          }
        }
      }

      if (correctError) {
        if (print) {
          logOK.append(logStr);
          logOK.append("[Right] ");
          logOK.append(norm(er));
          logOK.append(NL);
          logOK.append(NL);
          addLog(pth, outname + ".log", er);
        }
        ++ok;
      } else if (er == null) {
        int s = -1;
        final int rs = result.size();

        while (!ignore && ++s < rs) {
          inspect |= s < cmpFiles.list.length && eq(data.atom(cmpFiles.list[s]), INSPECT);

          final byte[] res = result.get(s), actual = ao.toArray();
          if (res.length == ao.size() && eq(res, actual)) break;

          if (xml || frag) {
            iter.reset();

            try {
              final ItemCache ic =
                  toIter(string(res).replaceAll("^<\\?xml.*?\\?>", "").trim(), frag);
              if (FNSimple.deep(null, iter, ic)) break;

              ic.reset();
              final ItemCache ia = toIter(string(actual), frag);
              if (FNSimple.deep(null, ia, ic)) break;
            } catch (final Throwable ex) {
              System.err.println("\n" + outname + ":");
              ex.printStackTrace();
            }
          }
        }
        if ((rs > 0 || !expError.isEmpty()) && s == rs && !inspect) {
          if (print) {
            if (expOut.size() == 0) result.add(error(pth + outname, expError));
            logErr.append(logStr);
            logErr.append("[" + testid + " ] ");
            logErr.append(norm(string(result.get(0))));
            logErr.append(NL);
            logErr.append("[Wrong] ");
            logErr.append(norm(ao.toString()));
            logErr.append(NL);
            logErr.append(NL);
            addLog(pth, outname + (xml ? IO.XMLSUFFIX : ".txt"), ao.toString());
          }
          correct = false;
          ++err;
        } else {
          if (print) {
            logOK.append(logStr);
            logOK.append("[Right] ");
            logOK.append(norm(ao.toString()));
            logOK.append(NL);
            logOK.append(NL);
            addLog(pth, outname + (xml ? IO.XMLSUFFIX : ".txt"), ao.toString());
          }
          ++ok;
        }
      } else {
        if (expOut.size() == 0 || !expError.isEmpty()) {
          if (print) {
            logOK2.append(logStr);
            logOK2.append("[" + testid + " ] ");
            logOK2.append(norm(expError));
            logOK2.append(NL);
            logOK2.append("[Rght?] ");
            logOK2.append(norm(er));
            logOK2.append(NL);
            logOK2.append(NL);
            addLog(pth, outname + ".log", er);
          }
          ++ok2;
        } else {
          if (print) {
            logErr2.append(logStr);
            logErr2.append("[" + testid + " ] ");
            logErr2.append(norm(string(result.get(0))));
            logErr2.append(NL);
            logErr2.append("[Wrong] ");
            logErr2.append(norm(er));
            logErr2.append(NL);
            logErr2.append(NL);
            addLog(pth, outname + ".log", er);
          }
          correct = false;
          ++err2;
        }
      }
      if (curr != null) Close.close(curr.data, context);
      xq.close();
    }

    if (reporting) {
      logReport.append("    <test-case name=\"");
      logReport.append(outname);
      logReport.append("\" result='");
      logReport.append(correct ? "pass" : "fail");
      if (inspect) logReport.append("' todo='inspect");
      logReport.append("'/>");
      logReport.append(NL);
    }

    // print verbose/timing information
    final long nano = perf.getTime();
    final boolean slow = nano / 1000000 > timer;
    if (verbose) {
      if (slow) Util.out(": " + Performance.getTimer(nano, 1));
      Util.outln();
    } else if (slow) {
      Util.out(NL + "- " + outname + ": " + Performance.getTimer(nano, 1));
    }

    return single == null || !outname.equals(single);
  }
Example #28
  /**
   * Runs the test suite.
   *
   * @param args command-line arguments
   * @throws Exception exception
   */
  void run(final String[] args) throws Exception {
    final Args arg =
        new Args(
            args,
            this,
            " Test Suite [options] [pat]"
                + NL
                + " [pat] perform only tests with the specified pattern"
                + NL
                + " -c     print compilation steps"
                + NL
                + " -h     show this help"
                + NL
                + " -m     minimum conformance"
                + NL
                + " -g     <test-group> test group to test"
                + NL
                + " -C     run tests depending on current time"
                + NL
                + " -p     change path"
                + NL
                + " -r     create report"
                + NL
                + " -t[ms] list slowest queries"
                + NL
                + " -v     verbose output");

    while (arg.more()) {
      if (arg.dash()) {
        final char c = arg.next();
        if (c == 'r') {
          reporting = true;
          currTime = true;
        } else if (c == 'C') {
          currTime = true;
        } else if (c == 'c') {
          compile = true;
        } else if (c == 'm') {
          minimum = true;
        } else if (c == 'g') {
          group = arg.string();
        } else if (c == 'p') {
          path = arg.string() + "/";
        } else if (c == 't') {
          timer = arg.num();
        } else if (c == 'v') {
          verbose = true;
        } else {
          arg.check(false);
        }
      } else {
        single = arg.string();
        maxout = Integer.MAX_VALUE;
      }
    }
    if (!arg.finish()) return;

    queries = path + "Queries/XQuery/";
    expected = path + "ExpectedTestResults/";
    results = path + "ReportingResults/Results/";
    report = path + "ReportingResults/";
    sources = path + "TestSources/";

    final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    final String dat = sdf.format(Calendar.getInstance().getTime());

    final Performance perf = new Performance();
    context.prop.set(Prop.CHOP, false);

    // new Check(path + input).execute(context);
    data = CreateDB.xml(new IOFile(path + input), context);

    final Nodes root = new Nodes(0, data);
    Util.outln(NL + Util.name(this) + " Test Suite " + text("/*:test-suite/@version", root));

    Util.outln(NL + "Caching Sources...");
    for (final int s : nodes("//*:source", root).list) {
      final Nodes srcRoot = new Nodes(s, data);
      final String val = (path + text("@FileName", srcRoot)).replace('\\', '/');
      srcs.put(text("@ID", srcRoot), val);
    }

    Util.outln("Caching Modules...");
    for (final int s : nodes("//*:module", root).list) {
      final Nodes srcRoot = new Nodes(s, data);
      final String val = (path + text("@FileName", srcRoot)).replace('\\', '/');
      mods.put(text("@ID", srcRoot), val);
    }

    Util.outln("Caching Collections...");
    for (final int c : nodes("//*:collection", root).list) {
      final Nodes nodes = new Nodes(c, data);
      final String cname = text("@ID", nodes);

      final TokenList dl = new TokenList();
      final Nodes doc = nodes("*:input-document", nodes);
      for (int d = 0; d < doc.size(); ++d) {
        dl.add(token(sources + string(data.atom(doc.list[d])) + IO.XMLSUFFIX));
      }
      colls.put(cname, dl.toArray());
    }
    init(root);

    if (reporting) {
      Util.outln("Delete old results...");
      delete(new File[] {new File(results)});
    }

    if (verbose) Util.outln();
    final Nodes nodes =
        minimum
            ? nodes("//*:test-group[starts-with(@name, 'Minim')]//*:test-case", root)
            : group != null
                ? nodes("//*:test-group[@name eq '" + group + "']//*:test-case", root)
                : nodes("//*:test-case", root);

    long total = nodes.size();
    Util.out("Parsing " + total + " Queries");
    for (int t = 0; t < total; ++t) {
      if (!parse(new Nodes(nodes.list[t], data))) break;
      if (!verbose && t % 500 == 0) Util.out(".");
    }
    Util.outln();
    total = ok + ok2 + err + err2;

    final String time = perf.getTimer();
    Util.outln("Writing log file..." + NL);
    BufferedWriter bw =
        new BufferedWriter(new OutputStreamWriter(new FileOutputStream(path + pathlog), UTF8));
    bw.write("TEST RESULTS ==================================================");
    bw.write(NL + NL + "Total #Queries: " + total + NL);
    bw.write("Correct / Empty Results: " + ok + " / " + ok2 + NL);
    bw.write("Conformance (w/Empty Results): ");
    bw.write(pc(ok, total) + " / " + pc(ok + ok2, total) + NL);
    bw.write("Wrong Results / Errors: " + err + " / " + err2 + NL);
    bw.write("WRONG =========================================================");
    bw.write(NL + NL + logErr + NL);
    bw.write("WRONG (ERRORS) ================================================");
    bw.write(NL + NL + logErr2 + NL);
    bw.write("CORRECT? (EMPTY) ==============================================");
    bw.write(NL + NL + logOK2 + NL);
    bw.write("CORRECT =======================================================");
    bw.write(NL + NL + logOK + NL);
    bw.write("===============================================================");
    bw.close();

    bw = new BufferedWriter(new FileWriter(path + pathhis, true));
    bw.write(dat + "\t" + ok + "\t" + ok2 + "\t" + err + "\t" + err2 + NL);
    bw.close();

    if (reporting) {
      bw =
          new BufferedWriter(
              new OutputStreamWriter(new FileOutputStream(report + NAME + IO.XMLSUFFIX), UTF8));
      write(bw, report + NAME + "Pre" + IO.XMLSUFFIX);
      bw.write(logReport.toString());
      write(bw, report + NAME + "Pos" + IO.XMLSUFFIX);
      bw.close();
    }

    Util.outln("Total #Queries: " + total);
    Util.outln("Correct / Empty results: " + ok + " / " + ok2);
    Util.out("Conformance (w/empty results): ");
    Util.outln(pc(ok, total) + " / " + pc(ok + ok2, total));
    Util.outln("Total Time: " + time);

    context.close();
  }
Example #29
File: Data.java Project: JosuaKrause/basex
  /**
   * Inserts a data instance at the specified pre value. Note that the specified data instance must
   * differ from this instance.
   *
   * @param ipre value at which to insert new data
   * @param ipar parent pre value of node
   * @param clip data clip
   */
  public final void insert(final int ipre, final int ipar, final DataClip clip) {
    meta.update();

    // update value and document indexes
    if (meta.updindex) indexBegin();
    resources.insert(ipre, clip);

    final int dsize = clip.size();
    final int buf = Math.min(dsize, IO.BLOCKSIZE >> IO.NODEPOWER);
    // resize buffer to cache more entries
    buffer(buf);

    // find all namespaces in scope to avoid duplicate declarations
    final TokenMap nsScope = nspaces.scope(ipar, this);

    // loop through all entries
    final IntList preStack = new IntList();
    final NSNode nsRoot = nspaces.current();
    final HashSet<NSNode> newNodes = new HashSet<NSNode>();
    final IntList flagPres = new IntList();

    // source data instance
    final Data data = clip.data;
    int c = 0;
    for (int dpre = clip.start; dpre < clip.end; ++dpre, ++c) {
      if (c != 0 && c % buf == 0) insert(ipre + c - buf);

      final int pre = ipre + c;
      final int dkind = data.kind(dpre);
      final int dpar = data.parent(dpre, dkind);
      // ipar < 0 if document nodes on top level are added
      final int dis = dpar >= 0 ? dpre - dpar : ipar >= 0 ? pre - ipar : 0;
      final int par = dis == 0 ? -1 : pre - dis;

      if (c == 0) nspaces.root(par, this);

      while (!preStack.isEmpty() && preStack.peek() > par) nspaces.close(preStack.pop());

      switch (dkind) {
        case DOC:
          // add document
          nspaces.prepare();
          final int s = data.size(dpre, dkind);
          doc(pre, s, data.text(dpre, true));
          meta.ndocs++;
          preStack.push(pre);
          break;
        case ELEM:
          // add element
          nspaces.prepare();
          boolean ne = false;
          if (data.nsFlag(dpre)) {
            final Atts at = data.ns(dpre);
            for (int a = 0; a < at.size(); ++a) {
              // check if the prefix has been declared / is part of the current ns scope
              final byte[] old = nsScope.get(at.name(a));
              if (old == null || !eq(old, at.value(a))) {
                // we have to keep track of all new NSNodes that are added
                // to the Namespace structure, as their pre values must not
                // be updated. I.e. if an NSNode N with pre value 3 existed
                // prior to inserting and two new nodes are inserted at
                // location pre == 3 we have to make sure N and only N gets
                // updated.
                newNodes.add(nspaces.add(at.name(a), at.value(a), pre));
                ne = true;
              }
            }
          }
          byte[] nm = data.name(dpre, dkind);
          elem(
              dis,
              tagindex.index(nm, null, false),
              data.attSize(dpre, dkind),
              data.size(dpre, dkind),
              nspaces.uri(nm, true),
              ne);
          preStack.push(pre);
          break;
        case TEXT:
        case COMM:
        case PI:
          // add text
          text(pre, dis, data.text(dpre, true), dkind);
          break;
        case ATTR:
          // add attribute
          nm = data.name(dpre, dkind);
          // check if prefix already in nsScope or not
          final byte[] attPref = prefix(nm);
          // check if prefix of attribute has already been declared, otherwise
          // add declaration to parent node
          if (data.nsFlag(dpre) && nsScope.get(attPref) == null) {
            nspaces.add(
                par,
                preStack.isEmpty() ? -1 : preStack.peek(),
                attPref,
                data.nspaces.uri(data.uri(dpre, dkind)),
                this);
            // save pre value to set ns flag later for this node. can't be done
            // here as direct table access would interfere with the buffer
            flagPres.add(par);
          }
          attr(
              pre,
              dis,
              atnindex.index(nm, null, false),
              data.text(dpre, false),
              nspaces.uri(nm, false),
              false);
          break;
      }
    }
    // finalize and update namespace structure
    while (!preStack.isEmpty()) nspaces.close(preStack.pop());
    nspaces.root(nsRoot);

    if (bp != 0) insert(ipre + c - 1 - (c - 1) % buf);
    // reset buffer to old size
    buffer(1);

    // set ns flags
    for (int f = 0; f < flagPres.size(); f++) {
      final int fl = flagPres.get(f);
      table.write2(fl, 1, name(fl) | 1 << 15);
    }

    // increase size of ancestors
    int p = ipar;
    while (p >= 0) {
      final int k = kind(p);
      size(p, k, size(p, k) + dsize);
      p = parent(p, k);
    }

    if (meta.updindex) {
      // add the entries to the ID -> PRE mapping:
      idmap.insert(ipre, id(ipre), dsize);
      indexEnd();
    }

    if (!cache) updateDist(ipre + dsize, dsize);

    // propagate PRE value shifts to namespaces
    if (ipar != -1) nspaces.insert(ipre, dsize, newNodes);
  }
Example #30
 @Override
 void apply(final Data d) {
   d.insert(location, parent, insseq);
 }