Example #1
 /**
  * Scans XML text.
  *
  * @param ch current character
  * @throws IOException I/O exception
  */
 private void content(final int ch) throws IOException {
   type = Type.TEXT;
   boolean f = true;
   int c = ch;
   while (c != 0) {
     if (c != '<') {
       if (c == '&') {
         // scan entity
         final byte[] r = ref(true);
         if (r.length == 1) token.add(r);
         else if (!input.add(r, false)) error(RECENT);
       } else {
         if (c == ']') {
           // ']]>' not allowed in content
           if (consume() == ']') {
             if (consume() == '>') error(CONTCDATA);
             prev(1);
           }
           prev(1);
         }
         // add character to cached content
         token.add(c);
       }
     } else {
       if (!f && !isCDATA()) {
         text = false;
         prev(1);
         if (chop) token.trim();
         return;
       }
       cDATA();
     }
     c = consume();
     f = false;
   }
   // end of file
   if (!fragment) {
     if (!ws(token.finish())) error(AFTERROOT);
     type = Type.EOF;
   }
 }
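The `]` branch above reads ahead up to two characters and rewinds with `prev(1)` so that a lone `]]` is kept while a literal `]]>` in character content is rejected. The following standalone sketch mimics that `consume()`/`prev()` lookahead with made-up names and a plain `String` input; it is an illustration of the pattern, not part of the scanner or the BaseX API.

 // Standalone sketch (hypothetical names) of the consume()/prev() lookahead
 // used above to reject a literal "]]>" in character content.
 final class LookaheadSketch {
   private final String in;
   private int pos;

   LookaheadSketch(final String input) { in = input; }

   /** Returns the next character, or 0 if the end of the input was reached. */
   int consume() {
     final int c = pos < in.length() ? in.charAt(pos) : 0;
     pos++;
     return c;
   }

   /** Moves the position back by the given number of characters. */
   void prev(final int n) { pos -= n; }

   /** Scans the input and throws if a literal "]]>" occurs. */
   void scan() {
     for (int c = consume(); c != 0; c = consume()) {
       if (c == ']') {
         // ']]>' not allowed in content: look ahead two characters, then rewind
         if (consume() == ']') {
           if (consume() == '>') throw new IllegalStateException("']]>' in content");
           prev(1);
         }
         prev(1);
       }
     }
   }

   public static void main(final String[] args) {
     new LookaheadSketch("a ]] b").scan();   // fine: "]]" alone is allowed
     new LookaheadSketch("a ]]> b").scan();  // throws: "]]>" is illegal in content
   }
 }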
Example #2
 /**
  * Caches and returns all unique tokens specified in a query.
  *
  * @param list token list
  * @return token set
  */
 private TokenSet unique(final TokenList list) {
   // cache all query tokens in a set (duplicates are removed)
   final TokenSet ts = new TokenSet();
   switch (mode) {
     case ALL:
     case ANY:
       for (final byte[] t : list) ts.add(t);
       break;
     case ALL_WORDS:
     case ANY_WORD:
       final FTLexer l = new FTLexer(ftt.opt);
       for (final byte[] t : list) {
         l.init(t);
         while (l.hasNext()) ts.add(l.nextToken());
       }
       break;
     case PHRASE:
       final TokenBuilder tb = new TokenBuilder();
       for (final byte[] t : list) tb.add(t).add(' ');
       ts.add(tb.trim().finish());
   }
   return ts;
 }
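The three branches differ only in what counts as a "token" before deduplication: ALL/ANY keeps whole query tokens, ALL_WORDS/ANY_WORD lexes each token into individual words, and PHRASE joins everything into a single space-separated phrase. The sketch below mimics this with plain JDK collections; the enum, class name, and the whitespace split standing in for the FTLexer are illustrative assumptions, not the BaseX API.

 // Standalone sketch (hypothetical names, plain JDK types) of the three
 // deduplication strategies above: whole tokens, lexed words, or one phrase.
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;

 final class UniqueSketch {
   enum Mode { ALL, ANY, ALL_WORDS, ANY_WORD, PHRASE }

   /** Collects unique query tokens according to the match mode. */
   static Set<String> unique(final List<String> list, final Mode mode) {
     final Set<String> set = new LinkedHashSet<>();
     switch (mode) {
       case ALL:
       case ANY:
         // add each query token as-is
         set.addAll(list);
         break;
       case ALL_WORDS:
       case ANY_WORD:
         // split each token into words (stands in for the FTLexer above)
         for (final String t : list) {
           for (final String w : t.trim().split("\\s+")) {
             if (!w.isEmpty()) set.add(w);
           }
         }
         break;
       case PHRASE:
         // join all tokens into a single space-separated phrase
         set.add(String.join(" ", list).trim());
         break;
     }
     return set;
   }

   public static void main(final String[] args) {
     final List<String> q = List.of("xml databases", "xml");
     System.out.println(unique(q, Mode.ANY));      // [xml databases, xml]
     System.out.println(unique(q, Mode.ANY_WORD)); // [xml, databases]
     System.out.println(unique(q, Mode.PHRASE));   // [xml databases xml]
   }
 }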