/** @tests java.io.StreamTokenizer#toString() */
  public void test_toString() throws IOException {
    setTest("ABC Hello World");
    st.nextToken();
    assertTrue("toString failed." + st.toString(), st.toString().equals("Token[ABC], line 1"));

    // Regression test for HARMONY-4070
    byte[] data = new byte[] {(byte) '-'};
    StreamTokenizer tokenizer = new StreamTokenizer(new ByteArrayInputStream(data));
    tokenizer.nextToken();
    String result = tokenizer.toString();
    assertEquals("Token['-'], line 1", result);
  }
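
For reference, a minimal standalone sketch (hypothetical class name ToStringDemo, plain JDK StreamTokenizer, not part of the test class above) illustrating the Token[...] format that the assertions above check for word, number, and ordinary-character tokens:

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class ToStringDemo {
  public static void main(String[] args) throws IOException {
    StreamTokenizer st = new StreamTokenizer(new StringReader("ABC 42 -"));
    st.ordinaryChar('-'); // treat '-' as an ordinary character rather than a number prefix
    while (st.nextToken() != StreamTokenizer.TT_EOF) {
      // Prints, in order:
      //   Token[ABC], line 1
      //   Token[n=42.0], line 1
      //   Token['-'], line 1
      System.out.println(st);
    }
  }
}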
Example #2
  public static void main(String[] args) {
    try {
      InputStream is = StreamTokenering.class.getResourceAsStream("/input.txt");
      StreamTokenizer in = new StreamTokenizer(new InputStreamReader(is));

      in.ordinaryChar('.');  // treat '.' as an ordinary character instead of part of a number
      in.ordinaryChar('\''); // treat '\'' as an ordinary character instead of a quote character
      int wordCount = 0, numCount = 0, punctuationCount = 0, count = 0;
      int token;
      while ((token = in.nextToken()) != StreamTokenizer.TT_EOF) {
        count++;
        if (token == StreamTokenizer.TT_WORD) {
          wordCount++;
        } else if (token == StreamTokenizer.TT_NUMBER) {
          numCount++;
        } else {
          punctuationCount++;
        }
        System.out.println(in.toString());
      }
      System.out.println("Total token count: " + count);
      System.out.println("Word count: " + wordCount);
      System.out.println("Number count: " + numCount);
      System.out.println("Punctuation count: " + punctuationCount);
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
 /**
  * Set the params (analyzerName only): a comma-separated list of Analyzer class names. If the
  * Analyzer lives in org.apache.lucene.analysis, the name can be shortened by dropping the
  * o.a.l.a part of the Fully Qualified Class Name.
  *
  * <p>Analyzer names may also refer to previously defined AnalyzerFactories.
  *
  * <p>Example Declaration: {"NewAnalyzer" NewAnalyzer(WhitespaceAnalyzer, SimpleAnalyzer,
  * StopAnalyzer, standard.StandardAnalyzer) >
  *
  * <p>Example AnalyzerFactory usage:
  *
  * <pre>
  * -AnalyzerFactory(name:'whitespace tokenized',WhitespaceTokenizer)
  * -NewAnalyzer('whitespace tokenized')
  * </pre>
  *
  * @param params analyzerClassName, or empty for the StandardAnalyzer
  */
 @Override
 public void setParams(String params) {
   super.setParams(params);
   final StreamTokenizer stok = new StreamTokenizer(new StringReader(params));
   stok.quoteChar('"');
   stok.quoteChar('\'');
   stok.eolIsSignificant(false);
   stok.ordinaryChar(',');
   try {
     while (stok.nextToken() != StreamTokenizer.TT_EOF) {
       switch (stok.ttype) {
         case ',':
           {
             // Do nothing
             break;
           }
         case '\'':
         case '\"':
         case StreamTokenizer.TT_WORD:
           {
             analyzerNames.add(stok.sval);
             break;
           }
         default:
           {
             throw new RuntimeException("Unexpected token: " + stok.toString());
           }
       }
     }
   } catch (RuntimeException e) {
     if (e.getMessage().startsWith("Line #")) {
       throw e;
     } else {
       throw new RuntimeException("Line #" + (stok.lineno() + getAlgLineNum()) + ": ", e);
     }
   } catch (Throwable t) {
     throw new RuntimeException("Line #" + (stok.lineno() + getAlgLineNum()) + ": ", t);
   }
 }
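
For context, a minimal standalone sketch (hypothetical class name ParamsSplitDemo; the input string is made up) showing how the StreamTokenizer configuration used above splits a comma-separated, optionally quoted params string into analyzer names:

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

public class ParamsSplitDemo {
  public static void main(String[] args) throws IOException {
    String params = "WhitespaceAnalyzer,'whitespace tokenized',SimpleAnalyzer";
    StreamTokenizer stok = new StreamTokenizer(new StringReader(params));
    stok.quoteChar('"');
    stok.quoteChar('\'');
    stok.eolIsSignificant(false);
    stok.ordinaryChar(',');

    List<String> names = new ArrayList<>();
    while (stok.nextToken() != StreamTokenizer.TT_EOF) {
      // Words and quoted strings are names; ',' separators are skipped,
      // mirroring the switch statement in setParams above.
      if (stok.ttype == StreamTokenizer.TT_WORD || stok.ttype == '\'' || stok.ttype == '"') {
        names.add(stok.sval);
      }
    }
    // Prints: [WhitespaceAnalyzer, whitespace tokenized, SimpleAnalyzer]
    System.out.println(names);
  }
}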
Example #4
 public String toString() {
   return tokens.toString();
 }