Beispiel #1
0
 /**
  * Counts the number of line terminators in the file "mf.dat".
  *
  * @return the number of end-of-line tokens seen before end of file
  * @throws IOException if the file cannot be opened or read
  */
 int ct() throws IOException {
   // try-with-resources: the original leaked the FileReader on every call.
   try (FileReader fr = new FileReader("mf.dat")) {
     StreamTokenizer st = new StreamTokenizer(fr);
     st.eolIsSignificant(true); // report each '\n' as its own token
     int ctr = 0;
     // Named constants replace the raw ttype values (-1 was TT_EOF, 10 is '\n').
     for (int tok = st.nextToken(); tok != StreamTokenizer.TT_EOF; tok = st.nextToken()) {
       if (tok == '\n') {
         ctr++;
       }
       // TT_WORD (-3) and TT_NUMBER (-2) tokens are deliberately ignored,
       // exactly as the original empty switch cases did.
     }
     return ctr;
   }
 }
Beispiel #2
0
 /**
  * Parses the children of a layout split node until the matching ')' is read.
  *
  * <p>A bare word is either the WEIGHT attribute or the name of a leaf child;
  * '(' opens a nested LEAF, ROW or COLUMN node.
  *
  * @param st tokenizer positioned just inside the split's opening parenthesis
  * @param parent the split to attach parsed children to
  * @throws Exception on malformed input or a tokenizer failure
  */
 private static void parseSplit(StreamTokenizer st, Split parent) throws Exception {
   int token;
   while ((token = st.nextToken()) != StreamTokenizer.TT_EOF) {
     if (token == ')') {
       return; // end of this split's child list
     }
     if (token == StreamTokenizer.TT_WORD) {
       if (st.sval.equalsIgnoreCase("WEIGHT")) {
         parseAttribute(st.sval, st, parent);
       } else {
         addSplitChild(parent, new Leaf(st.sval));
       }
     } else if (token == '(') {
       // '(' introduces a nested node; the next token names its type.
       if (st.nextToken() != StreamTokenizer.TT_WORD) {
         throwParseException(st, "invalid node type");
       }
       String nodeType = st.sval.toUpperCase();
       if (nodeType.equals("LEAF")) {
         parseLeaf(st, parent);
       } else if (nodeType.equals("ROW") || nodeType.equals("COLUMN")) {
         Split nested = new Split();
         nested.setRowLayout(nodeType.equals("ROW"));
         addSplitChild(parent, nested);
         parseSplit(st, nested); // recurse into the nested split
       } else {
         throwParseException(st, "unrecognized node type '" + nodeType + "'");
       }
     }
   }
 }
Beispiel #3
0
  /**
   * Rebuilds a jagged int matrix from its space-separated text form.
   *
   * <p>Expected layout: row count, then for each row its length followed by
   * that many values, all separated by single spaces.
   *
   * @param str the serialized matrix
   * @return the reconstructed matrix
   * @throws IOException if the tokenizer fails (not expected for a StringReader)
   */
  private static int[][] deserialize(String str) throws IOException {
    StreamTokenizer lexer = new StreamTokenizer(new StringReader(str));
    lexer.resetSyntax();
    lexer.wordChars('0', '9');
    lexer.whitespaceChars(' ', ' ');
    lexer.parseNumbers(); // digits (and '-') now produce numeric tokens

    lexer.nextToken();
    int rowCount = (int) lexer.nval;
    int[][] result = new int[rowCount][];

    for (int r = 0; r < rowCount; r++) {
      lexer.nextToken();
      int[] row = new int[(int) lexer.nval];
      result[r] = row;
      for (int c = 0; c < row.length; c++) {
        lexer.nextToken();
        row[c] = (int) lexer.nval;
      }
    }

    return result;
  }
 /** @tests java.io.StreamTokenizer#pushBack() */
 public void test_pushBack() throws IOException {
   // Read one token, push it back, and verify the replay yields the same kind.
   setTest("Hello 897");
   st.nextToken();
   st.pushBack();
   int replayed = st.nextToken();
   assertTrue("PushBack failed.", replayed == StreamTokenizer.TT_WORD);
 }
Beispiel #5
0
  /**
   * Parses the S-Expression from the lexer output. The lexer should be positioned on the first
   * symbol after the opening parenthesis.
   *
   * <p>Enclosing expressions are kept on {@code stack} (a field of this class): '(' pushes the
   * expression under construction and starts a new one; ')' attaches the finished expression to
   * its parent and resumes the parent. Words that parse as integers become int values, all other
   * words are added as symbols.
   *
   * @return the parse tree of the input
   * @throws IOException if a read error occurs in the lexer
   * @throws ParsingException if the input cannot be parsed successfully
   */
  private Expression parseSymbolicExpression() throws IOException, ParsingException {
    // Expression currently being built, rooted at the symbol under the lexer.
    Expression expr = new Expression(lexer.sval);

    int t = lexer.nextToken();
    while (t != StreamTokenizer.TT_EOF) {
      switch (t) {
        case ')':
          // With no enclosing expression, this ')' closes the whole input.
          if (stack.empty()) return expr;
          stack.peek().addOperand(expr);
          expr = stack.pop();
          break;
        case '(': // descend into a sub-expression
          stack.push(expr);
          if (lexer.nextToken() != StreamTokenizer.TT_WORD) {
            throw new ParsingException("Expected symbol. Got: " + lexer.ttype);
          }
          expr = new Expression(lexer.sval);
          break;
        case StreamTokenizer.TT_WORD:
          try {
            // test for a number
            expr.addOperand(Value.newInt(Integer.parseInt(lexer.sval)));
          } catch (NumberFormatException ignored) {
            // fall back on a symbol
            expr.addOperand(lexer.sval);
          }
          break;
        default:
          throw new ParsingException("Unknown token type: " + lexer.ttype);
      }
      t = lexer.nextToken();
    }

    // Input ended before the outermost ')' was seen.
    throw new ParsingException("Expected end of input.  Got: " + lexer.ttype);
  }
Beispiel #6
0
  /**
   * Ensures the program's first token is '(' and is followed by a symbol.
   *
   * <p>On success the lexer is left positioned on that first symbol.
   *
   * @throws IOException if reading the input fails
   * @throws ParsingException if the input does not start with "(symbol"
   */
  private void checkStart() throws ParsingException, IOException {
    // nextToken() returns the value it stores in ttype, so the return value
    // can be tested directly.
    if (lexer.nextToken() != '(') {
      throw new ParsingException("Program does not begin with '('.");
    }
    if (lexer.nextToken() != StreamTokenizer.TT_WORD) {
      throw new ParsingException("Expected symbol. Got: " + lexer.ttype);
    }
  }
 /** @tests java.io.StreamTokenizer#slashSlashComments(boolean) */
 public void test_slashSlashCommentsZ() throws IOException {
   // '/' is made ordinary before enabling "//" comments; the first slash is
   // expected back as a plain character token, then a word follows.
   setTest("// foo \r\n /fiji \r\n -456");
   st.ordinaryChar('/');
   st.slashSlashComments(true);
   assertEquals("Test failed.", '/', st.nextToken());
   int token = st.nextToken();
   assertTrue("Test failed.", token == StreamTokenizer.TT_WORD);
 }
  /** @tests java.io.StreamTokenizer#slashStarComments(boolean) */
  public void test_slashStarComments_withSTClosed() throws IOException {
    // With slash-star comments disabled, the default handling of '/' (a
    // comment character) swallows the rest of the line after the first word.
    Reader reader = new CharArrayReader("t /* t */ t".toCharArray());
    StreamTokenizer tokenizer = new StreamTokenizer(reader);
    tokenizer.slashStarComments(false);

    assertEquals(StreamTokenizer.TT_WORD, tokenizer.nextToken());
    assertEquals(StreamTokenizer.TT_EOF, tokenizer.nextToken());
  }
  /** @tests java.io.StreamTokenizer#slashSlashComments(boolean) */
  public void test_slashSlashComments_withSSOpen() throws IOException {
    // With "//" comments enabled, everything after the first word is skipped.
    Reader reader = new CharArrayReader("t // t t t".toCharArray());
    StreamTokenizer tokenizer = new StreamTokenizer(reader);
    tokenizer.slashSlashComments(true);

    assertEquals(StreamTokenizer.TT_WORD, tokenizer.nextToken());
    assertEquals(StreamTokenizer.TT_EOF, tokenizer.nextToken());
  }
 /** @tests java.io.StreamTokenizer#resetSyntax() */
 public void test_resetSyntax() throws IOException {
   // SM
   // After resetSyntax() every character is "ordinary", so each input
   // character — including the blank — comes back as its own char token.
   // NOTE: the assertion messages read st.ttype before nextToken() runs
   // (left-to-right argument evaluation), so keep the call order intact.
   setTest("H 9\' ello World");
   st.resetSyntax();
   assertTrue("resetSyntax failed1." + (char) st.ttype, st.nextToken() == 'H');
   assertTrue("resetSyntax failed1." + (char) st.ttype, st.nextToken() == ' ');
   assertTrue("resetSyntax failed2." + (char) st.ttype, st.nextToken() == '9');
   assertTrue("resetSyntax failed3." + (char) st.ttype, st.nextToken() == '\'');
 }
 /** @tests java.io.StreamTokenizer#lineno() */
 public void test_lineno() throws IOException {
   // lineno() starts at 1 and advances each time a newline is consumed.
   setTest("d\n 8\n");
   assertEquals("the lineno should be 1", 1, st.lineno());
   st.nextToken(); // reads 'd'
   st.nextToken(); // reads 8, crossing the first newline
   assertEquals("the lineno should be 2", 2, st.lineno());
   st.nextToken(); // reads EOF, crossing the second newline
   assertEquals("the next line no should be 3", 3, st.lineno());
 }
 /** @tests java.io.StreamTokenizer#parseNumbers() */
 public void test_parseNumbers() throws IOException {
   setTest("9.9 678");
   int first = st.nextToken();
   assertTrue("Base behavior failed.", first == StreamTokenizer.TT_NUMBER);
   // Making the digits ordinary splits "678" into single characters.
   st.ordinaryChars('0', '9');
   assertEquals("setOrdinary failed.", '6', st.nextToken());
   // Re-enabling number parsing restores TT_NUMBER tokens.
   st.parseNumbers();
   int last = st.nextToken();
   assertTrue("parseNumbers failed.", last == StreamTokenizer.TT_NUMBER);
 }
  /**
   * Construct a new instance and configure it.
   *
   * <p>Reads indexing flags, match-tracking settings, the tokenizer class and the optional
   * stop-word file from the supplied configuration.
   *
   * @param broker the database broker this engine works through
   * @param conf the configuration to read properties from
   */
  public TextSearchEngine(DBBroker broker, Configuration conf) {
    this.broker = broker;
    this.config = conf;
    String stopword, tokenizerClass;
    Boolean num, stemming, termFrequencies;
    if ((num = (Boolean) config.getProperty(PROPERTY_INDEX_NUMBERS)) != null)
      indexNumbers = num.booleanValue();
    if ((stemming = (Boolean) config.getProperty(PROPERTY_STEM)) != null)
      stem = stemming.booleanValue();
    if ((termFrequencies = (Boolean) config.getProperty(PROPERTY_STORE_TERM_FREQUENCY)) != null)
      termFreq = termFrequencies.booleanValue();
    String track = (String) config.getProperty(Serializer.PROPERTY_TAG_MATCHING_ELEMENTS);
    if (track != null)
      trackMatches =
          track.equalsIgnoreCase("yes") ? Serializer.TAG_ELEMENT_MATCHES : Serializer.TAG_NONE;
    track = (String) config.getProperty(Serializer.PROPERTY_TAG_MATCHING_ATTRIBUTES);
    if (track != null && track.equalsIgnoreCase("yes"))
      trackMatches = trackMatches | Serializer.TAG_ATTRIBUTE_MATCHES;

    // Load the configured tokenizer class reflectively; on any failure fall
    // through to the SimpleTokenizer default below.
    if ((tokenizerClass = (String) config.getProperty(PROPERTY_TOKENIZER)) != null) {
      try {
        Class tokClass = Class.forName(tokenizerClass);
        tokenizer = (Tokenizer) tokClass.newInstance();
        LOG.debug("using tokenizer: " + tokenizerClass);
      } catch (ClassNotFoundException e) {
        LOG.debug(e);
      } catch (InstantiationException e) {
        LOG.debug(e);
      } catch (IllegalAccessException e) {
        LOG.debug(e);
      }
    }
    if (tokenizer == null) {
      LOG.debug("using simple tokenizer");
      tokenizer = new SimpleTokenizer();
    }

    if (stem) stemmer = new PorterStemmer();
    tokenizer.setStemming(stem);
    if ((stopword = (String) config.getProperty(PROPERTY_STOPWORD_FILE)) != null) {
      // try-with-resources: the original never closed the FileReader.
      try (FileReader in = new FileReader(stopword)) {
        StreamTokenizer tok = new StreamTokenizer(in);
        // BUG FIX: the original `continue`d on non-word tokens without calling
        // nextToken() again, spinning forever on e.g. a number in the file.
        // The tokenizer now advances on every iteration.
        for (int next = tok.nextToken();
            next != StreamTokenizer.TT_EOF;
            next = tok.nextToken()) {
          if (next == StreamTokenizer.TT_WORD) stoplist.add(tok.sval);
        }
      } catch (FileNotFoundException e) {
        LOG.debug(e);
      } catch (IOException e) {
        LOG.debug(e);
      }
    }
  }
 /** @tests java.io.StreamTokenizer#quoteChar(int) */
 public void test_quoteCharI() throws IOException {
   setTest("<Hello World<    HelloWorldH");
   // '<' becomes a quote character, so "<Hello World<" is one quoted token.
   st.quoteChar('<');
   int quote = st.nextToken();
   assertEquals("QuoteChar failed.", '<', quote);
   assertEquals("QuoteChar failed.", "Hello World", st.sval);
   // Making 'H' a quote character turns "HelloWorldH" into quoted "elloWorld".
   st.quoteChar('H');
   st.nextToken();
   assertEquals("QuoteChar failed for word.", "elloWorld", st.sval);
 }
Beispiel #15
0
  /**
   * Parses a DOT edge statement whose source node sits at index {@code nindex}.
   *
   * <p>Handles the directed forms {@code -> node} and {@code -> {n1 n2 ...}}; each resolved edge
   * is appended to {@code m_edges} unless already present. A trailing {@code [...]} attribute
   * list, if present, is delegated to {@code edgeAttrib}; otherwise the token is pushed back.
   *
   * @param tk tokenizer positioned on the edge operator
   * @param nindex index of the source node in m_nodes
   * @throws Exception if tokenizing or node parsing fails
   */
  protected void edgeStmt(StreamTokenizer tk, final int nindex) throws Exception {
    tk.nextToken();

    GraphEdge e = null;
    if (tk.ttype == '>') {
      // Directed edge: the target is a brace-enclosed node list or one node.
      tk.nextToken();
      if (tk.ttype == '{') {
        while (true) {
          tk.nextToken();
          if (tk.ttype == '}') {
            break;
          } else {
            // One edge per node in the list; duplicates are skipped.
            nodeID(tk);
            e = new GraphEdge(nindex, m_nodes.indexOf(new GraphNode(tk.sval, null)), DIRECTED);
            if (m_edges != null && !(m_edges.contains(e))) {
              m_edges.add(e);
              // System.out.println("Added edge from "+
              // ((GraphNode)(m_nodes.get(nindex))).ID+
              // " to "+
              // ((GraphNode)(m_nodes.get(e.dest))).ID);
            }
          }
        }
      } else {
        // Single target node.
        nodeID(tk);
        e = new GraphEdge(nindex, m_nodes.indexOf(new GraphNode(tk.sval, null)), DIRECTED);
        if (m_edges != null && !(m_edges.contains(e))) {
          m_edges.add(e);
          // System.out.println("Added edge from "+
          // ((GraphNode)(m_nodes.get(nindex))).ID+" to "+
          // ((GraphNode)(m_nodes.get(e.dest))).ID);
        }
      }
    } else if (tk.ttype == '-') {
      System.err.println("Error at line " + tk.lineno() + ". Cannot deal with undirected edges");
      // NOTE(review): ttype is '-' in this branch, so this TT_WORD check looks
      // unreachable and the pushBack() dead — confirm against the grammar.
      if (tk.ttype == StreamTokenizer.TT_WORD) {
        tk.pushBack();
      }
      return;
    } else {
      System.err.println("Error at line " + tk.lineno() + " in edgeStmt");
      // Push a word token back so the caller can resynchronize on it.
      if (tk.ttype == StreamTokenizer.TT_WORD) {
        tk.pushBack();
      }
      return;
    }

    tk.nextToken();

    // Optional attribute list; otherwise leave the token for the caller.
    if (tk.ttype == '[') {
      edgeAttrib(tk, e);
    } else {
      tk.pushBack();
    }
  }
  /** @tests java.io.StreamTokenizer#slashSlashComments(boolean) */
  public void test_slashSlashComments_withSSClosed() throws IOException {
    // With "//" comments disabled and '/' made ordinary, both slashes come
    // back as individual character tokens, followed by the word.
    Reader reader = new CharArrayReader("// t".toCharArray());
    StreamTokenizer tokenizer = new StreamTokenizer(reader);
    tokenizer.slashSlashComments(false);
    tokenizer.ordinaryChar('/');

    assertEquals('/', tokenizer.nextToken());
    assertEquals('/', tokenizer.nextToken());
    assertEquals(StreamTokenizer.TT_WORD, tokenizer.nextToken());
  }
  /** @tests java.io.StreamTokenizer#toString() */
  public void test_toString() throws IOException {
    setTest("ABC Hello World");
    st.nextToken();
    String description = st.toString();
    assertTrue("toString failed." + description, description.equals("Token[ABC], line 1"));

    // Regression test for HARMONY-4070: a lone '-' must print as Token['-'].
    byte[] data = new byte[] {(byte) '-'};
    StreamTokenizer tokenizer = new StreamTokenizer(new ByteArrayInputStream(data));
    tokenizer.nextToken();
    assertEquals("Token['-'], line 1", tokenizer.toString());
  }
  /**
   * Reads test cases from stdin until EOF. Each case supplies a size n, a parameter c (clamped
   * to 3 or 4 when greater than 4), then two -1-terminated number lists; dfs(0) searches for
   * solutions, which are printed sorted, or "IMPOSSIBLE" when none exist.
   *
   * <p>NOTE(review): n, array, c, openList, closeList, success, len1, len2, resultList and
   * dfs(...) are static members declared elsewhere in this class.
   */
  public static void main(String[] args) throws Exception {
    StreamTokenizer st = new StreamTokenizer(new BufferedReader(new InputStreamReader(System.in)));
    while (st.nextToken() != StreamTokenizer.TT_EOF) {
      // First number of a case: the problem size.
      n = (int) st.nval;
      array = new int[n + 1];
      Arrays.fill(array, 1);
      openList = new ArrayList<Integer>();
      closeList = new ArrayList<Integer>();
      st.nextToken();
      c = (int) st.nval;
      // Values above 4 collapse to 3 (odd) or 4 (even).
      if (c > 4) {
        if (c % 2 == 1) {
          c = 3;
        } else {
          c = 4;
        }
      }
      // First list: numbers until the -1 sentinel.
      while (true) {
        st.nextToken();
        int num = (int) st.nval;
        if (num != -1) {
          openList.add(num);
        } else {
          break;
        }
      }

      // Second list: numbers until the -1 sentinel.
      while (true) {
        st.nextToken();
        int num = (int) st.nval;
        if (num != -1) {
          closeList.add(num);
        } else {
          break;
        }
      }
      // Reset the shared search state and run the depth-first search.
      success = false;
      len1 = openList.size();
      len2 = closeList.size();
      resultList = new ArrayList<String>();
      dfs(0);
      if (success) {
        // Print all solutions in sorted order.
        Collections.sort(resultList);
        int size = resultList.size();
        for (int i = 0; i < size; i++) {
          System.out.println(resultList.get(i));
        }
      } else {
        System.out.println("IMPOSSIBLE");
      }
    }
  }
Beispiel #19
0
 /**
  * creates a NewAnnotationAction from the input processed by StreamTokenizer <I>tok</I>, which
  * should have the form <br>
  * add [type feature=value feature=value ...] <br>
  * or <br>
  * add [type feature=value feature=value ...] over spanVariable
  */
 public NewAnnotationAction(StreamTokenizer tok) throws IOException, PatternSyntaxError {
   // Optional leading binding: an upper-case word followed by '=' names the
   // variable that will receive the new annotation.
   if (tok.nextToken() == StreamTokenizer.TT_WORD && Character.isUpperCase(tok.sval.charAt(0))) {
     bindingVariable = new Variable(tok.sval);
     if (tok.nextToken() != '=') throw new PatternSyntaxError("= expected");
     tok.nextToken();
   }
   if (tok.ttype != '[') throw new PatternSyntaxError("[ expected");
   if (tok.nextToken() != StreamTokenizer.TT_WORD)
     throw new PatternSyntaxError("annotation type expected");
   type = tok.sval;
   // FeatureSet consumes the feature=value tokens up to the closing ']'.
   features = new FeatureSet(tok, true, ']');
   // Optional "over X" clause: X is either a capitalized variable name or the
   // literal number 0.
   if (tok.nextToken() == StreamTokenizer.TT_WORD && tok.sval.equalsIgnoreCase("over")) {
     if (tok.nextToken() == StreamTokenizer.TT_WORD && Character.isUpperCase(tok.sval.charAt(0))) {
       spanVariable = new Variable(tok.sval);
       tok.nextToken();
     } else if (tok.ttype == StreamTokenizer.TT_NUMBER && tok.nval == 0) {
       spanVariable = new Variable("0");
       tok.nextToken();
     } else {
       throw new PatternSyntaxError("variable expected after 'over'");
     }
   } else {
     spanVariable = null;
   }
 }
Beispiel #20
0
  /**
   * For each input triple (t, a, b) prints the geometric sum 1 + t^b + t^(2b) + ... + t^(a-b),
   * i.e. (t^a - 1)/(t^b - 1); prints "bad!" when t == 1, when b does not divide a, or when the
   * result reaches the 10^100 cap.
   *
   * <p>NOTE(review): `in` is a tokenizer declared elsewhere in this class.
   */
  public static void main(String[] args) throws Exception {
    BigInteger T = BigInteger.valueOf(0);
    BigInteger TB = BigInteger.valueOf(0);
    BigInteger NTB = BigInteger.valueOf(0);
    BigInteger S = BigInteger.valueOf(0);
    BigInteger MAX = BigInteger.valueOf(1);
    int j;
    // MAX = 10^100 acts as an overflow sentinel for all products and sums.
    for (j = 0; j < 100; j++) MAX = MAX.multiply(BigInteger.valueOf(10));
    for (; ; ) {
      int i, t, a, b;
      // Read one (t, a, b) triple; stop at the first non-number/EOF.
      if (in.nextToken() != StreamTokenizer.TT_NUMBER) break;
      t = (int) in.nval;
      if (in.nextToken() != StreamTokenizer.TT_NUMBER) break;
      a = (int) in.nval;
      if (in.nextToken() != StreamTokenizer.TT_NUMBER) break;
      b = (int) in.nval;

      // System.out.print("(");
      // System.out.print(t);
      // System.out.print("^");
      // System.out.print(a);
      // System.out.print("-1)/(");
      // System.out.print(t);
      // System.out.print("^");
      // System.out.print(b);
      // System.out.print("-1) ");
      // t == 1 divides by zero conceptually; a % b != 0 gives no integer result.
      if (t == 1 || a % b != 0) {
        System.out.print("bad!\n");
        continue;
      }

      T = BigInteger.valueOf(t);
      // TB = t^b, stopping early once the cap is reached.
      TB = BigInteger.valueOf(1);
      for (i = 0; i < b; i++) {
        TB = TB.multiply(T);
        if (TB.compareTo(MAX) >= 0) break;
      }
      // S accumulates 1 + t^b + t^(2b) + ...; NTB holds the current power.
      NTB = BigInteger.valueOf(1);
      S = BigInteger.valueOf(0);
      for (i = 0; i < a; i += b) {
        S = S.add(NTB);
        if (S.compareTo(MAX) >= 0) break;
        NTB = NTB.multiply(TB);
      }
      if (S.compareTo(MAX) >= 0) System.out.print("bad!");
      else System.out.print(S);
      System.out.print("\n");
    }
  }
 /** @tests java.io.StreamTokenizer#commentChar(int) */
 public void test_commentCharI() throws IOException {
   setTest("*comment \n / 8 'h' ");
   st.ordinaryChar('/');
   st.commentChar('*');
   // The '*' line is a comment, so the first token is the ordinary '/' (47).
   assertEquals(
       "nextToken() did not return the character / skiping the comments starting with *",
       47,
       st.nextToken());
   int second = st.nextToken();
   assertTrue(
       "the next token returned should be the digit 8",
       second == StreamTokenizer.TT_NUMBER && st.nval == 8.0);
   int third = st.nextToken();
   assertTrue(
       "the next token returned should be the quote character",
       third == 39 && st.sval.equals("h"));
 }
  /** @tests java.io.StreamTokenizer#StreamTokenizer(java.io.InputStream) */
  @SuppressWarnings("deprecation")
  public void test_ConstructorLjava_io_InputStream() throws IOException {
    // The default syntax table treats '/' as a comment start, so the whole
    // first line is skipped.
    st = new StreamTokenizer(new StringBufferInputStream("/comments\n d 8 'h'"));

    int first = st.nextToken();
    assertEquals("the next token returned should be the letter d", StreamTokenizer.TT_WORD, first);
    assertEquals("the next token returned should be the letter d", "d", st.sval);

    int second = st.nextToken();
    assertEquals(
        "the next token returned should be the digit 8", StreamTokenizer.TT_NUMBER, second);
    assertEquals("the next token returned should be the digit 8", 8.0, st.nval);

    assertEquals("the next token returned should be the quote character", 39, st.nextToken());
    assertEquals("the next token returned should be the quote character", "h", st.sval);
  }
  /** @tests java.io.StreamTokenizer#StreamTokenizer(java.io.Reader) */
  public void test_ConstructorLjava_io_Reader() throws IOException {
    // '/' starts a comment by default, so the first line is skipped.
    setTest("/testing\n d 8 'h' ");
    int first = st.nextToken();
    assertEquals(
        "the next token returned should be the letter d skipping the comments",
        StreamTokenizer.TT_WORD,
        first);
    assertEquals("the next token returned should be the letter d", "d", st.sval);

    int second = st.nextToken();
    assertEquals(
        "the next token returned should be the digit 8", StreamTokenizer.TT_NUMBER, second);
    assertEquals("the next token returned should be the digit 8", 8.0, st.nval);

    assertEquals("the next token returned should be the quote character", 39, st.nextToken());
    assertEquals("the next token returned should be the quote character", "h", st.sval);
  }
 /**
  * Reads the next token and returns it as a number.
  *
  * @return the numeric value of the next token
  * @throws IOException on an underlying read failure
  * @throws IllegalStateException if the next token is not a number
  */
 private double readNumber() throws IOException {
   final int token = in.nextToken();
   if (token != StreamTokenizer.TT_NUMBER) {
     throw new IllegalStateException("Number expected. Found: " + token);
   }
   return in.nval;
 }
 /**
  * Reads the next token and returns it as a word.
  *
  * @return the string value of the next token
  * @throws IOException on an underlying read failure
  * @throws IllegalStateException if the next token is not a word
  */
 private String readWord() throws IOException {
   final int token = in.nextToken();
   if (token != StreamTokenizer.TT_WORD) {
     throw new IllegalStateException("Word expected. Found: " + token);
   }
   return in.sval;
 }
  /**
   * Reads numbers from the stream and verifies they form a consecutive
   * ascending sequence (each value one greater than the previous).
   *
   * <p>Every violation is reported on stderr; a progress line is printed for
   * each 1000 numbers read.
   *
   * @param in the stream of whitespace-separated numbers
   * @return the count of out-of-sequence numbers
   * @throws IOException if reading fails
   */
  static int check(InputStream in) throws IOException {
    StreamTokenizer tokens = new StreamTokenizer(new BufferedReader(new InputStreamReader(in)));
    boolean havePrevious = false;
    int previous = 0;
    int read = 0;
    int incorrect = 0;

    for (int type = tokens.nextToken();
        type != StreamTokenizer.TT_EOF;
        type = tokens.nextToken()) {
      int current = (int) tokens.nval;
      if (havePrevious && current != previous + 1) {
        System.err.println(
            "Number read: "
                + current
                + ", previous number: "
                + previous
                + " (lineno: "
                + tokens.lineno()
                + ")");
        incorrect++;
      }
      havePrevious = true;
      previous = current;
      read++;
      if (read > 0 && read % 1000 == 0) System.out.println("read " + read + " numbers");
    }
    return incorrect;
  }
    /**
     * Returns the next string from the stream
     *
     * @return String read. It will be a whitespace separated word
     * @throws IOException An internal I/O failure
     * @throws EOFException when the end of the stream has been reached
     */
    public String getString() throws IOException {
      // nextToken() returns the value it stores into st.ttype.
      switch (st.nextToken()) {
        case StreamTokenizer.TT_WORD:
          return st.sval;
        case StreamTokenizer.TT_EOF:
          throw new EOFException();
        default:
          throw new RuntimeException("Internal parser error");
      }
    }
Beispiel #28
0
  /** Debug helper: prints every token produced by the ESParser scanner, one per line. */
  public static void tokenize(String fname) {
    try {
      ESParser parser = new ESParser(fname);
      StreamTokenizer scanner = parser.scanner;

      // Walk the token stream until EOF, printing each token by kind.
      while (scanner.ttype != StreamTokenizer.TT_EOF) {
        if (scanner.ttype == StreamTokenizer.TT_WORD) {
          System.out.println("WORD: " + scanner.sval);
        } else if (scanner.ttype == StreamTokenizer.TT_NUMBER) {
          System.out.println("NUM:  " + scanner.nval);
        } else if ((char) scanner.ttype == '"') {
          // string literal
          System.out.println("STRING: \"" + scanner.sval + '"');
        } else {
          System.out.println("CHAR: " + (char) scanner.ttype);
        }
        scanner.nextToken();
      }
    } catch (Throwable t) {
      t.printStackTrace();
    }
  }
Beispiel #29
0
  /**
   * Tokenizes a line of text into a Vector of values.
   *
   * <p>Number tokens become {@link Double} elements and word tokens become {@link String}
   * elements; any other token that still carries a string value (e.g. quoted text) is added as
   * a String as well.
   *
   * @param line the text to tokenize
   * @return the parsed tokens in input order
   * @throws IOException if the tokenizer fails (not expected for a StringReader)
   */
  public static Vector parse(String line) throws IOException {
    Vector c = new Vector();

    StreamTokenizer st = new StreamTokenizer(new StringReader(line));

    // Iterate through each token in the String.
    // (Was `st.TT_EOF`: a static constant accessed through an instance.)
    while (st.nextToken() != StreamTokenizer.TT_EOF) {
      if (st.ttype == StreamTokenizer.TT_NUMBER) {
        // Was the deprecated `new Double(...)`; valueOf is value-equal.
        c.add(Double.valueOf(st.nval));
      } else if (st.ttype == StreamTokenizer.TT_WORD) {
        c.add(st.sval);
      } else if (st.sval != null) {
        // Quoted tokens still expose their text through sval.
        c.add(st.sval);
      }
    }

    return c;
  }
Beispiel #30
0
  /**
   * Reads /input.txt from the classpath and reports counts of word, number and punctuation
   * tokens ('.' and '\'' are treated as ordinary characters).
   *
   * @param args unused
   */
  public static void main(String[] args) {
    // try-with-resources: the original never closed the resource stream.
    try (InputStream is = StreamTokenering.class.getResourceAsStream("/input.txt");
        InputStreamReader reader = new InputStreamReader(is)) {
      StreamTokenizer in = new StreamTokenizer(reader);

      in.ordinaryChar('.');
      in.ordinaryChar('\'');
      int wordCount = 0, numCount = 0, punctionCount = 0, count = 0;
      int token; // nextToken() returns int; the original stored it in a double
      while ((token = in.nextToken()) != StreamTokenizer.TT_EOF) {
        count++;
        if (token == StreamTokenizer.TT_WORD) {
          wordCount++;
        } else if (token == StreamTokenizer.TT_NUMBER) {
          numCount++;
        } else {
          punctionCount++;
        }
        System.out.println(in.toString());
      }
      System.out.println("单词总数为:" + count);
      System.out.println("单词数为:" + wordCount);
      System.out.println("数字数为:" + numCount);
      // Was `punctionCount++` inside the print: same printed value, but the
      // stray post-increment after the last use was pointless.
      System.out.println("标点符号数为:" + punctionCount);
    } catch (IOException e) {
      e.printStackTrace();
    }
  }