Example #1
  private void setupTokenizer() {
    st.resetSyntax();
    st.wordChars('a', 'z');
    st.wordChars('A', 'Z');
    st.wordChars('0', '9');
    st.wordChars(':', ':');
    st.wordChars('.', '.');
    st.wordChars('_', '_');
    st.wordChars('-', '-');
    st.wordChars('/', '/');
    st.wordChars('\\', '\\');
    st.wordChars('$', '$');
    st.wordChars('{', '{'); // need {} for property subst
    st.wordChars('}', '}');
    st.wordChars('*', '*');
    st.wordChars('+', '+');
    st.wordChars('~', '~');
    // XXX check ASCII table and add all other characters except special

    // special: #="(),
    st.whitespaceChars(0, ' ');
    st.commentChar('#');
    st.eolIsSignificant(true);
    st.quoteChar('\"');
  }
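
A minimal usage sketch (assumes st is the StreamTokenizer field that setupTokenizer() configures; the input line is hypothetical):

  st = new StreamTokenizer(new StringReader("dest=/usr/${HOME}/bin # comment"));
  setupTokenizer();
  // nextToken() now yields the word "dest", the ordinary char '=', and the
  // single word "/usr/${HOME}/bin" (the '{' and '}' word chars keep the
  // ${...} property reference in one token), while the '#' comment is skipped.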
Example #2
 public void loadTree() {
   System.out.println("Loading tree");
   StreamTokenizer stream = null;
   try {
     FileInputStream f = new FileInputStream(tree);
     Reader input = new BufferedReader(new InputStreamReader(f));
     stream = new StreamTokenizer(input);
     stream.resetSyntax();
     stream.wordChars(33, 126); // printable ASCII, excluding space
     stream.whitespaceChars(0, ' '); // control characters and space separate tokens
     stream.parseNumbers(); // re-mark digits as numeric so the TT_NUMBER branch below can match
   } catch (Exception e) {
     System.out.println("Error opening " + tree);
     System.exit(1);
   }
   list = new ArrayList();
   try {
     // read the file to the end
     while (stream.nextToken() != StreamTokenizer.TT_EOF) {
       // a word token was read
       if (stream.ttype == StreamTokenizer.TT_WORD) {
         list.add(stream.sval);
       }
       // a number token was read
       if (stream.ttype == StreamTokenizer.TT_NUMBER) {
         list.add(Double.valueOf(stream.nval));
       }
     }
   } catch (Exception e) {
     System.out.println("\nError reading " + tree + ". Exiting...");
     System.exit(1);
   }
 }
Example #3
  /** Create an HTTP tokenizer, given a StreamTokenizer for the web page. */
  public HttpTokenizer(StreamTokenizer tokens) throws IOException {
    // Keep a reference to the caller's tokenizer
    this.tokens = tokens;

    // Set up the appropriate defaults
    tokens.eolIsSignificant(false);
    tokens.lowerCaseMode(true);
    tokens.wordChars('<', '<');
    tokens.wordChars('>', '>');
    tokens.wordChars('/', '/');
    tokens.wordChars('=', '=');
    tokens.wordChars('@', '@');
    tokens.wordChars('!', '!');
    tokens.wordChars('-', '-');
    tokens.ordinaryChar('.');
    tokens.ordinaryChar('?');
  }
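
A hypothetical illustration of the settings above (assumes the tokenizer handed in still uses the default StreamTokenizer syntax table):

  StreamTokenizer t = new StreamTokenizer(new StringReader("<A HREF=index.html>"));
  HttpTokenizer http = new HttpTokenizer(t);
  // nextToken() now yields the words "<a" and "href=index", the ordinary
  // char '.', and the word "html>": lowerCaseMode(true) folds every word
  // to lower case, and ordinaryChar('.') keeps dots out of words.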
Example #4
  /**
   * Create and configure the lexical analyzer.
   *
   * @param reader the input stream reader
   * @return an s-expression lexer
   */
  private StreamTokenizer createLexer(Reader reader) {
    StreamTokenizer tokenizer = new StreamTokenizer(reader);

    tokenizer.resetSyntax();
    tokenizer.eolIsSignificant(false);
    tokenizer.whitespaceChars(0, ' ');
    tokenizer.wordChars('!', '!');
    tokenizer.wordChars('*', 'z');

    return tokenizer;
  }
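
A minimal, hypothetical use of the lexer ('(' and ')' fall below '*' in ASCII, so they stay ordinary characters rather than joining words):

  StreamTokenizer lex = createLexer(new StringReader("(+ 1 25)"));
  // nextToken() yields '(' as an ordinary char, the words "+", "1" and
  // "25", then ')' as an ordinary char, and finally TT_EOF.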
Example #5
  public static Map<String, String> getStyles(String str) throws IOException {
    HashMap<String, String> styles = new HashMap<String, String>();
    if (str == null) return styles;

    StreamTokenizer tt = new StreamTokenizer(new StringReader(str));
    tt.resetSyntax();
    tt.wordChars('!', '9');
    tt.wordChars('<', '~');
    tt.wordChars(128 + 32, 255);
    tt.whitespaceChars(0, ' ');

    while (tt.nextToken() != StreamTokenizer.TT_EOF) {
      if (tt.ttype != ';') {
        String key, value;
        if (tt.ttype != StreamTokenizer.TT_WORD) {
          throw new IOException(
              "Key token expected in " + str + " " + Integer.toHexString(tt.ttype));
        }
        key = tt.sval;
        if (tt.nextToken() != ':') {
          throw new IOException("Colon expected after " + key + " in " + str);
        }
        if (tt.nextToken() != StreamTokenizer.TT_WORD) {
          throw new IOException(
              "Value token expected after " + key + " in " + str + " " + tt.ttype);
        }
        value = tt.sval;
        while (tt.nextToken() == StreamTokenizer.TT_WORD) {
          value += ' ' + tt.sval;
        }
        tt.pushBack();
        styles.put(key, value);
      }
    }

    return styles;
  }
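
A minimal usage sketch (the style string is hypothetical):

  Map<String, String> styles = getStyles("fill:red; stroke-width:1.5");
  // styles now maps "fill" -> "red" and "stroke-width" -> "1.5"; the
  // separators ':' and ';' (ASCII 58 and 59) sit in the gap between the
  // two wordChars ranges, so they come back as ordinary characters.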
Example #6
  public CSVReader(BufferedReader input, char customizedSeparator) {
    this.separator = customizedSeparator;

    parser = new StreamTokenizer(input);
    parser.ordinaryChars(0, 255);
    parser.wordChars(0, 255);
    parser.ordinaryChar('\"');
    parser.ordinaryChar(customizedSeparator);

    // EOL significance must be set after the ordinary and word chars above,
    // and \n and \r must explicitly be made whitespace chars for EOL
    // detection to work
    parser.eolIsSignificant(true);
    parser.whitespaceChars('\n', '\n');
    parser.whitespaceChars('\r', '\r');
    atEOF = false;
  }
Example #7
  public String[] parseTokens(String line) throws IOException {
    List<String> tokens = new ArrayList<String>();

    StreamTokenizer st = new StreamTokenizer(new StringReader(line));
    st.parseNumbers();
    st.wordChars('_', '_'); // A word can be THIS_IS_A_WORD

    int token = st.nextToken();
    while (token != StreamTokenizer.TT_EOF) {
      String element = null;
      switch (token) {
        case StreamTokenizer.TT_NUMBER:
          element = String.valueOf(st.nval);
          break;
        case StreamTokenizer.TT_WORD:
          element = st.sval;
          break;
        case '"':
        case '\'':
          element = st.sval;
          break;
        case StreamTokenizer.TT_EOL:
          break;
        case StreamTokenizer.TT_EOF:
          break;
        default:
          element = String.valueOf((char) st.ttype);
          break;
      }
      if (element != null) tokens.add(element);
      token = st.nextToken();
    }

    return tokens.toArray(new String[tokens.size()]);
  }
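
A minimal usage sketch (the input line is hypothetical):

  String[] tokens = parseTokens("count 42 \"two words\"");
  // tokens is { "count", "42.0", "two words" }; numeric tokens come back
  // through String.valueOf(st.nval), hence the trailing ".0".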
Example #8
  public void run() {
    Socket sock = null;

    try {
      int code = StreamTokenizer.TT_EOL;
      FileReader reader = new FileReader(filename);
      StreamTokenizer tokenizer = new StreamTokenizer(reader);
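      // make digits word characters (not numbers) so each hex byte arrives as a word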
      tokenizer.ordinaryChars('0', '9');
      tokenizer.wordChars('0', '9');
      tokenizer.slashSlashComments(true);

      System.out.println("Connecting to socket 10576.");
      try {
        sock = new Socket("127.0.0.1", 10576);
        System.out.println("Connection to socket 10576 established.");
      } catch (Exception e) {
        System.out.println(
            "Inputting packets from file must be done while running Tossim with the -ri option");
        System.exit(-1);
      }

      DataOutputStream output = new DataOutputStream(sock.getOutputStream());

      while (true) {
        code = tokenizer.nextToken();
        if (code == StreamTokenizer.TT_EOF) {
          break;
        } else if (code == StreamTokenizer.TT_EOL) {
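          // end-of-line markers are ignored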
        } else if (code == StreamTokenizer.TT_WORD) {
          String word = tokenizer.sval;
          long lval = Long.parseLong(word);

          code = tokenizer.nextToken();
          if (code != StreamTokenizer.TT_WORD) {
            break;
          }
          word = tokenizer.sval;
          short sval = Short.parseShort(word);

          byte[] data = new byte[36];
          for (int i = 0; i < 36; i++) {
            code = tokenizer.nextToken();
            if (code != StreamTokenizer.TT_WORD) {
              break;
            }
            String datum = tokenizer.sval;
            try {
              data[i] = (byte) (Integer.parseInt(datum, 16) & 0xff);
            } catch (NumberFormatException e) {
              System.out.println(e);
              System.out.println(datum);
            }
          }

          output.writeLong(lval);
          output.writeShort(sval);
          output.write(data);
        } else if (code == StreamTokenizer.TT_NUMBER) {
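          // numeric tokens are ignored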
        }
      }
    } catch (Exception exception) {
      System.err.println("Exception thrown.");
      exception.printStackTrace();
    } finally {
      try {
        if (sock != null) sock.close();
      } catch (Exception e) {
      }
    }
  }
Example #9
  public static AffineTransform getTransform(String str) throws IOException {
    AffineTransform t = new AffineTransform();

    if (str != null) {

      StreamTokenizer tt = new StreamTokenizer(new StringReader(str));
      tt.resetSyntax();
      tt.wordChars('a', 'z');
      tt.wordChars('A', 'Z');
      tt.wordChars(128 + 32, 255);
      tt.whitespaceChars(0, ' ');
      tt.whitespaceChars(',', ',');
      tt.parseNumbers();

      while (tt.nextToken() != StreamTokenizer.TT_EOF) {
        if (tt.ttype != StreamTokenizer.TT_WORD) {
          throw new IOException("Illegal transform " + str);
        }
        String type = tt.sval;
        if (tt.nextToken() != '(') {
          throw new IOException("'(' not found in transform " + str);
        }
        if (type.equals("matrix")) {
          double[] m = new double[6];
          for (int i = 0; i < 6; i++) {
            if (tt.nextToken() != StreamTokenizer.TT_NUMBER) {
              throw new IOException(
                  "Matrix value "
                      + i
                      + " not found in transform "
                      + str
                      + " token:"
                      + tt.ttype
                      + " "
                      + tt.sval);
            }
            // Stitch scientific notation back together: StreamTokenizer splits
            // "1.5E3" into the number 1.5 and the word "E3".
            if (tt.nextToken() == StreamTokenizer.TT_WORD && tt.sval.startsWith("E")) {
              tt.nval = Double.parseDouble(tt.nval + tt.sval);
            } else {
              tt.pushBack();
            }
            m[i] = tt.nval;
          }
          t.concatenate(new AffineTransform(m));

        } else if (type.equals("translate")) {
          double tx, ty;
          if (tt.nextToken() != StreamTokenizer.TT_NUMBER) {
            throw new IOException("X-translation value not found in transform " + str);
          }
          tx = tt.nval;
          if (tt.nextToken() == StreamTokenizer.TT_NUMBER) {
            ty = tt.nval;
          } else {
            tt.pushBack();
            ty = 0;
          }
          t.translate(tx, ty);

        } else if (type.equals("scale")) {
          double sx, sy;
          if (tt.nextToken() != StreamTokenizer.TT_NUMBER) {
            throw new IOException("X-scale value not found in transform " + str);
          }
          sx = tt.nval;
          if (tt.nextToken() == StreamTokenizer.TT_NUMBER) {
            sy = tt.nval;
          } else {
            tt.pushBack();
            sy = sx;
          }
          t.scale(sx, sy);

        } else if (type.equals("rotate")) {
          double angle, cx, cy;
          if (tt.nextToken() != StreamTokenizer.TT_NUMBER) {
            throw new IOException("Angle value not found in transform " + str);
          }
          angle = tt.nval;
          if (tt.nextToken() == StreamTokenizer.TT_NUMBER) {
            cx = tt.nval;
            if (tt.nextToken() != StreamTokenizer.TT_NUMBER) {
              throw new IOException("Y-center value not found in transform " + str);
            }
            cy = tt.nval;
          } else {
            tt.pushBack();
            cx = cy = 0;
          }
          // cx and cy are coordinates, not angles; only the angle converts to radians
          t.rotate(angle * Math.PI / 180d, cx, cy);

        } else if (type.equals("skewX")) {
          double angle;
          if (tt.nextToken() != StreamTokenizer.TT_NUMBER) {
            throw new IOException("Skew angle not found in transform " + str);
          }
          angle = tt.nval;
          t.concatenate(new AffineTransform(1, 0, Math.tan(angle * Math.PI / 180), 1, 0, 0));

        } else if (type.equals("skewY")) {
          double angle;
          if (tt.nextToken() != StreamTokenizer.TT_NUMBER) {
            throw new IOException("Skew angle not found in transform " + str);
          }
          angle = tt.nval;
          t.concatenate(new AffineTransform(1, Math.tan(angle * Math.PI / 180), 0, 1, 0, 0));

        } else {
          throw new IOException("Unknown transform " + type + " in " + str);
        }
        if (tt.nextToken() != ')') {
          throw new IOException("')' not found in transform " + str);
        }
      }
    }
    return t;
  }
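
A minimal usage sketch (the transform string is hypothetical):

  AffineTransform t = getTransform("translate(10 20) scale(2)");
  // t's matrix is now (2, 0, 0, 2, 10, 20), the same as
  // new AffineTransform(2, 0, 0, 2, 10, 20).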
Example #10
  /**
   * Return an interned VarInfoAux that represents a given string. Elements are separated by commas,
   * in the form:
   *
   * <p>x = a, "a key" = "a value"
   *
   * <p>Parsing allows for quoted elements. Whitespace to the left and right of keys and values does
   * not matter, but whitespace within them does.
   */
  public static /*@Interned*/ VarInfoAux parse(String inString) throws IOException {
    Reader inStringReader = new StringReader(inString);
    StreamTokenizer tok = new StreamTokenizer(inStringReader);
    tok.resetSyntax();
    tok.wordChars(0, Integer.MAX_VALUE);
    tok.quoteChar('\"');
    tok.whitespaceChars(' ', ' ');
    tok.ordinaryChar('[');
    tok.ordinaryChar(']');
    tok.ordinaryChars(',', ',');
    tok.ordinaryChars('=', '=');
    Map</*@Interned*/ String, /*@Interned*/ String> map = theDefault.map;

    String key = "";
    String value = "";
    boolean seenEqual = false;
    boolean insideVector = false;
    for (int tokInfo = tok.nextToken();
        tokInfo != StreamTokenizer.TT_EOF;
        tokInfo = tok.nextToken()) {
      @SuppressWarnings("interning") // initialization-checking pattern
      boolean mapUnchanged = (map == theDefault.map);
      if (mapUnchanged) {
        // We use default values if none are specified.  We initialize
        // here rather than above to save time when there are no tokens.

        map = new HashMap</*@Interned*/ String, /*@Interned*/ String>(theDefault.map);
      }

      /*@Interned*/ String token;
      if (tok.ttype == StreamTokenizer.TT_WORD || tok.ttype == '\"') {
        assert tok.sval != null
            : "@AssumeAssertion(nullness): representation invariant of StreamTokenizer";
        token = tok.sval.trim().intern();
      } else {
        token = ((char) tok.ttype + "").intern();
      }

      debug.fine("Token info: " + tokInfo + " " + token);

      if (token == "[") { // interned
        if (!seenEqual) throw new IOException("Aux option did not contain an '='");
        if (insideVector) throw new IOException("Vectors cannot be nested in an aux option");
        if (value.length() > 0) throw new IOException("Cannot mix scalar and vector values");

        insideVector = true;
        value = "";
      } else if (token == "]") { // interned
        if (!insideVector) throw new IOException("']' without preceding '['");
        insideVector = false;
      } else if (token == ",") { // interned
        if (!seenEqual) throw new IOException("Aux option did not contain an '='");
        if (insideVector) throw new IOException("',' cannot be used inside a vector");
        map.put(key.intern(), value.intern());
        key = "";
        value = "";
        seenEqual = false;
      } else if (token == "=") { // interned
        if (seenEqual) throw new IOException("Aux option contained more than one '='");
        if (insideVector) throw new IOException("'=' cannot be used inside a vector");
        seenEqual = true;
      } else {
        if (!seenEqual) {
          key = (key + " " + token).trim();
        } else if (insideVector) {
          value = value + " \"" + token.trim() + "\"";
        } else {
          value = (value + " " + token).trim();
        }
      }
    }

    if (seenEqual) {
      map.put(key.intern(), value.intern());
    }

    // Interning
    VarInfoAux result = new VarInfoAux(map).intern();
    assert interningMap != null
        : "@AssumeAssertion(nullness):  application invariant:  postcondition of intern(), which was just called";
    if (debug.isLoggable(Level.FINE)) {
      debug.fine("New parse " + result);
      debug.fine("Intern table size: " + new Integer(interningMap.size()));
    }
    return result;
  }
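
A minimal usage sketch, reusing the example format from the Javadoc above:

  VarInfoAux aux = VarInfoAux.parse("x = a, \"a key\" = \"a value\"");
  // aux carries the mappings x -> a and "a key" -> "a value", layered
  // over the defaults and interned.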
Example #11
 /**
  * Make the tokenizer treat the given character as a word (letter) character.
  *
  * @param c the character to recognize as a letter
  */
 public static void recognizeAsLetter(char c) {
   tokenizer.wordChars(c, c);
 }