/** This method is a hack. I really want another constructor. */
public static Obj3d parseFile(InputStream is, ModelViewer modelViewer, boolean register)
    throws IOException, FileFormatException {
  StreamTokenizer st = new StreamTokenizer(new BufferedReader(new InputStreamReader(is)));
  st.eolIsSignificant(true);
  st.commentChar('#');
  Obj3d obj3d = new Obj3d(st, modelViewer, register);
  // should be at end of file
  is.close();
  if (st.ttype != StreamTokenizer.TT_EOF) {
    throw new FileFormatException(is.toString());
  }
  return obj3d;
}
/** Counts the lines in mf.dat by counting end-of-line tokens. */
int ct() throws IOException {
  FileReader fr = new FileReader("mf.dat");
  try {
    StreamTokenizer st = new StreamTokenizer(fr);
    st.eolIsSignificant(true);
    int tok = st.nextToken();
    int ctr = 0;
    while (tok != StreamTokenizer.TT_EOF) {
      switch (tok) {
        case StreamTokenizer.TT_WORD:
          break;
        case StreamTokenizer.TT_NUMBER:
          break;
        case StreamTokenizer.TT_EOL:
          ctr++;
          break;
      }
      tok = st.nextToken();
    }
    return ctr;
  } finally {
    fr.close();
  }
}
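// A minimal, self-contained sketch of the same technique (not from the original
// source): with eolIsSignificant(true), counting TT_EOL tokens counts line
// breaks, regardless of what else appears on each line.
static int countLines(java.io.Reader r) throws IOException {
  StreamTokenizer st = new StreamTokenizer(r);
  st.eolIsSignificant(true);
  int lines = 0;
  for (int tok = st.nextToken(); tok != StreamTokenizer.TT_EOF; tok = st.nextToken()) {
    if (tok == StreamTokenizer.TT_EOL) {
      lines++;
    }
  }
  return lines;
}
// Example: countLines(new java.io.StringReader("a b\nc 1\n")) returns 2.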
private void setupTokenizer() {
  st.resetSyntax();
  st.wordChars('a', 'z');
  st.wordChars('A', 'Z');
  st.wordChars('0', '9');
  st.wordChars(':', ':');
  st.wordChars('.', '.');
  st.wordChars('_', '_');
  st.wordChars('-', '-');
  st.wordChars('/', '/');
  st.wordChars('\\', '\\');
  st.wordChars('$', '$');
  st.wordChars('{', '{'); // need {} for property subst
  st.wordChars('}', '}');
  st.wordChars('*', '*');
  st.wordChars('+', '+');
  st.wordChars('~', '~');
  // XXX check ASCII table and add all other characters except special
  // special: #="(),
  st.whitespaceChars(0, ' ');
  st.commentChar('#');
  st.eolIsSignificant(true);
  st.quoteChar('\"');
}
public void setPaths(ClassLoader cl) throws FileNotFoundException, IOException {
  // Open the resource that contains all the paths. Wrapping the stream in an
  // InputStreamReader avoids the deprecated StreamTokenizer(InputStream) constructor.
  StreamTokenizer tokens =
      new StreamTokenizer(
          new InputStreamReader(cl.getResource("atlas/resources/Paths.dat").openStream()));
  String[] prefs = {"", "", "", "", "", "", ""};
  // treat end-of-line as plain whitespace, not as a token
  tokens.eolIsSignificant(false);
  try {
    for (int i = 0; i < 7 && (tokens.nextToken() != StreamTokenizer.TT_EOF); i++) {
      prefs[i] = tokens.sval;
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
  // save the paths
  adlc = prefs[0];
  berkeleyInc = prefs[1];
  berkeleyLib = prefs[2];
  adlInc = prefs[3];
  adlLib = prefs[4];
  imdbInc = prefs[5];
  imdbLib = prefs[6];
}
/** Lower-cases the input and returns its words and numbers separated by single spaces. */
public static String StreamTokenizer(StringReader reader) throws IOException {
  StringBuilder buffer = new StringBuilder();
  StreamTokenizer tokenizer = new StreamTokenizer(reader);
  tokenizer.lowerCaseMode(true);
  tokenizer.eolIsSignificant(false); // TT_EOL is never produced
  tokenizer.whitespaceChars('.', '.'); // treat periods as whitespace
  while (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
    switch (tokenizer.ttype) {
      case StreamTokenizer.TT_WORD:
        buffer.append(tokenizer.sval).append(' ');
        break;
      case StreamTokenizer.TT_NUMBER:
        buffer.append(tokenizer.nval).append(' ');
        break;
      default:
        break;
    }
  }
  return buffer.toString();
}
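// Hypothetical usage of the method above (the input text is invented for
// illustration): words are lower-cased via lowerCaseMode, and numbers come back
// through nval as doubles, hence "3.0" in the output.
String out = StreamTokenizer(new StringReader("Stop. In the Name of Love. Take 3."));
// out == "stop in the name of love take 3.0 "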
/**
 * Initializes the stream tokenizer.
 *
 * @param tokenizer the tokenizer to initialize
 */
private void initTokenizer(StreamTokenizer tokenizer) {
  tokenizer.resetSyntax();
  tokenizer.whitespaceChars(0, (' ' - 1));
  tokenizer.wordChars(' ', '\u00FF');
  tokenizer.whitespaceChars(m_FieldSeparator.charAt(0), m_FieldSeparator.charAt(0));
  tokenizer.commentChar('%');
  tokenizer.quoteChar('"');
  tokenizer.quoteChar('\'');
  tokenizer.eolIsSignificant(true);
}
/** @tests java.io.StreamTokenizer#nextToken() */
@SuppressWarnings("deprecation")
public void test_nextToken() throws IOException {
  // SM.
  setTest(
      "\r\n/* fje fje 43.4 f \r\n f g */ 456.459 \r\n"
          + "Hello / \r\n \r\n \n \r \257 Hi \'Hello World\'");
  st.ordinaryChar('/');
  st.slashStarComments(true);
  st.nextToken();
  assertTrue("Wrong Token type1: " + (char) st.ttype, st.ttype == StreamTokenizer.TT_NUMBER);
  st.nextToken();
  assertTrue("Wrong Token type2: " + st.ttype, st.ttype == StreamTokenizer.TT_WORD);
  st.nextToken();
  assertTrue("Wrong Token type3: " + st.ttype, st.ttype == '/');
  st.nextToken();
  assertTrue("Wrong Token type4: " + st.ttype, st.ttype == StreamTokenizer.TT_WORD);
  st.nextToken();
  assertTrue("Wrong Token type5: " + st.ttype, st.ttype == StreamTokenizer.TT_WORD);
  st.nextToken();
  assertTrue("Wrong Token type6: " + st.ttype, st.ttype == '\'');
  assertTrue("Wrong Token type7: " + st.ttype, st.sval.equals("Hello World"));
  st.nextToken();
  assertTrue("Wrong Token type8: " + st.ttype, st.ttype == StreamTokenizer.TT_EOF);

  final PipedInputStream pin = new PipedInputStream();
  PipedOutputStream pout = new PipedOutputStream(pin);
  pout.write("hello\n\r\r".getBytes("UTF-8"));
  StreamTokenizer s = new StreamTokenizer(pin);
  s.eolIsSignificant(true);
  assertTrue(
      "Wrong token 1,1", s.nextToken() == StreamTokenizer.TT_WORD && s.sval.equals("hello"));
  assertTrue("Wrong token 1,2", s.nextToken() == '\n');
  assertTrue("Wrong token 1,3", s.nextToken() == '\n');
  assertTrue("Wrong token 1,4", s.nextToken() == '\n');
  pout.close();
  assertTrue("Wrong token 1,5", s.nextToken() == StreamTokenizer.TT_EOF);

  StreamTokenizer tokenizer = new StreamTokenizer(new Support_StringReader("\n \r\n#"));
  tokenizer.ordinaryChar('\n'); // make \n ordinary
  tokenizer.eolIsSignificant(true);
  assertTrue("Wrong token 2,1", tokenizer.nextToken() == '\n');
  assertTrue("Wrong token 2,2", tokenizer.nextToken() == '\n');
  assertEquals("Wrong token 2,3", '#', tokenizer.nextToken());
}
/**
 * Configure the lexical analyzer.
 *
 * @param reader the input stream reader
 * @return an s-expression lexer
 */
private StreamTokenizer createLexer(Reader reader) {
  StreamTokenizer tokenizer = new StreamTokenizer(reader);
  tokenizer.resetSyntax();
  tokenizer.eolIsSignificant(false);
  tokenizer.whitespaceChars(0, ' ');
  tokenizer.wordChars('!', '!');
  tokenizer.wordChars('*', 'z');
  return tokenizer;
}
/** Creates a StreamTokenizer for reading ARFF files. */
private StreamTokenizer createTokenizer(Reader in) {
  StreamTokenizer tokenizer = new StreamTokenizer(in);
  tokenizer.resetSyntax();
  tokenizer.whitespaceChars(0, ' ');
  tokenizer.wordChars(' ' + 1, '\u00FF');
  tokenizer.whitespaceChars(',', ',');
  tokenizer.commentChar('%');
  tokenizer.quoteChar('"');
  tokenizer.quoteChar('\'');
  tokenizer.ordinaryChar('{');
  tokenizer.ordinaryChar('}');
  tokenizer.eolIsSignificant(true);
  return tokenizer;
}
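// Hypothetical usage of the ARFF tokenizer above: '{' and '}' come back as
// single-character tokens (ttype is the character itself), which is how
// sparse-format data rows are recognized.
StreamTokenizer t = createTokenizer(new StringReader("{1 X, 3 Y}\n"));
t.nextToken(); // ttype == '{'
t.nextToken(); // TT_WORD, sval == "1" (numbers are plain words after resetSyntax)
t.nextToken(); // TT_WORD, sval == "X"; the ',' separator is skipped as whitespace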
/**
 * Constructs a Parser for the given string.
 *
 * @param text The string to be parsed.
 */
public Parser(String text) {
  Reader reader = new StringReader(text);
  tokenizer = new StreamTokenizer(reader);
  tokenizer.parseNumbers();
  tokenizer.eolIsSignificant(true);
  tokenizer.slashStarComments(true);
  tokenizer.slashSlashComments(true);
  tokenizer.lowerCaseMode(false);
  // Make all ASCII punctuation ordinary so each character is its own token.
  tokenizer.ordinaryChars(33, 47); // '!' .. '/'
  tokenizer.ordinaryChars(58, 64); // ':' .. '@'
  tokenizer.ordinaryChars(91, 96); // '[' .. '`'
  tokenizer.ordinaryChars(123, 126); // '{' .. '~'
  tokenizer.quoteChar('\"');
  lineNumber = 1;
}
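// A standalone sketch (not part of the Parser class) of what slashSlashComments
// and slashStarComments buy: both comment styles are skipped silently. The '/'
// character is made ordinary first so that it is not treated as a comment
// starter on its own, matching the Parser configuration above.
StreamTokenizer t = new StreamTokenizer(new StringReader("x // note\n/* skip */ y"));
t.ordinaryChar('/');
t.slashSlashComments(true);
t.slashStarComments(true);
while (t.nextToken() != StreamTokenizer.TT_EOF) {
  if (t.ttype == StreamTokenizer.TT_WORD) {
    System.out.println(t.sval); // prints "x" then "y"
  }
}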
// s is the effective dimension if > 0, otherwise it is dim
private void readData(Reader re, int r1, int s1) throws IOException, NumberFormatException {
  try {
    StreamTokenizer st = new StreamTokenizer(re);
    st.eolIsSignificant(false);
    st.slashSlashComments(true);
    int i = st.nextToken();
    if (i != StreamTokenizer.TT_NUMBER) throw new NumberFormatException();
    b = (int) st.nval;
    st.nextToken();
    numCols = (int) st.nval;
    st.nextToken();
    numRows = (int) st.nval;
    st.nextToken();
    numPoints = (int) st.nval;
    st.nextToken();
    dim = (int) st.nval;
    if (dim < 1) {
      System.err.println(PrintfFormat.NEWLINE + "DigitalNetBase2FromFile: dimension dim <= 0");
      throw new IllegalArgumentException("dimension dim <= 0");
    }
    if (r1 > numRows)
      throw new IllegalArgumentException(
          "DigitalNetBase2FromFile: One must have r1 <= Max num rows");
    if (s1 > dim) {
      throw new IllegalArgumentException("s1 is too large");
    }
    if (s1 > 0) dim = s1;
    if (r1 > 0) numRows = r1;
    if (b != 2) {
      System.err.println("***** DigitalNetBase2FromFile: only base 2 allowed");
      throw new IllegalArgumentException("only base 2 allowed");
    }
    genMat = new int[dim * numCols];
    for (i = 0; i < dim; i++)
      for (int c = 0; c < numCols; c++) {
        st.nextToken();
        genMat[i * numCols + c] = (int) st.nval;
      }
  } catch (NumberFormatException e) {
    System.err.println(" DigitalNetBase2FromFile: not a number " + e);
    throw e;
  }
}
public CSVReader(BufferedReader input, char customizedSeparator) {
  this.separator = customizedSeparator;
  parser = new StreamTokenizer(input);
  parser.ordinaryChars(0, 255);
  parser.wordChars(0, 255);
  parser.ordinaryChar('\"');
  parser.ordinaryChar(customizedSeparator);
  // Need to set EOL significance after setting ordinary and word chars, and
  // need to explicitly set \n and \r as whitespace chars for EOL detection
  // to work.
  parser.eolIsSignificant(true);
  parser.whitespaceChars('\n', '\n');
  parser.whitespaceChars('\r', '\r');
  atEOF = false;
}
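// Why the ordering comment above matters, as a minimal sketch: wordChars(0, 255)
// claims '\n' and '\r' as word characters, so line breaks only come back as
// TT_EOL once they are explicitly re-declared as whitespace afterwards.
StreamTokenizer p = new StreamTokenizer(new StringReader("a,b\nc"));
p.ordinaryChars(0, 255);
p.wordChars(0, 255);
p.ordinaryChar(',');
p.eolIsSignificant(true);
p.whitespaceChars('\n', '\n');
p.whitespaceChars('\r', '\r');
// Token stream: TT_WORD "a", ',', TT_WORD "b", TT_EOL, TT_WORD "c", TT_EOF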
/**
 * Initializes the stream tokenizer.
 *
 * @param tokenizer the tokenizer to initialize
 */
private void initTokenizer(StreamTokenizer tokenizer) {
  tokenizer.resetSyntax();
  tokenizer.whitespaceChars(0, (' ' - 1));
  tokenizer.wordChars(' ', '\u00FF');
  tokenizer.whitespaceChars(m_FieldSeparator.charAt(0), m_FieldSeparator.charAt(0));
  // tokenizer.commentChar('%');
  String[] parts = m_Enclosures.split(",");
  for (String e : parts) {
    if (e.length() != 1) {
      throw new IllegalArgumentException("Enclosures can only be single characters");
    }
    tokenizer.quoteChar(e.charAt(0));
  }
  tokenizer.eolIsSignificant(true);
}
/** Create an HTTP tokenizer, given a StreamTokenizer for the web page. */
public HttpTokenizer(StreamTokenizer tokens) throws IOException {
  // Store the stream tokenizer
  this.tokens = tokens;
  // Set up the appropriate defaults
  tokens.eolIsSignificant(false);
  tokens.lowerCaseMode(true);
  tokens.wordChars('<', '<');
  tokens.wordChars('>', '>');
  tokens.wordChars('/', '/');
  tokens.wordChars('=', '=');
  tokens.wordChars('@', '@');
  tokens.wordChars('!', '!');
  tokens.wordChars('-', '-');
  tokens.ordinaryChar('.');
  tokens.ordinaryChar('?');
}
/** @tests java.io.StreamTokenizer#eolIsSignificant(boolean) */
public void test_eolIsSignificantZ() throws IOException {
  setTest("d 8\n");
  // by default end of line characters are not significant
  assertTrue(
      "nextToken did not return d",
      st.nextToken() == StreamTokenizer.TT_WORD && st.sval.equals("d"));
  assertTrue(
      "nextToken did not return 8",
      st.nextToken() == StreamTokenizer.TT_NUMBER && st.nval == 8.0);
  assertTrue("nextToken should be the end of file", st.nextToken() == StreamTokenizer.TT_EOF);
  setTest("d\n");
  st.eolIsSignificant(true);
  // end of line characters are significant
  assertTrue(
      "nextToken did not return d",
      st.nextToken() == StreamTokenizer.TT_WORD && st.sval.equals("d"));
  assertTrue("nextToken is the end of line", st.nextToken() == StreamTokenizer.TT_EOL);
}
/**
 * Read a matrix from a stream. The format is the same as the print method, so printed
 * matrices can be read back in (provided they were printed using US Locale). Elements are
 * separated by whitespace, all the elements for each row appear on a single line, and the
 * last row is followed by a blank line.
 *
 * @param input the input stream.
 */
public static Matrix read(BufferedReader input) throws java.io.IOException {
  StreamTokenizer tokenizer = new StreamTokenizer(input);

  // Although StreamTokenizer will parse numbers, it doesn't recognize
  // scientific notation (E or D); however, Double.valueOf does.
  // The strategy here is to disable StreamTokenizer's number parsing.
  // We'll only get whitespace delimited words, EOL's and EOF's.
  // These words should all be numbers, for Double.valueOf to parse.
  tokenizer.resetSyntax();
  tokenizer.wordChars(0, 255);
  tokenizer.whitespaceChars(0, ' ');
  tokenizer.eolIsSignificant(true);
  java.util.Vector v = new java.util.Vector();

  // Ignore initial empty lines
  while (tokenizer.nextToken() == StreamTokenizer.TT_EOL)
    ;
  if (tokenizer.ttype == StreamTokenizer.TT_EOF)
    throw new java.io.IOException("Unexpected EOF on matrix read.");
  do {
    v.addElement(Double.valueOf(tokenizer.sval)); // Read & store 1st row.
  } while (tokenizer.nextToken() == StreamTokenizer.TT_WORD);

  int n = v.size(); // Now we've got the number of columns!
  double row[] = new double[n];
  for (int j = 0; j < n; j++) // extract the elements of the 1st row.
    row[j] = ((Double) v.elementAt(j)).doubleValue();
  v.removeAllElements();
  v.addElement(row); // Start storing rows instead of columns.
  while (tokenizer.nextToken() == StreamTokenizer.TT_WORD) {
    // While non-empty lines
    v.addElement(row = new double[n]);
    int j = 0;
    do {
      if (j >= n) throw new java.io.IOException("Row " + v.size() + " is too long.");
      row[j++] = Double.valueOf(tokenizer.sval).doubleValue();
    } while (tokenizer.nextToken() == StreamTokenizer.TT_WORD);
    if (j < n) throw new java.io.IOException("Row " + v.size() + " is too short.");
  }
  int m = v.size(); // Now we've got the number of rows.
  double[][] A = new double[m][];
  v.copyInto(A); // copy the rows out of the vector
  return new Matrix(A);
}
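// The pitfall the comment above works around, shown as a standalone sketch:
// with default number parsing enabled, StreamTokenizer splits scientific
// notation into two tokens instead of one number.
StreamTokenizer t = new StreamTokenizer(new StringReader("1.5E3"));
t.nextToken(); // TT_NUMBER, nval == 1.5
t.nextToken(); // TT_WORD, sval == "E3"
// After resetSyntax() plus wordChars(0, 255), the same input comes back as the
// single word "1.5E3", which Double.valueOf parses correctly to 1500.0.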
/**
 * This method sets the syntax of the StreamTokenizer, i.e. sets the whitespace, comment and
 * delimit chars.
 */
protected void setSyntax(StreamTokenizer tk) {
  tk.resetSyntax();
  tk.eolIsSignificant(false);
  tk.slashStarComments(true);
  tk.slashSlashComments(true);
  tk.whitespaceChars(0, ' ');
  tk.wordChars(' ' + 1, '\u00ff');
  tk.ordinaryChar('[');
  tk.ordinaryChar(']');
  tk.ordinaryChar('{');
  tk.ordinaryChar('}');
  tk.ordinaryChar('-');
  tk.ordinaryChar('>');
  tk.ordinaryChar('/');
  tk.ordinaryChar('*');
  tk.quoteChar('"');
  tk.whitespaceChars(';', ';');
  tk.ordinaryChar('=');
}
/** Creates a new input stream for the given reader */
public StringInputStream(Reader r) {
  st = new StreamTokenizer(r);
  st.resetSyntax();
  st.eolIsSignificant(false);
  // Parse numbers as words
  st.wordChars('0', '9');
  st.wordChars('-', '.');
  // Characters as words
  st.wordChars('\u0000', '\u00FF');
  // Skip everything after '%' on the same line
  st.commentChar('%');
  // Skip whitespace and newlines
  st.whitespaceChars(' ', ' ');
  st.whitespaceChars('\u0009', '\u000e');
}
/**
 * Set the params (analyzerName only), a comma-separated list of Analyzer class names. If the
 * Analyzer lives in org.apache.lucene.analysis, the name can be shortened by dropping the
 * o.a.l.a part of the Fully Qualified Class Name.
 *
 * <p>Analyzer names may also refer to previously defined AnalyzerFactory's.
 *
 * <p>Example Declaration: {"NewAnalyzer" NewAnalyzer(WhitespaceAnalyzer, SimpleAnalyzer,
 * StopAnalyzer, standard.StandardAnalyzer) >
 *
 * <p>Example AnalyzerFactory usage:
 *
 * <pre>
 * -AnalyzerFactory(name:'whitespace tokenized',WhitespaceTokenizer)
 * -NewAnalyzer('whitespace tokenized')
 * </pre>
 *
 * @param params analyzerClassName, or empty for the StandardAnalyzer
 */
@Override
public void setParams(String params) {
  super.setParams(params);
  final StreamTokenizer stok = new StreamTokenizer(new StringReader(params));
  stok.quoteChar('"');
  stok.quoteChar('\'');
  stok.eolIsSignificant(false);
  stok.ordinaryChar(',');
  try {
    while (stok.nextToken() != StreamTokenizer.TT_EOF) {
      switch (stok.ttype) {
        case ',':
          // Do nothing; commas merely separate names.
          break;
        case '\'':
        case '\"':
        case StreamTokenizer.TT_WORD:
          analyzerNames.add(stok.sval);
          break;
        default:
          throw new RuntimeException("Unexpected token: " + stok.toString());
      }
    }
  } catch (RuntimeException e) {
    if (e.getMessage().startsWith("Line #")) {
      throw e;
    } else {
      throw new RuntimeException("Line #" + (stok.lineno() + getAlgLineNum()) + ": ", e);
    }
  } catch (Throwable t) {
    throw new RuntimeException("Line #" + (stok.lineno() + getAlgLineNum()) + ": ", t);
  }
}
/** @param ribFileReader rib file reader */
public Rib2Xml(FileReader ribFileReader) {
  /* Configure log4j, read conf out of jar file */
  Class clazz = getClass();
  URL url = clazz.getResource("/conf/log4j.xml");
  if (url == null) {
    /* Try reading via filename */
    DOMConfigurator.configure("../conf/log4j.xml");
    System.err.println("Error: Configuration file for Log4j (log4j.xml) not found, aborting...");
    System.exit(1);
  }
  DOMConfigurator.configure(url);

  /* Create the rib factory which deals with all the rib elements */
  Config config = Config.instance();
  RibFactory ribFac = new RibFactory(config);
  Vector ribNames = config.getNames();
  StreamTokenizer thTokens = new StreamTokenizer(ribFileReader);
  // thTokens.resetSyntax();
  thTokens.commentChar('#');
  thTokens.eolIsSignificant(false);
  thTokens.parseNumbers();
  thTokens.ordinaryChar('[');
  thTokens.ordinaryChar(']');
  thTokens.quoteChar('"');
  int count = 0;
  String factoryInput = "";
  try {
    while (thTokens.nextToken() != StreamTokenizer.TT_EOF) {
      logger.debug(thTokens.lineno() + ": " + thTokens.sval + ": ttype: " + thTokens.ttype);
      if (thTokens.ttype == StreamTokenizer.TT_NUMBER) {
        logger.debug(thTokens.lineno() + ": " + thTokens.nval);
        factoryInput += " " + String.valueOf(thTokens.nval);
        count++;
      } else if (thTokens.ttype == StreamTokenizer.TT_WORD) {
        if (ribNames.contains(thTokens.sval)) {
          logger.debug(factoryInput);
          // The rib factory is called to add an element to the xml document
          logger.debug("Elements: " + count + ": " + factoryInput);
          ribFac.processRibElement(factoryInput);
          factoryInput = thTokens.sval;
        } else {
          factoryInput += " " + thTokens.sval;
        }
        logger.debug(thTokens.lineno() + ": " + thTokens.sval);
        count++;
      } else {
        if (thTokens.ttype != '"') {
          logger.debug(thTokens.lineno() + ": " + (char) thTokens.ttype);
          factoryInput += " " + (char) thTokens.ttype;
          count++;
        } else if (thTokens.sval != null) {
          logger.debug(
              thTokens.lineno()
                  + ": "
                  + (char) thTokens.ttype
                  + thTokens.sval
                  + (char) thTokens.ttype);
          factoryInput += " " + (char) thTokens.ttype + thTokens.sval + (char) thTokens.ttype;
          count++;
        }
      }
    }
  } catch (IOException e) {
    logger.error(e.toString());
  }
  logger.info("Tokens: " + count);
  RibDocument ribDoc = RibDocument.newInstance();
  ribDoc.toFile();
}
/**
 * Requests new model inputs from user.
 *
 * <p>Will return -1 if user signals end-of-input (<code>^d</code>)
 *
 * @param inVec A {@link VectorInfoArrayList} listing inputs and default values
 * @return int -1 to quit, 0 to keep going
 * @throws IOException if stdin cannot be read
 */
public int loadInputs(VectorInfoArrayList inVec) throws IOException {
  int tokenType;

  // The following mumbo-jumbo is necessary to take stdin and parse it
  FileReader frin = new FileReader(FileDescriptor.in);
  StreamTokenizer st = new StreamTokenizer(frin);

  // Tell parser to look for double-precision numbers and EOLs.
  // Note - can't use exponential notation; only 0-9, -, .
  st.parseNumbers();
  st.eolIsSignificant(true);

  // Write header
  System.out.println();
  System.out.println(" Specify input values:");

  // Loop for each input block that might need a value
  Iterator<VectorInfo> in = inVec.iterator();
  while (in.hasNext()) {
    boolean blankLine = false;
    VectorInfo inVal = in.next();

    // write name, units, default value
    System.out.print(
        " " + inVal.getName() + " (" + inVal.getUnits() + ") [" + inVal.getValue() + "] : ");

    // look for number in input stream; abort on EOF; skip to next on EOL
    do {
      // look for number or EOL or EOF
      tokenType = st.nextToken();
      if (tokenType == StreamTokenizer.TT_EOF) {
        return -1; // quit
      }
      if (tokenType == StreamTokenizer.TT_EOL) {
        // skip to next param
        blankLine = true;
        break;
      }
    } while (tokenType != StreamTokenizer.TT_NUMBER); // keep looking until number found

    if (!blankLine) {
      // if not an empty line, interpret number and save in block
      try {
        inVal.setValue(st.nval);
      } catch (NumberFormatException e) {
        // take no action - leave value as is
      }

      // look for EOL so we can ignore it
      do {
        tokenType = st.nextToken();
        if (tokenType == StreamTokenizer.TT_EOF) {
          return -1;
        }
      } while (tokenType != StreamTokenizer.TT_EOL);
    }
  }
  return 0; // indicate keep going
}
private static void readConfig(File confFile) {
  System.out.println("Reading configuration file: " + confFile);
  try {
    StreamTokenizer tokenizer =
        new StreamTokenizer(new BufferedReader(new FileReader(confFile)));
    tokenizer.eolIsSignificant(true);
    tokenizer.slashStarComments(true);
    boolean eof = false;
    int tokType = 0;
    Vector words = new Vector();
    while (!eof) {
      if ((tokType = tokenizer.nextToken()) == StreamTokenizer.TT_EOF) {
        eof = true;
      } else if (tokType != StreamTokenizer.TT_EOL) {
        if (tokenizer.sval != null) {
          words.addElement(tokenizer.sval);
        }
      } else {
        if (words.size() == 2) {
          String key = (String) words.elementAt(0);
          String value = (String) words.elementAt(1);
          if (key.equals("SRSServer")) {
            srsServer = value;
          } else if (key.equals("Database")) {
            database = value;
          } else if (key.equals("Layout")) {
            layout = value;
          } else if (key.equals("AutosaveInterval")) {
            if (value.equals("none")) {
              setAutosaveInterval(-1);
            } else {
              try {
                setAutosaveInterval(Integer.parseInt(value));
              } catch (NumberFormatException e) {
                System.err.println("Can't parse number: " + value);
              }
            }
          } else if (key.equals("ColourSchemeInstall") || key.equals("FormatAdapterInstall")) {
            // The two install keys share identical handling.
            try {
              int breakIndex = value.indexOf(":");
              if (breakIndex < 0) {
                // adapterRegistry.installDataAdapter(value);
              } else {
                String driverName = value.substring(0, breakIndex);
                String driverDesc = value.substring(breakIndex + 1);
                // adapterRegistry.installDataAdapter(driverName);
              }
            } catch (Throwable e) {
              System.err.println("Could not install driver " + value + " because of " + e);
            }
          } else {
            System.out.println("Unknown config key " + key);
          }
        } else if (words.size() != 0) {
          System.out.println(
              "Too many words on line beginning "
                  + (String) words.elementAt(0)
                  + " in config file");
        }
        words.removeAllElements();
      }
    }
    return;
  } catch (Exception ex) {
    System.out.println(ex);
    return;
  }
}
public static Sequence Tokenizer() {
  AST ast = new AST();
  Sequence seq = ast.new Sequence();
  try {
    FileReader inFile = new FileReader("Test.scn");
    StreamTokenizer st = new StreamTokenizer(inFile);
    st.ordinaryChar('.');
    st.ordinaryChar('/');
    st.eolIsSignificant(true);
    String ID;
    int x;
    int y;
    int w;
    int h;
    String fileName;
    int token = st.nextToken();
    while (token != StreamTokenizer.TT_EOF) {
      char ch;
      String s;
      switch (token) {
        case StreamTokenizer.TT_WORD:
          s = st.sval;
          // In each branch below, the unassigned nextToken() calls skip the
          // punctuation tokens between values.
          if (s.equals("Move")) {
            st.nextToken();
            st.nextToken();
            ID = st.sval;
            st.nextToken();
            st.nextToken();
            x = (int) st.nval;
            st.nextToken();
            st.nextToken();
            y = (int) st.nval;
            st.nextToken();
            seq.elements.add(new Move(new Id(ID), new Number(x), new Number(y)));
            System.out.println("Move " + ID + " " + x + ", " + y);
          } else if (s.equals("Object")) {
            st.nextToken();
            ID = st.sval;
            st.nextToken();
            st.nextToken();
            x = (int) st.nval;
            st.nextToken();
            st.nextToken();
            y = (int) st.nval;
            st.nextToken();
            st.nextToken();
            w = (int) st.nval;
            st.nextToken();
            st.nextToken();
            h = (int) st.nval;
            st.nextToken();
            st.nextToken();
            fileName = st.sval;
            st.nextToken();
            seq.elements.add(
                new Obj(
                    new Id(ID),
                    new Variables(
                        new Number(x), new Number(y), new Number(w), new Number(h),
                        new Id(fileName))));
            System.out.println(
                "Object " + ID + " " + x + ", " + y + ", " + w + ", " + h + ", " + fileName);
          } else if (s.equals("Camera")) {
            st.nextToken();
            ID = st.sval;
            st.nextToken();
            st.nextToken();
            x = (int) st.nval;
            st.nextToken();
            st.nextToken();
            y = (int) st.nval;
            st.nextToken();
            seq.elements.add(
                new Camera(new Id(ID), new Variables(new Number(x), new Number(y))));
            System.out.println("Camera " + ID + " " + x + ", " + y);
          } else if (s.equals("Sprite")) {
            st.nextToken();
            ID = st.sval;
            st.nextToken();
            st.nextToken();
            x = (int) st.nval;
            st.nextToken();
            st.nextToken();
            y = (int) st.nval;
            st.nextToken();
            st.nextToken();
            w = (int) st.nval;
            st.nextToken();
            st.nextToken();
            h = (int) st.nval;
            st.nextToken();
            st.nextToken();
            fileName = st.sval;
            st.nextToken();
            seq.elements.add(
                new Sprite(
                    new Id(ID),
                    new Variables(
                        new Number(x), new Number(y), new Number(w), new Number(h),
                        new Id(fileName))));
            System.out.println(
                "Sprite " + ID + " " + x + ", " + y + ", " + w + ", " + h + ", " + fileName);
          } else {
            System.out.print("<ID> " + s + " ");
          }
          break;
        case StreamTokenizer.TT_NUMBER:
          int n = (int) st.nval;
          System.out.print("<Number> " + n);
          seq.elements.add(new Number(n));
          break;
        case '(':
          ch = (char) st.ttype;
          System.out.print("<Variables>" + ch);
          break;
        case ')':
          ch = (char) st.ttype;
          System.out.print(ch);
          break;
        case ',':
          ch = (char) st.ttype;
          System.out.print(ch + " ");
          break;
        case '"':
          s = st.sval;
          System.out.print("<ID> " + "\"" + s + "\"");
          break;
        case StreamTokenizer.TT_EOL:
          System.out.println();
          break;
        case '\0':
          break;
        default:
          s = st.sval;
          System.out.println("ERROR: Unrecognized Token: " + s);
          break;
      }
      token = st.nextToken();
    }
    inFile.close();
    System.out.println();
  } catch (IOException e) {
    System.out.println("Error: " + e);
  }
  return seq;
}