/** This method is a hack. I really want another constructor. */ public static Obj3d parseFile(InputStream is, ModelViewer modelViewer, boolean register) throws IOException, FileFormatException { StreamTokenizer st = new StreamTokenizer(new BufferedReader(new InputStreamReader(is))); st.eolIsSignificant(true); st.commentChar('#'); Obj3d obj3d = new Obj3d(st, modelViewer, register); // should be at end of file is.close(); if (st.ttype != StreamTokenizer.TT_EOF) { throw new FileFormatException(is.toString()); } return obj3d; }
/**
 * Parses the S-Expression from the lexer output. The lexer should be positioned on the first
 * symbol after the opening parenthesis.
 *
 * @return the parse tree of the input
 * @throws IOException if a read error occurs in the lexer
 * @throws ParsingException if the input cannot be parsed successfully
 */
private Expression parseSymbolicExpression() throws IOException, ParsingException {
  // Expression currently under construction; its operator is the symbol just read.
  Expression expr = new Expression(lexer.sval);
  int t = lexer.nextToken();
  while (t != StreamTokenizer.TT_EOF) {
    switch (t) {
      case ')':
        // End of the current expression. With no enclosing expression on the stack, this
        // closes the outermost one and parsing is complete.
        if (stack.empty()) return expr;
        // Otherwise attach the finished expression to its parent and resume the parent.
        stack.peek().addOperand(expr);
        expr = stack.pop();
        break;
      case '(': // descend into a sub-expression
        stack.push(expr);
        // a sub-expression must begin with its operator symbol
        if (lexer.nextToken() != StreamTokenizer.TT_WORD) {
          throw new ParsingException("Expected symbol. Got: " + lexer.ttype);
        }
        expr = new Expression(lexer.sval);
        break;
      case StreamTokenizer.TT_WORD:
        try {
          // test for a number
          expr.addOperand(Value.newInt(Integer.parseInt(lexer.sval)));
        } catch (NumberFormatException ignored) {
          // fall back on a symbol
          expr.addOperand(lexer.sval);
        }
        break;
      default:
        throw new ParsingException("Unknown token type: " + lexer.ttype);
    }
    t = lexer.nextToken();
  }
  // input ran out before the outermost ')' was seen
  throw new ParsingException("Expected end of input. Got: " + lexer.ttype);
}
public void loadTree() { System.out.println("Loading tree"); StreamTokenizer stream = null; try { FileInputStream f = new FileInputStream(tree); Reader input = new BufferedReader(new InputStreamReader(f)); stream = new StreamTokenizer(input); stream.resetSyntax(); stream.wordChars(32, 127); } catch (Exception e) { System.out.println("Error opening " + tree); System.exit(1); } list = new ArrayList(); try { // read the file to the end while (stream.nextToken() != StreamTokenizer.TT_EOF) { // is a word being read if (stream.ttype == StreamTokenizer.TT_WORD) { list.add(new String(stream.sval)); } // is a number being read if (stream.ttype == StreamTokenizer.TT_NUMBER) { list.add(new Double(stream.nval)); } } } catch (Exception e) { System.out.println("\nError reading " + tree + ". Exiting..."); System.exit(1); } }
/**
 * Counts the lines of "mf.dat" by counting end-of-line tokens.
 *
 * <p>Word and number tokens are skipped. The original used the raw values -1/-3/-2/10 instead
 * of the StreamTokenizer constants, and never closed the reader.
 *
 * @return the number of line terminators seen
 * @throws IOException if the file cannot be opened or read
 */
int ct() throws IOException {
  try (FileReader fr = new FileReader("mf.dat")) {
    StreamTokenizer st = new StreamTokenizer(fr);
    st.eolIsSignificant(true);
    int ctr = 0;
    // TT_EOF == -1, TT_WORD == -3, TT_NUMBER == -2, TT_EOL == '\n' (10)
    for (int tok = st.nextToken(); tok != StreamTokenizer.TT_EOF; tok = st.nextToken()) {
      if (tok == StreamTokenizer.TT_EOL) {
        ctr++;
      }
      // words and numbers are deliberately ignored
    }
    return ctr;
  }
}
/**
 * Tokenizes /input.txt from the classpath and prints, per token, the tokenizer state, then the
 * total token, word, number and punctuation counts (labels are in Chinese and preserved as-is).
 */
public static void main(String[] args) {
  try {
    InputStream is = StreamTokenering.class.getResourceAsStream("/input.txt");
    StreamTokenizer in = new StreamTokenizer(new InputStreamReader(is));
    // '.' and '\'' would otherwise fold into numbers/words; count them as punctuation
    in.ordinaryChar('.');
    in.ordinaryChar('\'');
    int wordCount = 0, numCount = 0, punctionCount = 0, count = 0;
    // nextToken() returns int; the original widened it to double for no reason
    int token;
    while ((token = in.nextToken()) != StreamTokenizer.TT_EOF) {
      count++;
      if (token == StreamTokenizer.TT_WORD) {
        wordCount++;
      } else if (token == StreamTokenizer.TT_NUMBER) {
        numCount++;
      } else {
        punctionCount++;
      }
      System.out.println(in.toString());
    }
    System.out.println("单词总数为:" + count);
    System.out.println("单词数为:" + wordCount);
    System.out.println("数字数为:" + numCount);
    // the original post-incremented punctionCount inside this println; the increment after the
    // last use was dead code and has been dropped (printed value is unchanged)
    System.out.println("标点符号数为:" + punctionCount);
  } catch (IOException e) {
    e.printStackTrace();
  }
}
/**
 * Reads whitespace-separated numbers from the stream and verifies that each one equals the
 * previous number plus one.
 *
 * <p>Each violation is reported on stderr with the line number; a progress message is printed
 * after every 1000 numbers read.
 *
 * @param in the stream of numbers to validate
 * @return how many numbers broke the consecutive sequence
 * @throws IOException if reading from the stream fails
 */
static int check(InputStream in) throws IOException {
  StreamTokenizer tokens = new StreamTokenizer(new BufferedReader(new InputStreamReader(in)));
  int previous = 0;
  int read = 0;
  int violations = 0;
  boolean havePrevious = false;
  for (int token = tokens.nextToken();
      token != StreamTokenizer.TT_EOF;
      token = tokens.nextToken()) {
    final int current = (int) tokens.nval;
    if (havePrevious && current != previous + 1) {
      System.err.println(
          "Number read: " + current + ", previous number: " + previous
              + " (lineno: " + tokens.lineno() + ")");
      violations++;
    }
    havePrevious = true;
    previous = current;
    read++;
    if (read > 0 && read % 1000 == 0) System.out.println("read " + read + " numbers");
  }
  return violations;
}
/**
 * Ensures the program's first token is '(' and is followed by a symbol.
 *
 * <p>The first symbol in the input will be the current value of the lexer after this call
 * completes.
 *
 * @throws IOException if reading the input fails
 * @throws ParsingException if this assumption fails
 */
private void checkStart() throws ParsingException, IOException {
  lexer.nextToken();
  if (lexer.ttype != '(') {
    throw new ParsingException("Program does not begin with '('.");
  }
  final int second = lexer.nextToken();
  if (second != StreamTokenizer.TT_WORD) {
    throw new ParsingException("Expected symbol. Got: " + lexer.ttype);
  }
}
/**
 * Configure the lexical analyzer.
 *
 * <p>All syntax tables are cleared first; every character up to and including space acts as
 * whitespace, while '!' and the range '*'..'z' form words.
 *
 * @param reader the input stream reader
 * @return an s-expression lexer
 */
private StreamTokenizer createLexer(Reader reader) {
  final StreamTokenizer lexer = new StreamTokenizer(reader);
  lexer.resetSyntax();
  lexer.eolIsSignificant(false);
  // control characters and space separate tokens
  lexer.whitespaceChars(0, ' ');
  // '!' plus the printable range '*' through 'z' make up symbols
  lexer.wordChars('!', '!');
  lexer.wordChars('*', 'z');
  return lexer;
}
/**
 * creates a NewAnnotationAction from the input processed by StreamTokenizer <I>tok</I>, which
 * should have the form <br>
 * add [type feature=value feature=value ...] <br>
 * or <br>
 * add [type feature=value feature=value ...] over spanVariable
 */
public NewAnnotationAction(StreamTokenizer tok) throws IOException, PatternSyntaxError {
  // Optional "Var = [...]" prefix: a capitalized word binds the new annotation to a variable.
  if (tok.nextToken() == StreamTokenizer.TT_WORD && Character.isUpperCase(tok.sval.charAt(0))) {
    bindingVariable = new Variable(tok.sval);
    if (tok.nextToken() != '=') throw new PatternSyntaxError("= expected");
    tok.nextToken();
  }
  // The annotation proper: '[', then the type name, then the feature set up to ']'.
  if (tok.ttype != '[') throw new PatternSyntaxError("[ expected");
  if (tok.nextToken() != StreamTokenizer.TT_WORD)
    throw new PatternSyntaxError("annotation type expected");
  type = tok.sval;
  // FeatureSet consumes the feature=value pairs through the closing ']'
  features = new FeatureSet(tok, true, ']');
  // Optional "over X" suffix: X is either a capitalized span variable or the literal 0.
  if (tok.nextToken() == StreamTokenizer.TT_WORD && tok.sval.equalsIgnoreCase("over")) {
    if (tok.nextToken() == StreamTokenizer.TT_WORD && Character.isUpperCase(tok.sval.charAt(0))) {
      spanVariable = new Variable(tok.sval);
      tok.nextToken();
    } else if (tok.ttype == StreamTokenizer.TT_NUMBER && tok.nval == 0) {
      // "over 0" denotes the special span variable named "0"
      spanVariable = new Variable("0");
      tok.nextToken();
    } else {
      throw new PatternSyntaxError("variable expected after 'over'");
    }
  } else {
    spanVariable = null;
  }
}
public static void main(String[] args) throws Exception { BigInteger T = BigInteger.valueOf(0); BigInteger TB = BigInteger.valueOf(0); BigInteger NTB = BigInteger.valueOf(0); BigInteger S = BigInteger.valueOf(0); BigInteger MAX = BigInteger.valueOf(1); int j; for (j = 0; j < 100; j++) MAX = MAX.multiply(BigInteger.valueOf(10)); for (; ; ) { int i, t, a, b; if (in.nextToken() != StreamTokenizer.TT_NUMBER) break; t = (int) in.nval; if (in.nextToken() != StreamTokenizer.TT_NUMBER) break; a = (int) in.nval; if (in.nextToken() != StreamTokenizer.TT_NUMBER) break; b = (int) in.nval; // System.out.print("("); // System.out.print(t); // System.out.print("^"); // System.out.print(a); // System.out.print("-1)/("); // System.out.print(t); // System.out.print("^"); // System.out.print(b); // System.out.print("-1) "); if (t == 1 || a % b != 0) { System.out.print("bad!\n"); continue; } T = BigInteger.valueOf(t); TB = BigInteger.valueOf(1); for (i = 0; i < b; i++) { TB = TB.multiply(T); if (TB.compareTo(MAX) >= 0) break; } NTB = BigInteger.valueOf(1); S = BigInteger.valueOf(0); for (i = 0; i < a; i += b) { S = S.add(NTB); if (S.compareTo(MAX) >= 0) break; NTB = NTB.multiply(TB); } if (S.compareTo(MAX) >= 0) System.out.print("bad!"); else System.out.print(S); System.out.print("\n"); } }
/**
 * Reads the next token and returns its word value.
 *
 * @return the text of the next word token
 * @throws IOException if reading fails
 * @throws IllegalStateException if the next token is not a word
 */
private String readWord() throws IOException {
  final int token = in.nextToken();
  if (token != StreamTokenizer.TT_WORD) {
    throw new IllegalStateException("Word expected. Found: " + token);
  }
  return in.sval;
}
/**
 * Reads the next token and returns its numeric value.
 *
 * @return the value of the next number token
 * @throws IOException if reading fails
 * @throws IllegalStateException if the next token is not a number
 */
private double readNumber() throws IOException {
  final int token = in.nextToken();
  if (token != StreamTokenizer.TT_NUMBER) {
    throw new IllegalStateException("Number expected. Found: " + token);
  }
  return in.nval;
}
/**
 * Appends the finished field value (null for a missing field) to the current record and
 * enforces the per-record column limit.
 *
 * @param record the record being built
 * @param fieldValue the accumulated field text, or null
 * @throws CSVParseException if the record now exceeds maxColumnsPerRow columns
 */
private void addField(ArrayList<String> record, StringBuilder fieldValue)
    throws CSVParseException {
  final String value = (fieldValue == null) ? null : fieldValue.toString();
  record.add(value);
  if (record.size() > maxColumnsPerRow) {
    throw new CSVParseException(
        "Exceeded max number of columns per record : " + maxColumnsPerRow, parser.lineno());
  }
}
public void readProblem(String fileName) throws FileNotFoundException, IOException { Reader inputFile = new BufferedReader(new InputStreamReader(new FileInputStream(fileName))); StreamTokenizer token = new StreamTokenizer(inputFile); try { token.nextToken(); numberOfCities_ = (int) token.nval; distanceMatrix_ = new double[numberOfCities_][numberOfCities_]; flujo1 = new double[numberOfCities_][numberOfCities_]; flujo2 = new double[numberOfCities_][numberOfCities_]; // Cargar objetivo 1 for (int k = 0; k < numberOfCities_; k++) { for (int j = 0; j < numberOfCities_; j++) { token.nextToken(); flujo1[k][j] = token.nval; } } // Cargar objetivo 2 for (int k = 0; k < numberOfCities_; k++) { for (int j = 0; j < numberOfCities_; j++) { token.nextToken(); flujo2[k][j] = token.nval; } } // Carga de distancias for (int k = 0; k < numberOfCities_; k++) { for (int j = 0; j < numberOfCities_; j++) { token.nextToken(); distanceMatrix_[k][j] = token.nval; } } } // try catch (Exception e) { System.err.println("QAP.readProblem(): error when reading data file " + e); System.exit(1); } // catch } // readProblem
public String[] parseTokens(String line) throws IOException { List tokens = new ArrayList(); /*StringTokenizer st = new StringTokenizer(line); String token; while((token = st.nextToken()) != null) { tokens.add(token); } */ StreamTokenizer st = new StreamTokenizer(new StringReader(line)); st.parseNumbers(); st.wordChars('_', '_'); // A word can be THIS_IS_A_WORD int token = st.nextToken(); while (token != StreamTokenizer.TT_EOF) { String element = null; switch (token) { case StreamTokenizer.TT_NUMBER: element = String.valueOf(st.nval); break; case StreamTokenizer.TT_WORD: element = st.sval; break; case '"': case '\'': element = st.sval; break; case StreamTokenizer.TT_EOL: break; case StreamTokenizer.TT_EOF: break; default: element = String.valueOf((char) st.ttype); break; } if (element != null) tokens.add(element); token = st.nextToken(); } String[] result = new String[tokens.size()]; for (int index = 0; index < tokens.size(); index++) result[index] = (String) tokens.get(index); return result; }
// s is the effective dimension if > 0, otherwise it is dim private void readData(Reader re, int r1, int s1) throws IOException, NumberFormatException { try { StreamTokenizer st = new StreamTokenizer(re); if (st == null) return; st.eolIsSignificant(false); st.slashSlashComments(true); int i = st.nextToken(); if (i != StreamTokenizer.TT_NUMBER) throw new NumberFormatException(); b = (int) st.nval; st.nextToken(); numCols = (int) st.nval; st.nextToken(); numRows = (int) st.nval; st.nextToken(); numPoints = (int) st.nval; st.nextToken(); dim = (int) st.nval; if (dim < 1) { System.err.println(PrintfFormat.NEWLINE + "DigitalNetBase2FromFile: dimension dim <= 0"); throw new IllegalArgumentException("dimension dim <= 0"); } if (r1 > numRows) throw new IllegalArgumentException( "DigitalNetBase2FromFile: One must have r1 <= Max num rows"); if (s1 > dim) { throw new IllegalArgumentException("s1 is too large"); } if (s1 > 0) dim = s1; if (r1 > 0) numRows = r1; if (b != 2) { System.err.println("***** DigitalNetBase2FromFile: only base 2 allowed"); throw new IllegalArgumentException("only base 2 allowed"); } genMat = new int[dim * numCols]; for (i = 0; i < dim; i++) for (int c = 0; c < numCols; c++) { st.nextToken(); genMat[i * numCols + c] = (int) st.nval; } } catch (NumberFormatException e) { System.err.println(" DigitalNetBase2FromFile: not a number " + e); throw e; } }
/**
 * Advances the tokenizer (unless it is already at end of input) and reports whether a token is
 * available.
 *
 * @return true if the tokenizer is not at end of input after advancing
 * @throws RuntimeException wrapping any IOException from the underlying stream
 */
@Override
public boolean hasMoreTokens() {
  if (streamTokenizer.ttype != StreamTokenizer.TT_EOF) {
    try {
      streamTokenizer.nextToken();
    } catch (IOException e1) {
      throw new RuntimeException(e1);
    }
  }
  // the original also compared against the literal -1, but TT_EOF is -1, so one check suffices
  return streamTokenizer.ttype != StreamTokenizer.TT_EOF;
}
/**
 * Validates the size limits after a record has been parsed — per-record character count, total
 * file character count, and total row count — and accumulates the running file totals.
 *
 * @param line the parsed record, or null if none (nulls are ignored)
 * @throws CSVParseException if any configured limit is exceeded
 */
private void checkRecordExceptions(List<String> line) throws IOException {
  if (line == null) {
    return;
  }
  // total characters across the record's non-null cells
  int recordChars = 0;
  for (String cell : line) {
    if (cell != null) {
      recordChars += cell.length();
    }
  }
  if (recordChars > maxRowSizeInCharacters) {
    throw new CSVParseException(
        "Exceeded max length for one record: "
            + recordChars
            + ". Max length for one record should be less than or equal to "
            + maxRowSizeInCharacters,
        parser.lineno());
  }
  fileSizeInCharacters += recordChars;
  if (fileSizeInCharacters > maxFileSizeInCharacters) {
    throw new CSVParseException(
        "Exceeded max file size: "
            + fileSizeInCharacters
            + ". Max file size in characters should be less than or equal to "
            + maxFileSizeInCharacters,
        parser.lineno());
  }
  rowsInFile++;
  if (rowsInFile > maxRowsInFile) {
    throw new CSVParseException(
        "Exceeded number of records : "
            + rowsInFile
            + ". Number of records should be less than or equal to "
            + maxRowsInFile,
        parser.lineno());
  }
}
/**
 * This method reads in passenger data and builds a new passenger object.
 *
 * <p>Reads a name token and a floor number from the shared tokenizer {@code in}.
 *
 * @return new Passenger object built from file inputs, or null if reading failed
 */
private static Passenger newPassenger() {
  String passengerName = "";
  byte floor = 1;
  try {
    in.nextToken();
    passengerName = in.sval;
    in.nextToken();
    floor = (byte) in.nval;
  } catch (IOException e) {
    // early return replaces the original's okToContinue flag; behaviour is identical
    System.err.println("Unexpected error in file! Can't continue.");
    return null;
  }
  return new Passenger(passengerName, floor);
} // end newPassenger
/**
 * Appends one token to the field value being accumulated, creating the builder on first use.
 *
 * @param fieldValue the builder so far, or null if this is the field's first token
 * @param token the token text to append
 * @return the builder containing the appended token
 * @throws CSVParseException if the token exceeds the per-cell size limit
 */
private StringBuilder appendFieldValue(StringBuilder fieldValue, String token)
    throws CSVParseException {
  if (fieldValue == null) {
    fieldValue = new StringBuilder();
  }
  fieldValue.append(token);
  // NOTE(review): only the current token's length is checked, not the accumulated cell length
  // (fieldValue.length()), so a cell built from many small tokens can exceed the limit —
  // confirm whether that is intended.
  if (token.length() > maxSizeOfIndividualCell) {
    throw new CSVParseException("Exceeded max field size: " + token.length(), parser.lineno());
  }
  return fieldValue;
}
/**
 * Parses an inline CSS-style declaration list ("key:value;key:value ...") into a map.
 *
 * <p>Values consisting of several words are joined with single spaces; empty declarations
 * (stray semicolons) are skipped. A null input yields an empty map.
 *
 * @param str the style string, or null
 * @return the key/value pairs found
 * @throws IOException if the input does not match the key:value grammar
 */
public static Map<String, String> getStyles(String str) throws IOException {
  final HashMap<String, String> styles = new HashMap<String, String>();
  if (str == null) {
    return styles;
  }
  final StreamTokenizer tt = new StreamTokenizer(new StringReader(str));
  tt.resetSyntax();
  // word characters: printable ASCII except ':' and ';' (the delimiters), plus upper Latin-1
  tt.wordChars('!', '9');
  tt.wordChars('<', '~');
  tt.wordChars(128 + 32, 255);
  tt.whitespaceChars(0, ' ');
  while (tt.nextToken() != StreamTokenizer.TT_EOF) {
    if (tt.ttype == ';') {
      continue; // empty declaration
    }
    if (tt.ttype != StreamTokenizer.TT_WORD) {
      throw new IOException(
          "Key token expected in " + str + " " + Integer.toHexString(tt.ttype));
    }
    final String key = tt.sval;
    if (tt.nextToken() != ':') {
      throw new IOException("Colon expected after " + key + " in " + str);
    }
    if (tt.nextToken() != StreamTokenizer.TT_WORD) {
      throw new IOException(
          "Value token expected after " + key + " in " + str + " " + tt.ttype);
    }
    String value = tt.sval;
    // a multi-word value continues until the next non-word token
    while (tt.nextToken() == StreamTokenizer.TT_WORD) {
      value += ' ' + tt.sval;
    }
    tt.pushBack();
    styles.put(key, value);
  }
  return styles;
}
/**
 * Reads two lists of numbers from stdin, each preceded by its length, and prints "YES" if some
 * element of the first list plus some element of the second equals DESIRED_SUM, else "NO".
 *
 * <p>Uses a single index j swept over the first list: it advances while the pairwise sum is
 * too small and retreats while it is too large. NOTE(review): this two-pointer scheme assumes
 * the first list is sorted ascending — confirm against the input specification.
 */
public static void main(String[] args) throws IOException {
  final StreamTokenizer in =
      new StreamTokenizer(new BufferedReader(new InputStreamReader(System.in, "ISO-8859-1")));
  final PrintWriter out = new PrintWriter(new OutputStreamWriter(System.out, "ISO-8859-1"));
  in.nextToken();
  int n = (int) in.nval;
  short[] list = new short[n];
  int i;
  // read the first list
  for (i = 0; i < n; i++) {
    in.nextToken();
    list[i] = (short) in.nval;
  }
  in.nextToken();
  n = (int) in.nval;
  // read second list searching for desired sum
  int j = 0, k;
  for (i = 0; i < n; i++) {
    in.nextToken();
    k = (short) in.nval;
    // slide j toward the partner value for k
    while (list[j] + k < DESIRED_SUM && j < list.length - 1) j++;
    while (list[j] + k > DESIRED_SUM && j > 0) j--;
    if (list[j] + k == DESIRED_SUM) {
      out.print("YES");
      out.flush();
      return;
    }
  }
  out.print("NO");
  out.flush();
}
/** * Returns the string array associated with a key, assuming it is defined. It is recommended to * check that it is defined first with {@link #hasValue(String)}. * * @throws RuntimeException if the key is not defined. * @see #hasValue(String) */ public String[] getList(/*@KeyFor("this.map")*/ String key) { try { if (!hasValue(key)) { throw new RuntimeException(String.format("Key '%s' is not defined", key)); } final String sValue = getValue(key); StreamTokenizer tok = new StreamTokenizer(new StringReader(sValue)); tok.quoteChar('"'); tok.whitespaceChars(' ', ' '); ArrayList<String> lValues = new ArrayList<String>(); int tokInfo = tok.nextToken(); while (tokInfo != StreamTokenizer.TT_EOF) { if (tok.ttype != '"') continue; assert tok.sval != null : "@AssumeAssertion(nullness)"; // tok.type == '"' guarantees not null lValues.add(tok.sval.trim()); tokInfo = tok.nextToken(); } return lValues.toArray(new String[] {}); } catch (IOException ex) { throw new RuntimeException(String.format("Parsing for key '%s' failed", key), ex); } }
/** Create an HTTP tokenizer, given a StreamTokenizer for the web page. */
public HttpTokenizer(StreamTokenizer tokens) throws IOException {
  // Create a stream tokenizer
  this.tokens = tokens;
  // Set up the appropriate defaults
  tokens.eolIsSignificant(false); // line breaks are irrelevant in markup
  tokens.lowerCaseMode(true); // normalize tags/attributes to lower case
  // markup characters that must be treated as parts of words, not delimiters
  tokens.wordChars('<', '<');
  tokens.wordChars('>', '>');
  tokens.wordChars('/', '/');
  tokens.wordChars('=', '=');
  tokens.wordChars('@', '@');
  tokens.wordChars('!', '!');
  tokens.wordChars('-', '-');
  // '.' and '?' come back as standalone single-character tokens
  tokens.ordinaryChar('.');
  tokens.ordinaryChar('?');
}
/**
 * Returns the text of the current token: sval for a word, nval (rendered as a double) for a
 * number. Runs of end-of-line tokens are consumed; other token types trigger a recursive
 * fetch via hasMoreTokens()/nextToken().
 *
 * <p>NOTE(review): after skipping EOL tokens this still returns the (empty) buffer rather
 * than the token that follows — confirm that callers expect an empty string there.
 */
@Override
public String nextToken() {
  StringBuffer sb = new StringBuffer();
  if (streamTokenizer.ttype == StreamTokenizer.TT_WORD) {
    sb.append(streamTokenizer.sval);
  } else if (streamTokenizer.ttype == StreamTokenizer.TT_NUMBER) {
    sb.append(streamTokenizer.nval);
  } else if (streamTokenizer.ttype == StreamTokenizer.TT_EOL) {
    try {
      // consume the whole run of EOL tokens
      while (streamTokenizer.ttype == StreamTokenizer.TT_EOL) streamTokenizer.nextToken();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  } else if (hasMoreTokens()) return nextToken();
  String ret = sb.toString();
  // optional hook to normalize tokens before returning them
  if (tokenPreProcess != null) ret = tokenPreProcess.preProcess(ret);
  return ret;
}
/**
 * Advances the shared tokenizer until a token of the requested type is found.
 *
 * <p>End-of-line tokens are skipped. When scanning for TT_WORD, any non-number token also
 * stops the scan (so punctuation counts as word-like). Stops with a message at end of file or
 * on an IOException.
 *
 * @param tt the token type to scan for: TT_NUMBER or TT_WORD
 */
static void scanTo(int tt) { // scans to a given token type:
  // TT_NUMBER or TT_WORD
  boolean found = false;
  try {
    while (!found) {
      int ttype = tokenizer.nextToken();
      if (ttype == tt) {
        found = true;
      } else if (ttype == tokenizer.TT_EOF) {
        println("End of File reached while scanning for input.");
        found = true;
      } else if (ttype == tokenizer.TT_EOL) {
        // skip over end of line
      } else if ((tt == tokenizer.TT_WORD) && (ttype != tokenizer.TT_NUMBER)) {
        // when a word is wanted, accept any non-numeric token
        found = true;
      }
    }
  } catch (IOException e) {
    println("IOException while scanning for input.");
  }
}
/**
 * Creates a CSV reader over the given input with a custom separator character.
 *
 * <p>All bytes are first made word characters, then the quote character and the separator are
 * marked ordinary so they come back as standalone tokens. The call order below is
 * intentional — see the comment about EOL significance.
 *
 * @param input the buffered CSV source
 * @param customizedSeparator the column separator (e.g. ',' or '\t')
 */
public CSVReader(BufferedReader input, char customizedSeparator) {
  this.separator = customizedSeparator;
  parser = new StreamTokenizer(input);
  parser.ordinaryChars(0, 255);
  parser.wordChars(0, 255);
  parser.ordinaryChar('\"');
  parser.ordinaryChar(customizedSeparator);
  // Need to do set EOL significance after setting ordinary and word
  // chars, and need to explicitly set \n and \r as whitespace chars
  // for EOL detection to work
  parser.eolIsSignificant(true);
  parser.whitespaceChars('\n', '\n');
  parser.whitespaceChars('\r', '\r');
  atEOF = false;
}
/**
 * Reads the next token from the shared tokenizer and returns it as an int.
 *
 * @return the next numeric token, truncated to int
 * @throws IOException if reading fails
 */
static int nextInt() throws IOException {
  input.nextToken();
  final double value = input.nval;
  return (int) value;
}
/**
 * Reads the next token from the shared tokenizer and returns its numeric value.
 *
 * @return the next numeric token as a double
 * @throws IOException if reading fails
 */
static double nextDouble() throws IOException {
  in.nextToken();
  final double value = in.nval;
  return value;
}
/**
 * Reads the next token from the shared tokenizer and returns it as a long.
 *
 * @return the next numeric token, truncated to long
 * @throws IOException if reading fails
 */
static long nextLong() throws IOException {
  in.nextToken();
  final double value = in.nval;
  return (long) value;
}