/**
 * Start the modules one after the other and try to compile the input file, then generate the
 * assembler file for jasmin.
 */
private static void parseCompile(String input, String fileName)
    throws ParserException, LexerException, IOException {
  String output; // Init output string
  StringReader reader = new StringReader(input); // Standard routine to start the parser, lexer, ...
  PushbackReader r = new PushbackReader(reader, 100);
  Lexer l = new Lexer(r);
  Parser parser = new Parser(l);
  Start start = parser.parse();
  // ASTPrinter printer = new ASTPrinter();
  // start.apply(printer);
  TypeChecker typeChecker = new TypeChecker(); // Starting TypeChecker
  start.apply(typeChecker);
  CodeGenerator codeGenerator = new CodeGenerator(typeChecker.getSymbolTable());
  // Copy all identifiers, together with their indices, over to the CodeGenerator.
  copySymbolTable(typeChecker, codeGenerator);
  start.apply(codeGenerator);
  output = createOutput(codeGenerator, fileName);
  // Write everything to the output file <fileName>.j
  Writer wout = new BufferedWriter(
      new OutputStreamWriter(new FileOutputStream(fileName + ".j"), "UTF8"));
  wout.append(output);
  wout.close();
}
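// A minimal driver sketch (not part of the original sources) showing how parseCompile might be
// invoked. It assumes this method lives in the same class as parseCompile above; the
// argument handling and the extension-stripping helper are hypothetical.
public static void main(String[] args) throws Exception {
  // Read the whole source file into a string (java.nio, Java 7+).
  String input = new String(java.nio.file.Files.readAllBytes(java.nio.file.Paths.get(args[0])));
  // Strip the extension so the generated jasmin file becomes <baseName>.j
  String baseName = args[0].replaceAll("\\.[^.]*$", "");
  parseCompile(input, baseName);
}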
@Test
public final void testParseForSearch() {
  for (int i = 0; i < SEARCH_CMD.length - 800; i++) {
    assertNotNull(testObj.parseForSearch(SEARCH_CMD[i]));
  }
  assertNotNull(testObj.parseForSearch("'comments' details 'comments2' at 12am"));
  assertNotNull(testObj.parseForSearch("from 2pm to 4pm"));
  assertNotNull(testObj.parseForSearch("*'comments' details 'comments2' at 12am"));
}
private void verifyAST(AST result) throws Exception {
  transcript.println("vvvvvvvvvvvvvvv");
  String pretty = Parser.unparse(result);
  // This output is automatically verified against the transcript log if it exists.
  transcript.println(pretty);
  // As an extra safety net against possible human error in verifying that the captured
  // transcript output is actually correct, we also double-check that the pretty-printed
  // output can be parsed again and results in the same parse tree.
  AST parsedAgain = Parser.parse(pretty);
  Assert.assertEquals(pretty, Parser.unparse(parsedAgain));
}
/** Test of evaluate method, of class Parser. */
@Test
public void testEvaluate() throws Exception {
  System.out.println("parseExpression");
  String input = "-2^3";
  String expResult = "";
  System.out.println(expResult);
  Parser instance = new Parser();
  String result = instance.evaluate(input);
  System.out.println(result);
  assertEquals(expResult, result);
}
@Test
public void testEqual() throws InvalidSemanticsException, InvalidArgumentsException {
  for (int i = 1; i < 10; i++) {
    int x = myRandom.nextInt(i * 100);
    int y = myRandom.nextInt(i * 100);
    List<SyntaxNode> n = myParser.parseCommand("equal? " + x + " " + y);
    int val = (x == y) ? 1 : 0;
    assertEquals(n.get(0).evaluate(null), val);
    n = myParser.parseCommand("equalp " + x + " " + y);
    assertEquals(n.get(0).evaluate(null), val);
  }
}
/**
 * Check the code with the Lexer, Parser and TypeChecker. Once all of that is correct, start the
 * liveness analysis.
 */
private static void parseLiveness(String input)
    throws ParserException, LexerException, IOException {
  StringReader reader = new StringReader(input); // Standard routine to start the parser, lexer, ...
  PushbackReader r = new PushbackReader(reader, 100);
  Lexer l = new Lexer(r);
  Parser parser = new Parser(l);
  Start start = parser.parse();
  TypeChecker typeChecker = new TypeChecker(); // Starting TypeChecker
  start.apply(typeChecker);
  GraphVisitor analysis = new GraphVisitor();
  start.apply(analysis);
  new Liveness(analysis, typeChecker); // Start liveness analysis
}
public static void main(String[] args) throws ParseException, BadTokenException {
  LALRRuleSet<BracketsType> rules = new LALRRuleSet<BracketsType>();
  rules.addStartRule(new BracketsRule());
  rules.addRule(new BracketRule());
  LALRParserGenerator<BracketsType> generator = new LALRParserGenerator<BracketsType>(rules);
  generator.generate(BracketsType.GENERATED_START_RULE);
  BracketTokenizer tokenizer = new BracketTokenizer();
  Parser<BracketsType> parser = new Parser<BracketsType>(generator.getStartState(), tokenizer);
  Token<BracketsType> result = parser.parse();
  System.out.println("Success! got: " + result.getType());
  Bracket[] brackets = (Bracket[]) result.getValue();
  for (Bracket b : brackets) {
    System.out.print(b);
  }
  System.out.println();
}
@Test
public void testNot() throws InvalidSemanticsException, InvalidArgumentsException {
  for (int i = 1; i < 10; i++) {
    // Pick 0 or 1 so that both branches of "not" are exercised
    // (nextInt(1) would always return 0).
    int x = myRandom.nextInt(2);
    List<SyntaxNode> n = myParser.parseCommand("not " + x);
    int val = (x == 1) ? 0 : 1;
    assertEquals(n.get(0).evaluate(null), val);
  }
}
private void test(Parser p, DataSet d) {
  int dsize = d.size();
  testSize = dsize;
  for (int i = 0; i < dsize; i++) {
    String words = d.sent(i);
    Exp sem = d.sem(i);
    if (verbose) {
      System.out.println(i + ": ==================(" + correctParses + " -- " + wrongParses + ")");
      System.out.println(words);
      System.out.println(sem);
    }
    String mes = null;
    if (verbose) mes = "Test";
    p.parseTimed(words, null, mes);
    isCorrect(words, sem, p);
  }
}
public Parser(Parser parent) { this(parent.tokenizer()); }
@Test
public final void testValidateEmailAdd() {
  assertTrue(testObj.validateEmailAdd("*****@*****.**"));
}
@Test
public final void testFetchGCalDes() {
  assertNotNull(
      testObj.fetchGCalDes("<CMPT:false><IMPT:false><DEAD:false><RECUR:><RECURID:><LABEL:>"));
}
@Test
public final void testParseForAdd() {
  assertNull(testObj.parseForAdd("blabla"));
  assertNotNull(
      testObj.parseForAdd("*cs2013 presentation from 2pm 14th aug to 4pm 27th nov yearly-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation by 4pm 31st oct yearly-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation at 4pm 31st oct yearly-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation from 2pm to 4pm yearly-13 @work"));
  assertNotNull(
      testObj.parseForAdd("*cs2013 presentation from 2pm 14th aug to 4pm 27th nov monthly-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation by 4pm 31st oct monthly-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation at 4pm 31st oct monthly-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation from 2pm to 4pm monthly-13 @work"));
  assertNotNull(
      testObj.parseForAdd("*cs2013 presentation from 2pm 14th aug to 4pm 27th nov daily-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation by 4pm 31st oct daily-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation at 4pm 31st oct daily-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation from 2pm to 4pm daily-13 @work"));
  assertNotNull(
      testObj.parseForAdd("*cs2013 presentation from 2pm 14th aug to 4pm 27th nov weekly-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation by 4pm 31st oct weekly-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation at 4pm 31st oct weekly-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation from 2pm to 4pm weekly-13 @work"));
  assertNull(testObj.parseForAdd("*cs2013 presentation from 4pm to 2pm weekly-13 @work"));
  assertNotNull(testObj.parseForAdd("*cs2013 presentation from 1am to 2am weekly-13 @work"));
  assertNull(testObj.parseForAdd("project deadline by 1:00 today"));
  assertNotNull(testObj.parseForAdd("project deadline by 11pm"));
  assertNull(testObj.parseForAdd("drive to work at 12:30am today"));
  assertNotNull(testObj.parseForAdd("drive to work at 12:30pm today"));
  assertNull(testObj.parseForAdd("*cs2013 presentation from 1am to 2am weekly-100 @work"));
}
// This function returns a set of lexical entries produced by Split and Merge operations
// on the maximum scoring correct parse.
public List<LexEntry> makeLexEntriesChart(String words, Exp sem, Parser parser) {
  String mes = null;
  if (verbose) mes = "MakeLex";
  parser.parseTimed(words, sem, mes);
  return parser.getChart().splitAndMergeLex(sem);
}
@Test
public final void testFetchTaskIds() {
  assertNotNull(testObj.fetchTaskIds("$$__30-06-2012160000B__$$ $$__31-06-2012160000B__$$"));
  // fail("Not yet implemented"); // TODO
}
@Test
public final void testGetErrorCode() {
  testObj.parseForAdd("*cs2013 presentation from 4pm to 3pm weekly-100 @work");
  assertEquals(OperationFeedback.START_DATE_TIME_MORE_THAN_END_DATE_TIME, testObj.getErrorCode());
}
public static TypeChecked parseAndCheck(String input) throws TypeCheckerException, Exception {
  Program program = Parser.parse(input);
  return new TypeCheckerImplementation(program).typeCheck();
}
/**
 * Parse the contents of a given file and typecheck it. If type checking succeeds the method
 * returns normally.
 *
 * <p>If the program has a type error or undeclared identifier error then an appropriate
 * TypeCheckerException must be raised. The type checker may try to continue checking after the
 * first error is encountered, but should nevertheless still raise a TypeCheckerException (this
 * can be done by postponing the raising of the Exception until all of the input has been
 * processed). See the class {@link ErrorReport}.
 *
 * <p>Other Exceptions may be raised if the parsing or reading of the file fails.
 *
 * <p>The TypeChecker may return some representation of the TypeChecked program along with useful
 * information derived by the checker.
 *
 * <p>This information will be passed along to the next phase of the compiler.
 *
 * <p>Right now, what information goes in TypeChecked is irrelevant. All that matters is that
 * errors get discovered. In later stages of the compiler, however, you may find that your type
 * checker computed valuable information (such as the symbol table from phase 1, which you may
 * want to use again). At this point you will be able to add information into the TypeChecked
 * object without breaking the type checker tests.
 */
public static TypeChecked parseAndCheck(File file) throws TypeCheckerException, Exception {
  Program program = Parser.parse(file);
  return new TypeCheckerImplementation(program).typeCheck();
}
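// A minimal usage sketch (not from the original sources) of parseAndCheck on a file, reporting
// a type error separately from other failures. It assumes this helper sits in the same class as
// parseAndCheck above; the method name checkFile and the path argument are hypothetical.
public static void checkFile(String path) {
  try {
    TypeChecked checked = parseAndCheck(new File(path));
    System.out.println("Type check succeeded: " + checked);
  } catch (TypeCheckerException e) {
    // Type error or undeclared identifier, as documented above.
    System.out.println("Type error: " + e.getMessage());
  } catch (Exception e) {
    // Parsing or I/O failure.
    System.out.println("Parse or read failure: " + e.getMessage());
  }
}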
public void stocGradTrain(Parser parser, boolean testEachRound) {
  int numUpdates = 0;
  List<LexEntry> fixedEntries = new LinkedList<LexEntry>();
  fixedEntries.addAll(parser.returnLex().getLexicon());
  // Add all sentential lexical entries.
  for (int l = 0; l < trainData.size(); l++) {
    parser.addLexEntries(trainData.getDataSet(l).makeSentEntries());
  }
  parser.setGlobals();
  DataSet data = null;
  // For each pass over the data.
  for (int j = 0; j < EPOCHS; j++) {
    System.out.println("Training, iteration " + j);
    int total = 0, correct = 0, wrong = 0, looCorrect = 0, looWrong = 0;
    for (int l = 0; l < trainData.size(); l++) {
      // The variables that hold the current training example.
      String words = null;
      Exp sem = null;
      data = trainData.getDataSet(l);
      if (verbose) System.out.println("---------------------");
      String filename = trainData.getFilename(l);
      if (verbose) System.out.println("DataSet: " + filename);
      if (verbose) System.out.println("---------------------");
      // Loop through the training examples and try to create lexical entries for each one.
      for (int i = 0; i < data.size(); i++) {
        // Print running stats.
        if (verbose) {
          if (total != 0) {
            double r = (double) correct / total;
            double p = (double) correct / (correct + wrong);
            System.out.print(i + ": =========== r:" + r + " p:" + p);
            System.out.println(" (epoch:" + j + " file:" + l + " " + filename + ")");
          } else {
            System.out.println(i + ": ===========");
          }
        }
        // Get the training example.
        words = data.sent(i);
        sem = data.sem(i);
        if (verbose) {
          System.out.println(words);
          System.out.println(sem);
        }
        List<String> tokens = Parser.tokenize(words);
        if (tokens.size() > maxSentLen) continue;
        total++;
        String mes = null;
        boolean hasCorrect = false;
        // First, get all possible lexical entries from a manipulation of the best parse.
        List<LexEntry> lex = makeLexEntriesChart(words, sem, parser);
        if (verbose) {
          System.out.println("Adding:");
          for (LexEntry le : lex) {
            System.out.println(le + " : " + LexiconFeatSet.initialWeight(le));
          }
        }
        parser.addLexEntries(lex);
        if (verbose) System.out.println("Lex Size: " + parser.returnLex().size());
        // First parse, to see if we are currently correct.
        if (verbose) mes = "First";
        parser.parseTimed(words, null, mes);
        Chart firstChart = parser.getChart();
        Exp best = parser.bestSem();
        // This just collates and outputs the training accuracy.
        if (sem.equals(best)) {
          // System.out.println(parser.bestParses().get(0));
          if (verbose) {
            System.out.println("CORRECT:" + best);
            lex = parser.getMaxLexEntriesFor(sem);
            System.out.println("Using:");
            printLex(lex);
            if (lex.size() == 0) {
              System.out.println("ERROR: empty lex");
            }
          }
          correct++;
        } else {
          if (verbose) {
            System.out.println("WRONG: " + best);
            lex = parser.getMaxLexEntriesFor(best);
            System.out.println("Using:");
            printLex(lex);
            if (best != null && lex.size() == 0) {
              System.out.println("ERROR: empty lex");
            }
          }
          wrong++;
        }
        // Compute the first half of the parameter update: subtract the expectation of the
        // parameters under the distribution that is conditioned on the sentence alone.
        double norm = firstChart.computeNorm();
        HashVector update = new HashVector();
        HashVector firstfeats = null, secondfeats = null;
        if (norm != 0.0) {
          firstfeats = firstChart.computeExpFeatVals();
          firstfeats.divideBy(norm);
          firstfeats.dropSmallEntries();
          firstfeats.addTimesInto(-1.0, update);
        } else continue;
        firstChart = null;
        if (verbose) mes = "Second";
        parser.parseTimed(words, sem, mes);
        hasCorrect = parser.hasParseFor(sem);
        // Compute the second half of the parameter update: add the expectation of the
        // parameters under the distribution that is conditioned on the sentence and the
        // correct logical form.
        if (!hasCorrect) continue;
        Chart secondChart = parser.getChart();
        double secnorm = secondChart.computeNorm(sem);
        if (norm != 0.0) {
          secondfeats = secondChart.computeExpFeatVals(sem);
          secondfeats.divideBy(secnorm);
          secondfeats.dropSmallEntries();
          secondfeats.addTimesInto(1.0, update);
          lex = parser.getMaxLexEntriesFor(sem);
          data.setBestLex(i, lex);
          if (verbose) {
            System.out.println("Best LexEntries:");
            printLex(lex);
            if (lex.size() == 0) {
              System.out.println("ERROR: empty lex");
            }
          }
        } else continue;
        // Now do the update.
        double scale = alpha_0 / (1.0 + c * numUpdates);
        if (verbose) System.out.println("Scale: " + scale);
        update.multiplyBy(scale);
        update.dropSmallEntries();
        numUpdates++;
        if (verbose) {
          System.out.println("Update:");
          System.out.println(update);
        }
        if (!update.isBad()) {
          if (!update.valuesInRange(-100, 100)) {
            System.out.println("WARNING: large update");
            System.out.println("first feats: " + firstfeats);
            System.out.println("second feats: " + secondfeats);
          }
          parser.updateParams(update);
        } else {
          System.out.println("ERROR: Bad Update: " + update + " -- norm: " + norm + " -- feats: ");
          parser.getParams().printValues(update);
          System.out.println();
        }
      } // end for each training example
    } // end for each data set
    double r = (double) correct / total;
    // We can prune the lexical items that were not used in a max scoring parse.
    if (pruneLex) {
      Lexicon cur = new Lexicon();
      cur.addLexEntries(fixedEntries);
      cur.addLexEntries(data.getBestLex());
      parser.setLexicon(cur);
    }
    if (testEachRound) {
      System.out.println("Testing");
      test(parser, false);
    }
  } // end epochs loop
}
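// A distilled sketch (an assumption, not part of the original sources) of the gradient step
// performed above for the log-linear parsing model: the update is
//   delta = E[f | words, correct sem] - E[f | words],
// scaled by the decaying learning rate alpha_0 / (1 + c * numUpdates). The HashVector calls
// mirror the ones used in stocGradTrain; the inputs are assumed to already hold unnormalized
// expected feature counts and their normalization constants, and are mutated in place.
static HashVector gradientStep(HashVector condFeats, double condNorm,
                               HashVector uncondFeats, double uncondNorm,
                               double alpha0, double c, int numUpdates) {
  HashVector update = new HashVector();
  uncondFeats.divideBy(uncondNorm);       // normalize to an expectation
  uncondFeats.addTimesInto(-1.0, update); // subtract the unconditioned expectation
  condFeats.divideBy(condNorm);
  condFeats.addTimesInto(1.0, update);    // add the conditioned expectation
  update.multiplyBy(alpha0 / (1.0 + c * numUpdates)); // decaying step size
  return update;
}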
public boolean isCorrect(String words, Exp sem, Parser parser) {
  List<ParseResult> parses = parser.bestParses();
  if (parses.size() > 0) {
    noAnswer = false;
  } else {
    noAnswer = true;
  }
  if (parses.size() == 1) {
    ParseResult p = parses.get(0);
    Exp e = p.getExp();
    e = e.copy();
    e.simplify();
    List l = p.getLexEntries();
    parsed++;
    if (e.equals(sem)) {
      if (verbose) {
        System.out.println("CORRECT");
        printLex(l);
      }
      int lits = sem.allLitsCount();
      correctParses++;
      return true;
    } else {
      // One parse, but it was wrong... oh well...
      if (verbose) {
        System.out.println("WRONG");
        System.out.println(parses.size() + " parses: " + parses);
        printLex(l);
      }
      wrongParses++;
      boolean hasCorrect = parser.hasParseFor(sem);
      if (verbose) {
        System.out.println("Had correct parse: " + hasCorrect);
        System.out.print("Feats: ");
        Exp eb = parser.bestSem();
        Chart c = parser.getChart();
        HashVector h = c.computeExpFeatVals(eb);
        h.divideBy(c.computeNorm(eb));
        h.dropSmallEntries();
        System.out.println(h);
      }
    }
  } else {
    noParses++;
    if (parses.size() > 1) {
      // There is more than one equally high scoring logical form. If this is the case,
      // we abstain from returning a result.
      if (verbose) {
        System.out.println("too many parses");
        System.out.println(parses.size() + " parses: " + parses);
      }
      Exp e = parses.get(0).getExp();
      ParseResult p = parses.get(0);
      boolean hasCorrect = parser.hasParseFor(sem);
      if (verbose) System.out.println("Had correct parse: " + hasCorrect);
    } else {
      // No parses; potentially reparse with word skipping.
      if (verbose) System.out.println("no parses");
      if (emptyTest) {
        List<LexEntry> emps = new LinkedList<LexEntry>();
        for (int j = 0; j < Globals.tokens.size(); j++) {
          List l = Globals.tokens.subList(j, j + 1);
          LexEntry le = new LexEntry(l, Cat.EMP);
          emps.add(le);
        }
        parser.setTempLexicon(new Lexicon(emps));
        String mes = null;
        if (verbose) mes = "EMPTY";
        parser.parseTimed(words, null, mes);
        parser.setTempLexicon(null);
        parses = parser.bestParses();
        if (parses.size() == 1) {
          ParseResult p = parses.get(0);
          List l = p.getLexEntries();
          Exp e = p.getExp();
          e = e.copy();
          e.simplify();
          int noEmpty = p.noEmpty();
          if (e.equals(sem)) {
            if (verbose) {
              System.out.println("CORRECT");
              printLex(l);
            }
            emptyCorrect++;
          } else {
            // One parse, but wrong.
            if (verbose) {
              System.out.println("WRONG: " + e);
              printLex(l);
              boolean hasCorrect = parser.hasParseFor(sem);
              System.out.println("Had correct parse: " + hasCorrect);
            }
          }
        } else {
          // Too many parses or no parses.
          emptyNoParses++;
          if (verbose) {
            System.out.println("WRONG:" + parses);
            boolean hasCorrect = parser.hasParseFor(sem);
            System.out.println("Had correct parse: " + hasCorrect);
          }
        }
      }
    }
  }
  return false;
}
protected void accept(File input) throws Exception {
  transcript.println("---------------");
  transcript.println("File: " + input);
  AST result = Parser.parse(input);
  verifyAST(result);
}