Example #1
  /**
   * Create the NFA and DFA and generate code for a grammar. NFAs for any delegates are created
   * first. Once all NFAs exist, it is safe to create the DFAs, which must check for left recursion;
   * that check walks the full NFA, which therefore must be complete. DFA conversion for the root
   * grammar comes next, then code generation for the root grammar. DFA conversion and code
   * generation for the delegates come last.
   */
  protected void generateRecognizer(Grammar grammar) {
    String language = (String) grammar.getOption("language");
    if (language != null) {
      CodeGenerator generator = new CodeGenerator(this, grammar, language);
      grammar.setCodeGenerator(generator);
      generator.setDebug(isDebug());
      generator.setProfile(isProfile());
      generator.setTrace(isTrace());

      // generate NFA early in case of crash later (for debugging)
      if (isGenerate_NFA_dot()) {
        generateNFAs(grammar);
      }

      // GENERATE CODE
      generator.genRecognizer();

      if (isGenerate_DFA_dot()) {
        generateDFAs(grammar);
      }

      List<Grammar> delegates = grammar.getDirectDelegates();
      if (delegates != null) {
        for (Grammar delegate : delegates) {
          if (delegate != grammar) { // already processing this one
            generateRecognizer(delegate);
          }
        }
      }
    }
  }
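
The recursive walk over direct delegates is easiest to see in isolation. The sketch below uses a hypothetical GrammarNode class (not ANTLR's Grammar) purely to demonstrate the order implied by the method above: the current grammar is handled first, then each direct delegate depth-first, with the same guard against a grammar listing itself as a delegate.

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for a composite grammar; not part of ANTLR. It only
// illustrates the traversal order of generateRecognizer(): current grammar
// first, then its direct delegates, depth-first.
class GrammarNode {
  final String name;
  final List<GrammarNode> delegates = new ArrayList<GrammarNode>();

  GrammarNode(String name) { this.name = name; }

  void generate() {
    System.out.println("code gen for " + name);  // current grammar first
    for (GrammarNode d : delegates) {
      if (d != this) {                           // mirrors the delegate != grammar guard
        d.generate();                            // then delegates, depth-first
      }
    }
  }

  public static void main(String[] args) {
    GrammarNode root = new GrammarNode("CompositeParser");
    root.delegates.add(new GrammarNode("ImportedLexer"));
    root.generate();  // prints CompositeParser first, then ImportedLexer
  }
}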
Example #2
 /**
  * Topologically sort the grammar files so that a grammar whose tokenVocab option names another
  * grammar's generated .tokens vocabulary is processed after the grammar that generates it. Files
  * that cannot be opened are reported and dropped from the resulting list.
  */
 public void sortGrammarFiles() throws IOException {
   // System.out.println("Grammar names "+getGrammarFileNames());
   Graph g = new Graph();
   List<String> missingFiles = new ArrayList<String>();
   for (String gfile : grammarFileNames) {
     try {
       GrammarSpelunker grammar = new GrammarSpelunker(inputDirectory, gfile);
       grammar.parse();
       String vocabName = grammar.getTokenVocab();
       String grammarName = grammar.getGrammarName();
       // Make all grammars depend on any tokenVocab options
       if (vocabName != null) g.addEdge(gfile, vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
       // Make all generated tokens files depend on their grammars
       g.addEdge(grammarName + CodeGenerator.VOCAB_FILE_EXTENSION, gfile);
     } catch (FileNotFoundException fnfe) {
       ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE, gfile);
       missingFiles.add(gfile);
     }
   }
   List<Object> sorted = g.sort();
   // System.out.println("sorted="+sorted);
   grammarFileNames.clear(); // wipe so we can give new ordered list
   for (int i = 0; i < sorted.size(); i++) {
     String f = (String) sorted.get(i);
     if (missingFiles.contains(f)) continue;
     if (!(f.endsWith(".g") || f.endsWith(".g3"))) continue;
     grammarFileNames.add(f);
   }
   // System.out.println("new grammars="+grammarFileNames);
 }
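
sortGrammarFiles() delegates the ordering to a Graph with addEdge() and sort(). That Graph class is not shown in this excerpt, so the sketch below is an assumed, simplified stand-in (the DependencyGraph name and its DFS implementation are mine, not ANTLR's) showing how the two kinds of edges added above force a grammar that generates a .tokens vocabulary to sort ahead of a grammar that imports it via the tokenVocab option.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Minimal DFS-based topological sort; a hypothetical stand-in for the graph
// used by sortGrammarFiles(), with the same addEdge/sort shape.
class DependencyGraph {
  private final Map<String, Set<String>> edges = new LinkedHashMap<String, Set<String>>();

  // addEdge(a, b): a depends on b, so b must appear before a in the result.
  void addEdge(String a, String b) {
    node(a).add(b);
    node(b); // ensure b is known even if it has no dependencies of its own
  }

  private Set<String> node(String n) {
    Set<String> deps = edges.get(n);
    if (deps == null) { deps = new LinkedHashSet<String>(); edges.put(n, deps); }
    return deps;
  }

  List<String> sort() {
    List<String> sorted = new ArrayList<String>();
    Set<String> visited = new HashSet<String>();
    for (String n : edges.keySet()) dfs(n, visited, sorted);
    return sorted;
  }

  private void dfs(String n, Set<String> visited, List<String> sorted) {
    if (!visited.add(n)) return;                  // already emitted
    for (String dep : edges.get(n)) dfs(dep, visited, sorted);
    sorted.add(n);                                // post-order: dependencies first
  }

  public static void main(String[] args) {
    DependencyGraph g = new DependencyGraph();
    g.addEdge("Expr.g", "ExprLexer.tokens");      // Expr.g uses tokenVocab=ExprLexer
    g.addEdge("ExprLexer.tokens", "ExprLexer.g"); // ExprLexer.g generates the vocabulary
    System.out.println(g.sort());                 // [ExprLexer.g, ExprLexer.tokens, Expr.g]
  }
}

After filtering the sorted list to names ending in .g or .g3, as the loop above does, only ExprLexer.g and Expr.g remain, in that order.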
Example #3
 /** Add a grammar file to the list of files to process, ignoring duplicates. */
 public void addGrammarFile(String grammarFileName) {
   if (!grammarFileNames.contains(grammarFileName)) {
     grammarFileNames.add(grammarFileName);
   }
 }
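
As a usage note, the contains() guard means repeated additions of the same file name are ignored and insertion order is preserved. The tiny self-contained sketch below (GrammarFileSet is a hypothetical holder, not ANTLR code) only demonstrates that behavior.

import java.util.ArrayList;
import java.util.List;

// Hypothetical holder class; mirrors the contains()-guarded add shown above.
class GrammarFileSet {
  private final List<String> grammarFileNames = new ArrayList<String>();

  public void addGrammarFile(String grammarFileName) {
    if (!grammarFileNames.contains(grammarFileName)) {
      grammarFileNames.add(grammarFileName);
    }
  }

  public static void main(String[] args) {
    GrammarFileSet files = new GrammarFileSet();
    files.addGrammarFile("Expr.g");
    files.addGrammarFile("ExprLexer.g");
    files.addGrammarFile("Expr.g");              // duplicate, silently ignored
    System.out.println(files.grammarFileNames);  // [Expr.g, ExprLexer.g]
  }
}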