Example #1
File: Tool.java  Project: laiello/perseph
  /**
   * This method is used by all code generators to create new output files. If the outputDir set by
   * -o is not present it will be created. The final filename is sensitive to the output directory
   * and the directory where the grammar file was found. If -o is /tmp and the original grammar file
   * was foo/t.g then output files go in /tmp/foo.
   *
   * <p>The output dir -o spec takes precedence if it's absolute. E.g., if the grammar file dir is
   * absolute the output dir is given precedence. "-o /tmp /usr/lib/t.g" results in "/tmp/T.java"
   * as output (assuming t.g holds T.java).
   *
   * <p>If no -o is specified, then just write to the directory where the grammar file was found.
   *
   * <p>If outputDirectory==null then write a String.
   */
  public Writer getOutputFile(Grammar g, String fileName) throws IOException {
    if (getOutputDirectory() == null) {
      return new StringWriter();
    }
    // output directory is a function of where the grammar file lives
    // for subdir/T.g, you get subdir here.  Well, depends on -o etc...
    // But, if this is a .tokens file, then we force the output to
    // be the base output directory (or the current directory if there is no -o)
    //
    File outputDir;
    if (fileName.endsWith(CodeGenerator.VOCAB_FILE_EXTENSION)) {
      if (haveOutputDir) {
        outputDir = new File(getOutputDirectory());
      } else {
        outputDir = new File(".");
      }
    } else {
      outputDir = getOutputDirectory(g.getFileName());
    }
    File outputFile = new File(outputDir, fileName);

    if (!outputDir.exists()) {
      outputDir.mkdirs();
    }
    FileWriter fw = new FileWriter(outputFile);
    return new BufferedWriter(fw);
  }
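
A minimal standalone sketch of the same pattern using only java.io: fall back to an in-memory StringWriter when no output directory is configured, otherwise create the directory tree and return a buffered file writer. The class, directory, and file names below are illustrative, not part of the project.

import java.io.*;

public class OutputWriterSketch {
  // Mirror of getOutputFile's fallback logic: a null directory means "write to a String".
  static Writer openOutput(File outputDir, String fileName) throws IOException {
    if (outputDir == null) {
      return new StringWriter();          // caller inspects the buffer afterwards
    }
    if (!outputDir.exists()) {
      outputDir.mkdirs();                 // create missing parent directories
    }
    return new BufferedWriter(new FileWriter(new File(outputDir, fileName)));
  }

  public static void main(String[] args) throws IOException {
    try (Writer w = openOutput(new File("build/generated"), "T.java")) {
      w.write("// generated content goes here");
    }
  }
}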
Example #2
File: Tool.java  Project: laiello/perseph
  /**
   * Return the directory containing the grammar file for this grammar. Normally this is a relative
   * path from the current directory. People will often do "java org.antlr.Tool grammars/*.g3", so the
   * file will be "grammars/foo.g3" etc., and this method returns "grammars".
   *
   * <p>If we have been given a specific input directory as a base, then we must find the directory
   * relative to this directory, unless the file name is given to us in absolute terms.
   */
  public String getFileDirectory(String fileName) {

    File f;
    if (haveInputDir && !fileName.startsWith(File.separator)) {
      f = new File(inputDirectory, fileName);
    } else {
      f = new File(fileName);
    }
    // And ask Java what the base directory of this location is
    //
    return f.getParent();
  }
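
A small sketch of the same resolution rule, assuming an illustrative base input directory: prefix the input directory unless the name already starts with the separator, then ask File for the parent path.

import java.io.File;

public class FileDirectorySketch {
  // Same rule as getFileDirectory: resolve relative names against the input
  // directory, leave names that start with the separator untouched.
  static String fileDirectory(File inputDir, String fileName) {
    File f = fileName.startsWith(File.separator)
        ? new File(fileName)
        : new File(inputDir, fileName);
    return f.getParent();
  }

  public static void main(String[] args) {
    // prints "src/grammars" on Unix-like systems
    System.out.println(fileDirectory(new File("src"), "grammars/Expr.g3"));
  }
}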
Example #3
File: Tool.java  Project: laiello/perseph
  /**
   * Checks whether all of the output files exist and have last-modified timestamps that are
   * later than the last-modified timestamp of every grammar file involved in building the output
   * (imports must be checked too). If these conditions hold, the method returns false; otherwise it
   * returns true.
   *
   * @param grammarFileName The grammar file we are checking
   */
  public boolean buildRequired(String grammarFileName) throws IOException, ANTLRException {
    BuildDependencyGenerator bd = new BuildDependencyGenerator(this, grammarFileName);

    List<File> outputFiles = bd.getGeneratedFileList();
    List<File> inputFiles = bd.getDependenciesFileList();
    // Note that input directory must be set to use buildRequired
    File grammarFile;
    if (haveInputDir) {
      grammarFile = new File(inputDirectory, grammarFileName);
    } else {
      grammarFile = new File(grammarFileName);
    }
    long grammarLastModified = grammarFile.lastModified();
    for (File outputFile : outputFiles) {
      if (!outputFile.exists() || grammarLastModified > outputFile.lastModified()) {
        // One of the output files does not exist or is out of date, so we must build it
        return true;
      }
      // Check all of the imported grammars and see if any of these are newer
      // than any of the output files.
      if (inputFiles != null) {
        for (File inputFile : inputFiles) {

          if (inputFile.lastModified() > outputFile.lastModified()) {
            // One of the imported grammar files has been updated so we must build
            return true;
          }
        }
      }
    }
    if (isVerbose()) {
      System.out.println("Grammar " + grammarFile + " is up to date - build skipped");
    }
    return false;
  }
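
The staleness test boils down to plain timestamp comparisons; below is a standalone sketch of the same rule using only java.io.File, with illustrative file names.

import java.io.File;
import java.util.Arrays;
import java.util.List;

public class StalenessSketch {
  // A rebuild is required if any output file is missing, older than the
  // grammar file, or older than any imported/input file.
  static boolean rebuildNeeded(File grammar, List<File> inputs, List<File> outputs) {
    long grammarTime = grammar.lastModified();
    for (File out : outputs) {
      if (!out.exists() || grammarTime > out.lastModified()) {
        return true;                      // missing or older than the grammar
      }
      for (File in : inputs) {
        if (in.lastModified() > out.lastModified()) {
          return true;                    // an import is newer than an output
        }
      }
    }
    return false;                         // everything is up to date
  }

  public static void main(String[] args) {
    boolean stale = rebuildNeeded(
        new File("Expr.g"),
        Arrays.asList(new File("CommonTokens.g")),
        Arrays.asList(new File("ExprParser.java"), new File("Expr.tokens")));
    System.out.println("rebuild needed: " + stale);
  }
}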
Example #4
File: Tool.java  Project: laiello/perseph
  /**
   * Name a file from the -lib dir; used for imported grammars and .tokens files.
   *
   * <p>If we do not locate the file in the library directory, then we try the location of the
   * originating grammar.
   *
   * @param fileName input name we are looking for
   * @return Path to the file that we think should be the import file
   * @throws java.io.IOException
   */
  public String getLibraryFile(String fileName) throws IOException {

    // First, see if we can find the file in the library directory
    //
    File f = new File(getLibraryDirectory() + File.separator + fileName);

    if (f.exists()) {

      // Found in the library directory
      //
      return f.getAbsolutePath();
    }

    // Need to assume it is in the same location as the input file. Note that
    // this is only relevant for external build tools and when the input grammar
    // was specified relative to the source directory (the working directory if using
    // the command line).
    //
    return parentGrammarDirectory + File.separator + fileName;
  }
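
A hedged sketch of the same two-step lookup: prefer the -lib directory, and fall back to the directory of the originating grammar when the file is not found there. Directory and file names are illustrative.

import java.io.File;

public class LibraryLookupSketch {
  // Same fallback order as getLibraryFile: -lib directory first, then the
  // directory that contained the importing grammar.
  static String resolve(File libDir, File parentGrammarDir, String fileName) {
    File inLib = new File(libDir, fileName);
    if (inLib.exists()) {
      return inLib.getAbsolutePath();     // found in the library directory
    }
    return new File(parentGrammarDir, fileName).getPath();
  }

  public static void main(String[] args) {
    System.out.println(resolve(new File("lib"), new File("grammars"), "Common.tokens"));
  }
}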
Example #5
File: Tool.java  Project: laiello/perseph
  /**
   * Return a File descriptor for the vocab file. Look in the library directory or in the -o output
   * path: "antlr -o foo T.g U.g" where U needs T.tokens won't work unless we look in foo too. If we
   * do not find the file in the lib directory, then we must assume that the .tokens file is going to
   * be generated as part of this build; .tokens files are defined so that they are ALWAYS generated
   * in the base output directory, which means the current directory for the command-line tool if no
   * output directory was specified.
   */
  public File getImportedVocabFile(String vocabName) {

    File f =
        new File(
            getLibraryDirectory(), File.separator + vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
    if (f.exists()) {
      return f;
    }

    // We did not find the vocab file in the lib directory, so we need
    // to look for it in the output directory which is where .tokens
    // files are generated (in the base, not relative to the input
    // location).
    //
    if (haveOutputDir) {
      f = new File(getOutputDirectory(), vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
    } else {
      f = new File(vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
    }
    return f;
  }
Example #6
File: Tool.java  Project: laiello/perseph
  /** Get a grammar mentioned on the command-line and any delegates */
  public Grammar getRootGrammar(String grammarFileName) throws IOException {
    // StringTemplate.setLintMode(true);
    // grammars mentioned on the command line are either roots or single grammars.
    // create the necessary composite in case it's got delegates; even a
    // single grammar needs it to get token types.
    CompositeGrammar composite = new CompositeGrammar();
    Grammar grammar = new Grammar(this, grammarFileName, composite);
    composite.setDelegationRoot(grammar);
    FileReader fr = null;
    File f = null;

    if (haveInputDir) {
      f = new File(inputDirectory, grammarFileName);
    } else {
      f = new File(grammarFileName);
    }

    // Store the location of this grammar so that, if we import files, we can then
    // search for imports in the same location as the original grammar as well as in
    // the lib directory.
    //
    parentGrammarDirectory = f.getParent();

    if (grammarFileName.lastIndexOf(File.separatorChar) == -1) {
      grammarOutputDirectory = ".";
    } else {
      grammarOutputDirectory =
          grammarFileName.substring(0, grammarFileName.lastIndexOf(File.separatorChar));
    }
    fr = new FileReader(f);
    BufferedReader br = new BufferedReader(fr);
    grammar.parseAndBuildAST(br);
    composite.watchNFAConversion = internalOption_watchNFAConversion;
    br.close();
    fr.close();
    return grammar;
  }
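
The grammarOutputDirectory value is just the substring up to the last path separator (or "." when there is none); a tiny sketch of that rule with illustrative names:

import java.io.File;

public class GrammarDirSketch {
  // "." when the name has no directory component, otherwise everything
  // before the last separator, as in getRootGrammar above.
  static String outputDirFor(String grammarFileName) {
    int sep = grammarFileName.lastIndexOf(File.separatorChar);
    return sep == -1 ? "." : grammarFileName.substring(0, sep);
  }

  public static void main(String[] args) {
    System.out.println(outputDirFor("T.g"));                               // .
    System.out.println(outputDirFor("grammars" + File.separator + "T.g")); // grammars
  }
}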
Example #7
File: Tool.java  Project: laiello/perseph
  public void process() {
    boolean exceptionWhenWritingLexerFile = false;
    String lexerGrammarFileName = null; // necessary at this scope to have access in the catch below

    // Have to be careful here when Maven or other build tools call in, since they must new Tool()
    // before setting options; the banner won't display that way!
    if (isVerbose() && showBanner) {
      ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
      showBanner = false;
    }

    try {
      sortGrammarFiles(); // update grammarFileNames
    } catch (Exception e) {
      ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, e);
    } catch (Error e) {
      ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, e);
    }

    for (String grammarFileName : grammarFileNames) {
      // If we are in make mode (to support build tools like Maven) and the
      // file is already up to date, then we do not build it (and in verbose mode
      // we will say so).
      if (make) {
        try {
          if (!buildRequired(grammarFileName)) continue;
        } catch (Exception e) {
          ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, e);
        }
      }

      if (isVerbose() && !isDepend()) {
        System.out.println(grammarFileName);
      }
      try {
        if (isDepend()) {
          BuildDependencyGenerator dep = new BuildDependencyGenerator(this, grammarFileName);
          /*
          List outputFiles = dep.getGeneratedFileList();
          List dependents = dep.getDependenciesFileList();
          System.out.println("output: "+outputFiles);
          System.out.println("dependents: "+dependents);
           */
          System.out.println(dep.getDependencies());
          continue;
        }

        Grammar grammar = getRootGrammar(grammarFileName);
        // we now have all grammars read in as ASTs
        // (i.e., root and all delegates)
        grammar.composite.assignTokenTypes();
        grammar.composite.defineGrammarSymbols();
        grammar.composite.createNFAs();

        generateRecognizer(grammar);

        if (isPrintGrammar()) {
          grammar.printGrammar(System.out);
        }

        if (isReport()) {
          GrammarReport greport = new GrammarReport(grammar);
          System.out.println(greport.toString());
          // print out a backtracking report too (that is not encoded into log)
          System.out.println(greport.getBacktrackingReport());
          // same for aborted NFA->DFA conversions
          System.out.println(greport.getAnalysisTimeoutReport());
        }
        if (isProfile()) {
          GrammarReport greport = new GrammarReport(grammar);
          Stats.writeReport(GrammarReport.GRAMMAR_STATS_FILENAME, greport.toNotifyString());
        }

        // now handle the lexer if one was created for a merged spec
        String lexerGrammarStr = grammar.getLexerGrammar();
        // System.out.println("lexer grammar:\n"+lexerGrammarStr);
        if (grammar.type == Grammar.COMBINED && lexerGrammarStr != null) {
          lexerGrammarFileName = grammar.getImplicitlyGeneratedLexerFileName();
          try {
            Writer w = getOutputFile(grammar, lexerGrammarFileName);
            w.write(lexerGrammarStr);
            w.close();
          } catch (IOException e) {
            // emit a different error message when creating the implicit lexer fails
            // due to a write-permission error
            exceptionWhenWritingLexerFile = true;
            throw e;
          }
          try {
            StringReader sr = new StringReader(lexerGrammarStr);
            Grammar lexerGrammar = new Grammar();
            lexerGrammar.composite.watchNFAConversion = internalOption_watchNFAConversion;
            lexerGrammar.implicitLexer = true;
            lexerGrammar.setTool(this);
            File lexerGrammarFullFile =
                new File(getFileDirectory(lexerGrammarFileName), lexerGrammarFileName);
            lexerGrammar.setFileName(lexerGrammarFullFile.toString());

            lexerGrammar.importTokenVocabulary(grammar);
            lexerGrammar.parseAndBuildAST(sr);

            sr.close();

            lexerGrammar.composite.assignTokenTypes();
            lexerGrammar.composite.defineGrammarSymbols();
            lexerGrammar.composite.createNFAs();

            generateRecognizer(lexerGrammar);
          } finally {
            // make sure we clean up
            if (deleteTempLexer) {
              File outputDir = getOutputDirectory(lexerGrammarFileName);
              File outputFile = new File(outputDir, lexerGrammarFileName);
              outputFile.delete();
            }
          }
        }
      } catch (IOException e) {
        if (exceptionWhenWritingLexerFile) {
          ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, lexerGrammarFileName, e);
        } else {
          ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE, grammarFileName);
        }
      } catch (Exception e) {
        ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, grammarFileName, e);
      }
      /*
      finally {
      System.out.println("creates="+ Interval.creates);
      System.out.println("hits="+ Interval.hits);
      System.out.println("misses="+ Interval.misses);
      System.out.println("outOfRange="+ Interval.outOfRange);
      }
       */
    }
  }
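
The per-grammar pipeline inside process() can also be driven directly. The sketch below repeats only calls that appear in the method above (getRootGrammar and the composite passes); it assumes the stock ANTLR v3 package layout (org.antlr.Tool, org.antlr.tool.Grammar), a no-argument Tool constructor as hinted by the build-tool comment, and an illustrative grammar file name.

import org.antlr.Tool;
import org.antlr.tool.Grammar;

public class PipelineSketch {
  public static void main(String[] args) throws Exception {
    Tool tool = new Tool();                          // assumed no-arg constructor (see comment in process())
    Grammar grammar = tool.getRootGrammar("Expr.g"); // root grammar plus any delegates
    grammar.composite.assignTokenTypes();            // same passes that process() runs per grammar
    grammar.composite.defineGrammarSymbols();
    grammar.composite.createNFAs();
    // code generation would follow here, as process() does with generateRecognizer(grammar)
  }
}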
Example #8
File: Tool.java  Project: laiello/perseph
  public void processArgs(String[] args) {

    if (isVerbose()) {
      ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
      showBanner = false;
    }

    if (args == null || args.length == 0) {
      help();
      return;
    }
    for (int i = 0; i < args.length; i++) {
      if (args[i].equals("-o") || args[i].equals("-fo")) {
        if (i + 1 >= args.length) {
          System.err.println("missing output directory with -fo/-o option; ignoring");
        } else {
          if (args[i].equals("-fo")) { // force output into dir
            setForceAllFilesToOutputDir(true);
          }
          i++;
          outputDirectory = args[i];
          if (outputDirectory.endsWith("/") || outputDirectory.endsWith("\\")) {
            outputDirectory = outputDirectory.substring(0, getOutputDirectory().length() - 1);
          }
          File outDir = new File(outputDirectory);
          haveOutputDir = true;
          if (outDir.exists() && !outDir.isDirectory()) {
            ErrorManager.error(ErrorManager.MSG_OUTPUT_DIR_IS_FILE, outputDirectory);
            setLibDirectory(".");
          }
        }
      } else if (args[i].equals("-lib")) {
        if (i + 1 >= args.length) {
          System.err.println("missing library directory with -lib option; ignoring");
        } else {
          i++;
          setLibDirectory(args[i]);
          if (getLibraryDirectory().endsWith("/") || getLibraryDirectory().endsWith("\\")) {
            setLibDirectory(getLibraryDirectory().substring(0, getLibraryDirectory().length() - 1));
          }
          File outDir = new File(getLibraryDirectory());
          if (!outDir.exists()) {
            ErrorManager.error(ErrorManager.MSG_DIR_NOT_FOUND, getLibraryDirectory());
            setLibDirectory(".");
          }
        }
      } else if (args[i].equals("-nfa")) {
        setGenerate_NFA_dot(true);
      } else if (args[i].equals("-dfa")) {
        setGenerate_DFA_dot(true);
      } else if (args[i].equals("-debug")) {
        setDebug(true);
      } else if (args[i].equals("-trace")) {
        setTrace(true);
      } else if (args[i].equals("-report")) {
        setReport(true);
      } else if (args[i].equals("-profile")) {
        setProfile(true);
      } else if (args[i].equals("-print")) {
        setPrintGrammar(true);
      } else if (args[i].equals("-depend")) {
        setDepend(true);
      } else if (args[i].equals("-verbose")) {
        setVerbose(true);
      } else if (args[i].equals("-version")) {
        version();
        exitNow = true;
      } else if (args[i].equals("-make")) {
        setMake(true);
      } else if (args[i].equals("-message-format")) {
        if (i + 1 >= args.length) {
          System.err.println("missing output format with -message-format option; using default");
        } else {
          i++;
          ErrorManager.setFormat(args[i]);
        }
      } else if (args[i].equals("-Xgrtree")) {
        internalOption_PrintGrammarTree = true; // print grammar tree
      } else if (args[i].equals("-Xdfa")) {
        internalOption_PrintDFA = true;
      } else if (args[i].equals("-Xnoprune")) {
        DFAOptimizer.PRUNE_EBNF_EXIT_BRANCHES = false;
      } else if (args[i].equals("-Xnocollapse")) {
        DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES = false;
      } else if (args[i].equals("-Xdbgconversion")) {
        NFAToDFAConverter.debug = true;
      } else if (args[i].equals("-Xmultithreaded")) {
        NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION = false;
      } else if (args[i].equals("-Xnomergestopstates")) {
        DFAOptimizer.MERGE_STOP_STATES = false;
      } else if (args[i].equals("-Xdfaverbose")) {
        internalOption_ShowNFAConfigsInDFA = true;
      } else if (args[i].equals("-Xwatchconversion")) {
        internalOption_watchNFAConversion = true;
      } else if (args[i].equals("-XdbgST")) {
        CodeGenerator.EMIT_TEMPLATE_DELIMITERS = true;
      } else if (args[i].equals("-Xmaxinlinedfastates")) {
        if (i + 1 >= args.length) {
          System.err.println("missing max inline dfa states -Xmaxinlinedfastates option; ignoring");
        } else {
          i++;
          CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = Integer.parseInt(args[i]);
        }
      } else if (args[i].equals("-Xmaxswitchcaselabels")) {
        if (i + 1 >= args.length) {
          System.err.println(
              "missing max switch case labels -Xmaxswitchcaselabels option; ignoring");
        } else {
          i++;
          CodeGenerator.MAX_SWITCH_CASE_LABELS = Integer.parseInt(args[i]);
        }
      } else if (args[i].equals("-Xminswitchalts")) {
        if (i + 1 >= args.length) {
          System.err.println("missing min switch alternatives -Xminswitchalts option; ignoring");
        } else {
          i++;
          CodeGenerator.MIN_SWITCH_ALTS = Integer.parseInt(args[i]);
        }
      } else if (args[i].equals("-Xm")) {
        if (i + 1 >= args.length) {
          System.err.println("missing max recursion with -Xm option; ignoring");
        } else {
          i++;
          NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = Integer.parseInt(args[i]);
        }
      } else if (args[i].equals("-Xmaxdfaedges")) {
        if (i + 1 >= args.length) {
          System.err.println("missing max number of edges with -Xmaxdfaedges option; ignoring");
        } else {
          i++;
          DFA.MAX_STATE_TRANSITIONS_FOR_TABLE = Integer.parseInt(args[i]);
        }
      } else if (args[i].equals("-Xconversiontimeout")) {
        if (i + 1 >= args.length) {
          System.err.println("missing max time in ms -Xconversiontimeout option; ignoring");
        } else {
          i++;
          DFA.MAX_TIME_PER_DFA_CREATION = Integer.parseInt(args[i]);
        }
      } else if (args[i].equals("-Xnfastates")) {
        DecisionProbe.verbose = true;
      } else if (args[i].equals("-X")) {
        Xhelp();
      } else {
        if (args[i].charAt(0) != '-') {
          // Must be the grammar file
          addGrammarFile(args[i]);
        }
      }
    }
  }
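
Tying examples #7 and #8 together, a hedged driver sketch: construct the Tool, feed it command-line style options through processArgs, then call process(). The no-argument constructor and the org.antlr package are assumed from stock ANTLR v3; the option values and grammar name are illustrative.

import org.antlr.Tool;

public class DriverSketch {
  public static void main(String[] args) {
    Tool antlr = new Tool();                               // assumed no-arg constructor
    antlr.processArgs(new String[] {"-o", "gen", "-verbose", "Expr.g"});
    antlr.process();                                       // generate recognizers for all grammar files
  }
}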