public final void arrayVals() throws RecognitionException, TokenStreamException {

    // Grammar: STRING_LITERAL (COMMA STRING_LITERAL)*
    // The first literal is reported via first(), every later one via rest().
    Token firstTok = null;
    Token restTok = null;

    firstTok = LT(1);
    match(STRING_LITERAL);
    if (inputState.guessing == 0) {
      // Only fire semantic actions when not in syntactic-predicate lookahead.
      first(firstTok.getText());
    }
    // Consume zero or more ", <string>" tail elements.
    while (LA(1) == COMMA) {
      match(COMMA);
      restTok = LT(1);
      match(STRING_LITERAL);
      if (inputState.guessing == 0) {
        rest(restTok.getText());
      }
    }
  }
Example #2
0
  /**
   * Builds a DetailAST subtree for a block-comment token.
   *
   * @param token Token object.
   * @return DetailAST with BLOCK_COMMENT type.
   */
  private static DetailAST createBlockCommentNode(Token token) {
    // ANTLR columns are 1-based; DetailAST columns are 0-based.
    final int column = token.getColumn() - 1;

    final DetailAST content = new DetailAST();
    content.initialize(token);
    content.setType(TokenTypes.COMMENT_CONTENT);
    content.setText(token.getText());
    // Content begins right after the two characters of the opening "/*".
    content.setColumnNo(column + 2);
    content.setLineNo(token.getLine());

    // Position of the closing "*/" is derived from where the comment text ends.
    final Entry<Integer, Integer> linesColumns =
        countLinesColumns(token.getText(), token.getLine(), token.getColumn());
    final DetailAST close = new DetailAST();
    close.initialize(TokenTypes.BLOCK_COMMENT_END, "*/");
    close.setLineNo(linesColumns.getKey());
    close.setColumnNo(linesColumns.getValue());

    final DetailAST comment = new DetailAST();
    comment.initialize(TokenTypes.BLOCK_COMMENT_BEGIN, "/*");
    comment.setColumnNo(column);
    comment.setLineNo(token.getLine());
    comment.addChild(content);
    comment.addChild(close);
    return comment;
  }
  public final void keyDef() throws RecognitionException, TokenStreamException {

    // Grammar: (STRING_LITERAL | IDENT) DOUBLEDOT
    // Either alternative reports the key text via key().
    Token s = null;
    Token i = null;

    if (LA(1) == STRING_LITERAL) {
      s = LT(1);
      match(STRING_LITERAL);
      if (inputState.guessing == 0) {
        key(s.getText());
      }
    } else if (LA(1) == IDENT) {
      i = LT(1);
      match(IDENT);
      if (inputState.guessing == 0) {
        key(i.getText());
      }
    } else {
      // Neither alternative matches the lookahead token.
      throw new NoViableAltException(LT(1), getFilename());
    }
    match(DOUBLEDOT);
  }
Example #4
0
 /**
  * Overrides the base behavior to retry keywords as identifiers.
  *
  * @param token The token.
  * @param ex The recognition exception.
  * @return AST - The new AST.
  * @throws antlr.RecognitionException if the substitution was not possible.
  * @throws antlr.TokenStreamException if the substitution was not possible.
  */
 public AST handleIdentifierError(Token token, RecognitionException ex)
     throws RecognitionException, TokenStreamException {
   // Guard clauses: delegate to the base handler unless every retry
   // precondition holds.
   if (!(token instanceof HqlToken)) {
     return super.handleIdentifierError(token, ex);
   }
   HqlToken hqlToken = (HqlToken) token;
   // The token must be usable as an identifier, and the error must be a
   // mismatched-token error...
   if (!hqlToken.isPossibleID() || !(ex instanceof MismatchedTokenException)) {
     return super.handleIdentifierError(token, ex);
   }
   MismatchedTokenException mte = (MismatchedTokenException) ex;
   // ... and the parser must actually have been expecting an identifier.
   if (mte.expecting != HqlTokenTypes.IDENT) {
     return super.handleIdentifierError(token, ex);
   }
   // Reinterpret the keyword as an identifier.
   reportWarning(
       "Keyword  '"
           + token.getText()
           + "' is being interpreted as an identifier due to: "
           + mte.getMessage());
   // Add the retyped token to the AST and consume it.
   ASTPair currentAST = new ASTPair();
   token.setType(HqlTokenTypes.WEIRD_IDENT);
   astFactory.addASTChild(currentAST, astFactory.create(token));
   consume();
   return currentAST.root;
 }
 /**
  * Returns the text of the DOC hidden token immediately preceding this node,
  * or the empty string when there is none.
  */
 public String getDocumentation() {
   final Token hidden = getHiddenBefore();
   if (hidden == null || hidden.getType() != DataScriptParserTokenTypes.DOC) {
     return "";
   }
   return hidden.getText();
 }
Example #6
0
  /**
   * Recognizes the document-level charset declaration.
   *
   * @return the declared charset, or null at EOF / after a recognition error.
   */
  public final String htmlDocument() throws RecognitionException, TokenStreamException {
    String charset;

    Token token1 = null;
    Token token2 = null;
    charset = null;

    try { // for error handling
      if (LA(1) == META_CONTENT_TYPE) {
        // <meta http-equiv="Content-Type" ...> style declaration.
        token1 = LT(1);
        match(META_CONTENT_TYPE);
        charset = token1.getText();
      } else if (LA(1) == XML_ENCODING_DECL) {
        // <?xml ... encoding="..."?> style declaration.
        token2 = LT(1);
        match(XML_ENCODING_DECL);
        charset = token2.getText();
      } else if (LA(1) == EOF) {
        // Empty input: no charset declared.
        charset = null;
      } else {
        throw new NoViableAltException(LT(1), getFilename());
      }
    } catch (RecognitionException ex) {
      // Report, skip the bad token, and resynchronize on the follow set.
      reportError(ex);
      consume();
      consumeUntil(_tokenSet_0);
    }
    return charset;
  }
  public final void valueDef() throws RecognitionException, TokenStreamException {

    // Grammar: STRING_LITERAL — reported via value() unless we are inside
    // a syntactic-predicate lookahead.
    final Token literal = LT(1);
    match(STRING_LITERAL);
    if (inputState.guessing == 0) {
      value(literal.getText());
    }
  }
Example #8
0
File: Main.java  Project: yurius-r/decaf
  /**
   * Command-line entry point for the Decaf front end.
   *
   * <p>Reads from stdin when no file argument is given, otherwise from {@code CLI.infile}.
   * In SCAN mode it prints one line per token; in PARSE/DEFAULT mode it runs the parser
   * over the whole input. Any error is printed with the input file name.
   *
   * @param args command-line arguments, forwarded to {@code CLI.parse}.
   */
  public static void main(String[] args) {
    try {
      CLI.parse(args, new String[0]);

      // try-with-resources: the original leaked the FileInputStream.
      // Closing System.in here is harmless since the process is about to exit.
      try (InputStream inputStream =
          args.length == 0 ? System.in : new java.io.FileInputStream(CLI.infile)) {

        if (CLI.target == CLI.SCAN) {
          DecafScanner lexer = new DecafScanner(new DataInputStream(inputStream));
          Token token;
          boolean done = false;
          // Re-enter the scan loop after a lexical error so every error is reported.
          while (!done) {
            try {
              for (token = lexer.nextToken();
                  token.getType() != DecafParserTokenTypes.EOF;
                  token = lexer.nextToken()) {
                String type = "";
                String text = token.getText();

                switch (token.getType()) {
                  case DecafScannerTokenTypes.ID:
                    type = " IDENTIFIER";
                    break;
                }
                System.out.println(token.getLine() + type + " " + text);
              }
              done = true;
            } catch (Exception e) {
              // print the error:
              System.out.println(CLI.infile + " " + e);
              // Skip the offending character and resume scanning.
              lexer.consume();
            }
          }
        } else if (CLI.target == CLI.PARSE || CLI.target == CLI.DEFAULT) {
          DecafScanner lexer = new DecafScanner(new DataInputStream(inputStream));
          DecafParser parser = new DecafParser(lexer);
          parser.program();
        }
      }
    } catch (Exception e) {
      // print the error:
      System.out.println(CLI.infile + " " + e);
    }
  }
Example #9
0
  /**
   * Recognizes one RECORD token with an optional trailing comma.
   *
   * @return the record text, or null if a recognition error occurred.
   */
  public final String record() throws RecognitionException, TokenStreamException {
    String rec;

    Token r = null;

    rec = null;

    try { // for error handling
      r = LT(1);
      match(RECORD);

      rec = r.getText();

      // Optional separator: a comma may follow; EOF, NEWLINE, or the next
      // RECORD may legally follow without one.
      if (LA(1) == COMMA) {
        match(COMMA);
      } else if (LA(1) == EOF || LA(1) == NEWLINE || LA(1) == RECORD) {
        // nothing to consume
      } else {
        throw new NoViableAltException(LT(1), getFilename());
      }
    } catch (RecognitionException ex) {
      // Report and resynchronize on the follow set; rec stays null.
      reportError(ex);
      recover(ex, _tokenSet_2);
    }
    return rec;
  }
Example #10
0
  /**
   * Lexer rule STRING: matches a double-quoted string and replaces the matched
   * text with its unescaped contents (surrounding quotes stripped, escape
   * sequences resolved via mESC).
   *
   * @param _createToken whether to build a Token for the matched text.
   * @throws RecognitionException on a character that fits no alternative.
   * @throws CharStreamException on an input-stream failure.
   * @throws TokenStreamException on a token-stream failure.
   */
  public final void mSTRING(boolean _createToken)
      throws RecognitionException, CharStreamException, TokenStreamException {
    int _ttype;
    Token _token = null;
    // Start offset of this rule's text in the shared lexer buffer.
    int _begin = text.length();
    _ttype = STRING;
    int _saveIndex;
    Token escaped = null;
    char normal = '\0';
    // Accumulates the unescaped string body.
    StringBuilder lBuf = new StringBuilder();

    match('"');
    {
      _loop26:
      do {
        if ((LA(1) == '\\')) {
          // Backslash starts an escape; mESC leaves the resolved character(s)
          // in _returnToken.
          mESC(true);
          escaped = _returnToken;
          lBuf.append(escaped.getText());
        } else if ((_tokenSet_1.member(LA(1)))) {
          // Ordinary character (presumably _tokenSet_1 excludes '"' and '\\'
          // — TODO confirm against the generated token set) taken literally.
          {
            normal = LA(1);
            match(_tokenSet_1);
          }
          lBuf.append(normal);
        } else {
          // Closing quote (or anything else): leave the loop.
          break _loop26;
        }

      } while (true);
    }
    match('"');
    // Replace the raw matched text (with quotes/escapes) by the unescaped body.
    text.setLength(_begin);
    text.append(lBuf.toString());
    if (_createToken && _token == null && _ttype != Token.SKIP) {
      _token = makeToken(_ttype);
      _token.setText(new String(text.getBuffer(), _begin, text.length() - _begin));
    }
    _returnToken = _token;
  }
Example #11
0
  /**
   * Builds a DetailAST subtree for a single-line comment token.
   *
   * @param token Token object.
   * @return DetailAST with SINGLE_LINE_COMMENT type.
   */
  private static DetailAST createSlCommentNode(Token token) {
    // ANTLR columns are 1-based; DetailAST columns are 0-based.
    final int column = token.getColumn() - 1;

    final DetailAST content = new DetailAST();
    content.initialize(token);
    content.setType(TokenTypes.COMMENT_CONTENT);
    content.setText(token.getText());
    // Content begins right after the two characters of "//".
    content.setColumnNo(column + 2);
    content.setLineNo(token.getLine());

    final DetailAST comment = new DetailAST();
    comment.setType(TokenTypes.SINGLE_LINE_COMMENT);
    comment.setText("//");
    comment.setColumnNo(column);
    comment.setLineNo(token.getLine());
    comment.addChild(content);
    return comment;
  }
Example #12
0
  /**
   * Lexer rule ESC: matches a backslash escape sequence and replaces the
   * matched text with the single character it denotes.
   *
   * <p>Supported escapes: \n \r \t \b \f \" \' \/ \\ and Java-style
   * Unicode escapes \u+XXXX (one or more 'u' characters followed by four
   * hex digits, decoded via ParserUtil.hexToChar).
   *
   * @param _createToken whether to build a Token for the matched text.
   * @throws RecognitionException on an unrecognized escape character.
   * @throws CharStreamException on an input-stream failure.
   * @throws TokenStreamException on a token-stream failure.
   */
  protected final void mESC(boolean _createToken)
      throws RecognitionException, CharStreamException, TokenStreamException {
    int _ttype;
    Token _token = null;
    // Start offset of this rule's text in the shared lexer buffer.
    int _begin = text.length();
    _ttype = ESC;
    int _saveIndex;
    // Hold the four hex-digit tokens of a \uXXXX escape.
    Token i = null;
    Token j = null;
    Token k = null;
    Token l = null;

    match('\\');
    {
      // Each simple branch consumes its escape letter, then rewrites the
      // buffered text ("\x") to the single character it stands for.
      switch (LA(1)) {
        case 'n':
          {
            match('n');
            text.setLength(_begin);
            text.append("\n");
            break;
          }
        case 'r':
          {
            match('r');
            text.setLength(_begin);
            text.append("\r");
            break;
          }
        case 't':
          {
            match('t');
            text.setLength(_begin);
            text.append("\t");
            break;
          }
        case 'b':
          {
            match('b');
            text.setLength(_begin);
            text.append("\b");
            break;
          }
        case 'f':
          {
            match('f');
            text.setLength(_begin);
            text.append("\f");
            break;
          }
        case '"':
          {
            match('"');
            text.setLength(_begin);
            text.append("\"");
            break;
          }
        case '\'':
          {
            match('\'');
            text.setLength(_begin);
            text.append("\'");
            break;
          }
        case '/':
          {
            match('/');
            text.setLength(_begin);
            text.append("/");
            break;
          }
        case '\\':
          {
            match('\\');
            text.setLength(_begin);
            text.append("\\");
            break;
          }
        case 'u':
          {
            {
              // One-or-more 'u' characters, as in the Java language spec
              // (\u, \uu, \uuu... are all legal prefixes).
              int _cnt30 = 0;
              _loop30:
              do {
                if ((LA(1) == 'u')) {
                  match('u');
                } else {
                  if (_cnt30 >= 1) {
                    break _loop30;
                  } else {
                    throw new NoViableAltForCharException(
                        (char) LA(1), getFilename(), getLine(), getColumn());
                  }
                }

                _cnt30++;
              } while (true);
            }
            // Exactly four hex digits follow; decode them into one char.
            mHEX_DIGIT(true);
            i = _returnToken;
            mHEX_DIGIT(true);
            j = _returnToken;
            mHEX_DIGIT(true);
            k = _returnToken;
            mHEX_DIGIT(true);
            l = _returnToken;
            text.setLength(_begin);
            text.append(ParserUtil.hexToChar(i.getText(), j.getText(), k.getText(), l.getText()));
            break;
          }
        default:
          {
            // Unknown escape character after the backslash.
            throw new NoViableAltForCharException(
                (char) LA(1), getFilename(), getLine(), getColumn());
          }
      }
    }
    if (_createToken && _token == null && _ttype != Token.SKIP) {
      _token = makeToken(_ttype);
      _token.setText(new String(text.getBuffer(), _begin, text.length() - _begin));
    }
    _returnToken = _token;
  }
Example #13
0
  /**
   * Command-line entry point for the Decaf compiler.
   *
   * <p>Reads from stdin when no file argument is given, otherwise from {@code CLI.infile}.
   * Target selects the pipeline stage: SCAN prints tokens, PARSE/DEFAULT parses, INTER adds
   * semantic checking, LOWIR additionally generates and prints low-level IR.
   *
   * @param args command-line arguments, forwarded to {@code CLI.parse}.
   */
  public static void main(String[] args) {
    try {
      CLI.parse(args, new String[0]);

      // try-with-resources: the original leaked the FileInputStream.
      // Closing System.in here is harmless since the process is about to exit.
      try (InputStream inputStream =
          args.length == 0 ? System.in : new java.io.FileInputStream(CLI.infile)) {

        if (CLI.target == CLI.SCAN) {
          DecafScanner lexer = new DecafScanner(new DataInputStream(inputStream));
          Token token;
          boolean done = false;
          // Re-enter the scan loop after a lexical error so every error is reported.
          while (!done) {
            try {
              for (token = lexer.nextToken();
                  token.getType() != DecafParserTokenTypes.EOF;
                  token = lexer.nextToken()) {
                String type = "";
                String text = token.getText();

                switch (token.getType()) {
                  case DecafScannerTokenTypes.ID:
                    type = " IDENTIFIER";
                    break;
                  case DecafScannerTokenTypes.CHAR:
                    type = " CHARLITERAL";
                    break;
                  case DecafScannerTokenTypes.TRUE:
                  case DecafScannerTokenTypes.FALSE:
                    type = " BOOLEANLITERAL";
                    break;
                  case DecafScannerTokenTypes.HEX:
                  case DecafScannerTokenTypes.DECIMAL:
                    type = " INTLITERAL";
                    break;
                  case DecafScannerTokenTypes.STRING:
                    type = " STRINGLITERAL";
                    break;
                }
                System.out.println(token.getLine() + type + " " + text);
              }
              done = true;
            } catch (Exception e) {
              // print the error:
              System.out.println(CLI.infile + " " + e);
              // Skip the offending character and resume scanning.
              lexer.consume();
            }
          }
        } else if (CLI.target == CLI.PARSE || CLI.target == CLI.DEFAULT) {
          DecafScanner lexer = new DecafScanner(new DataInputStream(inputStream));
          DecafParser parser = new DecafParser(lexer, CLI.debug);
          parser.program();
        } else if (CLI.target == CLI.INTER || CLI.target == CLI.LOWIR) {
          // INTER and LOWIR shared identical parse+check setup; merged here.
          DecafScanner lexer = new DecafScanner(new DataInputStream(inputStream));
          DecafParser parser = new DecafParser(lexer, CLI.debug);
          parser.program();
          IrNode irRoot = parser.getIrTree();
          SemanticChecker checker = new SemanticChecker(CLI.infile, CLI.debug);
          if (CLI.debug) {
            System.out.println("--- checking -----");
          }
          checker.checkProgram((IrClassDecl) irRoot);
          if (CLI.target == CLI.LOWIR) {
            CodeGen codegen = new CodeGen(irRoot, CLI.debug);
            codegen.genLowIr();
            codegen.printLowIr();
          }
        }
      }
    } catch (Exception e) {
      // print the error:
      System.out.println(CLI.infile + " " + e);
    }
  }