コード例 #1
0
  @Test
  public void testLookback() throws Exception {
    // A tiny lexer grammar covering identifiers, integers, and a few operators.
    LexerGrammar grammar =
        new LexerGrammar(
            "lexer grammar t;\n"
                + "ID : 'a'..'z'+;\n"
                + "INT : '0'..'9'+;\n"
                + "SEMI : ';';\n"
                + "ASSIGN : '=';\n"
                + "PLUS : '+';\n"
                + "MULT : '*';\n"
                + "WS : ' '+;\n");
    // Tokens: 012345678901234567
    // Input:  x = 3 * 0 + 2 * 0;
    CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
    LexerInterpreter lexEngine = grammar.createLexerInterpreter(input);
    TokenStream tokens = createTokenStream(lexEngine);

    // After one consume, LT(-1) must be the token just consumed ('x').
    tokens.consume();
    assertEquals("x", tokens.LT(-1).getText());

    // Consume the space and '=' as well, then look back up to three tokens.
    tokens.consume();
    tokens.consume();
    assertEquals("x", tokens.LT(-3).getText());
    assertEquals(" ", tokens.LT(-2).getText());
    assertEquals("=", tokens.LT(-1).getText());
  }
コード例 #2
0
ファイル: XPath.java プロジェクト: antlr/antlr4
 /**
  * Converts a word such as {@code *}, {@code ID}, or {@code expr} into a path element.
  * {@code anywhere} is {@code true} when {@code //} preceded the word.
  */
 protected XPathElement getXPathElement(Token wordToken, boolean anywhere) {
   if (wordToken.getType() == Token.EOF) {
     throw new IllegalArgumentException("Missing path element at end of path");
   }

   String word = wordToken.getText();
   int ttype = parser.getTokenType(word);
   int ruleIndex = parser.getRuleIndex(word);
   int elementType = wordToken.getType();

   if (elementType == XPathLexer.WILDCARD) {
     return anywhere ? new XPathWildcardAnywhereElement() : new XPathWildcardElement();
   }
   if (elementType == XPathLexer.TOKEN_REF || elementType == XPathLexer.STRING) {
     // Token references and string literals must name a token known to the parser.
     if (ttype == Token.INVALID_TYPE) {
       throw new IllegalArgumentException(
           word + " at index " + wordToken.getStartIndex() + " isn't a valid token name");
     }
     return anywhere
         ? new XPathTokenAnywhereElement(word, ttype)
         : new XPathTokenElement(word, ttype);
   }
   // Any other word is treated as a rule reference.
   if (ruleIndex == -1) {
     throw new IllegalArgumentException(
         word + " at index " + wordToken.getStartIndex() + " isn't a valid rule name");
   }
   return anywhere
       ? new XPathRuleAnywhereElement(word, ruleIndex)
       : new XPathRuleElement(word, ruleIndex);
 }
コード例 #3
0
  @Override
  public void addToHighlighting(int annexOffset, Token token, String id) {
    // Capture the token's absolute offset, length, and column so the element
    // can be located again when highlighting is applied.
    final int start = ((CommonToken) token).getStartIndex();
    final int textLength = token.getText().length();
    final int col = token.getCharPositionInLine();
    _elementToHighlight.add(
        new AadlBaLocationReference(annexOffset, start, textLength, col, id));
  }
コード例 #4
0
 /** Records a syntax element for {@code token} and, in debug mode, prints it. */
 private void addElement(Token token, SyntaxElementType type) {
   SyntaxElement element =
       SyntaxElement.create(
           token.getText(),
           token.getStartIndex(),
           token.getStopIndex(),
           token.getTokenIndex(),
           type);
   syntaxElements.add(element);
   if (debug) {
     System.out.println(
         String.format("%d-%d %s %s", element.from, element.to, element.type.name(), element.value));
   }
 }
コード例 #5
0
ファイル: StatementSplitter.java プロジェクト: Rokum/presto
 /**
  * Splits {@code sql} into complete statements (terminated by one of {@code delimiters})
  * and a trailing partial statement, if any.
  */
 public StatementSplitter(String sql, Set<String> delimiters) {
   TokenSource lexer = getLexer(sql, delimiters);
   ImmutableList.Builder<Statement> complete = ImmutableList.builder();
   StringBuilder buffer = new StringBuilder();
   for (Token token = lexer.nextToken();
       token.getType() != Token.EOF;
       token = lexer.nextToken()) {
     if (token.getType() == SqlBaseParser.DELIMITER) {
       // A delimiter closes the current statement; blank statements are dropped.
       String statement = buffer.toString().trim();
       if (!statement.isEmpty()) {
         complete.add(new Statement(statement, token.getText()));
       }
       buffer = new StringBuilder();
     } else {
       buffer.append(token.getText());
     }
   }
   this.completeStatements = complete.build();
   // Whatever remains after the last delimiter is the partial statement.
   this.partialStatement = buffer.toString().trim();
 }
コード例 #6
0
    /**
     * Logs parser errors in Checkstyle manner. Parser can generate error messages. There is special
     * error that parser can generate. It is missed close HTML tag. This case is special because
     * parser prints error like {@code "no viable alternative at input 'b \n *\n'"} and it is not
     * clear that error is about missed close HTML tag. Other error messages are not special and
     * logged simply as "Parse Error...".
     *
     * <p>{@inheritDoc}
     */
    @Override
    public void syntaxError(
        Recognizer<?, ?> recognizer,
        Object offendingSymbol,
        int line,
        int charPositionInLine,
        String msg,
        RecognitionException ex) {
      // Translate the parser-relative line number into an absolute one.
      final int lineNumber = offset + line;
      // NOTE(review): offendingSymbol can be null for some recognition errors, in which
      // case token.getText() below would NPE — confirm callers always supply a token.
      final Token token = (Token) offendingSymbol;

      if (MSG_JAVADOC_MISSED_HTML_CLOSE.equals(msg)) {
        // Special case: missed close HTML tag — record it and abort parsing.
        errorMessage =
            new ParseErrorMessage(
                lineNumber, MSG_JAVADOC_MISSED_HTML_CLOSE, charPositionInLine, token.getText());

        throw new ParseCancellationException(msg);
      } else if (MSG_JAVADOC_WRONG_SINGLETON_TAG.equals(msg)) {
        // Special case: wrong singleton tag — record it and abort parsing.
        errorMessage =
            new ParseErrorMessage(
                lineNumber, MSG_JAVADOC_WRONG_SINGLETON_TAG, charPositionInLine, token.getText());

        throw new ParseCancellationException(msg);
      } else {
        // Generic parse error: record the failing rule name in UPPER_UNDERSCORE form.
        // NOTE(review): ex (and ex.getCtx()) may be null for some errors — confirm this
        // branch is only reached with a non-null RecognitionException.
        final int ruleIndex = ex.getCtx().getRuleIndex();
        final String ruleName = recognizer.getRuleNames()[ruleIndex];
        final String upperCaseRuleName =
            CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, ruleName);

        errorMessage =
            new ParseErrorMessage(
                lineNumber,
                MSG_JAVADOC_PARSE_RULE_ERROR,
                charPositionInLine,
                msg,
                upperCaseRuleName);
      }
    }
コード例 #7
0
ファイル: StatementSplitter.java プロジェクト: Rokum/presto
 /** Collapses each whitespace token in {@code sql} to a single space and trims the result. */
 public static String squeezeStatement(String sql) {
   TokenSource lexer = getLexer(sql, ImmutableSet.<String>of());
   StringBuilder squeezed = new StringBuilder();
   for (Token token = lexer.nextToken();
       token.getType() != Token.EOF;
       token = lexer.nextToken()) {
     if (token.getType() == SqlBaseLexer.WS) {
       // Any whitespace token contributes exactly one space.
       squeezed.append(' ');
     } else {
       squeezed.append(token.getText());
     }
   }
   return squeezed.toString().trim();
 }
コード例 #8
0
    @Override
    @RuleDependency(
        recognizer = TemplateParser.class,
        rule = TemplateParser.RULE_anonymousTemplateParameters,
        version = 0,
        dependents = Dependents.PARENTS)
    public void enterAnonymousTemplateParameters(AnonymousTemplateParametersContext ctx) {
      // Nothing to record when the rule matched without parameter names.
      if (ctx.names == null) {
        return;
      }
      parameterDeclarations.addAll(ctx.names);

      // Also register each name with the innermost parameter scope.
      Set<String> scope = parameters.peek();
      for (Token name : ctx.names) {
        scope.add(name.getText());
      }
    }
コード例 #9
0
  /**
   * Reports a syntax error by throwing a {@link LangParserException} carrying the offending
   * token, its position, a short message, and the source line where the error occurred.
   *
   * @throws LangParserException always, wrapping the error details
   */
  public void syntaxError(
      Recognizer<?, ?> recognizer,
      Object offendingSymbol,
      int line,
      int charPositionInLine,
      String msg,
      RecognitionException e) {
    CommonTokenStream tokens = (CommonTokenStream) recognizer.getInputStream();
    String input = tokens.getTokenSource().getInputStream().toString();
    Token token = (Token) offendingSymbol;
    String[] lines = StringUtils.splitPreserveAllTokens(input, '\n');
    // Guard against a reported line number outside the split input (e.g. errors at EOF)
    // — the original lines[line - 1] would throw ArrayIndexOutOfBoundsException.
    String errorLine = (line >= 1 && line <= lines.length) ? lines[line - 1] : "";

    // offendingSymbol may be null (e.g. lexer-originated errors); avoid an NPE while
    // building the user-facing message.
    String tokenText = token != null ? token.getText() : "";
    String simpleMessage = "syntax error at or near \"" + tokenText + "\"";
    throw new LangParserException(token, line, charPositionInLine, simpleMessage, errorLine);
  }
コード例 #10
0
 @Override
 public void exitInst(InstContext ctx) {
   // Rewrites this instruction's stored value so that hidden-channel text that
   // appeared immediately to its left is preserved in the output.
   String s = getValue(ctx.getChild(0));
   setValue(ctx, s);
   int a = ctx.getStart().getTokenIndex();
   // Hidden tokens on channel 1 directly left of this instruction's first token.
   List<Token> tt = tokens.getHiddenTokensToLeft(a, 1);
   // Hidden tokens on channel 2 — presumably newlines; TODO confirm channel mapping.
   List<Token> ttnl = tokens.getHiddenTokensToLeft(a, 2);
   String ttt = "";
   // More than one channel-2 token before us: insert a blank-line separator.
   if (ttnl != null && ttnl.size() > 1) ttt = "\n\n";
   if (tt != null) {
     for (Token t : tt) {
       // Concatenate the hidden text verbatim (e.g. comments) after the separator.
       ttt = ttt + t.getText();
       // System.out.println(t.getType() + " - " + t.getChannel());
     }
     // System.out.println(ttt);
   }
   // Prepend the collected hidden text to the value stored above.
   setValue(ctx, ttt + getValue(ctx));
 }
コード例 #11
0
 @Override
 public int getLen(
     RuleNode parent, Token token, List<Token> hiddenTokensToRight, TokenStream tokenStream) {
   // Counts leading hidden tokens that are either whitespace or start to the right
   // of the anchor token's column; stops at the first token that is neither.
   int count = 0;
   for (org.antlr.v4.runtime.Token hidden : hiddenTokensToRight) {
     boolean isWhitespace = TokenUtils.isWs(hidden.getText());
     if (!isWhitespace
         && hidden.getCharPositionInLine() <= token.getCharPositionInLine()) {
       break;
     }
     count++;
   }
   return count;
 }
コード例 #12
0
  /**
   * Turns a token stream into a string, makes for easy debugging of token errors.
   *
   * <p>Each token is rendered as {@code |text -> NAME}; the loop stops after EOF.
   *
   * @param lexer the lexer to drain; its token stream is consumed by this call
   * @return a pipe-separated rendering of every token up to and including EOF
   */
  public static String tokensToString(final Lexer lexer) {
    final StringBuilder build = new StringBuilder();
    Token t;
    final String[] names = lexer.getTokenNames();
    while ((t = lexer.nextToken()) != null) {
      build.append("|");
      build.append(t.getText());
      build.append(" -> ");
      // Bounds check added: a positive type past the end of the names array would
      // otherwise throw ArrayIndexOutOfBoundsException in this debug helper.
      if (t.getType() >= 0 && t.getType() < names.length) {
        build.append(names[t.getType()]);
      } else if (t.getType() == Token.EOF) {
        // EOF has a negative type; render it explicitly and stop.
        build.append("EOF");
        break;
      } else {
        // Unknown or out-of-range token type.
        build.append("???");
      }
    }

    return build.toString();
  }
コード例 #13
0
  /**
   * Renders a token for human-readable diagnostics: quoted text when available,
   * otherwise a placeholder for a missing token, EOF, or a bare type number.
   */
  protected String getTokenDisplayString(Token token) {
    if (token == null) {
      return "[no token]";
    }
    String text = token.getText();
    if (text != null) {
      return quote(text);
    }
    // No text: fall back to a description of the token type.
    return token.getType() == Token.EOF ? "end of text" : "[" + token.getType() + "]";
  }
コード例 #14
0
ファイル: PSymbol.java プロジェクト: dtwelch/resolve-lite
 /** Null-safe overload: unwraps the token's text and delegates to the String overload. */
 public PSymbolBuilder qualifier(Token q) {
   if (q == null) {
     // Cast disambiguates the overload when passing a literal null.
     return qualifier((String) null);
   }
   return qualifier(q.getText());
 }