Example #1
 /**
  * Convert word like {@code *} or {@code ID} or {@code expr} to a path element. {@code anywhere}
  * is {@code true} if {@code //} precedes the word.
  */
 protected XPathElement getXPathElement(Token wordToken, boolean anywhere) {
   if (wordToken.getType() == Token.EOF) {
     throw new IllegalArgumentException("Missing path element at end of path");
   }
   String word = wordToken.getText();
   int ttype = parser.getTokenType(word);
   int ruleIndex = parser.getRuleIndex(word);
   switch (wordToken.getType()) {
     case XPathLexer.WILDCARD:
       return anywhere ? new XPathWildcardAnywhereElement() : new XPathWildcardElement();
     case XPathLexer.TOKEN_REF:
     case XPathLexer.STRING:
       if (ttype == Token.INVALID_TYPE) {
         throw new IllegalArgumentException(
             word + " at index " + wordToken.getStartIndex() + " isn't a valid token name");
       }
       return anywhere
           ? new XPathTokenAnywhereElement(word, ttype)
           : new XPathTokenElement(word, ttype);
     default:
       if (ruleIndex == -1) {
         throw new IllegalArgumentException(
             word + " at index " + wordToken.getStartIndex() + " isn't a valid rule name");
       }
       return anywhere
           ? new XPathRuleAnywhereElement(word, ruleIndex)
           : new XPathRuleElement(word, ruleIndex);
   }
 }
Example #2
  protected String getErrorMessage(RecognitionException re) {
    String message = "";

    Parser recognizer = (Parser) re.getRecognizer();
    TokenStream tokens = recognizer.getInputStream();

    if (re instanceof NoViableAltException) {
      NoViableAltException e = (NoViableAltException) re;
      Token startToken = e.getStartToken();
      String input =
          (startToken.getType() == Token.EOF)
              ? "end of text"
              : quote(tokens.getText(startToken, e.getOffendingToken()));

      message = "no viable date format found at " + input;
    } else if (re instanceof InputMismatchException) {
      InputMismatchException e = (InputMismatchException) re;
      message =
          "did not expect "
              + getTokenDisplayString(e.getOffendingToken())
              + " while looking for "
              + e.getExpectedTokens().toString(recognizer.getTokenNames());
    } else if (re instanceof FailedPredicateException) {
      FailedPredicateException e = (FailedPredicateException) re;
      String ruleName = recognizer.getRuleNames()[recognizer.getContext().getRuleIndex()];

      message = "failed predicate " + ruleName + ": " + e.getMessage();
    }

    return message;
  }
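A hedged sketch of how this message builder might be hooked up: an error listener that converts each RecognitionException into the friendly message. BaseErrorListener and the syntaxError signature are real ANTLR4 API; installing it this way (and the enclosing class) is an assumption about the surrounding code.

  void installFriendlyErrors(Parser parser) {
    parser.removeErrorListeners();
    parser.addErrorListener(
        new BaseErrorListener() {
          @Override
          public void syntaxError(
              Recognizer<?, ?> recognizer,
              Object offendingSymbol,
              int line,
              int charPositionInLine,
              String msg,
              RecognitionException e) {
            // e can be null for some reported errors; fall back to the default text.
            throw new IllegalArgumentException(e != null ? getErrorMessage(e) : msg);
          }
        });
  }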
Example #3
 public static String squeezeStatement(String sql) {
   TokenSource tokens = getLexer(sql, ImmutableSet.<String>of());
   StringBuilder sb = new StringBuilder();
   while (true) {
     Token token = tokens.nextToken();
     if (token.getType() == Token.EOF) {
       break;
     }
     if (token.getType() == SqlBaseLexer.WS) {
       sb.append(' ');
     } else {
       sb.append(token.getText());
     }
   }
   return sb.toString().trim();
 }
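A quick usage sketch, assuming the method is the public static helper it appears to be (the surrounding class reads like Presto's StatementSplitter):

  public static void main(String[] args) {
    // Each run of spaces/newlines lexes as a single WS token, so it
    // collapses to one space; the result is also trimmed.
    System.out.println(squeezeStatement("SELECT *\n    FROM   t\n"));
    // prints: SELECT * FROM t
  }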
Example #4
  @Test
  public void testCompleteBufferAfterConsuming() throws Exception {
    LexerGrammar g =
        new LexerGrammar(
            "lexer grammar t;\n"
                + "ID : 'a'..'z'+;\n"
                + "INT : '0'..'9'+;\n"
                + "SEMI : ';';\n"
                + "ASSIGN : '=';\n"
                + "PLUS : '+';\n"
                + "MULT : '*';\n"
                + "WS : ' '+;\n");
    // Tokens: 012345678901234567
    // Input:  x = 3 * 0 + 2 * 0;
    CharStream input = new ANTLRInputStream("x = 3 * 0 + 2 * 0;");
    LexerInterpreter lexEngine = g.createLexerInterpreter(input);
    TokenStream tokens = createTokenStream(lexEngine);

    Token t = tokens.LT(1);
    while (t.getType() != Token.EOF) {
      tokens.consume();
      t = tokens.LT(1);
    }

    String result = tokens.getText();
    String expecting = "x = 3 * 0 + 2 * 0;";
    assertEquals(expecting, result);
  }
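The createTokenStream fixture hook is not shown; a minimal sketch of a plausible implementation (ANTLR's own test suite overrides it per stream variant):

  protected TokenStream createTokenStream(TokenSource source) {
    return new CommonTokenStream(source);
  }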
Example #5
  /**
   * Turns a token stream into a string; makes for easy debugging of token errors.
   *
   * @param lexer the lexer to drain, consumed up to and including EOF
   * @return a {@code |}-separated dump of each token's text and type name
   */
  public static String tokensToString(final Lexer lexer) {
    final StringBuilder build = new StringBuilder();
    Token t;
    final String[] names = lexer.getTokenNames();
    while ((t = lexer.nextToken()) != null) {
      build.append("|");
      build.append(t.getText());
      build.append(" -> ");
      if (t.getType() >= 0) {
        build.append(names[t.getType()]);
      } else if (t.getType() == Token.EOF) {
        build.append("EOF");
        break;
      } else {
        build.append("???");
      }
    }

    return build.toString();
  }
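A usage sketch; HelloLexer stands in for any generated ANTLR4 lexer:

  public static void main(String[] args) {
    Lexer lexer = new HelloLexer(new ANTLRInputStream("hi there"));
    // Prints something like: |hi -> ID| -> WS|there -> ID|<EOF> -> EOF
    System.out.println(tokensToString(lexer));
  }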
Example #6
  protected String getTokenDisplayString(Token token) {
    String string;

    if (token == null) {
      string = "[no token]";
    } else {
      String text = token.getText();

      if (text == null) {
        if (token.getType() == Token.EOF) {
          string = "end of text";
        } else {
          string = "[" + token.getType() + "]";
        }
      } else {
        string = quote(text);
      }
    }

    return string;
  }
Example #7
 public static boolean isEmptyStatement(String sql) {
   TokenSource tokens = getLexer(sql, ImmutableSet.<String>of());
   while (true) {
     Token token = tokens.nextToken();
     if (token.getType() == Token.EOF) {
       return true;
     }
     if (token.getChannel() != Token.HIDDEN_CHANNEL) {
       return false;
     }
   }
 }
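A usage sketch, assuming (as in Presto's SQL grammar) that whitespace and comments are sent to the hidden channel:

  public static void main(String[] args) {
    System.out.println(isEmptyStatement("  -- just a comment\n")); // true
    System.out.println(isEmptyStatement("SELECT 1"));              // false
  }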
Example #8
 public StatementSplitter(String sql, Set<String> delimiters) {
   TokenSource tokens = getLexer(sql, delimiters);
   ImmutableList.Builder<Statement> list = ImmutableList.builder();
   StringBuilder sb = new StringBuilder();
   while (true) {
     Token token = tokens.nextToken();
     if (token.getType() == Token.EOF) {
       break;
     }
     if (token.getType() == SqlBaseParser.DELIMITER) {
       String statement = sb.toString().trim();
       if (!statement.isEmpty()) {
         list.add(new Statement(statement, token.getText()));
       }
       sb = new StringBuilder();
     } else {
       sb.append(token.getText());
     }
   }
   this.completeStatements = list.build();
   this.partialStatement = sb.toString().trim();
 }
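A hedged usage sketch; the getCompleteStatements()/getPartialStatement() accessors for the two fields assigned above are assumptions:

  StatementSplitter splitter =
      new StatementSplitter("SELECT 1; SELECT 2; SELECT", ImmutableSet.of(";"));
  for (Statement s : splitter.getCompleteStatements()) {
    System.out.println(s); // "SELECT 1", then "SELECT 2", each ended by ";"
  }
  System.out.println(splitter.getPartialStatement()); // SELECT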
Example #9
    @Override
    protected boolean sync(int i) {
      if (!super.sync(i)) {
        return false;
      }

      Token t = get(i);
      if (hide.contains(t.getType())) {
        ((WritableToken) t).setChannel(Token.HIDDEN_CHANNEL);
      }

      return true;
    }
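For context, a sketch of the class this override plausibly sits in: a BufferedTokenStream subclass that lazily moves selected token types onto the hidden channel as the buffer fills. The class and field names are assumptions; sync(int), get(int), and WritableToken are real ANTLR4 runtime API.

  import java.util.Set;
  import org.antlr.v4.runtime.*;

  class ChannelHidingTokenStream extends BufferedTokenStream {
    private final Set<Integer> hide;

    ChannelHidingTokenStream(TokenSource source, Set<Integer> hide) {
      super(source);
      this.hide = hide;
    }

    @Override
    protected boolean sync(int i) {
      if (!super.sync(i)) {
        return false;
      }
      // Re-channel the freshly buffered token if its type is in the hide set.
      Token t = get(i);
      if (hide.contains(t.getType())) {
        ((WritableToken) t).setChannel(Token.HIDDEN_CHANNEL);
      }
      return true;
    }
  }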
Example #10
 @Override
 public void createPresentation(TextPresentation presentation, ITypedRegion region) {
   // Use tokens provided by the lexer to highlight keywords, etc...
    // Seems fast enough to skip Eclipse partitioning. In fact, the Eclipse
    // partitioner seems to slow everything down...
   TSLKGrammarLexer lexer = new TSLKGrammarLexer(new ANTLRInputStream(document.get()));
   Token t = null;
   while ((t = lexer.nextToken()).getType() != Token.EOF) {
     if (t.getStartIndex() > region.getOffset() + region.getLength()) break;
     int start = t.getStartIndex();
     int end = t.getStopIndex();
     RGB foreground = null;
     RGB background = null;
     int style = SWT.NORMAL;
     switch (t.getType()) { // TODO: Make keywords customisable
       case TSLKGrammarLexer.WHILE:
       case TSLKGrammarLexer.FOR:
       case TSLKGrammarLexer.FUNC:
       case TSLKGrammarLexer.IF:
       case TSLKGrammarLexer.THEN:
       case TSLKGrammarLexer.DO:
       case TSLKGrammarLexer.END:
         foreground = ColorManager.KEYWORD;
         style = SWT.BOLD;
         break;
       case TSLKGrammarLexer.STRING:
         foreground = ColorManager.STRING;
         break;
       case TSLKGrammarLexer.SLCOMMENT:
         foreground = ColorManager.SINGLE_LINE_COMMENT;
         break;
       case TSLKGrammarLexer.MLCOMMENT:
         foreground = ColorManager.MULTI_LINE_COMMENT;
         break;
       default:
         foreground = ColorManager.DEFAULT;
         break;
     }
     presentation.addStyleRange(
         new StyleRange(
             start,
             end - start + 1,
             colorManager.getColor(foreground),
             colorManager.getColor(background),
             style));
   }
 }
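createPresentation(TextPresentation, ITypedRegion) matches JFace's IPresentationRepairer contract, so the class is presumably registered with a PresentationReconciler along these lines (a fragment; the variable name is an assumption, and the same object often serves as both damager and repairer):

  PresentationReconciler reconciler = new PresentationReconciler();
  reconciler.setDamager(tslkHighlighter, IDocument.DEFAULT_CONTENT_TYPE);
  reconciler.setRepairer(tslkHighlighter, IDocument.DEFAULT_CONTENT_TYPE);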
Example #11
  private static int getArgumentCount(final List<Token> tokens) {
    if (tokens.isEmpty()) {
      return 0;
    }

    int count = 0;

    for (Token token : tokens) {
      if (token.getType() == AntlrLexer.ARGUMENT) {
        ++count;
      } else {
        break;
      }
    }

    return count;
  }
Example #12
  public static Interval getSourceInterval(@NonNull ParserRuleContext context) {
    Parameters.notNull("context", context);
    int startIndex = context.start.getStartIndex();
    Token stopSymbol = getStopSymbol(context);
    if (stopSymbol == null) {
      return new Interval(startIndex, startIndex - 1);
    }

    int stopIndex;
    if (stopSymbol.getType() != Token.EOF) {
      stopIndex = stopSymbol.getStopIndex();
    } else {
      TokenSource tokenSource = context.getStart().getTokenSource();
      CharStream inputStream = tokenSource != null ? tokenSource.getInputStream() : null;
      if (inputStream != null) {
        stopIndex = inputStream.size() - 1;
      } else {
        stopIndex = context.start.getStartIndex() - 1;
      }
    }

    stopIndex = Math.max(stopIndex, startIndex - 1);
    return new Interval(startIndex, stopIndex);
  }
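One common use of the interval, sketched: recover the exact original text of a context from the character stream. CharStream.getText(Interval) is standard ANTLR4 API; for an empty context the interval is empty and the result is "".

  static String originalText(ParserRuleContext context) {
    Interval interval = getSourceInterval(context);
    return context.getStart().getInputStream().getText(interval);
  }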
Example #13
 private static boolean is(Token token, int type) {
   return token != null && token.getType() == type;
 }
Example #14
  public List<TaggedPositionRegion<TokenTag<Token>>> getHighlights(int startOffset, int endOffset) {
    List<TaggedPositionRegion<TokenTag<Token>>> tags = new ArrayList<>();
    boolean updateOffsets = true;

    if (endOffset == Integer.MAX_VALUE) {
      endOffset = snapshot.length();
    }

    OffsetRegion span = OffsetRegion.fromBounds(startOffset, endOffset);

    if (failedTimeout) {
      return tags;
    }

    boolean spanExtended = false;

    int extendMultiLineSpanToLine = 0;
    OffsetRegion extendedSpan = span;

    synchronized (lock) {
      OffsetRegion requestedSpan = span;

      ParseRequest<TState> request = adjustParseSpan(span);
      TState startState = request.getState();
      span = request.getRegion();

      CharStream input;
      try {
        input = createInputStream(span);
      } catch (BadLocationException ex) {
        LOGGER.log(Level.WARNING, ex.getMessage(), ex);
        return tags;
      }

      TokenSourceWithStateV4<TState> lexer = createLexer(input, startState);
      lexer.setTokenFactory(new DocumentSnapshotTokenFactory(getEffectiveTokenSource(lexer)));

      Token previousToken = null;
      boolean previousTokenEndsLine = false;

      /* this is held outside the loop because only tokens which end at the end of a line
       * impact its value.
       */
      boolean lineStateChanged = false;

      while (true) {
        // TODO: perform this under a read lock
        Token token = lexer.nextToken();

        // The latter is true for the EOF token when span.getEnd() is at the end of the document
        boolean inBounds =
            token.getStartIndex() < span.getEnd() || token.getStopIndex() < span.getEnd();

        if (updateOffsets) {
          int startLineCurrent;
          if (token.getType() == Token.EOF) startLineCurrent = snapshot.getLineCount();
          else startLineCurrent = snapshot.findLineNumber(token.getStartIndex());

          // endLinePrevious is the line number the previous token ended on
          int endLinePrevious;
          if (previousToken != null)
            endLinePrevious = snapshot.findLineNumber(previousToken.getStopIndex() + 1);
          else endLinePrevious = snapshot.findLineNumber(span.getStart()) - 1;

          if (startLineCurrent > endLinePrevious + 1
              || (startLineCurrent == endLinePrevious + 1 && !previousTokenEndsLine)) {
            int firstMultilineLine = endLinePrevious;
            if (previousToken == null || previousTokenEndsLine) firstMultilineLine++;

            for (int i = firstMultilineLine; i < startLineCurrent; i++) {
              if (!lineStates.get(i).getIsMultiLineToken() || lineStateChanged)
                extendMultiLineSpanToLine = i + 1;

              if (inBounds) setLineState(i, lineStates.get(i).createMultiLineState());
            }
          }
        }

        if (token.getType() == Token.EOF) break;

        if (updateOffsets && isMultiLineToken(lexer, token)) {
          int startLine = snapshot.findLineNumber(token.getStartIndex());
          int stopLine = snapshot.findLineNumber(token.getStopIndex() + 1);
          for (int i = startLine; i < stopLine; i++) {
            if (!lineStates.get(i).getIsMultiLineToken()) extendMultiLineSpanToLine = i + 1;

            if (inBounds) setLineState(i, lineStates.get(i).createMultiLineState());
          }
        }

        boolean tokenEndsLine = tokenEndsAtEndOfLine(lexer, token);
        if (updateOffsets && tokenEndsLine) {
          TState stateAtEndOfLine = lexer.getCurrentState();
          int line = snapshot.findLineNumber(token.getStopIndex() + 1);
          lineStateChanged =
              lineStates.get(line).getIsMultiLineToken()
                  || !lineStates.get(line).equals(stateAtEndOfLine);

          // even if the state didn't change, we call SetLineState to make sure the
          // _first/_lastChangedLine values get updated.
          // have to check bounds for this one or the editor might not get an update (if the token
          // ends a line)
          if (updateOffsets && inBounds) setLineState(line, stateAtEndOfLine);

          if (lineStateChanged) {
            if (line < snapshot.getLineCount() - 1) {
              /* update the span's end position or the line state change won't be reflected
               * in the editor
               */
              int endPosition =
                  line < snapshot.getLineCount() - 2
                      ? snapshot.findLineFromLineNumber(line + 2).getStart().getOffset()
                      : snapshot.length();
              if (endPosition > extendedSpan.getEnd()) {
                spanExtended = true;
                extendedSpan = OffsetRegion.fromBounds(extendedSpan.getStart(), endPosition);
              }
            }
          }
        }

        if (token.getStartIndex() >= span.getEnd()) {
          break;
        }

        previousToken = token;
        previousTokenEndsLine = tokenEndsLine;

        if (token.getStopIndex() < requestedSpan.getStart()) {
          continue;
        }

        Collection<TaggedPositionRegion<TokenTag<Token>>> tokenClassificationSpans =
            getTagsForToken(token);
        if (tokenClassificationSpans != null) {
          tags.addAll(tokenClassificationSpans);
        }

        if (!inBounds) {
          break;
        }
      }
    }

    if (updateOffsets && extendMultiLineSpanToLine > 0) {
      int endPosition =
          extendMultiLineSpanToLine < snapshot.getLineCount() - 1
              ? snapshot
                  .findLineFromLineNumber(extendMultiLineSpanToLine + 1)
                  .getStart()
                  .getOffset()
              : snapshot.length();
      if (endPosition > extendedSpan.getEnd()) {
        spanExtended = true;
        extendedSpan = OffsetRegion.fromBounds(extendedSpan.getStart(), endPosition);
      }
    }

    if (updateOffsets && spanExtended) {
      /* Subtract 1 from each of these because the spans include the line break on their last
       * line, forcing it to appear as the first position on the following line.
       */
      assert extendedSpan.getEnd() > span.getEnd();
      int firstLine = snapshot.findLineNumber(span.getEnd());
      int lastLine = snapshot.findLineNumber(extendedSpan.getEnd()) - 1;
      // when considering the last line of a document, span and extendedSpan may end on the same
      // line
      forceRehighlightLines(firstLine, Math.max(firstLine, lastLine));
    }

    return tags;
  }
Example #15
  public XPathElement[] split(String path) {
    ANTLRInputStream in;
    try {
      in = new ANTLRInputStream(new StringReader(path));
    } catch (IOException ioe) {
      throw new IllegalArgumentException("Could not read path: " + path, ioe);
    }
    XPathLexer lexer =
        new XPathLexer(in) {
          @Override
          public void recover(LexerNoViableAltException e) {
            throw e;
          }
        };
    lexer.removeErrorListeners();
    lexer.addErrorListener(new XPathLexerErrorListener());
    CommonTokenStream tokenStream = new CommonTokenStream(lexer);
    try {
      tokenStream.fill();
    } catch (LexerNoViableAltException e) {
      int pos = lexer.getCharPositionInLine();
      String msg = "Invalid tokens or characters at index " + pos + " in path '" + path + "'";
      throw new IllegalArgumentException(msg, e);
    }

    List<Token> tokens = tokenStream.getTokens();
    //		System.out.println("path="+path+"=>"+tokens);
    List<XPathElement> elements = new ArrayList<XPathElement>();
    int n = tokens.size();
    int i = 0;
    loop:
    while (i < n) {
      Token el = tokens.get(i);
      Token next = null;
      switch (el.getType()) {
        case XPathLexer.ROOT:
        case XPathLexer.ANYWHERE:
          boolean anywhere = el.getType() == XPathLexer.ANYWHERE;
          i++;
          next = tokens.get(i);
          boolean invert = next.getType() == XPathLexer.BANG;
          if (invert) {
            i++;
            next = tokens.get(i);
          }
          XPathElement pathElement = getXPathElement(next, anywhere);
          pathElement.invert = invert;
          elements.add(pathElement);
          i++;
          break;

        case XPathLexer.TOKEN_REF:
        case XPathLexer.RULE_REF:
        case XPathLexer.WILDCARD:
          elements.add(getXPathElement(el, false));
          i++;
          break;

        case Token.EOF:
          break loop;

        default:
          throw new IllegalArgumentException("Unknown path element " + el);
      }
    }
    return elements.toArray(new XPathElement[0]);
  }
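split() is the front half of ANTLR's XPath-style tree queries; the usual entry point is XPath.findAll (org.antlr.v4.runtime.tree.xpath), sketched here:

  // Collect every ID token node anywhere in a parse tree; "//ID" becomes a
  // single "anywhere" token element in the loop above.
  Collection<ParseTree> ids = XPath.findAll(tree, "//ID", parser);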