/**
 * Convert word like {@code *} or {@code ID} or {@code expr} to a path element. {@code anywhere}
 * is {@code true} if {@code //} precedes the word.
 */
protected XPathElement getXPathElement(Token wordToken, boolean anywhere) {
  if (wordToken.getType() == Token.EOF) {
    throw new IllegalArgumentException("Missing path element at end of path");
  }

  String word = wordToken.getText();
  int ttype = parser.getTokenType(word);
  int ruleIndex = parser.getRuleIndex(word);
  switch (wordToken.getType()) {
    case XPathLexer.WILDCARD:
      return anywhere ? new XPathWildcardAnywhereElement() : new XPathWildcardElement();
    case XPathLexer.TOKEN_REF:
    case XPathLexer.STRING:
      if (ttype == Token.INVALID_TYPE) {
        throw new IllegalArgumentException(
            word + " at index " + wordToken.getStartIndex() + " isn't a valid token name");
      }
      return anywhere
          ? new XPathTokenAnywhereElement(word, ttype)
          : new XPathTokenElement(word, ttype);
    default:
      if (ruleIndex == -1) {
        throw new IllegalArgumentException(
            word + " at index " + wordToken.getStartIndex() + " isn't a valid rule name");
      }
      return anywhere
          ? new XPathRuleAnywhereElement(word, ruleIndex)
          : new XPathRuleElement(word, ruleIndex);
  }
}
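// Usage sketch (not from the original source): the elements built by getXPathElement back
// ANTLR's XPath queries via org.antlr.v4.runtime.tree.xpath.XPath.findAll. "ExprLexer",
// "ExprParser", the start rule "prog", and the path "//expr" are assumed example names.
static void xpathUsageSketch(String source) {
  ExprLexer lexer = new ExprLexer(CharStreams.fromString(source));
  ExprParser parser = new ExprParser(new CommonTokenStream(lexer));
  ParseTree tree = parser.prog();
  // "//expr" produces an XPathRuleAnywhereElement; "/prog/expr" produces rooted XPathRuleElements.
  for (ParseTree match : XPath.findAll(tree, "//expr", parser)) {
    System.out.println(match.getText());
  }
}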
public static Interval getSourceInterval(@NonNull ParseTree context) {
  Parameters.notNull("context", context);

  if (context instanceof TerminalNode) {
    TerminalNode terminalNode = (TerminalNode) context;
    Token token = terminalNode.getSymbol();
    return new Interval(token.getStartIndex(), token.getStopIndex());
  } else if (context instanceof RuleNode) {
    RuleNode ruleNode = (RuleNode) context;
    RuleContext ruleContext = ruleNode.getRuleContext();
    if (ruleContext instanceof ParserRuleContext) {
      return getSourceInterval((ParserRuleContext) ruleContext);
    } else {
      Token startSymbol = getStartSymbol(context);
      Token stopSymbol = getStopSymbol(context);
      if (startSymbol == null || stopSymbol == null) {
        return Interval.INVALID;
      }

      return new Interval(startSymbol.getStartIndex(), stopSymbol.getStopIndex());
    }
  } else {
    return Interval.INVALID;
  }
}
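// Hedged usage sketch: map a parse-tree node back to the characters it covers. Assumes the
// CharStream "input" that the tokens were lexed from is available; getSourceInterval is the
// method above.
static String textForNode(CharStream input, ParseTree node) {
  Interval interval = getSourceInterval(node);
  if (interval.equals(Interval.INVALID)) {
    return "";
  }
  // CharStream.getText(Interval) returns the original text between the interval bounds.
  return input.getText(interval);
}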
public static void displayErrorOrWarning(
    File compilationUnit,
    String errorOrWarning,
    CommonTokenStream tokens,
    Token offendingTokenStart,
    Token offendingTokenEnd,
    String msg) {
  System.err.println(
      errorOrWarning
          + compilationUnit.getAbsolutePath()
          + ", line "
          + offendingTokenStart.getLine()
          + ":"
          + offendingTokenStart.getCharPositionInLine()
          + " "
          + msg);
  String input = tokens.getTokenSource().getInputStream().toString();
  String[] lines = input.split("\n");
  String errorLine = lines[offendingTokenStart.getLine() - 1];
  System.err.println(errorLine);

  // Pad up to the offending column, preserving tabs so the caret markers line up with the
  // source text, then underline the offending token range with carets.
  for (int i = 0; i < offendingTokenStart.getCharPositionInLine(); i++) {
    if (errorLine.charAt(i) == '\t') {
      System.err.print("\t");
    } else {
      System.err.print(" ");
    }
  }

  int start = offendingTokenStart.getStartIndex();
  int stop = offendingTokenEnd.getStopIndex();
  if (start >= 0 && stop >= 0) {
    for (int i = start; i <= stop; i++) {
      System.err.print("^");
    }
  }

  System.err.println();
}
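// Hedged sketch of how displayErrorOrWarning might be wired into a parser through ANTLR's
// BaseErrorListener. "attachReporter" and the "error in " prefix are assumed example names;
// the offending symbol is used as both the start and end token of the reported range.
static void attachReporter(Parser parser, File file, CommonTokenStream tokens) {
  parser.addErrorListener(
      new BaseErrorListener() {
        @Override
        public void syntaxError(
            Recognizer<?, ?> recognizer,
            Object offendingSymbol,
            int line,
            int charPositionInLine,
            String msg,
            RecognitionException e) {
          Token offending = (Token) offendingSymbol;
          displayErrorOrWarning(file, "error in ", tokens, offending, offending, msg);
        }
      });
}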
@Override
public void createPresentation(TextPresentation presentation, ITypedRegion region) {
  // Use tokens provided by the lexer to highlight keywords, etc...
  // Seems fast enough to skip Eclipse partitioning. In fact, the Eclipse
  // partitioner seems to slow everything down...
  TSLKGrammarLexer lexer = new TSLKGrammarLexer(new ANTLRInputStream(document.get()));
  Token t = null;
  while ((t = lexer.nextToken()).getType() != Token.EOF) {
    if (t.getStartIndex() > region.getOffset() + region.getLength()) {
      break;
    }
    int start = t.getStartIndex();
    int end = t.getStopIndex();
    RGB foreground = null;
    RGB background = null;
    int style = SWT.NORMAL;
    switch (t.getType()) { // TODO: Make keywords customisable
      case TSLKGrammarLexer.WHILE:
      case TSLKGrammarLexer.FOR:
      case TSLKGrammarLexer.FUNC:
      case TSLKGrammarLexer.IF:
      case TSLKGrammarLexer.THEN:
      case TSLKGrammarLexer.DO:
      case TSLKGrammarLexer.END:
        foreground = ColorManager.KEYWORD;
        style = SWT.BOLD;
        break;
      case TSLKGrammarLexer.STRING:
        foreground = ColorManager.STRING;
        break;
      case TSLKGrammarLexer.SLCOMMENT:
        foreground = ColorManager.SINGLE_LINE_COMMENT;
        break;
      case TSLKGrammarLexer.MLCOMMENT:
        foreground = ColorManager.MULTI_LINE_COMMENT;
        break;
      default:
        foreground = ColorManager.DEFAULT;
        break;
    }
    presentation.addStyleRange(
        new StyleRange(
            start,
            end - start + 1,
            colorManager.getColor(foreground),
            colorManager.getColor(background),
            style));
  }
}
@CheckForNull
public static TerminalNode findTerminalNode(@NonNull ParseTree node, Token symbol) {
  if (symbol == null) {
    return null;
  }

  if (node instanceof TerminalNode) {
    TerminalNode terminalNode = (TerminalNode) node;
    if (Utils.equals(terminalNode.getSymbol(), symbol)) {
      return terminalNode;
    }

    return null;
  }

  for (int i = 0; i < node.getChildCount(); i++) {
    ParseTree child = node.getChild(i);
    TerminalNode stopNode = ParseTrees.getStopNode(child);
    if (stopNode == null) {
      continue;
    }

    Token stopSymbol = stopNode.getSymbol();
    if (stopSymbol.getStopIndex() < symbol.getStartIndex()) {
      // This child ends before the requested symbol starts; skip it.
      continue;
    }

    TerminalNode startNode = ParseTrees.getStartNode(child);
    assert startNode != null;
    stopSymbol = startNode.getSymbol();
    if (stopSymbol == null || stopSymbol.getStartIndex() > symbol.getStopIndex()) {
      // This child (and every later sibling) starts after the requested symbol ends.
      break;
    }

    if (stopSymbol.equals(symbol)) {
      return startNode;
    }

    TerminalNode terminalNode = findTerminalNode(child, symbol);
    if (terminalNode != null) {
      return terminalNode;
    }
  }

  return null;
}
protected boolean isMultiLineToken(TokenSourceWithStateV4<TState> lexer, Token token) {
  /*if (lexer != null && lexer.getLine() > token.getLine()) {
    return true;
  }*/

  int startLine = snapshot.findLineNumber(token.getStartIndex());
  int stopLine = snapshot.findLineNumber(token.getStopIndex() + 1);
  return startLine != stopLine;
}
private void addElement(Token token, SyntaxElementType type) {
  syntaxElements.add(
      SyntaxElement.create(
          token.getText(),
          token.getStartIndex(),
          token.getStopIndex(),
          token.getTokenIndex(),
          type));
  if (debug) {
    SyntaxElement e = syntaxElements.get(syntaxElements.size() - 1);
    System.out.println(String.format("%d-%d %s %s", e.from, e.to, e.type.name(), e.value));
  }
}
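// Hedged sketch of the loop that would feed addElement: fill a CommonTokenStream and classify
// each token. "MyLexer.COMMENT" and the SyntaxElementType constants used here are assumed
// example names, not taken from the original source.
private void collectElements(CommonTokenStream tokens) {
  tokens.fill();
  for (Token token : tokens.getTokens()) {
    if (token.getType() == Token.EOF) {
      break;
    }
    SyntaxElementType type =
        token.getType() == MyLexer.COMMENT ? SyntaxElementType.COMMENT : SyntaxElementType.OTHER;
    addElement(token, type);
  }
}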
protected Collection<TaggedPositionRegion<TokenTag<Token>>> getTagsForToken(Token token) {
  TokenTag<Token> tag = highlightToken(token);
  if (tag != null) {
    return Collections.<TaggedPositionRegion<TokenTag<Token>>>singleton(
        new BaseTaggedPositionRegion<>(
            new SnapshotPositionRegion(
                snapshot,
                OffsetRegion.fromBounds(token.getStartIndex(), token.getStopIndex() + 1)),
            tag));
  }

  return Collections.emptyList();
}
/**
 * Gets whether or not {@code token} is the first non-whitespace symbol on a line.
 *
 * @param token The token to test.
 * @return {@code true} if the only characters appearing before {@code token} on the same line
 *     are whitespace characters according to {@link Character#isWhitespace}.
 */
public static boolean elementStartsLine(Token token) {
  String beginningOfLineText =
      token
          .getTokenSource()
          .getInputStream()
          .getText(
              new Interval(
                  token.getStartIndex() - token.getCharPositionInLine(),
                  token.getStartIndex() - 1));
  for (int i = 0; i < beginningOfLineText.length(); i++) {
    if (!Character.isWhitespace(beginningOfLineText.charAt(i))) {
      return false;
    }
  }

  return true;
}
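// Hedged usage sketch: a formatter-style check that only re-indents a token when nothing but
// whitespace precedes it on its line. "rewriter" is an assumed TokenStreamRewriter and "indent"
// an assumed indentation string; neither comes from the original source.
static void indentIfLineStart(TokenStreamRewriter rewriter, Token token, String indent) {
  if (elementStartsLine(token)) {
    // Safe to insert leading whitespace without disturbing other tokens on the same line.
    rewriter.insertBefore(token, indent);
  }
}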
protected boolean tokenSkippedLines(int endLinePrevious, Token token) {
  int startLineCurrent = snapshot.findLineNumber(token.getStartIndex());
  return startLineCurrent > endLinePrevious + 1;
}
public List<TaggedPositionRegion<TokenTag<Token>>> getHighlights(int startOffset, int endOffset) {
  List<TaggedPositionRegion<TokenTag<Token>>> tags = new ArrayList<>();
  boolean updateOffsets = true;

  if (endOffset == Integer.MAX_VALUE) {
    endOffset = snapshot.length();
  }

  OffsetRegion span = OffsetRegion.fromBounds(startOffset, endOffset);

  if (failedTimeout) {
    return tags;
  }

  boolean spanExtended = false;
  int extendMultiLineSpanToLine = 0;
  OffsetRegion extendedSpan = span;

  synchronized (lock) {
    OffsetRegion requestedSpan = span;

    ParseRequest<TState> request = adjustParseSpan(span);
    TState startState = request.getState();
    span = request.getRegion();

    CharStream input;
    try {
      input = createInputStream(span);
    } catch (BadLocationException ex) {
      LOGGER.log(Level.WARNING, ex.getMessage(), ex);
      return tags;
    }

    TokenSourceWithStateV4<TState> lexer = createLexer(input, startState);
    lexer.setTokenFactory(new DocumentSnapshotTokenFactory(getEffectiveTokenSource(lexer)));

    Token previousToken = null;
    boolean previousTokenEndsLine = false;

    /* this is held outside the loop because only tokens which end at the end of a line
     * impact its value.
     */
    boolean lineStateChanged = false;

    while (true) {
      // TODO: perform this under a read lock
      Token token = lexer.nextToken();

      // The latter is true for EOF token with span.getEnd() at the end of the document
      boolean inBounds =
          token.getStartIndex() < span.getEnd() || token.getStopIndex() < span.getEnd();

      if (updateOffsets) {
        int startLineCurrent;
        if (token.getType() == Token.EOF) {
          startLineCurrent = snapshot.getLineCount();
        } else {
          startLineCurrent = snapshot.findLineNumber(token.getStartIndex());
        }

        // endLinePrevious is the line number the previous token ended on
        int endLinePrevious;
        if (previousToken != null) {
          endLinePrevious = snapshot.findLineNumber(previousToken.getStopIndex() + 1);
        } else {
          endLinePrevious = snapshot.findLineNumber(span.getStart()) - 1;
        }

        if (startLineCurrent > endLinePrevious + 1
            || (startLineCurrent == endLinePrevious + 1 && !previousTokenEndsLine)) {
          int firstMultilineLine = endLinePrevious;
          if (previousToken == null || previousTokenEndsLine) {
            firstMultilineLine++;
          }

          for (int i = firstMultilineLine; i < startLineCurrent; i++) {
            if (!lineStates.get(i).getIsMultiLineToken() || lineStateChanged) {
              extendMultiLineSpanToLine = i + 1;
            }

            if (inBounds) {
              setLineState(i, lineStates.get(i).createMultiLineState());
            }
          }
        }
      }

      if (token.getType() == Token.EOF) {
        break;
      }

      if (updateOffsets && isMultiLineToken(lexer, token)) {
        int startLine = snapshot.findLineNumber(token.getStartIndex());
        int stopLine = snapshot.findLineNumber(token.getStopIndex() + 1);
        for (int i = startLine; i < stopLine; i++) {
          if (!lineStates.get(i).getIsMultiLineToken()) {
            extendMultiLineSpanToLine = i + 1;
          }

          if (inBounds) {
            setLineState(i, lineStates.get(i).createMultiLineState());
          }
        }
      }

      boolean tokenEndsLine = tokenEndsAtEndOfLine(lexer, token);
      if (updateOffsets && tokenEndsLine) {
        TState stateAtEndOfLine = lexer.getCurrentState();
        int line = snapshot.findLineNumber(token.getStopIndex() + 1);
        lineStateChanged =
            lineStates.get(line).getIsMultiLineToken()
                || !lineStates.get(line).equals(stateAtEndOfLine);

        // even if the state didn't change, we call SetLineState to make sure the
        // _first/_lastChangedLine values get updated.
        // have to check bounds for this one or the editor might not get an update (if the token
        // ends a line)
        if (updateOffsets && inBounds) {
          setLineState(line, stateAtEndOfLine);
        }

        if (lineStateChanged) {
          if (line < snapshot.getLineCount() - 1) {
            /* update the span's end position or the line state change won't be reflected
             * in the editor
             */
            int endPosition =
                line < snapshot.getLineCount() - 2
                    ? snapshot.findLineFromLineNumber(line + 2).getStart().getOffset()
                    : snapshot.length();
            if (endPosition > extendedSpan.getEnd()) {
              spanExtended = true;
              extendedSpan = OffsetRegion.fromBounds(extendedSpan.getStart(), endPosition);
            }
          }
        }
      }

      if (token.getStartIndex() >= span.getEnd()) {
        break;
      }

      previousToken = token;
      previousTokenEndsLine = tokenEndsLine;

      if (token.getStopIndex() < requestedSpan.getStart()) {
        continue;
      }

      Collection<TaggedPositionRegion<TokenTag<Token>>> tokenClassificationSpans =
          getTagsForToken(token);
      if (tokenClassificationSpans != null) {
        tags.addAll(tokenClassificationSpans);
      }

      if (!inBounds) {
        break;
      }
    }
  }

  if (updateOffsets && extendMultiLineSpanToLine > 0) {
    int endPosition =
        extendMultiLineSpanToLine < snapshot.getLineCount() - 1
            ? snapshot
                .findLineFromLineNumber(extendMultiLineSpanToLine + 1)
                .getStart()
                .getOffset()
            : snapshot.length();
    if (endPosition > extendedSpan.getEnd()) {
      spanExtended = true;
      extendedSpan = OffsetRegion.fromBounds(extendedSpan.getStart(), endPosition);
    }
  }

  if (updateOffsets && spanExtended) {
    /* Subtract 1 from each of these because the spans include the line break on their last
     * line, forcing it to appear as the first position on the following line.
     */
    assert extendedSpan.getEnd() > span.getEnd();
    int firstLine = snapshot.findLineNumber(span.getEnd());
    int lastLine = snapshot.findLineNumber(extendedSpan.getEnd()) - 1;
    // when considering the last line of a document, span and extendedSpan may end on the same
    // line
    forceRehighlightLines(firstLine, Math.max(firstLine, lastLine));
  }

  return tags;
}
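// Hedged usage sketch: a full re-highlight pass over the current snapshot. Passing
// Integer.MAX_VALUE as the end offset lets getHighlights clamp the span to snapshot.length(),
// as shown above; "rehighlightAll" is an assumed example name, not part of the original source.
private void rehighlightAll() {
  for (TaggedPositionRegion<TokenTag<Token>> tag : getHighlights(0, Integer.MAX_VALUE)) {
    // Each returned region pairs a snapshot position range with the token's highlight tag.
    System.out.println(tag);
  }
}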