private Token<PHPTokenId> getPhpToken(int offset) {
    TokenSequence<PHPTokenId> phpts = ts.embeddedJoined(PHPTokenId.languageInPHP());
    assertNotNull(phpts);
    phpts.move(offset);
    assertTrue(phpts.moveNext());
    return phpts.token();
}
private Token<TplTokenId> getTplToken(int offset) {
    TokenSequence<TplTokenId> htmlts = ts.embeddedJoined(TplTokenId.language());
    assertNotNull(htmlts);
    htmlts.move(offset);
    assertTrue(htmlts.moveNext());
    return htmlts.token();
}
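// A hedged usage sketch of the two helpers above. The offsets and expected
// token ids are hypothetical; `ts` is assumed to be the test's top-level
// TokenSequence positioned over a lexed Smarty snapshot set up elsewhere.
public void testEmbeddedTokenLookup() {
    assertEquals(TplTokenId.WHITESPACE, getTplToken(5).id());    // hypothetical offset
    assertEquals(PHPTokenId.PHP_VARIABLE, getPhpToken(12).id()); // hypothetical offset
}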
@Override
public List<Embedding> getEmbeddings() {
    // Initializes template counters.
    TemplateResolver.init();
    // TODO: do not walk the whole sequence (just a few lines before and after the caret)
    // if that is even possible...
    TokenSequence<LatteTopTokenId> sequence = LexUtils.getTopSequence(getSnapshot().getText().toString());
    sequence.moveStart();
    LatteResolver latteResolver = new LatteResolver(this);
    HtmlPhpResolver htmlPhpResolver = new HtmlPhpResolver(this);
    while (sequence.moveNext()) {
        Token t = sequence.token();
        if (t.id() == LatteTopTokenId.LATTE) {
            latteResolver.solve(t, sequence); // deals with all Latte macros
            SyntaxUtils.findArrayForHint(getSnapshot().getSource().getDocument(false), sequence);
        } else {
            htmlPhpResolver.solve(t, sequence);
        }
    }
    return super.getEmbeddings();
}
@Override
protected void inside(VisageSequenceExplicit t) throws IOException {
    if (LOGGABLE) log("inside VisageSequenceExplicit " + t + " offset == " + offset); // NOI18N
    TokenSequence<VisageTokenId> last = findLastNonWhitespaceToken(
            (int) sourcePositions.getStartPosition(root, t), offset);
    if (LOGGABLE) log(" last(1) == " + (last == null ? "null" : last.token().id())); // NOI18N
    localResult(getSmartType(t));
    addValueKeywords();
}
private static TokenSequence<SQLTokenId> getTokenSequence(String sql) throws BadLocationException {
    Document doc = new ModificationTextDocument();
    doc.insertString(0, sql, null);
    doc.putProperty(Language.class, SQLTokenId.language());
    TokenHierarchy<?> hi = TokenHierarchy.get(doc);
    TokenSequence<SQLTokenId> seq = hi.tokenSequence(SQLTokenId.language());
    seq.moveStart();
    return seq;
}
private static void assertTokens(TokenSequence<SQLTokenId> seq, SQLTokenId... ids) {
    if (ids == null) {
        ids = new SQLTokenId[0];
    }
    assertEquals("Wrong token count.", ids.length, seq.tokenCount());
    seq.moveNext();
    for (SQLTokenId id : ids) {
        assertEquals("Wrong token ID at index " + seq.index(), id, seq.token().id());
        seq.moveNext();
    }
}
private String getFunctionalTplTokenId(Token<TplTopTokenId> token) {
    TokenHierarchy<CharSequence> th = TokenHierarchy.create(token.text(), TplTokenId.language());
    TokenSequence<TplTokenId> sequence = th.tokenSequence(TplTokenId.language());
    while (sequence.moveNext()) {
        if (sequence.token().id() != TplTokenId.WHITESPACE) {
            return sequence.token().toString();
        }
    }
    return "";
}
private OffsetRange getReferenceSpan(TokenSequence<?> ts, TokenHierarchy<Document> th, int lexOffset) {
    Token<?> token = ts.token();
    TokenId id = token.id();
    //        if (id == PythonTokenId.IDENTIFIER) {
    //            if (token.length() == 1 && id == PythonTokenId.IDENTIFIER &&
    //                    token.text().toString().equals(",")) {
    //                return OffsetRange.NONE;
    //            }
    //        }
    // TODO: Tokens.SUPER, Tokens.THIS, Tokens.SELF ...
    if (id == PythonTokenId.IDENTIFIER) {
        return new OffsetRange(ts.offset(), ts.offset() + token.length());
    }
    // Look for embedded RDoc comments:
    TokenSequence<?> embedded = ts.embedded();
    if (embedded != null) {
        ts = embedded;
        embedded.move(lexOffset);
        if (embedded.moveNext()) {
            Token<?> embeddedToken = embedded.token();
            if (embeddedToken.id() == PythonStringTokenId.URL) {
                return new OffsetRange(embedded.offset(), embedded.offset() + embeddedToken.length());
            }
            // Recurse into the range - perhaps there is Ruby code (identifiers
            // etc.) to follow there
            OffsetRange range = getReferenceSpan(embedded, th, lexOffset);
            if (range != OffsetRange.NONE) {
                return range;
            }
        }
    }
    return OffsetRange.NONE;
}
public boolean verifyState(Document doc, int offset) {
    TokenHierarchy hi = TokenHierarchy.get(doc);
    TokenSequence<HTMLTokenId> ts = hi.tokenSequence(HTMLTokenId.language());
    if (ts != null) {
        ts.move(offset);
        ts.moveNext();
        Token<HTMLTokenId> tok = ts.token();
        int newOffset = ts.offset();
        String matcherText = tok.text().toString();
        Matcher m = MY_SPECIAL_PATTERN.matcher(matcherText);
        if (m.matches()) {
            target = m.group(1);
            int idx = matcherText.indexOf(target);
            targetStart = newOffset + idx;
            targetEnd = targetStart + target.length();
            return true;
        }
    }
    return false;
}
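// Hedged sketch of the state verifyState() above relies on but which is not
// shown in the snippet; the concrete pattern below is hypothetical:
// private static final Pattern MY_SPECIAL_PATTERN =
//         Pattern.compile("\\s*target=\"(\\w+)\".*"); // hypothetical regex
// private String target;    // the matched group, filled in by verifyState
// private int targetStart;  // document offset where the match begins
// private int targetEnd;    // document offset where the match ends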
public OffsetRange getReferenceSpan(Document doc, int lexOffset) {
    TokenHierarchy<Document> th = TokenHierarchy.get(doc);
    // BaseDocument doc = (BaseDocument)document;
    TokenSequence<? extends PythonTokenId> ts = PythonLexerUtils.getPythonSequence(th, lexOffset);
    if (ts == null) {
        return OffsetRange.NONE;
    }
    ts.move(lexOffset);
    if (!ts.moveNext() && !ts.movePrevious()) {
        return OffsetRange.NONE;
    }
    // Determine whether the caret position is right between two tokens
    boolean isBetween = (lexOffset == ts.offset());
    OffsetRange range = getReferenceSpan(ts, th, lexOffset);
    if ((range == OffsetRange.NONE) && isBetween) {
        // The caret is between two tokens, and the token on the right
        // wasn't linkable. Try on the left instead.
        if (ts.movePrevious()) {
            range = getReferenceSpan(ts, th, lexOffset);
        }
    }
    return range;
}
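// An illustration of the "between two tokens" case handled above, with a
// hypothetical source: for "foo|.bar" (caret at '|'), move(lexOffset) plus
// moveNext() lands on '.', and ts.offset() == lexOffset, so isBetween is true.
// The '.' token is not linkable, so movePrevious() retries on the "foo"
// identifier to its left, which does yield a reference span.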
private static CharSequence dumpTokens(TokenSequence<?> seq) {
    seq.moveStart();
    StringBuilder builder = new StringBuilder();
    Token<?> token = null;
    while (seq.moveNext()) {
        if (token != null) {
            builder.append('\n');
        }
        token = seq.token();
        builder.append(token.id());
        PartType part = token.partType();
        if (part != PartType.COMPLETE) {
            builder.append(' ');
            builder.append(token.partType());
        }
        builder.append(' ');
        builder.append('\'');
        builder.append(token.text());
        builder.append('\'');
    }
    return builder;
}
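// A hedged usage sketch pairing dumpTokens with the getTokenSequence(String)
// helper above, assuming both live in the same SQL lexer test class; the
// exact token ids in the output depend on the SQL lexer and are illustrative.
public void testDumpTokens() throws BadLocationException {
    TokenSequence<SQLTokenId> seq = getTokenSequence("select 1");
    CharSequence dump = dumpTokens(seq);
    // One "ID 'text'" line per token, e.g. "KEYWORD 'select'", "WHITESPACE ' '", ...
    assertTrue(dump.toString().contains("'select'"));
}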
@NbBundle.Messages({
    "# {0} - PI target",
    "ERR_invalidProcessingInstruction=Invalid processing instruction: {0}. Expected 'import', 'include' or 'language'",
    "ERR_missingProcessingInstruction=Missing processing instruction."
})
private void handleErrorInstruction(String target, String data) {
    int start = contentLocator.getElementOffset();
    int offset = -1;
    int piOffset = -1;
    TokenSequence<XMLTokenId> seq = contentLocator.getTokenSequence();
    // lex up to the invalid target:
    seq.move(start);
    boolean found = false;
    while (!found && seq.moveNext()) {
        Token<XMLTokenId> t = seq.token();
        switch (t.id()) {
            case PI_START:
                piOffset = offset;
                if (target == null) {
                    found = true;
                }
                // fall through
            case WS:
                break;
            default:
            case PI_TARGET:
                offset = seq.offset();
                found = true;
                break;
        }
    }
    ErrorMark mark;
    if (target != null) {
        mark = new ErrorMark(offset, seq.token().length(),
                "invalid-processing-instruction",
                ERR_invalidProcessingInstruction(target), target);
    } else {
        mark = new ErrorMark(piOffset, seq.token().length(),
                "missing-processing-instruction",
                ERR_missingProcessingInstruction());
    }
    addError(mark);
}
public void testEscapeSingleQuote() throws Exception {
    TokenSequence<SQLTokenId> seq = getTokenSequence("'Frank\\'s Book'");
    assertTrue(seq.moveNext());
    assertEquals(SQLTokenId.STRING, seq.token().id());
    assertEquals("'Frank\\'s Book'", seq.token().text().toString());

    seq = getTokenSequence("'Frank\\s Book'");
    assertTrue(seq.moveNext());
    assertEquals(SQLTokenId.STRING, seq.token().id());
    assertEquals("'Frank\\s Book'", seq.token().text().toString());

    seq = getTokenSequence("'Frank\\");
    assertTokens(seq, SQLTokenId.INCOMPLETE_STRING);

    seq = getTokenSequence("'Frank\\'");
    assertTokens(seq, SQLTokenId.INCOMPLETE_STRING);
}
@SuppressWarnings("empty-statement") private DeclarationLocation findUrl(PythonParserResult info, Document doc, int lexOffset) { TokenSequence<?> ts = PythonLexerUtils.getPythonSequence((BaseDocument) doc, lexOffset); if (ts == null) { return DeclarationLocation.NONE; } ts.move(lexOffset); if (!ts.moveNext() && !ts.movePrevious()) { return DeclarationLocation.NONE; } Token<?> token = ts.token(); TokenSequence<?> embedded = ts.embedded(); if (embedded != null) { ts = embedded; embedded.move(lexOffset); if (!embedded.moveNext() && !embedded.movePrevious()) { return DeclarationLocation.NONE; } token = embedded.token(); } // Is this a comment? If so, possibly do rdoc-method reference jump if ((token != null) && (token.id() == PythonStringTokenId.URL)) { // TODO - use findLinkedMethod String method = token.text().toString(); if (method.startsWith("www.")) { // NOI18N method = "http://" + method; // NOI18N } // A URL such as http://netbeans.org - try to open it in a browser! try { URL url = new URL(method); return new DeclarationLocation(url); } catch (MalformedURLException mue) { // URL is from user source... don't complain with exception dialogs etc. ; } } return DeclarationLocation.NONE; }
private DeclarationLocation findImport(PythonParserResult info, int lexOffset, BaseDocument doc) {
    TokenSequence<? extends PythonTokenId> ts = PythonLexerUtils.getPositionedSequence(doc, lexOffset);
    if (ts == null) {
        return DeclarationLocation.NONE;
    }
    if (ts.offset() == lexOffset) {
        // We're looking at the offset to the RIGHT of the caret
        // and here I care about what's on the left
        if (!ts.movePrevious()) {
            return DeclarationLocation.NONE;
        }
    }
    Token<? extends PythonTokenId> token = ts.token();
    if (token == null) {
        return DeclarationLocation.NONE;
    }
    TokenId id = token.id();
    String moduleName = null;
    while (true) {
        if (id == PythonTokenId.IDENTIFIER || id.primaryCategory().equals(PythonLexer.KEYWORD_CAT)) {
            // Possibly inside the import string
            String tokenText = token.text().toString();
            if (moduleName == null) {
                moduleName = tokenText;
            } else {
                moduleName = tokenText + "." + moduleName;
            }
        } else if (id != PythonTokenId.DOT) {
            break;
        }
        if (!ts.movePrevious()) {
            return DeclarationLocation.NONE;
        }
        token = ts.token();
        id = token.id();
    }
    if (id != PythonTokenId.ERROR && id != PythonTokenId.NEWLINE && id != PythonTokenId.WHITESPACE) {
        return DeclarationLocation.NONE;
    }
    if (!ts.movePrevious()) {
        return DeclarationLocation.NONE;
    }
    token = ts.token();
    id = token.id();
    if (id != PythonTokenId.IMPORT) {
        return DeclarationLocation.NONE;
    }
    if (moduleName == null) {
        return DeclarationLocation.NONE;
    }
    if (id == PythonTokenId.IMPORT || id == PythonTokenId.FROM) {
        if (id == PythonTokenId.IMPORT && ts.movePrevious() &&
                ts.token().id() == PythonTokenId.WHITESPACE && ts.movePrevious()) {
            // See if this was "from foo import bar" such that we really should
            // be listing symbols inside the foo library
            token = ts.token();
            id = token.id();
            String library = null;
            while (true) {
                if (id == PythonTokenId.IDENTIFIER || id.primaryCategory().equals(PythonLexer.KEYWORD_CAT)) {
                    // Possibly inside the import string
                    String tokenText = token.text().toString();
                    if (library == null) {
                        library = tokenText;
                    } else {
                        library = tokenText + "." + library;
                    }
                } else if (id != PythonTokenId.DOT) {
                    break;
                }
                if (!ts.movePrevious()) {
                    return DeclarationLocation.NONE;
                }
                token = ts.token();
                id = token.id();
            }
            if (library != null) {
                if (id == PythonTokenId.WHITESPACE && ts.movePrevious() &&
                        ts.token().id() == PythonTokenId.FROM) {
                    return findImport(info, library, moduleName);
                }
            }
        }
        return findImport(info, moduleName, null);
    }
    return DeclarationLocation.NONE;
}
private Token<TplTopTokenId> getTplTopToken(int offset) {
    ts.move(offset);
    assertTrue(ts.moveNext());
    return ts.token();
}
private void assertNullEmbeddingForLanguage(Language language) {
    TokenSequence htmlts = ts.embeddedJoined(language);
    assertNull(htmlts);
}
/** @throws java.lang.Exception */
@RandomlyFails
public void testDocumentModification() throws Exception {
    // 1) register tasks and parsers
    MockServices.setServices(MockMimeLookup.class, MyScheduler.class);
    final CountDownLatch latch1 = new CountDownLatch(1);
    final CountDownLatch latch2 = new CountDownLatch(2);
    final CountDownLatch latch3 = new CountDownLatch(3);
    final int[] fooParser = {1};
    final int[] fooParserResult = {1};
    final int[] fooEmbeddingProvider = {1};
    final int[] fooTask = {1};
    final int[] booParser = {1};
    final int[] booParserResult = {1};
    final int[] booTask = {1};
    // Expected event log; the Czech fixture strings are test data compared verbatim.
    final TestComparator test = new TestComparator(
        "1 - reschedule all schedulers\n" +
        "foo get embeddings 1 (Snapshot 1), \n" +
        "Snapshot 1: Toto je testovaci file, na kterem se budou delat hnusne pokusy!!!, \n" +
        "Snapshot 2: stovaci fi, \n" +
        "foo parse 1 (Snapshot 1, FooParserResultTask 1, SourceModificationEvent -1:-1), \n" +
        "foo get result 1 (FooParserResultTask 1), \n" +
        "foo task 1 (FooResult 1 (Snapshot 1), SchedulerEvent 1), \n" +
        "foo invalidate 1, \n" +
        "boo parse 1 (Snapshot 2, BooParserResultTask 1, SourceModificationEvent -1:-1), \n" +
        "boo get result 1 (BooParserResultTask 1), \n" +
        "boo task 1 (BooResult 1 (Snapshot 2), SchedulerEvent 1), \n" +
        "boo invalidate 1, \n" +
        "2 - insert 14 chars on offset 22\n" +
        "foo get embeddings 1 (Snapshot 3), \n" +
        "Snapshot 3: Toto je testovaci file (druha verze), na kterem se budou delat hnusne pokusy!!!, \n" +
        "Snapshot 4: stovaci fi, \n" +
        "foo parse 1 (Snapshot 3, FooParserResultTask 1, SourceModificationEvent 18:37), \n" +
        "foo get result 1 (FooParserResultTask 1), \n" +
        "foo task 1 (FooResult 2 (Snapshot 3), SchedulerEvent 1), \n" +
        "foo invalidate 2, \n" +
        "boo parse 1 (Snapshot 4, BooParserResultTask 1, SourceModificationEvent -1:-1), \n" + // !! source unchanged
        "boo get result 1 (BooParserResultTask 1), \n" +
        "boo task 1 (BooResult 2 (Snapshot 4), SchedulerEvent 1), \n" +
        "boo invalidate 2, \n" +
        "3 - remove 5 chars on offset 44\n" +
        "foo get embeddings 1 (Snapshot 5), \n" +
        "Snapshot 5: Toto je testovaci file (druha verze), na ktee budou delat hnusne pokusy!!!, \n" +
        "Snapshot 6: stovaci fi, \n" +
        "foo parse 1 (Snapshot 5, FooParserResultTask 1, SourceModificationEvent 41:45), \n" +
        "foo get result 1 (FooParserResultTask 1), \n" +
        "foo task 1 (FooResult 3 (Snapshot 5), SchedulerEvent 2), \n" +
        "foo invalidate 3, \n" +
        "boo parse 1 (Snapshot 6, BooParserResultTask 1, SourceModificationEvent -1:-1), \n" + // !! source unchanged
        "boo get result 1 (BooParserResultTask 1), \n" +
        "boo task 1 (BooResult 3 (Snapshot 6), SchedulerEvent 2), \n" +
        "boo invalidate 3, \n" +
        "4 - end\n");
    MockMimeLookup.setInstances(
        MimePath.get("text/foo"),
        new ParserFactory() {
            public Parser createParser(Collection<Snapshot> snapshots2) {
                return new Parser() {
                    private Snapshot last;
                    private int i = fooParser[0]++;

                    public void parse(Snapshot snapshot, Task task, SourceModificationEvent event) throws ParseException {
                        test.check("foo parse " + i + " (Snapshot " + test.get(snapshot) + ", " + task + ", " + event + "), \n");
                        last = snapshot;
                    }

                    public Result getResult(Task task) throws ParseException {
                        test.check("foo get result " + i + " (" + task + "), \n");
                        return new Result(last) {
                            private int i = fooParserResult[0]++;

                            public void invalidate() {
                                test.check("foo invalidate " + i + ", \n");
                            }

                            @Override
                            public String toString() {
                                return "FooResult " + i + " (Snapshot " + test.get(getSnapshot()) + ")";
                            }
                        };
                    }

                    public void cancel() {}
                    public void addChangeListener(ChangeListener changeListener) {}
                    public void removeChangeListener(ChangeListener changeListener) {}
                };
            }
        },
        new TaskFactory() {
            public Collection<SchedulerTask> create(Snapshot snapshot) {
                return Arrays.asList(new SchedulerTask[] {
                    new EmbeddingProvider() {
                        private int i = fooEmbeddingProvider[0]++;

                        public List<Embedding> getEmbeddings(Snapshot snapshot) {
                            test.check("foo get embeddings " + i + " (Snapshot " + test.get(snapshot) + "), \n");
                            test.check("Snapshot " + test.get(snapshot) + ": " + snapshot.getText() + ", \n");
                            Embedding embedding = snapshot.create(10, 10, "text/boo");
                            test.get(embedding.getSnapshot());
                            test.check("Snapshot " + test.get(embedding.getSnapshot()) + ": " + embedding.getSnapshot().getText() + ", \n");
                            return Arrays.asList(new Embedding[] {embedding});
                        }

                        public int getPriority() { return 10; }
                        public void cancel() {}
                    },
                    new ParserResultTask() {
                        private int i = fooTask[0]++;

                        public void run(Result result, SchedulerEvent event) {
                            test.check("foo task " + i + " (" + result + ", SchedulerEvent " + test.get(event) + "), \n");
                        }

                        public int getPriority() { return 100; }

                        public Class<? extends Scheduler> getSchedulerClass() {
                            return Scheduler.EDITOR_SENSITIVE_TASK_SCHEDULER;
                        }

                        public void cancel() {}

                        @Override
                        public String toString() { return "FooParserResultTask " + i; }
                    }
                });
            }
        });
    MockMimeLookup.setInstances(
        MimePath.get("text/boo"),
        new ParserFactory() {
            public Parser createParser(Collection<Snapshot> snapshots2) {
                return new Parser() {
                    private Snapshot last;
                    private int i = booParser[0]++;

                    public void parse(Snapshot snapshot, Task task, SourceModificationEvent event) throws ParseException {
                        test.check("boo parse " + i + " (Snapshot " + test.get(snapshot) + ", " + task + ", " + event + "), \n");
                        last = snapshot;
                    }

                    public Result getResult(Task task) throws ParseException {
                        test.check("boo get result " + i + " (" + task + "), \n");
                        return new Result(last) {
                            private int i = booParserResult[0]++;

                            public void invalidate() {
                                test.check("boo invalidate " + i + ", \n");
                                latch1.countDown();
                                latch2.countDown();
                                latch3.countDown();
                            }

                            @Override
                            public String toString() {
                                return "BooResult " + i + " (Snapshot " + test.get(getSnapshot()) + ")";
                            }
                        };
                    }

                    public void cancel() {}
                    public void addChangeListener(ChangeListener changeListener) {}
                    public void removeChangeListener(ChangeListener changeListener) {}
                };
            }
        },
        new TaskFactory() {
            public Collection<SchedulerTask> create(Snapshot snapshot) {
                return Arrays.asList(new SchedulerTask[] {
                    new ParserResultTask() {
                        private int i = booTask[0]++;

                        public void run(Result result, SchedulerEvent event) {
                            test.check("boo task " + i + " (" + result + ", SchedulerEvent " + test.get(event) + "), \n");
                        }

                        public int getPriority() { return 150; }

                        public Class<? extends Scheduler> getSchedulerClass() {
                            return Scheduler.EDITOR_SENSITIVE_TASK_SCHEDULER;
                        }

                        public void cancel() {}

                        @Override
                        public String toString() { return "BooParserResultTask " + i; }
                    }
                });
            }
        });

    // 2) create source file
    clearWorkDir();
    FileObject workDir = FileUtil.toFileObject(getWorkDir());
    FileObject testFile = FileUtil.createData(workDir, "bla.foo");
    FileUtil.setMIMEType("foo", "text/foo");
    OutputStream outputStream = testFile.getOutputStream();
    OutputStreamWriter writer = new OutputStreamWriter(outputStream);
    writer.append("Toto je testovaci file, na kterem se budou delat hnusne pokusy!!!");
    writer.close();
    Source source = Source.create(testFile);
    Document document = source.getDocument(true);
    document.putProperty("mimeType", "text/foo");
    document.putProperty(Language.class, new ALanguageHierarchy().language());
    TokenHierarchy th = TokenHierarchy.get(document);
    TokenSequence ts = th.tokenSequence();
    ts.tokenCount();
    test.check("1 - reschedule all schedulers\n");

    // 3) schedule CurrentDocumentScheduler
    for (Scheduler scheduler : Schedulers.getSchedulers()) {
        if (scheduler instanceof CurrentDocumentScheduler) {
            ((CurrentDocumentScheduler) scheduler).schedule(source);
        }
    }
    latch1.await();
    test.check("2 - insert 14 chars on offset 22\n");
    document.insertString(22, " (druha verze)", null);
    latch2.await();
    test.check("3 - remove 5 chars on offset 44\n");
    document.remove(44, 5);
    latch3.await();
    test.check("4 - end\n");
    assertEquals("", test.getResult());
}
public void run(RubyRuleContext context, List<Hint> result) {
    Node node = context.node;
    ParserResult info = context.parserResult;
    IfNode ifNode = (IfNode) node;
    if (ifNode.getCondition() == null) {
        // Can happen for this code:
        //   if ()
        //   end
        // (typically while editing)
        return;
    }
    Node body = ifNode.getThenBody();
    Node elseNode = ifNode.getElseBody();
    if (body != null && elseNode != null) {
        // Can't convert if-then-else conditionals
        return;
    }
    if (body == null && elseNode == null) {
        // Can't convert empty conditions
        return;
    }
    // Can't convert if !x/elseif blocks
    if (ifNode.getElseBody() != null && ifNode.getElseBody().getNodeType() == NodeType.IFNODE) {
        return;
    }
    int start = ifNode.getPosition().getStartOffset();
    if (!RubyHints.isNullOrInvisible(body) &&
            ( // Can't convert blocks with multiple statements
              body.getNodeType() == NodeType.BLOCKNODE ||
              // Already a statement modifier?
              body.getPosition().getStartOffset() <= start)) {
        return;
    } else if (!RubyHints.isNullOrInvisible(elseNode) &&
            (elseNode.getNodeType() == NodeType.BLOCKNODE ||
             elseNode.getPosition().getStartOffset() <= start)) {
        return;
    }
    BaseDocument doc = context.doc;
    try {
        int keywordOffset = ConvertIfToUnless.findKeywordOffset(context, ifNode);
        if (keywordOffset == -1 || keywordOffset > doc.getLength() - 1) {
            return;
        }
        char k = doc.getText(keywordOffset, 1).charAt(0);
        if (!(k == 'i' || k == 'u')) {
            return; // Probably ternary operator, ?:
        }
    } catch (BadLocationException ble) {
        Exceptions.printStackTrace(ble);
    }
    // If statement that is not already a statement modifier
    OffsetRange range = AstUtilities.getRange(node);
    if (RubyUtils.isRhtmlDocument(doc) || RubyUtils.isYamlDocument(doc)) {
        // Make sure that we're in a single contiguous Ruby section; if not, this won't work
        range = LexUtilities.getLexerOffsets(info, range);
        if (range == OffsetRange.NONE) {
            return;
        }
        try {
            doc.readLock();
            TokenHierarchy th = TokenHierarchy.get(doc);
            TokenSequence ts = th.tokenSequence();
            ts.move(range.getStart());
            if (!ts.moveNext() && !ts.movePrevious()) {
                return;
            }
            if (ts.offset() + ts.token().length() < range.getEnd()) {
                return;
            }
        } finally {
            doc.readUnlock();
        }
    }
    ConvertToModifier fix = new ConvertToModifier(context, ifNode);
    if (fix.getEditList() == null) {
        return;
    }
    List<HintFix> fixes = Collections.<HintFix>singletonList(fix);
    String displayName = NbBundle.getMessage(ConvertConditionals.class, "ConvertConditionals");
    Hint desc = new Hint(this, displayName, RubyUtils.getFileObject(info), range, fixes, 500);
    result.add(desc);
}