private void moveTokens(int offset, int end) { synchronized (this) { Integer floorKey = this.tokens.floorKey(offset); if (floorKey == null) { floorKey = offset; } NavigableMap<Integer, ILuposToken> tailMap = this.tokens.tailMap(floorKey, true); LinkedList<Integer> toDelete = new LinkedList<Integer>(); LinkedList<ILuposToken> toAdd = new LinkedList<ILuposToken>(); final int length = end - offset; for (Entry<Integer, ILuposToken> entry : tailMap.entrySet()) { ILuposToken oldToken = entry.getValue(); final int beginCharOldToken = entry.getKey(); if (beginCharOldToken >= offset) { toDelete.add(beginCharOldToken); toAdd.add( oldToken.create( oldToken.getDescription(), oldToken.getContents(), beginCharOldToken + length)); continue; } final int endCharOldToken = beginCharOldToken + oldToken.getContents().length(); if (endCharOldToken > offset) { toDelete.add(beginCharOldToken); final String oldContent = oldToken.getContents(); final int firstBorder = offset - beginCharOldToken; if (firstBorder > 0) { toAdd.add( oldToken.create( oldToken.getDescription().getErrorEnum(), oldContent.substring(0, offset - beginCharOldToken), beginCharOldToken)); } final int secondBorder = offset - beginCharOldToken; if (offset - beginCharOldToken > 0 && secondBorder < oldContent.length()) { toAdd.add( oldToken.create( oldToken.getDescription().getErrorEnum(), oldContent.substring(secondBorder), end)); } } } for (Integer indexOldToken : toDelete) { this.tokens.remove(indexOldToken); } for (ILuposToken newToken : toAdd) { this.tokens.put(newToken.getBeginChar(), newToken); // this.doc.setCharacterAttributes(newToken.getBeginChar(), // newToken.getContents().length(), LANGUAGE.getAttributeSet(newToken.getDescription()), // true); } } }
/**
 * Re-scans {@code content} from the beginning of the line containing {@code offset}
 * (or from the start of a previous token spanning several lines) and rebuilds the
 * token map and the character attributes of {@code this.doc} for the scanned region.
 *
 * <p>Scanning continues past {@code offsetEnd} until a newly scanned token beyond
 * {@code offsetEnd} is found to be identical to the token already stored there
 * (i.e. the rest of the document is unchanged), or until the scanner runs out of
 * tokens.
 *
 * @param offset the character position at which the modification starts
 * @param offsetEnd the character position at which the modification ends
 * @param content the complete current document text
 * @return the end position (begin + length) of the last token that was scanned,
 *     or the computed scan start position if no token was processed
 */
private int forceHighlightingInnerLoop(int offset, int offsetEnd, final String content) {
  // If the map is empty there are no old tokens to compare against / remove.
  final boolean firstTime = this.tokens.isEmpty();
  int start = offset;
  // after end of text?
  final int contentLength = content.length();
  if (start >= contentLength) {
    start = contentLength - 1;
  }
  if (start < 0) {
    start = 0;
  }
  // Move back to the beginning of the current line, so the scanner starts at a
  // well-defined boundary.
  while (start > 0 && content.charAt(start) != '\n') {
    start--;
  }
  // or did we do something in a token spanning over several lines?
  // just determine the previous token and check its borders...
  Entry<Integer, ILuposToken> floorOffsetEntry = this.tokens.floorEntry(start);
  if (floorOffsetEntry != null) {
    final int startCandidate = floorOffsetEntry.getKey();
    // If that token reaches up to (or beyond) the line start, begin scanning at
    // the token's own start instead.
    if (startCandidate < start
        && startCandidate + floorOffsetEntry.getValue().getContents().length() >= start) {
      start = startCandidate;
    }
  }
  int result = start;
  try {
    ILuposToken token;
    // Position the scanner at the computed start; presumably the last argument is
    // the scan limit — TODO confirm against the parser implementation.
    this.parser.setReaderTokenFriendly(this.luposDocumentReader, start, content.length());
    ILuposToken previousToken = null;
    // One-token lookahead: filled by the expected-next-tokens check below and
    // consumed at the top of the next iteration instead of calling the scanner.
    ILuposToken nextToken = null;
    do {
      token = (nextToken != null) ? nextToken : parser.getNextToken(content);
      nextToken = null;
      if (token != null) {
        if (previousToken != null) {
          // check if the two tokens combined are an error
          if (previousToken.getBeginChar() + previousToken.getContents().length()
              == token.getBeginChar()) {
            // there is no space between them => check them further...
            // it looks like it is a problem (for SPARQL queries/RIF rules/RDF data) if e.g. two
            // reserved words are written together...
            // decision is made in TYPE_ENUM classes...
            if (token
                .getDescription()
                .errorWhenDirectlyFollowingToken(previousToken.getDescription())) {
              // Merge both tokens into a single error token covering both.
              token =
                  token.create(
                      token.getDescription().getErrorEnum(),
                      previousToken.getContents() + token.getContents(),
                      previousToken.getBeginChar());
            }
          }
        }
        // some scanners are not perfect: With this, we can allow to combine two tokens into one
        TYPE_ENUM typeToBeCombined = token.getDescription().combineWith(token.getContents());
        if (typeToBeCombined != null) {
          ILuposToken nextToken2 = parser.getNextToken(content);
          if (nextToken2 != null) {
            if (nextToken2.getDescription() == typeToBeCombined) {
              // Expected successor => combine into one token of the original type,
              // spanning from this token's start to the successor's end.
              token =
                  token.create(
                      token.getDescription(),
                      content.substring(
                          token.getBeginChar(),
                          nextToken2.getBeginChar() + nextToken2.getContents().length()),
                      token.getBeginChar());
            } else {
              // Unexpected successor => combine, but mark the result as an error.
              token =
                  token.create(
                      token.getDescription().getErrorEnum(),
                      content.substring(
                          token.getBeginChar(),
                          nextToken2.getBeginChar() + nextToken2.getContents().length()),
                      token.getBeginChar());
            }
          }
        }
        // make some type of context-sensitive check for errors:
        // determine the next token and check if it is in the expected set of tokens of the
        // current token
        // (otherwise e.g. the RIF scanners seldom detects errors)
        Set<ILuposToken> expected = token.getDescription().expectedNextTokens();
        if (expected != null) {
          // The lookahead read here is reused as the current token of the next
          // iteration (see top of loop).
          nextToken = parser.getNextToken(content);
          if (nextToken != null) {
            boolean flagFound = false;
            for (ILuposToken expectedToken : expected) {
              if (expectedToken.getDescription() == nextToken.getDescription()) {
                // An empty expected-contents entry matches any contents of that type.
                if (expectedToken.getContents().equals("")
                    || expectedToken.getContents().compareTo(nextToken.getContents()) == 0) {
                  flagFound = true;
                  break;
                }
              }
            }
            if (!flagFound) {
              // Successor is not among the expected tokens => mark current token as error.
              token =
                  token.create(
                      token.getDescription().getErrorEnum(),
                      token.getContents(),
                      token.getBeginChar());
            }
          } else {
            // No successor at all although one was expected => error.
            token =
                token.create(
                    token.getDescription().getErrorEnum(),
                    token.getContents(),
                    token.getBeginChar());
          }
        }
        // flag is set when the newly scanned token is identical to the stored old
        // one — used below to detect that the rest of the document is unchanged.
        boolean flag = false;
        final int beginChar = token.getBeginChar();
        final int endChar = beginChar + token.getContents().length();
        if (!firstTime) {
          // check tokens for old tokens, which overlap with the new one and remove those ones.
          Entry<Integer, ILuposToken> floorEntry;
          do {
            floorEntry = this.tokens.floorEntry(endChar - 1);
            if (floorEntry != null) {
              ILuposToken oldToken = floorEntry.getValue();
              final int beginCharPreviousToken = oldToken.getBeginChar();
              if (beginCharPreviousToken == beginChar
                  && oldToken.getContents().compareTo(token.getContents()) == 0) {
                // case scanned token and old token are the same!
                flag = true;
                break;
              } else if (beginCharPreviousToken + oldToken.getContents().length() > beginChar) {
                // case overlapping token => remove old token!
                this.tokens.remove(floorEntry.getKey());
              } else {
                break;
              }
            }
          } while (floorEntry != null);
        }
        this.tokens.put(beginChar, token);
        // System.out.println(token);
        final int length = token.getContents().length();
        // Apply the highlighting attributes for this token to the document.
        this.doc.setCharacterAttributes(
            beginChar, length, LANGUAGE.getAttributeSet(token.getDescription()), true);
        result = beginChar + length;
        // Past the modified region and the token matches the previously stored one
        // => the remainder of the document is unchanged, stop re-scanning.
        if (beginChar > offsetEnd && flag) {
          break;
        }
        previousToken = token;
      }
    } while (token != null);
  } catch (Exception ex) {
    // NOTE(review): broad catch keeps the editor alive on scanner errors, but
    // only logs to stdout/stderr — consider proper logging.
    ex.printStackTrace();
    System.out.println(ex);
  }
  return result;
}