Example #1
  private UpdateEvent forceHighlighting(
      final List<Tuple<Integer, Integer>> areas, UpdateEvent nextEvent) {
    synchronized (this) {
      if (nextEvent != null || !this.buffer.isCurrentlyEmpty()) {
        // if some modifications are still pending (there should be only few),
        // handle these events first so that everything is correctly updated;
        // new events cannot occur here because of the lock in the key listener
        nextEvent = eventHandling(areas, nextEvent);
      }
      this.doc.text.setRepaint(false);
      final String content = this.luposDocumentReader.getText();
      for (Tuple<Integer, Integer> area : areas) {
        // determine the start offset for rescanning:
        // the whole line is rescanned, thus determine the start of the line

        forceHighlightingInnerLoop(area.getFirst(), area.getSecond(), content);
      }

      // However, line-based rescanning is not sufficient for tokens spanning several lines.
      // Therefore rescanning is additionally started at each error token located before the offset.
      // This could theoretically still miss errors, but such cases should not occur in practice
      // (theoretical case: a token c is inserted between tokens a and b, and "a b c" also forms
      // another token spanning several lines).

      LinkedList<ILuposToken> errorTokens = new LinkedList<ILuposToken>();
      for (ILuposToken token : this.tokens.values()) {
        if (token.getDescription().isError()) {
          errorTokens.add(token);
        }
      }

      int lastEnd = -1;
      for (ILuposToken token : errorTokens) {
        final int beginChar = token.getBeginChar();
        if (lastEnd < beginChar) { // rescan only if not already done...
          lastEnd =
              forceHighlightingInnerLoop(
                  beginChar, beginChar + token.getContents().length(), content);
        }
      }
      this.doc.text.setRepaint(true);
      this.doc.text.repaint();
    }
    return nextEvent;
  }
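
The method above keeps all tokens in a map ordered by their begin offset and rescans from every error token, skipping ranges that an earlier rescan already covered. Below is a minimal, self-contained sketch of just that skip logic, assuming a plain TreeMap<Integer, String> in place of the project's map of ILuposToken objects; the class RescanSketch and the helper rescanFrom are hypothetical stand-ins for forceHighlightingInnerLoop.

  import java.util.Map;
  import java.util.TreeMap;

  public class RescanSketch {

    // token begin offset -> token text (stands in for the map of error tokens)
    private final TreeMap<Integer, String> errorTokens = new TreeMap<Integer, String>();

    // rescan around every error token, but skip tokens whose range has already been
    // covered by a previous rescan (mirrors the "lastEnd < beginChar" check above)
    public void rescanErrorTokens(final String content) {
      int lastEnd = -1;
      for (final Map.Entry<Integer, String> entry : this.errorTokens.entrySet()) {
        final int beginChar = entry.getKey();
        if (lastEnd < beginChar) { // rescan only if not already done...
          lastEnd = rescanFrom(beginChar, beginChar + entry.getValue().length(), content);
        }
      }
    }

    // hypothetical stand-in for forceHighlightingInnerLoop:
    // returns the offset up to which the text has been rescanned
    private int rescanFrom(final int begin, final int end, final String content) {
      return Math.min(end, content.length());
    }
  }

Because the TreeMap iterates in ascending key order, the error tokens are visited from left to right, so the single lastEnd variable is enough to detect ranges that were already rescanned.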
Example #2
  private int forceHighlightingInnerLoop(int offset, int offsetEnd, final String content) {
    final boolean firstTime = this.tokens.isEmpty();
    int start = offset;
    // clamp the start offset in case it lies beyond the end of the text
    final int contentLength = content.length();
    if (start >= contentLength) {
      start = contentLength - 1;
    }
    if (start < 0) {
      start = 0;
    }
    // rewind to the beginning of the line containing the offset
    while (start > 0 && content.charAt(start) != '\n') {
      start--;
    }
    // the modification might also lie inside a token spanning several lines:
    // determine the previous token and check its borders...
    Entry<Integer, ILuposToken> floorOffsetEntry = this.tokens.floorEntry(start);
    if (floorOffsetEntry != null) {
      final int startCandidate = floorOffsetEntry.getKey();
      if (startCandidate < start
          && startCandidate + floorOffsetEntry.getValue().getContents().length() >= start) {
        start = startCandidate;
      }
    }

    int result = start;

    try {
      ILuposToken token;
      this.parser.setReaderTokenFriendly(this.luposDocumentReader, start, content.length());
      ILuposToken previousToken = null;
      ILuposToken nextToken = null;
      do {
        token = (nextToken != null) ? nextToken : parser.getNextToken(content);
        nextToken = null;

        if (token != null) {

          if (previousToken != null) {
            // check if the two tokens combined are an error
            if (previousToken.getBeginChar() + previousToken.getContents().length()
                == token.getBeginChar()) {
              // there is no space between them => check them further:
              // for SPARQL queries/RIF rules/RDF data it is usually an error if e.g. two
              // reserved words are written directly after each other;
              // the decision is made in the TYPE_ENUM classes
              if (token
                  .getDescription()
                  .errorWhenDirectlyFollowingToken(previousToken.getDescription())) {
                token =
                    token.create(
                        token.getDescription().getErrorEnum(),
                        previousToken.getContents() + token.getContents(),
                        previousToken.getBeginChar());
              }
            }
          }

          // some scanners are not perfect: this allows two scanned tokens to be combined into one
          TYPE_ENUM typeToBeCombined = token.getDescription().combineWith(token.getContents());
          if (typeToBeCombined != null) {
            ILuposToken nextToken2 = parser.getNextToken(content);
            if (nextToken2 != null) {
              if (nextToken2.getDescription() == typeToBeCombined) {
                token =
                    token.create(
                        token.getDescription(),
                        content.substring(
                            token.getBeginChar(),
                            nextToken2.getBeginChar() + nextToken2.getContents().length()),
                        token.getBeginChar());
              } else {
                token =
                    token.create(
                        token.getDescription().getErrorEnum(),
                        content.substring(
                            token.getBeginChar(),
                            nextToken2.getBeginChar() + nextToken2.getContents().length()),
                        token.getBeginChar());
              }
            }
          }

          // make a context-sensitive check for errors:
          // determine the next token and check whether it is in the set of tokens
          // expected after the current one
          // (otherwise e.g. the RIF scanner seldom detects errors)
          Set<ILuposToken> expected = token.getDescription().expectedNextTokens();
          if (expected != null) {
            nextToken = parser.getNextToken(content);
            if (nextToken != null) {
              boolean flagFound = false;
              for (ILuposToken expectedToken : expected) {
                if (expectedToken.getDescription() == nextToken.getDescription()) {
                  if (expectedToken.getContents().equals("")
                      || expectedToken.getContents().compareTo(nextToken.getContents()) == 0) {
                    flagFound = true;
                    break;
                  }
                }
              }
              if (!flagFound) {
                token =
                    token.create(
                        token.getDescription().getErrorEnum(),
                        token.getContents(),
                        token.getBeginChar());
              }
            } else {
              token =
                  token.create(
                      token.getDescription().getErrorEnum(),
                      token.getContents(),
                      token.getBeginChar());
            }
          }

          boolean flag = false;
          final int beginChar = token.getBeginChar();
          final int endChar = beginChar + token.getContents().length();
          if (!firstTime) {
            // look for old tokens which overlap with the new one and remove them
            Entry<Integer, ILuposToken> floorEntry;
            do {
              floorEntry = this.tokens.floorEntry(endChar - 1);
              if (floorEntry != null) {
                ILuposToken oldToken = floorEntry.getValue();
                final int beginCharPreviousToken = oldToken.getBeginChar();
                if (beginCharPreviousToken == beginChar
                    && oldToken.getContents().compareTo(token.getContents()) == 0) {
                  // case scanned token and old token are the same!
                  flag = true;
                  break;
                } else if (beginCharPreviousToken + oldToken.getContents().length() > beginChar) {
                  // case overlapping token => remove old token!
                  this.tokens.remove(floorEntry.getKey());
                } else {
                  break;
                }
              }
            } while (floorEntry != null);
          }

          this.tokens.put(beginChar, token);
          // System.out.println(token);

          final int length = token.getContents().length();

          this.doc.setCharacterAttributes(
              beginChar, length, LANGUAGE.getAttributeSet(token.getDescription()), true);

          result = beginChar + length;

          // stop once the rescanned range lies behind the modified area and the scanned
          // token is identical to an already stored one (nothing can change any more)
          if (beginChar > offsetEnd && flag) {
            break;
          }
          previousToken = token;
        }

      } while (token != null);
    } catch (Exception ex) {
      ex.printStackTrace();
      System.out.println(ex);
    }
    return result;
  }
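
The central trick in this method is the overlap check: floorEntry on the ordered token map yields the last old token starting before the end of the newly scanned token, and overlapping entries are removed until a non-overlapping one is reached. Below is a minimal sketch of just that part, again with a TreeMap<Integer, String> standing in for the token map; the class name OverlapRemovalSketch is hypothetical.

  import java.util.Map.Entry;
  import java.util.TreeMap;

  public class OverlapRemovalSketch {

    // token begin offset -> token text
    private final TreeMap<Integer, String> tokens = new TreeMap<Integer, String>();

    // store a newly scanned token covering [beginChar, beginChar + contents.length())
    // after removing all old tokens overlapping that range (mirrors the floorEntry loop above)
    public void putToken(final int beginChar, final String contents) {
      final int endChar = beginChar + contents.length();
      Entry<Integer, String> floorEntry;
      do {
        // greatest old token starting before endChar
        floorEntry = this.tokens.floorEntry(endChar - 1);
        if (floorEntry != null) {
          final int oldBegin = floorEntry.getKey();
          final int oldEnd = oldBegin + floorEntry.getValue().length();
          if (oldEnd > beginChar) {
            // overlapping old token => remove it and look for the next candidate
            this.tokens.remove(oldBegin);
          } else {
            // old token ends before the new one starts => nothing more to remove
            break;
          }
        }
      } while (floorEntry != null);
      this.tokens.put(beginChar, contents);
    }
  }

In contrast to the original, an old token identical to the new one is simply removed and re-inserted instead of being detected via the flag variable; the resulting map is the same.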
Example #3
 private void removeTokens(int offset, int end) {
   synchronized (this) {
     Integer floorKey = this.tokens.floorKey(offset);
     if (floorKey == null) {
       floorKey = offset;
     }
     final int contentLength = this.doc.text.getText().length();
     NavigableMap<Integer, ILuposToken> tailMap = this.tokens.tailMap(floorKey, true);
     LinkedList<Integer> toDelete = new LinkedList<Integer>();
     LinkedList<ILuposToken> toAdd = new LinkedList<ILuposToken>();
     final int length = end - offset;
     for (Entry<Integer, ILuposToken> entry : tailMap.entrySet()) {
       ILuposToken oldToken = entry.getValue();
       final int beginCharOldToken = entry.getKey();
       final int endCharOldToken = beginCharOldToken + oldToken.getContents().length();
       if (beginCharOldToken >= offset && endCharOldToken <= end) {
         toDelete.add(beginCharOldToken);
         continue;
       }
       if (beginCharOldToken >= end) {
         toDelete.add(beginCharOldToken);
         if (beginCharOldToken < contentLength) {
           toAdd.add(
               oldToken.create(
                   oldToken.getDescription(), oldToken.getContents(), beginCharOldToken - length));
         }
         continue;
       }
       if (endCharOldToken > offset) {
         toDelete.add(beginCharOldToken);
         final String oldContent = oldToken.getContents();
         final int firstBorder = offset - beginCharOldToken;
         if (firstBorder > 0) {
           toAdd.add(
               oldToken.create(
                   oldToken.getDescription().getErrorEnum(),
                   oldContent.substring(0, firstBorder),
                   beginCharOldToken));
         }
         final int secondBorder =
             end - beginCharOldToken; // oldContent.length()-(endCharOldToken-end);
         if (secondBorder > 0 && secondBorder < oldContent.length()) {
           toAdd.add(
               oldToken.create(
                   oldToken.getDescription().getErrorEnum(),
                   oldContent.substring(secondBorder),
                   offset));
         }
       }
     }
     for (Integer indexOldToken : toDelete) {
       this.tokens.remove(indexOldToken);
     }
     for (ILuposToken newToken : toAdd) {
       this.tokens.put(newToken.getBeginChar(), newToken);
       // this.doc.setCharacterAttributes(newToken.getBeginChar(), newToken.getContents().length(),
       //     LANGUAGE.getAttributeSet(newToken.getDescription()), true);
     }
   }
 }
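
removeTokens distinguishes three cases for every token at or behind the deleted text range: tokens lying completely inside the range are dropped, tokens behind it are shifted to the left by the length of the deletion, and tokens crossing a border are split into a head and a tail part (which the original additionally marks with the error enum and checks against the current document length). Below is a minimal sketch of the same bookkeeping on a TreeMap<Integer, String>; the class name TokenRemovalSketch is hypothetical.

  import java.util.LinkedList;
  import java.util.Map.Entry;
  import java.util.TreeMap;

  public class TokenRemovalSketch {

    // token begin offset -> token text
    private final TreeMap<Integer, String> tokens = new TreeMap<Integer, String>();

    // adjust the token map after the characters in [offset, end) have been deleted from the text
    public void removeRange(final int offset, final int end) {
      final int length = end - offset;
      Integer from = this.tokens.floorKey(offset);
      if (from == null) {
        from = offset;
      }
      final LinkedList<Integer> toDelete = new LinkedList<Integer>();
      final TreeMap<Integer, String> toAdd = new TreeMap<Integer, String>();
      for (final Entry<Integer, String> entry : this.tokens.tailMap(from, true).entrySet()) {
        final int begin = entry.getKey();
        final String contents = entry.getValue();
        final int endChar = begin + contents.length();
        if (begin >= offset && endChar <= end) {
          // token lies completely inside the deleted range => drop it
          toDelete.add(begin);
        } else if (begin >= end) {
          // token lies behind the deleted range => shift it to the left
          toDelete.add(begin);
          toAdd.put(begin - length, contents);
        } else if (endChar > offset) {
          // token crosses a border of the deleted range => split it
          toDelete.add(begin);
          final int firstBorder = offset - begin;
          if (firstBorder > 0) {
            toAdd.put(begin, contents.substring(0, firstBorder));
          }
          final int secondBorder = end - begin;
          if (secondBorder > 0 && secondBorder < contents.length()) {
            toAdd.put(offset, contents.substring(secondBorder));
          }
        }
      }
      for (final Integer key : toDelete) {
        this.tokens.remove(key);
      }
      this.tokens.putAll(toAdd);
    }
  }

As in the original, deletions and re-insertions are collected first and applied after the loop, so the ordered map is never modified while it is being iterated.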