public void run() {
    Logger.debug(this, "Main loop running...");

    try {
      mConnectedToWoT = connectToWoT();

      if (mConnectedToWoT) {
        try {
          fetchOwnIdentities(); // Fetch the own identities first to prevent own-identities from
                                // being imported as normal identities...
          fetchIdentities();
          garbageCollectIdentities();
        } catch (Exception e) {
          Logger.error(this, "Fetching identities failed.", e);
        }
      }
    } finally {
      final long sleepTime =
          mConnectedToWoT
              ? (THREAD_PERIOD / 2 + mRandom.nextInt(THREAD_PERIOD))
              : WOT_RECONNECT_DELAY;
      Logger.debug(this, "Sleeping for " + (sleepTime / (60 * 1000)) + " minutes.");
      mTicker.queueTimedJob(
          this, "Freetalk " + this.getClass().getSimpleName(), sleepTime, false, true);
    }

    Logger.debug(this, "Main loop finished.");
  }
  // TODO: This function should be a feature of WoT.
  @SuppressWarnings("unchecked")
  private synchronized void updateShortestUniqueNicknameCache() {
    Logger.debug(this, "Updating shortest unique nickname cache...");

    // We don't use getAllIdentities() because we do not need to have initializeTransient() called
    // on each identity; we only query strings anyway.
    final Query q = db.query();
    q.constrain(WoTIdentity.class);
    ObjectSet<WoTIdentity> result = q.execute();
    final WoTIdentity[] identities = result.toArray(new WoTIdentity[result.size()]);

    Arrays.sort(
        identities,
        new Comparator<WoTIdentity>() {

          public int compare(WoTIdentity i1, WoTIdentity i2) {
            return i1.getFreetalkAddress().compareToIgnoreCase(i2.getFreetalkAddress());
          }
        });

    final String[] nicknames = new String[identities.length];

    for (int i = 0; i < identities.length; ++i) {
      nicknames[i] = identities[i].getNickname();

      int minLength = nicknames[i].length();
      int firstDuplicate;

      do {
        firstDuplicate = i;

        while ((firstDuplicate - 1) >= 0
            && nicknames[firstDuplicate - 1].equalsIgnoreCase(nicknames[i])) {
          --firstDuplicate;
        }

        if (firstDuplicate < i) {
          ++minLength;

          for (int j = i; j >= firstDuplicate; --j) {
            nicknames[j] = identities[j].getFreetalkAddress(minLength);
          }
        }
      } while (firstDuplicate != i);
    }

    final HashMap<String, String> newCache = new HashMap<String, String>(identities.length * 2);

    for (int i = 0; i < identities.length; ++i) newCache.put(identities[i].getID(), nicknames[i]);

    mShortestUniqueNicknameCache = newCache;
    mShortestUniqueNicknameCacheNeedsUpdate = false;

    Logger.debug(this, "Finished updating shortest unique nickname cache.");
  }
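The method above disambiguates duplicate nicknames by sorting the identities by Freetalk address and then lengthening the displayed address prefix until no case-insensitive duplicate remains. A minimal standalone sketch of the same idea, operating on plain strings instead of WoTIdentity; the NicknameDisambiguation class and shortestUnique() helper are illustrative, not part of Freetalk:

import java.util.LinkedHashMap;
import java.util.Map;

class NicknameDisambiguation {
  /**
   * Maps each full address (nickname@id) to the shortest prefix, starting at the bare nickname,
   * that no other address shares case-insensitively.
   */
  static Map<String, String> shortestUnique(String[] nicknames, String[] fullAddresses) {
    Map<String, String> result = new LinkedHashMap<String, String>();
    for (int i = 0; i < fullAddresses.length; i++) {
      String full = fullAddresses[i];
      int len = nicknames[i].length();
      String candidate = nicknames[i];
      boolean unique = false;
      while (!unique && len <= full.length()) {
        candidate = full.substring(0, len);
        unique = true;
        for (int j = 0; j < fullAddresses.length; j++) {
          if (j != i && fullAddresses[j].regionMatches(true, 0, candidate, 0, len)) {
            unique = false; // someone else shares this prefix, extend it
            break;
          }
        }
        len++;
      }
      result.put(full, candidate);
    }
    return result;
  }
}

For two identities both named "Alice" with addresses "Alice@abc..." and "Alice@abd...", the sketch would return "Alice@abc" and "Alice@abd" respectively, which is the kind of result the cache above provides to the UI.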
Example 3
 public void onSent(int totalPacketLength, BasePeerNode pn) {
   int totalMessageData = 0;
   // Number of fragments in this packet; the overhead computed below is split evenly between them.
   int size = fragments.size();
   int biggest = 0;
   for (MessageFragment frag : fragments) {
     totalMessageData += frag.fragmentLength;
     if (biggest < frag.messageLength) biggest = frag.messageLength;
   }
   int overhead = totalPacketLength - totalMessageData;
   if (logDEBUG)
     Logger.debug(
         this,
         "Total packet overhead: "
             + overhead
             + " for "
             + size
             + " messages total message length "
             + totalMessageData
             + " total packet length "
             + totalPacketLength
             + " biggest message "
             + biggest);
   for (MessageFragment frag : fragments) {
     // frag.wrapper is always non-null on sending.
     frag.wrapper.onSent(
         frag.fragmentOffset, frag.fragmentOffset + frag.fragmentLength - 1, overhead / size, pn);
   }
 }
Example 4
 static {
   Logger.minor(NativeThread.class, "Running init()");
   // Loading the NativeThread library isn't useful on macOS
   boolean maybeLoadNative =
       ("Linux".equalsIgnoreCase(System.getProperty("os.name")))
           && (NodeStarter.extBuildNumber > 18);
   Logger.debug(NativeThread.class, "Run init(): should loadNative=" + maybeLoadNative);
   if (maybeLoadNative && LibraryLoader.loadNative("/freenet/support/io/", "NativeThread")) {
     NATIVE_PRIORITY_BASE = getLinuxPriority();
     NATIVE_PRIORITY_RANGE = 20 - NATIVE_PRIORITY_BASE;
     System.out.println(
         "Using the NativeThread implementation (base nice level is "
             + NATIVE_PRIORITY_BASE
             + ')');
      // there are 3 main priority levels
     HAS_THREE_NICE_LEVELS = NATIVE_PRIORITY_RANGE >= 3;
     HAS_ENOUGH_NICE_LEVELS = NATIVE_PRIORITY_RANGE >= ENOUGH_NICE_LEVELS;
     HAS_PLENTY_NICE_LEVELS = NATIVE_PRIORITY_RANGE >= JAVA_PRIORITY_RANGE;
     if (!(HAS_ENOUGH_NICE_LEVELS && HAS_THREE_NICE_LEVELS))
       System.err.println(
           "WARNING!!! The JVM has been niced down to a level which won't allow it to schedule threads properly! LOWER THE NICE LEVEL!!");
     _loadNative = true;
   } else {
     // unused anyway
     NATIVE_PRIORITY_BASE = 0;
     NATIVE_PRIORITY_RANGE = 19;
     HAS_THREE_NICE_LEVELS = true;
     HAS_ENOUGH_NICE_LEVELS = true;
     HAS_PLENTY_NICE_LEVELS = true;
     _loadNative = false;
   }
   Logger.minor(NativeThread.class, "Run init(): _loadNative = " + _loadNative);
 }
  @SuppressWarnings("unchecked")
  private void garbageCollectIdentities() {
    final MessageManager messageManager = mFreetalk.getMessageManager();
    final PersistentTaskManager taskManager = mFreetalk.getTaskManager();

    synchronized (this) {
      if (mIdentityFetchInProgress
          || mOwnIdentityFetchInProgress
          || mLastIdentityFetchTime == 0
          || mLastOwnIdentityFetchTime == 0) return;

      /* Executing the thread loop once will always take longer than THREAD_PERIOD. Therefore, if we set the limit to 3*THREAD_PERIOD,
       * it will hit identities which were last received more than 2*THREAD_PERIOD ago, not exactly 3*THREAD_PERIOD ago. */
      long lastAcceptTime =
          Math.min(mLastIdentityFetchTime, mLastOwnIdentityFetchTime) - GARBAGE_COLLECT_DELAY;
      lastAcceptTime =
          Math.max(
              lastAcceptTime,
              0); // This is not really needed but a time less than 0 does not make sense.

      Query q = db.query();
      q.constrain(WoTIdentity.class);
      q.descend("mLastReceivedFromWoT").constrain(lastAcceptTime).smaller();
      ObjectSet<WoTIdentity> result = q.execute();

      for (WoTIdentity identity : result) {
        identity.initializeTransient(mFreetalk);
        Logger.debug(this, "Garbage collecting identity " + identity);
        deleteIdentity(identity, messageManager, taskManager);
      }

      if (mShortestUniqueNicknameCacheNeedsUpdate) updateShortestUniqueNicknameCache();
    }
  }
  public void start() {
    Logger.debug(this, "Starting...");

    deleteDuplicateIdentities();

    mTicker.queueTimedJob(this, "Freetalk " + this.getClass().getSimpleName(), 0, false, true);

    // TODO: Queue this as a job as well.
    try {
      updateShortestUniqueNicknameCache();
    } catch (Exception e) {
      Logger.error(this, "Initializing shortest unique nickname cache failed", e);
    }

    Logger.debug(this, "Started.");
  }
Example 7
 public FreenetURI(FreenetURI uri) {
   //		this.uniqueHashCode = super.hashCode();
   if (uri.keyType == null) throw new NullPointerException();
   keyType = uri.keyType;
   docName = uri.docName;
   if (uri.metaStr != null) {
     metaStr = new String[uri.metaStr.length];
     System.arraycopy(uri.metaStr, 0, metaStr, 0, metaStr.length);
   } else metaStr = null;
   if (uri.routingKey != null) {
     routingKey = new byte[uri.routingKey.length];
     System.arraycopy(uri.routingKey, 0, routingKey, 0, routingKey.length);
   } else routingKey = null;
   if (uri.cryptoKey != null) {
     cryptoKey = new byte[uri.cryptoKey.length];
     System.arraycopy(uri.cryptoKey, 0, cryptoKey, 0, cryptoKey.length);
   } else cryptoKey = null;
   if (uri.extra != null) {
     extra = new byte[uri.extra.length];
     System.arraycopy(uri.extra, 0, extra, 0, extra.length);
   } else extra = null;
   this.suggestedEdition = uri.suggestedEdition;
   if (logDEBUG)
     Logger.debug(
         this, "Copied: " + toString() + " from " + uri.toString(), new Exception("debug"));
 }
 public boolean objectCanNew(ObjectContainer container) {
   if (finished) {
     Logger.error(this, "objectCanNew when already finished on " + this);
     return false;
   }
   if (logDEBUG) Logger.debug(this, "objectCanNew() on " + this, new Exception("debug"));
   return true;
 }
 /**
  * Add urgent messages, then non-urgent messages. Add a load message if we need to.
  *
  * @param size the current size of <code>messages</code>
  * @param minSize the size when <code>messages</code> is empty
  * @param maxSize the maximum size of <code>messages</code>
  * @param now the current time
  * @param messages the list that messages will be added to
  * @param incomplete set to <code>true</code> if some queued messages did not fit
  * @return the resulting size of <code>messages</code>
  */
 int addPriorityMessages(
     int size,
     int minSize,
     int maxSize,
     long now,
     ArrayList<MessageItem> messages,
     MutableBoolean incomplete) {
   synchronized (PeerMessageQueue.this) {
     // Urgent messages first.
     if (logMINOR) {
       int nonEmpty = nonEmptyItemsWithID == null ? 0 : nonEmptyItemsWithID.size();
       int empty = emptyItemsWithID == null ? 0 : emptyItemsWithID.size();
       int byID = itemsByID == null ? 0 : itemsByID.size();
       if (nonEmpty + empty < byID) {
         Logger.error(
             this,
             "Leaking itemsByID? non empty = "
                 + nonEmpty
                 + " empty = "
                 + empty
                 + " by ID = "
                 + byID
                 + " on "
                 + this);
       } else if (logDEBUG)
         Logger.debug(
             this,
             "Items: non empty "
                 + nonEmpty
                 + " empty "
                 + empty
                 + " by ID "
                 + byID
                 + " on "
                 + this);
     }
     moveToUrgent(now);
     clearOldNonUrgent(now);
     size = addUrgentMessages(size, minSize, maxSize, now, messages);
     if (size < 0) {
       size = -size;
       incomplete.value = true;
       return size;
     }
     // If no more urgent messages, try to add some non-urgent messages too.
     size = addNonUrgentMessages(size, minSize, maxSize, now, messages);
     if (size < 0) {
       size = -size;
       incomplete.value = true;
     }
     return size;
   }
 }
 @Override
 public synchronized void onSuccess(BaseClientPutter state, ObjectContainer container) {
   try {
     if (logDEBUG)
       Logger.debug(this, "Successfully inserted WoTOwnMessageList at " + state.getURI());
     mMessageManager.onMessageListInsertSucceeded(state.getURI());
   } catch (Exception e) {
     Logger.error(this, "WoTOwnMessageList insert succeeded but onSuccess() failed", e);
   } finally {
     removeInsert(state);
     Closer.close(((ClientPutter) state).getData());
   }
 }
 private void moveToUrgent(long now) {
   if (itemsNonUrgent == null) return;
   ListIterator<MessageItem> it = itemsNonUrgent.listIterator();
   int moved = 0;
   while (it.hasNext()) {
     MessageItem item = it.next();
     if (item.submitted + PacketSender.MAX_COALESCING_DELAY <= now) {
       // Move to urgent list
       long id = item.getID();
       Items list;
       if (itemsByID == null) {
         itemsByID = new HashMap<Long, Items>();
         if (nonEmptyItemsWithID == null)
           nonEmptyItemsWithID = new DoublyLinkedListImpl<Items>();
         list = new Items(id);
         nonEmptyItemsWithID.push(list);
         itemsByID.put(id, list);
       } else {
         list = itemsByID.get(id);
         if (list == null) {
           list = new Items(id);
           if (nonEmptyItemsWithID == null)
             nonEmptyItemsWithID = new DoublyLinkedListImpl<Items>();
           // In order to ensure fairness, we add it at the beginning.
           // addLast() is typically called by sendAsync().
           // If there are later items they are probably block transfers that are
           // already in progress; it is fairer to send the new item first.
           nonEmptyItemsWithID.unshift(list);
           itemsByID.put(id, list);
         } else {
           if (list.items.isEmpty()) {
             assert (list.getParent() == emptyItemsWithID);
             // It already exists, so it has a valid time.
             // Which is probably in the past, so use Forward.
             moveFromEmptyToNonEmptyForward(list);
           } else {
             assert (list.getParent() == nonEmptyItemsWithID);
           }
         }
       }
       list.addLast(item);
       it.remove();
       moved++;
     } else {
       if (logDEBUG && moved > 0)
         Logger.debug(this, "Moved " + moved + " items to urgent round-robin");
       return;
     }
   }
 }
Example 12
  /**
   * Loads an existing Config object from the database and adds any missing default values to it,
   * or creates and stores a new one if none exists.
   *
   * @return The config object.
   */
  public static Config loadOrCreate(WoT myWoT) {
    ExtObjectContainer db = myWoT.getDB();
    synchronized (db.lock()) {
      Config config;
      ObjectSet<Config> result = db.queryByExample(Config.class);

      if (result.size() == 0) {
        Logger.debug(myWoT, "Creating new Config...");
        config = new Config(myWoT);
        config.storeAndCommit();
      } else {
        if (result.size() > 1) /* Do not throw, we do not want to prevent WoT from starting up. */
          Logger.error(myWoT, "Multiple config objects stored!");

        Logger.debug(myWoT, "Loaded config.");
        config = result.next();
        config.initializeTransient(myWoT);
        config.setDefaultValues(false);
      }

      return config;
    }
  }
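loadOrCreate() above is the usual db4o load-or-create pattern: query by example, create and commit when nothing is stored, otherwise re-attach the loaded object. A hedged sketch of the same shape for a hypothetical Settings class; only the queryByExample(), store() and commit() calls on the db4o ObjectContainer are assumed:

import com.db4o.ObjectContainer;
import com.db4o.ObjectSet;

final class SettingsLoader {
  static final class Settings { /* hypothetical persisted fields */ }

  /** Loads the single stored Settings object, or creates and stores one if none exists. */
  static Settings loadOrCreate(ObjectContainer db) {
    ObjectSet<Settings> result = db.queryByExample(Settings.class);
    if (result.size() == 0) {
      Settings settings = new Settings();
      db.store(settings); // persist the fresh object...
      db.commit();        // ...and make it durable
      return settings;
    }
    if (result.size() > 1) {
      // Mirror Config.loadOrCreate(): log instead of throwing so startup is not blocked.
      System.err.println("Multiple Settings objects stored!");
    }
    return result.next();
  }
}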
Example 13
 public ToadletContextImpl(
     Socket sock,
     MultiValueTable<String, String> headers,
     BucketFactory bf,
     PageMaker pageMaker,
     ToadletContainer container)
     throws IOException {
   this.headers = headers;
   this.closed = false;
   sockOutputStream = sock.getOutputStream();
   remoteAddr = sock.getInetAddress();
   if (Logger.shouldLog(Logger.DEBUG, this)) Logger.debug(this, "Connection from " + remoteAddr);
   this.bf = bf;
   this.pagemaker = pageMaker;
   this.container = container;
 }
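Most examples on this page guard Logger.debug() behind either a cached logDEBUG flag or an explicit Logger.shouldLog(Logger.DEBUG, this) check, as in the constructor above, so the message string is never built when DEBUG logging is off. A minimal sketch of that guard pattern; the ExampleWorker class is hypothetical and only the Logger calls already shown above are used:

import freenet.support.Logger;

class ExampleWorker {
  // Evaluate the threshold once; long-lived objects may want to re-check it periodically.
  private final boolean logDEBUG = Logger.shouldLog(Logger.DEBUG, this);

  void process(String item) {
    // The guard skips the string concatenation entirely when DEBUG is disabled.
    if (logDEBUG) Logger.debug(this, "Processing item " + item);
    // ... actual work ...
  }
}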
Example 14
 private void realRun(DatagramPacket packet) {
   // Single receiving thread
   boolean gotPacket = getPacket(packet);
   long now = System.currentTimeMillis();
   if (gotPacket) {
     long startTime = System.currentTimeMillis();
     Peer peer = new Peer(packet.getAddress(), packet.getPort());
     tracker.receivedPacketFrom(peer);
     long endTime = System.currentTimeMillis();
     if (endTime - startTime > 50) {
       if (endTime - startTime > 3000) {
         Logger.error(this, "packet creation took " + (endTime - startTime) + "ms");
       } else {
         if (logMINOR) Logger.minor(this, "packet creation took " + (endTime - startTime) + "ms");
       }
     }
     byte[] data = packet.getData();
     int offset = packet.getOffset();
     int length = packet.getLength();
     try {
       if (logMINOR) Logger.minor(this, "Processing packet of length " + length + " from " + peer);
       startTime = System.currentTimeMillis();
       lowLevelFilter.process(data, offset, length, peer, now);
       endTime = System.currentTimeMillis();
       if (endTime - startTime > 50) {
         if (endTime - startTime > 3000) {
           Logger.error(this, "processing packet took " + (endTime - startTime) + "ms");
         } else {
           if (logMINOR)
             Logger.minor(this, "processing packet took " + (endTime - startTime) + "ms");
         }
       }
       if (logMINOR) Logger.minor(this, "Successfully handled packet length " + length);
     } catch (Throwable t) {
       Logger.error(this, "Caught " + t + " from " + lowLevelFilter, t);
     }
   } else {
     if (logDEBUG) Logger.debug(this, "No packet received");
   }
 }
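realRun() above times each stage and escalates the log level with the measured duration: nothing below 50ms, Logger.minor() up to 3 seconds, Logger.error() beyond that. A small hedged helper capturing the same escalation; TimingLog and logSlow() are illustrative names, not part of Freenet:

import freenet.support.Logger;

final class TimingLog {
  private static final long MINOR_THRESHOLD_MS = 50;
  private static final long ERROR_THRESHOLD_MS = 3000;

  /** Logs how long an operation took, escalating the level as it gets slower. */
  static void logSlow(Object reporter, String what, long startTime, boolean logMINOR) {
    long elapsed = System.currentTimeMillis() - startTime;
    if (elapsed <= MINOR_THRESHOLD_MS) return; // fast enough, stay quiet
    if (elapsed > ERROR_THRESHOLD_MS) {
      Logger.error(reporter, what + " took " + elapsed + "ms");
    } else if (logMINOR) {
      Logger.minor(reporter, what + " took " + elapsed + "ms");
    }
  }
}

A call site would mirror the code above: record startTime before the work, then call TimingLog.logSlow(this, "processing packet", startTime, logMINOR) afterwards.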
 private void clearOldNonUrgent(long now) {
   int removed = 0;
   if (emptyItemsWithID == null) return;
   while (true) {
     if (emptyItemsWithID.isEmpty()) return;
     Items list = emptyItemsWithID.head();
     if (!list.items.isEmpty()) {
       // FIXME remove paranoia
       Logger.error(this, "List with items in emptyItemsWithID!!");
       emptyItemsWithID.remove(list);
       addToNonEmptyBackward(list);
       return;
     }
     if (list.timeLastSent == -1 || now - list.timeLastSent > FORGET_AFTER) {
       // FIXME: Urgh, what a braindead API! remove(Object) on a Map<Long, Items> !?!?!?!
       // Anyway we'd better check the return value!
       Items old = itemsByID.remove(list.id);
       if (old == null)
         Logger.error(this, "List was not in the items by ID tracker: " + list.id);
       else if (old != list)
         Logger.error(
             this,
             "Different list in the items by ID tracker: "
                 + old
                 + " not "
                 + list
                 + " for "
                 + list.id);
       emptyItemsWithID.remove(list);
       removed++;
     } else {
       if (logDEBUG && removed > 0)
         Logger.debug(this, "Removed " + removed + " old empty UID trackers");
       break;
     }
   }
 }
  /**
   * You have to synchronize on this <code>WoTMessageListInserter</code> and then on the <code>
   * WoTMessageManager</code> when using this function.
   */
  private void insertMessageList(WoTOwnMessageList list)
      throws TransformerException, ParserConfigurationException, NoSuchMessageException,
          IOException, InsertException {
    Bucket tempB = mTBF.makeBucket(4096); /* TODO: set to a reasonable value */
    OutputStream os = null;

    try {
      os = tempB.getOutputStream();
      // This is what requires synchronization on the WoTMessageManager: While being marked as
      // "being inserted", message lists cannot be modified anymore,
      // so it must be guaranteed that the "being inserted" mark does not change while we encode the
      // XML etc.
      mMessageManager.onMessageListInsertStarted(list);

      mXML.encode(mMessageManager, list, os);
      os.close();
      os = null;
      tempB.setReadOnly();

      /* We do not specify a ClientMetaData with mimetype because that would result in the insertion of an additional CHK */
      InsertBlock ib = new InsertBlock(tempB, null, list.getInsertURI());
      InsertContext ictx = mClient.getInsertContext(true);

      ClientPutter pu =
          mClient.insert(
              ib, false, null, false, ictx, this, RequestStarter.INTERACTIVE_PRIORITY_CLASS);
      addInsert(pu);
      tempB = null;

      if (logDEBUG)
        Logger.debug(this, "Started insert of WoTOwnMessageList at request URI " + list.getURI());
    } finally {
      if (tempB != null) tempB.free();
      Closer.close(os);
    }
  }
  @SuppressWarnings("unchecked")
  private void parseIdentities(SimpleFieldSet params, boolean bOwnIdentities) {
    if (bOwnIdentities) Logger.normal(this, "Parsing received own identities...");
    else Logger.normal(this, "Parsing received identities...");

    int idx;
    int ignoredCount = 0;
    int newCount = 0;

    for (idx = 0; ; idx++) {
      String identityID = params.get("Identity" + idx);
      if (identityID == null
          || identityID.equals("")) /* TODO: Figure out whether the second condition is necessary */
        break;
      String requestURI = params.get("RequestURI" + idx);
      String insertURI = bOwnIdentities ? params.get("InsertURI" + idx) : null;
      String nickname = params.get("Nickname" + idx);

      if (nickname == null || nickname.length() == 0) {
        // If an identity publishes an invalid nickname in one of its first WoT inserts then WoT
        // will return an empty nickname for that identity until a new XML has been published with
        // a valid nickname. We ignore the identity until then to prevent confusing error logs.
        // TODO: Maybe handle this in WoT. Would require checks in many places though.
        continue;
      }

      synchronized (this) {
        /* We lock here and not during the whole function to allow other threads to execute */
        Query q = db.query(); // TODO: Encapsulate the query in a function...
        q.constrain(WoTIdentity.class);
        q.descend("mID").constrain(identityID);
        ObjectSet<WoTIdentity> result = q.execute();
        WoTIdentity id = null;

        if (result.size() == 0) {
          try {
            importIdentity(bOwnIdentities, identityID, requestURI, insertURI, nickname);
            ++newCount;
          } catch (Exception e) {
            Logger.error(this, "Importing a new identity failed.", e);
          }
        } else {
          Logger.debug(this, "Not importing already existing identity " + requestURI);
          ++ignoredCount;

          assert (result.size() == 1);
          id = result.next();
          id.initializeTransient(mFreetalk);

          if (bOwnIdentities != (id instanceof WoTOwnIdentity)) {
            // The type of the identity changed so we need to delete and re-import it.

            try {
              Logger.normal(this, "Identity type changed, replacing it: " + id);
              // We MUST NOT take the following locks because deleteIdentity does other locks
              // (MessageManager/TaskManager) which must happen before...
              // synchronized(id)
              // synchronized(db.lock())
              deleteIdentity(id, mFreetalk.getMessageManager(), mFreetalk.getTaskManager());
              importIdentity(bOwnIdentities, identityID, requestURI, insertURI, nickname);
            } catch (Exception e) {
              Logger.error(this, "Replacing a WoTIdentity with WoTOwnIdentity failed.", e);
            }

          } else { // Normal case: Update the last received time of the identity
            synchronized (id) {
              synchronized (db.lock()) {
                try {
                  // TODO: The thread sometimes takes hours to parse the identities and I don't
                  // know why. So right now it's better to re-query the time for each identity.
                  id.setLastReceivedFromWoT(CurrentTimeUTC.getInMillis());
                  id.checkedCommit(this);
                } catch (Exception e) {
                  Persistent.checkedRollback(db, this, e);
                }
              }
            }
          }
        }
      }

      Thread.yield();
    }

    Logger.normal(
        this,
        "parseIdentities(bOwnIdentities=="
            + bOwnIdentities
            + " received "
            + idx
            + " identities. Ignored "
            + ignoredCount
            + "; New: "
            + newCount);
  }
 public void terminate() {
   Logger.debug(this, "Terminating ...");
   mTicker.shutdown();
   Logger.debug(this, "Terminated.");
 }
      @Override
      public void startElement(
          String nameSpaceURI, String localName, String rawName, Attributes attrs)
          throws SAXException {

        if (idToFileMatches.isEmpty()) return;
        if (rawName == null) {
          rawName = localName;
        }
        String elt_name = rawName;

        if (elt_name.equals("files")) {
          String fileCount = attrs.getValue("", "totalFileCount");
          if (fileCount != null) totalFileCount = Integer.parseInt(fileCount);
          Logger.minor(this, "totalfilecount = " + totalFileCount);
        }

        if (elt_name.equals("file")) {
          try {
            String id = attrs.getValue("id");

            ArrayList<FileMatch> matches = idToFileMatches.get(id);

            if (matches != null) {

              for (FileMatch match : matches) {

                String key = attrs.getValue("key");
                int l = attrs.getLength();
                String title = null;
                int wordCount = -1;
                if (l >= 3) {
                  try {
                    title = attrs.getValue("title");
                  } catch (Exception e) {
                    Logger.error(this, "Index Format not compatible " + e.toString(), e);
                  }
                  try {
                    String wordCountString = attrs.getValue("wordCount");
                    if (wordCountString != null) {
                      wordCount = Integer.parseInt(attrs.getValue("wordCount"));
                    }
                  } catch (Exception e) {
                    // Logger.minor(this, "No wordcount found " + e.toString(), e);
                  }
                }

                for (FindRequest req : match.word.searches) {

                  Set<TermPageEntry> result = req.getUnfinishedResult();
                  float relevance = 0;

                  if (logDEBUG)
                    Logger.debug(
                        this,
                        "termcount "
                            + (match.termpositions == null ? 0 : match.termpositions.size())
                            + " filewordcount = "
                            + wordCount);
                  if (match.termpositions != null
                      && match.termpositions.size() > 0
                      && wordCount > 0) {
                    relevance = (float) (match.termpositions.size() / (float) wordCount);
                    if (totalFileCount > 0 && match.word.inWordFileCount > 0)
                      relevance *=
                          Math.log((float) totalFileCount / (float) match.word.inWordFileCount);
                    if (logDEBUG)
                      Logger.debug(
                          this, "Set relevance of " + title + " to " + relevance + " - " + key);
                  }

                  TermPageEntry pageEntry =
                      new TermPageEntry(
                          req.getSubject(),
                          relevance,
                          new FreenetURI(key),
                          title,
                          match.termpositions);
                  result.add(pageEntry);
                  // Logger.minor(this, "added "+inFileURI+ " to "+ match);
                }
              }
            }

          } catch (Exception e) {
            Logger.error(
                this, "File id and key could not be retrieved. May be due to format clash", e);
          }
        }
      }
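The relevance assigned above is a TF-IDF style score: the term frequency within the file (term positions divided by the file's word count) scaled by the logarithm of the total file count over the number of files containing the word. A compact sketch of just that formula; the class and parameter names are illustrative:

final class RelevanceScore {
  /**
   * TF-IDF style relevance as computed above:
   * (termCount / wordCount) * ln(totalFiles / filesWithWord).
   * Falls back to the raw term frequency when the collection-wide counts are unavailable.
   */
  static float relevance(int termCount, int wordCount, int totalFiles, int filesWithWord) {
    if (termCount <= 0 || wordCount <= 0) return 0f;
    float tf = (float) termCount / (float) wordCount;
    if (totalFiles > 0 && filesWithWord > 0) {
      tf *= Math.log((float) totalFiles / (float) filesWithWord);
    }
    return tf;
  }
}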
  /**
   * Imports an identity XML file into the given web of trust. This includes:
   * - The identity itself and its attributes
   * - The trust list of the identity, if it has published one in the XML.
   *
   * @param identityURI The URI from which the XML was fetched; its edition number decides whether
   *     the import is applied.
   * @param xmlInputStream The input stream containing the XML.
   */
  public void importIdentity(FreenetURI identityURI, InputStream xmlInputStream) {
    try { // Catch import problems so we can mark the edition as parsing failed
      // We first parse the XML without synchronization, then do the synchronized import into the
      // WebOfTrust
      final ParsedIdentityXML xmlData = parseIdentityXML(xmlInputStream);

      synchronized (mWoT) {
        synchronized (mWoT.getIdentityFetcher()) {
          final Identity identity = mWoT.getIdentityByURI(identityURI);

          Logger.normal(this, "Importing parsed XML for " + identity);

          long newEdition = identityURI.getEdition();
          if (identity.getEdition() > newEdition) {
            if (logDEBUG)
              Logger.debug(
                  this,
                  "Fetched an older edition: current == "
                      + identity.getEdition()
                      + "; fetched == "
                      + identityURI.getEdition());
            return;
          } else if (identity.getEdition() == newEdition) {
            if (identity.getCurrentEditionFetchState() == FetchState.Fetched) {
              if (logDEBUG)
                Logger.debug(
                    this,
                    "Fetched current edition which is marked as fetched already, not importing: "
                        + identityURI);
              return;
            } else if (identity.getCurrentEditionFetchState() == FetchState.ParsingFailed) {
              Logger.normal(
                  this,
                  "Re-fetched current-edition which was marked as parsing failed: " + identityURI);
            }
          }

          // We throw parse errors AFTER checking the edition number: If this XML was outdated
          // anyway, we don't have to throw.
          if (xmlData.parseError != null) throw xmlData.parseError;

          synchronized (Persistent.transactionLock(mDB)) {
            try { // Transaction rollback block
              identity.setEdition(
                  newEdition); // The identity constructor only takes the edition number as a hint,
                               // so we must store it explicitly.
              boolean didPublishTrustListPreviously = identity.doesPublishTrustList();
              identity.setPublishTrustList(xmlData.identityPublishesTrustList);

              try {
                identity.setNickname(xmlData.identityName);
              } catch (Exception e) {
                /* Nickname changes are not allowed, ignore them... */
                Logger.warning(this, "setNickname() failed.", e);
              }

              try {
                /* Failure of context importing should not make an identity disappear, therefore we catch exceptions. */
                identity.setContexts(xmlData.identityContexts);
              } catch (Exception e) {
                Logger.warning(this, "setContexts() failed.", e);
              }

              try {
                /* Failure of property importing should not make an identity disappear, therefore we catch exceptions. */
                identity.setProperties(xmlData.identityProperties);
              } catch (Exception e) {
                Logger.warning(this, "setProperties() failed", e);
              }

              mWoT
                  .beginTrustListImport(); // We delete the old list if !identityPublishesTrustList
                                           // and it did publish one earlier => we always call this.

              if (xmlData.identityPublishesTrustList) {
                // We import the trust list of an identity if its score is equal to 0, but we only
                // create new identities or import edition hints if the score is greater than 0.
                // Solving a captcha therefore only allows you to create one single identity.
                boolean positiveScore = false;
                boolean hasCapacity = false;

                // TODO: getBestScore/getBestCapacity should always yield a positive result because
                // we store a positive score object for an OwnIdentity
                // upon creation. The only case where it could not exist might be
                // restoreOwnIdentity() ... check that. If it is created there as well,
                // remove the additional check here.
                if (identity instanceof OwnIdentity) {
                  // Importing of OwnIdentities is always allowed
                  positiveScore = true;
                  hasCapacity = true;
                } else {
                  try {
                    positiveScore = mWoT.getBestScore(identity) > 0;
                    hasCapacity = mWoT.getBestCapacity(identity) > 0;
                  } catch (NotInTrustTreeException e) {
                  }
                }

                HashSet<String> identitiesWithUpdatedEditionHint = null;

                if (positiveScore) {
                  identitiesWithUpdatedEditionHint =
                      new HashSet<String>(xmlData.identityTrustList.size() * 2);
                }

                for (final ParsedIdentityXML.TrustListEntry trustListEntry :
                    xmlData.identityTrustList) {
                  final FreenetURI trusteeURI = trustListEntry.mTrusteeURI;
                  final byte trustValue = trustListEntry.mTrustValue;
                  final String trustComment = trustListEntry.mTrustComment;

                  Identity trustee = null;
                  try {
                    trustee = mWoT.getIdentityByURI(trusteeURI);
                    if (positiveScore) {
                      if (trustee.setNewEditionHint(trusteeURI.getEdition())) {
                        identitiesWithUpdatedEditionHint.add(trustee.getID());
                        trustee.storeWithoutCommit();
                      }
                    }
                  } catch (UnknownIdentityException e) {
                    if (hasCapacity) {
                      /* We only create trustees if the truster has capacity to rate them. */
                      try {
                        trustee = new Identity(mWoT, trusteeURI, null, false);
                        trustee.storeWithoutCommit();
                      } catch (MalformedURLException urlEx) {
                        // Logging the exception does NOT log the actual malformed URL so we do it
                        // manually.
                        Logger.warning(
                            this, "Received malformed identity URL: " + trusteeURI, urlEx);
                        throw urlEx;
                      }
                    }
                  }

                  if (trustee != null)
                    mWoT.setTrustWithoutCommit(identity, trustee, trustValue, trustComment);
                }

                for (Trust trust :
                    mWoT.getGivenTrustsOfDifferentEdition(identity, identityURI.getEdition())) {
                  mWoT.removeTrustWithoutCommit(trust);
                }

                IdentityFetcher identityFetcher = mWoT.getIdentityFetcher();
                if (positiveScore) {
                  for (String id : identitiesWithUpdatedEditionHint)
                    identityFetcher.storeUpdateEditionHintCommandWithoutCommit(id);

                  // We do not have to store fetch commands for new identities here,
                  // setTrustWithoutCommit does it.
                }
              } else if (!xmlData.identityPublishesTrustList
                  && didPublishTrustListPreviously
                  && !(identity instanceof OwnIdentity)) {
                // If it does not publish a trust list anymore, we delete all trust values it has
                // given.
                for (Trust trust : mWoT.getGivenTrusts(identity))
                  mWoT.removeTrustWithoutCommit(trust);
              }

              mWoT.finishTrustListImport();
              identity.onFetched(); // Marks the identity as parsed successfully
              identity.storeAndCommit();
            } catch (Exception e) {
              mWoT.abortTrustListImport(e, Logger.LogLevel.WARNING); // Does the rollback
              throw e;
            } // try
          } // synchronized(Persistent.transactionLock(db))

          Logger.normal(this, "Finished XML import for " + identity);
        } // synchronized(mWoT)
      } // synchronized(mWoT.getIdentityFetcher())
    } // try
    catch (Exception e) {
      synchronized (mWoT) {
        synchronized (mWoT.getIdentityFetcher()) {
          try {
            final Identity identity = mWoT.getIdentityByURI(identityURI);
            final long newEdition = identityURI.getEdition();
            if (identity.getEdition() <= newEdition) {
              Logger.normal(this, "Marking edition as parsing failed: " + identityURI);
              try {
                identity.setEdition(newEdition);
              } catch (InvalidParameterException e1) {
                // Would only happen if newEdition < current edition.
                // We have validated the opposite.
                throw new RuntimeException(e1);
              }
              identity.onParsingFailed();
              identity.storeAndCommit();
            } else {
              Logger.normal(
                  this,
                  "Not marking edition as parsing failed, we have already fetched a new one ("
                      + identity.getEdition()
                      + "):"
                      + identityURI);
            }
            Logger.normal(this, "Parsing identity XML failed gracefully for " + identityURI, e);
          } catch (UnknownIdentityException uie) {
            Logger.error(this, "Fetched an unknown identity: " + identityURI);
          }
        }
      }
    }
  }
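The edition checks at the start of importIdentity() reduce to a three-way decision made before any parse errors are considered: older editions are ignored, the current edition is re-imported only if it is not already marked as fetched, and newer editions are always imported. A condensed sketch of that decision; the enum and method names are illustrative stand-ins for the WoT types:

final class EditionDecision {
  enum FetchState { Fetched, ParsingFailed, NotFetched }

  /** Decides whether an XML fetched at fetchedEdition should be imported over the current state. */
  static boolean shouldImport(long currentEdition, FetchState currentState, long fetchedEdition) {
    if (fetchedEdition < currentEdition) return false; // older edition: ignore it
    if (fetchedEdition == currentEdition) {
      // Same edition: only re-import if the stored copy is not already marked as fetched
      // (e.g. a previous attempt failed to parse).
      return currentState != FetchState.Fetched;
    }
    return true; // newer edition: import
  }
}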
 private int addNonUrgentMessages(
     int size, int minSize, int maxSize, long now, ArrayList<MessageItem> messages) {
   assert (size >= 0);
   assert (minSize >= 0);
   assert (maxSize >= minSize);
   if (size < 0) size = -size; // FIXME remove extra paranoia
   if (itemsNonUrgent == null) return size;
   int added = 0;
   for (ListIterator<MessageItem> items = itemsNonUrgent.listIterator(); items.hasNext(); ) {
     MessageItem item = items.next();
     int thisSize = item.getLength();
     boolean oversize = false;
     if (size + 2 + thisSize > maxSize) {
       if (size == minSize) {
         // Won't fit regardless, send it on its own.
         oversize = true;
       } else {
         // Send what we have so far.
         if (logDEBUG && added != 0)
           Logger.debug(
               this,
               "Returning with "
                   + added
                   + " non-urgent messages (have more but they don't fit)");
         return -size;
       }
     }
     size += 2 + thisSize;
     items.remove();
     messages.add(item);
     if (itemsByID != null) {
       long id = item.getID();
       Items tracker = itemsByID.get(id);
       if (tracker != null) {
         tracker.timeLastSent = now;
         DoublyLinkedList<? super Items> parent = tracker.getParent();
         // Demote the corresponding tracker to maintain round-robin.
         if (tracker.items.isEmpty()) {
           if (emptyItemsWithID == null) emptyItemsWithID = new DoublyLinkedListImpl<Items>();
           if (parent == null) {
             Logger.error(this, "Tracker is in itemsByID but not in either list! (empty)");
           } else if (parent == emptyItemsWithID) {
             // Normal. Remove it so we can re-add it in the right place.
             emptyItemsWithID.remove(tracker);
           } else if (parent == nonEmptyItemsWithID) {
              Logger.error(this, "Tracker is in non-empty items list when it is empty");
             nonEmptyItemsWithID.remove(tracker);
           } else assert (false);
           addToEmptyBackward(tracker);
         } else {
           if (nonEmptyItemsWithID == null)
             nonEmptyItemsWithID = new DoublyLinkedListImpl<Items>();
           if (parent == null) {
             Logger.error(this, "Tracker is in itemsByID but not in either list! (non-empty)");
           } else if (parent == nonEmptyItemsWithID) {
             // Normal. Remove it so we can re-add it in the right place.
             nonEmptyItemsWithID.remove(tracker);
           } else if (parent == emptyItemsWithID) {
              Logger.error(this, "Tracker is in empty items list when it is non-empty");
             emptyItemsWithID.remove(tracker);
           } else assert (false);
           addToNonEmptyBackward(tracker);
         }
       }
     }
     added++;
     if (oversize) {
       if (logDEBUG) Logger.debug(this, "Returning with non-urgent oversize message");
       return size;
     }
   }
   if (logDEBUG && added != 0)
     Logger.debug(this, "Returning with " + added + " non-urgent messages (all gone)");
   return size;
 }
Example 22
  public boolean handleMessage(Message m) {
    PeerNode source = (PeerNode) m.getSource();
    if (source == null) {
      // Node has been disconnected and garbage collected already! Ouch.
      return true;
    }
    if (logMINOR) Logger.minor(this, "Dispatching " + m + " from " + source);
    if (callback != null) {
      try {
        callback.snoop(m, node);
      } catch (Throwable t) {
        Logger.error(this, "Callback threw " + t, t);
      }
    }
    MessageType spec = m.getSpec();
    if (spec == DMT.FNPPing) {
      // Send an FNPPong
      Message reply = DMT.createFNPPong(m.getInt(DMT.PING_SEQNO));
      try {
        source.sendAsync(reply, null, pingCounter); // nothing we can do if can't contact source
      } catch (NotConnectedException e) {
        if (logMINOR) Logger.minor(this, "Lost connection replying to " + m);
      }
      return true;
    } else if (spec == DMT.FNPStoreSecret) {
      return node.netid.handleStoreSecret(m);
    } else if (spec == DMT.FNPSecretPing) {
      return node.netid.handleSecretPing(m);
    } else if (spec == DMT.FNPDetectedIPAddress) {
      Peer p = (Peer) m.getObject(DMT.EXTERNAL_ADDRESS);
      source.setRemoteDetectedPeer(p);
      node.ipDetector.redetectAddress();
      return true;
    } else if (spec == DMT.FNPTime) {
      return handleTime(m, source);
    } else if (spec == DMT.FNPUptime) {
      return handleUptime(m, source);
    } else if (spec == DMT.FNPSentPackets) {
      source.handleSentPackets(m);
      return true;
    } else if (spec == DMT.FNPVoid) {
      return true;
    } else if (spec == DMT.FNPDisconnect) {
      handleDisconnect(m, source);
      return true;
    } else if (spec == DMT.nodeToNodeMessage) {
      node.receivedNodeToNodeMessage(m, source);
      return true;
    } else if (spec == DMT.UOMAnnounce && source.isRealConnection()) {
      return node.nodeUpdater.uom.handleAnnounce(m, source);
    } else if (spec == DMT.UOMRequestRevocation && source.isRealConnection()) {
      return node.nodeUpdater.uom.handleRequestRevocation(m, source);
    } else if (spec == DMT.UOMSendingRevocation && source.isRealConnection()) {
      return node.nodeUpdater.uom.handleSendingRevocation(m, source);
    } else if (spec == DMT.UOMRequestMain && source.isRealConnection()) {
      return node.nodeUpdater.uom.handleRequestJar(m, source, false);
    } else if (spec == DMT.UOMRequestExtra && source.isRealConnection()) {
      return node.nodeUpdater.uom.handleRequestJar(m, source, true);
    } else if (spec == DMT.UOMSendingMain && source.isRealConnection()) {
      return node.nodeUpdater.uom.handleSendingMain(m, source);
    } else if (spec == DMT.UOMSendingExtra && source.isRealConnection()) {
      return node.nodeUpdater.uom.handleSendingExt(m, source);
    } else if (spec == DMT.FNPOpennetAnnounceRequest) {
      return handleAnnounceRequest(m, source);
    } else if (spec == DMT.FNPRoutingStatus) {
      if (source instanceof DarknetPeerNode) {
        boolean value = m.getBoolean(DMT.ROUTING_ENABLED);
        if (logMINOR)
          Logger.minor(this, "The peer (" + source + ") asked us to set routing=" + value);
        ((DarknetPeerNode) source).setRoutingStatus(value, false);
      }
      // We claim it in any case
      return true;
    } else if (source.isRealConnection() && spec == DMT.FNPLocChangeNotificationNew) {
      double newLoc = m.getDouble(DMT.LOCATION);
      ShortBuffer buffer = ((ShortBuffer) m.getObject(DMT.PEER_LOCATIONS));
      double[] locs = Fields.bytesToDoubles(buffer.getData());

      /**
       * Do *NOT* remove the sanity check below!
       *
       * @see http://archives.freenetproject.org/message/20080718.144240.359e16d3.en.html
       */
      if ((OpennetManager.MAX_PEERS_FOR_SCALING < locs.length) && (source.isOpennet())) {
        if (locs.length > OpennetManager.PANIC_MAX_PEERS) {
          // This can't happen by accident
          Logger.error(
              this,
              "We received "
                  + locs.length
                  + " locations from "
                  + source.toString()
                  + "! That should *NOT* happen! Possible attack!");
          source.forceDisconnect(true);
          return true;
        } else {
          // A few extra can happen by accident. Just use the first 20.
          Logger.normal(
              this,
              "Too many locations from "
                  + source.toString()
                  + " : "
                  + locs.length
                  + " could be an accident, using the first "
                  + OpennetManager.MAX_PEERS_FOR_SCALING);
          double[] firstLocs = new double[OpennetManager.MAX_PEERS_FOR_SCALING];
          System.arraycopy(locs, 0, firstLocs, 0, OpennetManager.MAX_PEERS_FOR_SCALING);
          locs = firstLocs;
        }
      }
      // We are on darknet and we trust our peers OR we are on opennet
      // and the amount of locations sent to us seems reasonable
      source.updateLocation(newLoc, locs);

      return true;
    }

    if (!source.isRoutable()) {
      if (logDEBUG) Logger.debug(this, "Not routable");
      return false;
    }

    if (spec == DMT.FNPNetworkID) {
      source.handleFNPNetworkID(m);
      return true;
    } else if (spec == DMT.FNPSwapRequest) {
      return node.lm.handleSwapRequest(m, source);
    } else if (spec == DMT.FNPSwapReply) {
      return node.lm.handleSwapReply(m, source);
    } else if (spec == DMT.FNPSwapRejected) {
      return node.lm.handleSwapRejected(m, source);
    } else if (spec == DMT.FNPSwapCommit) {
      return node.lm.handleSwapCommit(m, source);
    } else if (spec == DMT.FNPSwapComplete) {
      return node.lm.handleSwapComplete(m, source);
    } else if (spec == DMT.FNPCHKDataRequest) {
      return handleDataRequest(m, source, false);
    } else if (spec == DMT.FNPSSKDataRequest) {
      return handleDataRequest(m, source, true);
    } else if (spec == DMT.FNPInsertRequest) {
      return handleInsertRequest(m, source, false);
    } else if (spec == DMT.FNPSSKInsertRequest) {
      return handleInsertRequest(m, source, true);
    } else if (spec == DMT.FNPSSKInsertRequestNew) {
      return handleInsertRequest(m, source, true);
    } else if (spec == DMT.FNPRHProbeRequest) {
      return handleProbeRequest(m, source);
    } else if (spec == DMT.FNPRoutedPing) {
      return handleRouted(m, source);
    } else if (spec == DMT.FNPRoutedPong) {
      return handleRoutedReply(m);
    } else if (spec == DMT.FNPRoutedRejected) {
      return handleRoutedRejected(m);
      // FIXME implement threaded probe requests of various kinds.
      // Old probe request code was a major pain, never really worked.
      // We should have threaded probe requests (for simple code),
      // and one for each routing strategy.
      //		} else if(spec == DMT.FNPProbeRequest) {
      //			return handleProbeRequest(m, source);
      //		} else if(spec == DMT.FNPProbeReply) {
      //			return handleProbeReply(m, source);
      //		} else if(spec == DMT.FNPProbeRejected) {
      //			return handleProbeRejected(m, source);
      //		} else if(spec == DMT.FNPProbeTrace) {
      //			return handleProbeTrace(m, source);
    } else if (spec == DMT.FNPOfferKey) {
      return handleOfferKey(m, source);
    } else if (spec == DMT.FNPGetOfferedKey) {
      return handleGetOfferedKey(m, source);
    }
    return false;
  }
Example 23
        @Override
        public void run() {
          if (logDEBUG) Logger.debug(this, "Running prefetch checker...");
          ArrayList<USK> toFetch = null;
          long now = System.currentTimeMillis();
          boolean empty = true;
          synchronized (USKManager.this) {
            for (Map.Entry<USK, Long> entry : temporaryBackgroundFetchersPrefetch.entrySet()) {
              empty = false;
              if (entry.getValue() > 0 && now - entry.getValue() >= PREFETCH_DELAY) {
                if (toFetch == null) toFetch = new ArrayList<USK>();
                USK clear = entry.getKey();
                long l = lookupLatestSlot(clear);
                if (lookupKnownGood(clear) < l) toFetch.add(clear.copy(l));
                entry.setValue(-1L); // Reset counter until new data comes in
              } else {
                if (logMINOR)
                  Logger.minor(
                      this, "Not prefetching: " + entry.getKey() + " : " + entry.getValue());
              }
            }
          }
          if (toFetch == null) return;
          for (final USK key : toFetch) {
            final long l = key.suggestedEdition;
            if (logMINOR)
              Logger.minor(
                  this, "Prefetching content for background fetch for edition " + l + " on " + key);
            FetchContext fctx =
                new FetchContext(realFetchContext, FetchContext.IDENTICAL_MASK, false, null);
            final ClientGetter get =
                new ClientGetter(
                    new ClientGetCallback() {

                      @Override
                      public void onFailure(
                          FetchException e, ClientGetter state, ObjectContainer container) {
                        if (e.newURI != null) {
                          if (logMINOR)
                            Logger.minor(this, "Prefetch succeeded with redirect for " + key);
                          updateKnownGood(key, l, context);
                          return;
                        } else {
                          if (logMINOR)
                            Logger.minor(this, "Prefetch failed later: " + e + " for " + key, e);
                          // Ignore
                        }
                      }

                      @Override
                      public void onSuccess(
                          FetchResult result, ClientGetter state, ObjectContainer container) {
                        if (logMINOR) Logger.minor(this, "Prefetch succeeded for " + key);
                        result.asBucket().free();
                        updateKnownGood(key, l, context);
                      }

                      @Override
                      public void onMajorProgress(ObjectContainer container) {
                        // Ignore
                      }
                    },
                    key.getURI().sskForUSK() /* FIXME add getSSKURI() */,
                    fctx,
                    RequestStarter.UPDATE_PRIORITY_CLASS,
                    rcBulk,
                    new NullBucket(),
                    null,
                    null);
            try {
              get.start(null, context);
            } catch (FetchException e) {
              if (logMINOR) Logger.minor(this, "Prefetch failed: " + e, e);
              // Ignore
            }
          }
          if (!empty) schedulePrefetchChecker();
        }
Example 24
 @Override
 void log(String s) {
   if (Logger.shouldLog(Logger.DEBUG, this)) Logger.debug(this, s);
 }
 /** Called by RequestStarter to find a request to run. */
 @Override
 public ChosenBlock grabRequest() {
   boolean needsRefill = true;
   while (true) {
     PersistentChosenRequest reqGroup = null;
     synchronized (starterQueue) {
       short bestPriority = Short.MAX_VALUE;
       for (PersistentChosenRequest req : starterQueue) {
         if (req.prio == RequestStarter.MINIMUM_PRIORITY_CLASS) {
           if (logDEBUG)
             Logger.debug(
                 this, "Ignoring paused persistent request: " + req + " prio: " + req.prio);
           continue; // Ignore paused requests
         }
         if (req.prio < bestPriority) {
           bestPriority = req.prio;
           reqGroup = req;
         }
       }
     }
     if (reqGroup != null) {
       // Try to find a better non-persistent request
       if (logMINOR)
         Logger.minor(this, "Persistent request: " + reqGroup + " prio " + reqGroup.prio);
       ChosenBlock better = getBetterNonPersistentRequest(reqGroup.prio);
       if (better != null) {
         if (better.getPriority() > reqGroup.prio) {
           Logger.error(
               this, "Selected " + better + " as better than " + reqGroup + " but isn't better!");
         }
         if (logMINOR) Logger.minor(this, "Returning better: " + better);
         return better;
       }
     }
     if (reqGroup == null) {
       queueFillRequestStarterQueue();
       return getBetterNonPersistentRequest(Short.MAX_VALUE);
     }
     ChosenBlock block;
     synchronized (starterQueue) {
       block = reqGroup.grabNotStarted(clientContext.fastWeakRandom, this);
       if (block == null) {
         if (logMINOR) Logger.minor(this, "No block found on " + reqGroup);
         int finalLength = 0;
         for (int i = 0; i < starterQueue.size(); i++) {
           if (starterQueue.get(i) == reqGroup) {
             starterQueue.remove(i);
             if (logMINOR)
                Logger.minor(this, "Removed " + reqGroup + " from starter queue because it is empty");
             i--;
           } else {
             finalLength += starterQueue.get(i).sizeNotStarted();
           }
         }
         needsRefill = finalLength < MAX_STARTER_QUEUE_SIZE;
         continue;
       } else {
         // Prevent this request being selected, even though we may remove the PCR from the starter
         // queue
         // in the very near future. When the PCR finishes, the requests will be un-blocked.
         if (!runningPersistentRequests.contains(reqGroup.request))
           runningPersistentRequests.add(reqGroup.request);
       }
     }
     if (needsRefill) queueFillRequestStarterQueue();
     if (logMINOR) Logger.minor(this, "grabRequest() returning " + block + " for " + reqGroup);
     return block;
   }
 }
 /**
  * Add messages to <code>messages</code> until there are no more messages to add or <code>size
  * </code> would exceed <code>maxSize</code>. If <code>size == minSize</code>, a message in the
  * queue will be added even if it makes <code>size</code> exceed <code>maxSize</code>.
  *
  * @param size the current size of <code>messages</code>
  * @param minSize the size when <code>messages</code> is empty
  * @param maxSize the maximum size of <code>messages</code>
  * @param now the current time
  * @param messages the list that messages will be added to
  * @return the size of <code>messages</code>, multiplied by -1 if there were messages that
  *     didn't fit
  */
 private int addUrgentMessages(
     int size, int minSize, int maxSize, long now, ArrayList<MessageItem> messages) {
   assert (size >= 0);
   assert (minSize >= 0);
   assert (maxSize >= minSize);
   if (size < 0) size = -size; // FIXME remove extra paranoia
   int added = 0;
   while (true) {
     boolean addedNone = true;
     int lists = 0;
     if (nonEmptyItemsWithID == null) return size;
     lists += nonEmptyItemsWithID.size();
     Items list = nonEmptyItemsWithID.head();
     for (int i = 0; i < lists && list != null; i++) {
       if (list.items.isEmpty()) {
         // Should not happen, but check for it anyway since it keeps happening. :(
         Logger.error(this, "List is in nonEmptyItemsWithID yet it is empty?!: " + list);
         nonEmptyItemsWithID.remove(list);
         addToEmptyBackward(list);
         if (nonEmptyItemsWithID.isEmpty()) return size;
         list = nonEmptyItemsWithID.head();
         continue;
       }
       MessageItem item = list.items.getFirst();
       int thisSize = item.getLength();
       boolean oversize = false;
       if (size + 2 + thisSize > maxSize) {
         if (size == minSize) {
           // Won't fit regardless, send it on its own.
           oversize = true;
         } else {
           // Send what we have so far.
           if (logDEBUG && added != 0)
             Logger.debug(
                 this,
                 "Added "
                     + added
                     + " urgent messages, could add more but out of space at "
                     + size);
           return -size;
         }
       }
       size += 2 + thisSize;
       list.items.removeFirst();
       // Move to end of list.
       Items prev = list.getPrev();
       nonEmptyItemsWithID.remove(list);
       list.timeLastSent = now;
       if (!list.items.isEmpty()) {
         addToNonEmptyBackward(list);
       } else {
         addToEmptyBackward(list);
       }
       if (prev == null) list = nonEmptyItemsWithID.head();
       else list = prev.getNext();
       messages.add(item);
       added++;
       addedNone = false;
       if (oversize) {
         if (logDEBUG) Logger.debug(this, "Returning with oversize urgent message");
         return size;
       }
     }
     if (addedNone) {
       if (logDEBUG && added != 0)
         Logger.debug(
             this,
             "Added "
                 + added
                 + " urgent messages, size now "
                 + size
                 + " no more queued at this priority");
       return size;
     }
   }
 }
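Both addUrgentMessages() and addNonUrgentMessages() report "there was more but it did not fit" by negating the returned size, which addPriorityMessages() then folds into its incomplete flag. A self-contained sketch of that convention; addUpTo() and its arguments are illustrative, not the real queue code:

import java.util.ArrayList;
import java.util.List;

final class SignedSizeConvention {
  /** Mimics addUrgentMessages(): returns -size when items remain that did not fit. */
  static int addUpTo(int size, int maxSize, int[] itemSizes, List<Integer> out) {
    for (int itemSize : itemSizes) {
      if (size + 2 + itemSize > maxSize) return -size; // more left; signal it with a negative size
      size += 2 + itemSize; // 2 bytes of per-item framing, as in the queue code above
      out.add(itemSize);
    }
    return size; // everything fitted
  }

  static void caller() {
    List<Integer> out = new ArrayList<Integer>();
    int size = addUpTo(0, 1024, new int[] {100, 400, 800}, out);
    boolean incomplete = size < 0;
    if (incomplete) size = -size; // restore the real size, as addPriorityMessages() does
  }
}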
Example 27
 public void objectOnDelete(ObjectContainer container) {
   if (logDEBUG) Logger.debug(this, "Deleting URI", new Exception("debug"));
 }
 /**
  * Compare a recently registered SendableRequest to what is already on the starter queue. If it is
  * better, kick out stuff from the queue until we are just over the limit.
  *
  * @param req the recently registered request
  * @param container the database container
  * @param mightBeActive requests the caller may have just activated; these do not trigger the
  *     "already activated" warning below
  */
 public void maybeAddToStarterQueue(
     SendableRequest req, ObjectContainer container, SendableRequest[] mightBeActive) {
   short prio = req.getPriorityClass(container);
   if (logMINOR) Logger.minor(this, "Maybe adding to starter queue: prio=" + prio);
   synchronized (starterQueue) {
     boolean betterThanSome = false;
     int size = 0;
     PersistentChosenRequest prev = null;
     for (PersistentChosenRequest old : starterQueue) {
       if (old.request == req) {
         // Wait for a reselect. Otherwise we can starve other
          // requests. Note that this happens with persistent SBIs:
         // they are added at the new retry count before being
         // removed at the old retry count.
         if (logMINOR) Logger.minor(this, "Already on starter queue: " + old + " for " + req);
         return;
       }
       if (prev == old)
         Logger.error(this, "ON STARTER QUEUE TWICE: " + prev + " for " + prev.request);
       if (prev != null && prev.request == old.request)
         Logger.error(
             this,
             "REQUEST ON STARTER QUEUE TWICE: "
                 + prev
                 + " for "
                 + prev.request
                 + " vs "
                 + old
                 + " for "
                 + old.request);
       boolean ignoreActive = false;
       if (mightBeActive != null) {
         for (SendableRequest tmp : mightBeActive) if (tmp == old.request) ignoreActive = true;
       }
       if (!ignoreActive) {
         if (container.ext().isActive(old.request))
           Logger.warning(
               this,
               "REQUEST ALREADY ACTIVATED: "
                   + old.request
                   + " for "
                   + old
                   + " while checking request queue in maybeAddToStarterQueue for "
                   + req);
         else if (logDEBUG)
           Logger.debug(
               this,
               "Not already activated for "
                   + old
                    + " while checking request queue in maybeAddToStarterQueue for "
                   + req);
       } else if (logMINOR)
         Logger.minor(
             this,
             "Ignoring active because just registered: "
                 + old.request
                 + " in maybeAddToStarterQueue for "
                 + req);
       size += old.sizeNotStarted();
       if (old.prio > prio) betterThanSome = true;
       prev = old;
     }
     if (size >= MAX_STARTER_QUEUE_SIZE && !betterThanSome) {
       if (logMINOR)
         Logger.minor(
             this,
             "Not adding to starter queue: over limit and req not better than any queued requests");
       return;
     }
   }
   addToStarterQueue(req, container);
   trimStarterQueue(container);
 }
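
Stripped of the duplicate and activation bookkeeping, the admission rule above reduces to a size check plus a priority comparison. The sketch below is hypothetical (wouldAdmit() is not in the original class); it assumes the same MAX_STARTER_QUEUE_SIZE constant and the Freenet convention that a lower priority class number means a more important request.

  // Illustrative only: the core admission decision from maybeAddToStarterQueue().
  // queuedSizeNotStarted corresponds to the accumulated old.sizeNotStarted() total above;
  // betterThanSomeQueued is true when some queued request has a numerically higher
  // (i.e. worse) priority class than the new request.
  private boolean wouldAdmit(int queuedSizeNotStarted, boolean betterThanSomeQueued) {
    if (queuedSizeNotStarted < MAX_STARTER_QUEUE_SIZE) return true; // under the limit: always admit
    return betterThanSomeQueued; // over the limit: only admit if it beats something already queued
  }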
Example no. 29
  /**
   * Send loop. Strategy:
   *
   * <ul>
   *   <li>Each peer can tell us when its data needs to be sent by. This is usually 100ms after it
   *       is posted. It could vary by message type. Acknowledgements also become valid 100ms after
   *       being queued.
   *   <li>If any peer's data is overdue, send the data from the most overdue peer.
   *   <li>If there are peers with more than a packet's worth of data queued, send the data from
   *       the peer with the oldest data.
   *   <li>If there are peers with overdue acks, send to the peer whose acks are oldest.
   * </ul>
   *
   * <p>It does not attempt to ensure fairness; it attempts to minimise latency. Fairness is best
   * dealt with at a higher level, e.g. requests, although some transfers are not part of requests,
   * e.g. bulk f2f transfers, so we may need to reconsider this eventually... (A condensed sketch
   * of the selection order follows this method.)
   */
  private void realRun() {
    long now = System.currentTimeMillis();
    PeerManager pm;
    PeerNode[] nodes;

    pm = node.peers;
    nodes = pm.myPeers();

    long nextActionTime = Long.MAX_VALUE;
    long oldTempNow = now;

    boolean canSendThrottled = false;

    int MAX_PACKET_SIZE = node.darknetCrypto.socket.getMaxPacketSize();
    long count = node.outputThrottle.getCount();
    if (count > MAX_PACKET_SIZE) canSendThrottled = true;
    else {
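      // The bucket is (MAX_PACKET_SIZE - count) tokens short of a full packet, and each token
      // takes getNanosPerTick() nanoseconds to accumulate; the conversion below rounds that
      // nanosecond estimate up to whole milliseconds, so we never wake before a full packet's
      // worth of bandwidth can actually be available.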
      long canSendAt = node.outputThrottle.getNanosPerTick() * (MAX_PACKET_SIZE - count);
      canSendAt = MILLISECONDS.convert(canSendAt + MILLISECONDS.toNanos(1) - 1, NANOSECONDS);
      if (logMINOR) Logger.minor(this, "Can send throttled packets in " + canSendAt + "ms");
      nextActionTime = Math.min(nextActionTime, now + canSendAt);
    }

    /**
     * The earliest time at which a peer needs to send a packet, which is before now. Throttled if
     * canSendThrottled, otherwise not throttled. Note: we only use it to sort the full-packet
     * peers by priority; don't rely on it when setting nextActionTime!
     */
    long lowestUrgentSendTime = Long.MAX_VALUE;
    /** The peer(s) which lowestUrgentSendTime is referring to */
    ArrayList<PeerNode> urgentSendPeers = null;
    /**
     * The earliest time at which a peer needs to send a packet, which is after now, where there is
     * a full packet's worth of data to send. Throttled if canSendThrottled, otherwise not
     * throttled.
     */
    long lowestFullPacketSendTime = Long.MAX_VALUE;
    /** The peer(s) which lowestFullPacketSendTime is referring to */
    ArrayList<PeerNode> urgentFullPacketPeers = null;
    /** The earliest time at which a peer needs to send an ack, before now. */
    long lowestAckTime = Long.MAX_VALUE;
    /** The peer(s) which lowestAckTime is referring to */
    ArrayList<PeerNode> ackPeers = null;
    /** The earliest time at which a peer needs to handshake. */
    long lowestHandshakeTime = Long.MAX_VALUE;
    /** The peer(s) which lowestHandshakeTime is referring to */
    ArrayList<PeerNode> handshakePeers = null;

    for (PeerNode pn : nodes) {
      now = System.currentTimeMillis();

      // Basic peer maintenance.

      // For purposes of detecting not having received anything, which indicates a
      // serious connectivity problem, we want to look for *any* packets received,
      // including auth packets.
      lastReceivedPacketFromAnyNode =
          Math.max(pn.lastReceivedPacketTime(), lastReceivedPacketFromAnyNode);
      pn.maybeOnConnect();
      if (pn.shouldDisconnectAndRemoveNow() && !pn.isDisconnecting()) {
        // Might as well do it properly.
        node.peers.disconnectAndRemove(pn, true, true, false);
      }

      if (pn.isConnected()) {

        boolean shouldThrottle = pn.shouldThrottle();

        pn.checkForLostPackets();

        // Is the node dead?
        // It might be disconnected in terms of FNP but trying to reconnect via JFKs, so we need to
        // use the time when we last got a *data* packet.
        if (now - pn.lastReceivedDataPacketTime() > pn.maxTimeBetweenReceivedPackets()) {
          Logger.normal(this, "Disconnecting from " + pn + " - haven't received packets recently");
          // Hopefully this is a transient network glitch, but stuff will have already started to
          // time out, so let's dump the pending messages.
          pn.disconnected(true, false);
          continue;
        } else if (now - pn.lastReceivedAckTime() > pn.maxTimeBetweenReceivedAcks()
            && !pn.isDisconnecting()) {
          // FIXME better to disconnect immediately??? Or check canSend()???
          Logger.normal(this, "Disconnecting from " + pn + " - haven't received acks recently");
          // Do it properly.
          // There appears to be connectivity from them to us but not from us to them.
          // So it is helpful for them to know that we are disconnecting.
          node.peers.disconnect(pn, true, true, false, true, false, SECONDS.toMillis(5));
          continue;
        } else if (pn.isRoutable() && pn.noLongerRoutable()) {
          /*
          NOTE: isRoutable() and noLongerRoutable() are normally mutually exclusive; this branch
          only executes because the scheduled runnable in start() calls updateVersionRoutablity()
          on all our peers. We don't disconnect the peer, but we mark it as being incompatible.
          */
          pn.invalidate(now);
          Logger.normal(
              this,
              "shouldDisconnectNow has returned true : marking the peer as incompatible: " + pn);
          continue;
        }

        // The peer is connected.

        if (canSendThrottled || !shouldThrottle) {
          // We can send to this peer.
          long sendTime = pn.getNextUrgentTime(now);
          if (sendTime != Long.MAX_VALUE) {
            if (sendTime <= now) {
              // Message is urgent.
              if (sendTime < lowestUrgentSendTime) {
                lowestUrgentSendTime = sendTime;
                if (urgentSendPeers != null) urgentSendPeers.clear();
                else urgentSendPeers = new ArrayList<PeerNode>();
              }
              if (sendTime <= lowestUrgentSendTime) urgentSendPeers.add(pn);
            } else if (pn.fullPacketQueued()) {
              if (sendTime < lowestFullPacketSendTime) {
                lowestFullPacketSendTime = sendTime;
                if (urgentFullPacketPeers != null) urgentFullPacketPeers.clear();
                else urgentFullPacketPeers = new ArrayList<PeerNode>();
              }
              if (sendTime <= lowestFullPacketSendTime) urgentFullPacketPeers.add(pn);
            }
          }
        } else if (shouldThrottle && !canSendThrottled) {
          long ackTime = pn.timeSendAcks();
          if (ackTime != Long.MAX_VALUE) {
            if (ackTime <= now) {
              if (ackTime < lowestAckTime) {
                lowestAckTime = ackTime;
                if (ackPeers != null) ackPeers.clear();
                else ackPeers = new ArrayList<PeerNode>();
              }
              if (ackTime <= lowestAckTime) ackPeers.add(pn);
            }
          }
        }

        if (canSendThrottled || !shouldThrottle) {
          long urgentTime = pn.getNextUrgentTime(now);
          // Shouldn't spam the logs unless there is a deadlock
          if (urgentTime < Long.MAX_VALUE && logMINOR)
            Logger.minor(
                this,
                "Next urgent time: " + urgentTime + "(in " + (urgentTime - now) + ") for " + pn);
          nextActionTime = Math.min(nextActionTime, urgentTime);
        } else {
          nextActionTime = Math.min(nextActionTime, pn.timeCheckForLostPackets());
        }
      } else {
        // Not connected

        if (pn.noContactDetails()) pn.startARKFetcher();
      }

      long handshakeTime = pn.timeSendHandshake(now);
      if (handshakeTime != Long.MAX_VALUE) {
        if (handshakeTime < lowestHandshakeTime) {
          lowestHandshakeTime = handshakeTime;
          if (handshakePeers != null) handshakePeers.clear();
          else handshakePeers = new ArrayList<PeerNode>();
        }
        if (handshakeTime <= lowestHandshakeTime) handshakePeers.add(pn);
      }

      long tempNow = System.currentTimeMillis();
      if ((tempNow - oldTempNow) > SECONDS.toMillis(5))
        Logger.error(
            this,
            "tempNow is more than 5 seconds past oldTempNow ("
                + (tempNow - oldTempNow)
                + ") in PacketSender working with "
                + pn.userToString());
      oldTempNow = tempNow;
    }

    // We may send a packet, send an ack-only packet, or send a handshake.

    PeerNode toSendPacket = null;
    PeerNode toSendAckOnly = null;
    PeerNode toSendHandshake = null;

    long t = Long.MAX_VALUE;

    if (lowestUrgentSendTime <= now) {
      // We need to send a full packet.
      toSendPacket = urgentSendPeers.get(localRandom.nextInt(urgentSendPeers.size()));
      t = lowestUrgentSendTime;
    } else if (lowestFullPacketSendTime < Long.MAX_VALUE) {
      toSendPacket = urgentFullPacketPeers.get(localRandom.nextInt(urgentFullPacketPeers.size()));
      t = lowestFullPacketSendTime;
    } else if (lowestAckTime <= now) {
      // We need to send an ack
      toSendAckOnly = ackPeers.get(localRandom.nextInt(ackPeers.size()));
      t = lowestAckTime;
    }

    if (lowestHandshakeTime <= now && t > lowestHandshakeTime) {
      toSendHandshake = handshakePeers.get(localRandom.nextInt(handshakePeers.size()));
      toSendPacket = null;
      toSendAckOnly = null;
    }

    if (toSendPacket != null) {
      try {
        if (toSendPacket.maybeSendPacket(now, false)) {
          // Round-robin over the loop to update nextActionTime appropriately
          nextActionTime = now;
        }
      } catch (BlockedTooLongException e) {
        Logger.error(
            this,
            "Waited too long: "
                + TimeUtil.formatTime(e.delta)
                + " to allocate a packet number to send to "
                + toSendPacket
                + " : "
                + ("(new packet format)")
                + " (version "
                + toSendPacket.getVersionNumber()
                + ") - DISCONNECTING!");
        toSendPacket.forceDisconnect();
      }

      if (canSendThrottled || !toSendPacket.shouldThrottle()) {
        long urgentTime = toSendPacket.getNextUrgentTime(now);
        // Shouldn't spam the logs unless there is a deadlock
        if (urgentTime < Long.MAX_VALUE && logMINOR)
          Logger.minor(
              this,
              "Next urgent time: "
                  + urgentTime
                  + "(in "
                  + (urgentTime - now)
                  + ") for "
                  + toSendPacket);
        nextActionTime = Math.min(nextActionTime, urgentTime);
      } else {
        nextActionTime = Math.min(nextActionTime, toSendPacket.timeCheckForLostPackets());
      }

    } else if (toSendAckOnly != null) {
      try {
        if (toSendAckOnly.maybeSendPacket(now, true)) {
          count = node.outputThrottle.getCount();
          if (count > MAX_PACKET_SIZE) canSendThrottled = true;
          else {
            canSendThrottled = false;
            long canSendAt = node.outputThrottle.getNanosPerTick() * (MAX_PACKET_SIZE - count);
            canSendAt = MILLISECONDS.convert(canSendAt + MILLISECONDS.toNanos(1) - 1, NANOSECONDS);
            if (logMINOR) Logger.minor(this, "Can send throttled packets in " + canSendAt + "ms");
            nextActionTime = Math.min(nextActionTime, now + canSendAt);
          }
        }
      } catch (BlockedTooLongException e) {
        Logger.error(
            this,
            "Waited too long: "
                + TimeUtil.formatTime(e.delta)
                + " to allocate a packet number to send to "
                + toSendAckOnly
                + " : "
                + ("(new packet format)")
                + " (version "
                + toSendAckOnly.getVersionNumber()
                + ") - DISCONNECTING!");
        toSendAckOnly.forceDisconnect();
      }

      if (canSendThrottled || !toSendAckOnly.shouldThrottle()) {
        long urgentTime = toSendAckOnly.getNextUrgentTime(now);
        // Shouldn't spam the logs unless there is a deadlock
        if (urgentTime < Long.MAX_VALUE && logMINOR)
          Logger.minor(
              this,
              "Next urgent time: "
                  + urgentTime
                  + "(in "
                  + (urgentTime - now)
                  + ") for "
                  + toSendAckOnly);
        nextActionTime = Math.min(nextActionTime, urgentTime);
      } else {
        nextActionTime = Math.min(nextActionTime, toSendAckOnly.timeCheckForLostPackets());
      }
    }

    /* How nextActionTime is estimated:
     * FullPackets:
     *  - A full packet available, bandwidth available  -->> now
     *  - A full packet available for non-throttled peer -->> now
     *  - A full packet available, no bandwidth -->> wait till bandwidth available
     *  - No packet -->> don't care, will wake up anyway when one arrives, goto Nothing
     * UrgentMessages: only applies when there is enough bandwidth to send a full packet; includes any urgent acks
     *  - There's an urgent message, deadline(urgentMessage) > now -->> deadline(urgentMessage)
     *  - There's an urgent message, deadline(urgentMessage) <= now -->> now
     *  - There's an urgent message, but there's not enough bandwidth for a full packet -->> wait till bandwidth available
     *  - There's no urgent message -->> don't care, goto Nothing
     * Nothing:
     *  -->> timeCheckForLostPackets
     */
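    /* Illustrative numbers only: if a throttled peer has a full packet queued but the output
     * bucket is 500 bytes short at 10,000ns per byte, the "wait till bandwidth available" case
     * above works out to nextActionTime = now + 5ms, matching the canSendAt computation near the
     * top of this method. */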

    if (toSendHandshake != null) {
      // Send handshake if necessary
      long beforeHandshakeTime = System.currentTimeMillis();
      toSendHandshake.getOutgoingMangler().sendHandshake(toSendHandshake, false);
      long afterHandshakeTime = System.currentTimeMillis();
      if ((afterHandshakeTime - beforeHandshakeTime) > SECONDS.toMillis(2))
        Logger.error(
            this,
            "afterHandshakeTime is more than 2 seconds past beforeHandshakeTime ("
                + (afterHandshakeTime - beforeHandshakeTime)
                + ") in PacketSender working with "
                + toSendHandshake.userToString());
    }

    // All of these take into account whether the data can be sent already.
    // So we can include them in nextActionTime.
    nextActionTime = Math.min(nextActionTime, lowestUrgentSendTime);
    nextActionTime = Math.min(nextActionTime, lowestFullPacketSendTime);
    nextActionTime = Math.min(nextActionTime, lowestAckTime);
    nextActionTime = Math.min(nextActionTime, lowestHandshakeTime);

    // FIXME: If we send something we will have to go around the loop again.
    // OPTIMISATION: We could track the second best, and check how many are in the array.

    /* Attempt to connect to old-opennet-peers.
     * Constantly send handshake packets, in order to get through a NAT.
     * Most JFK(1)s are less than 300 bytes. 25*300/15 = avg 500B/sec bandwidth cost.
     * Well worth it to allow us to reconnect more quickly. */

    OpennetManager om = node.getOpennet();
    if (om != null && node.getUptime() > SECONDS.toMillis(30)) {
      PeerNode[] peers = om.getOldPeers();

      for (PeerNode pn : peers) {
        long lastConnected = pn.timeLastConnected(now);
        if (lastConnected <= 0)
          Logger.error(this, "Last connected is zero or negative for old-opennet-peer " + pn);
        // Will be removed by next line.
        if (now - lastConnected > OpennetManager.MAX_TIME_ON_OLD_OPENNET_PEERS) {
          om.purgeOldOpennetPeer(pn);
          if (logMINOR)
            Logger.minor(
                this,
                "Removing old opennet peer (too old): "
                    + pn
                    + " age is "
                    + TimeUtil.formatTime(now - lastConnected));
          continue;
        }
        if (pn.isConnected()) continue; // Race condition??
        if (pn.noContactDetails()) {
          pn.startARKFetcher();
          continue;
        }
        if (pn.shouldSendHandshake()) {
          // Send handshake if necessary
          long beforeHandshakeTime = System.currentTimeMillis();
          pn.getOutgoingMangler().sendHandshake(pn, true);
          long afterHandshakeTime = System.currentTimeMillis();
          if ((afterHandshakeTime - beforeHandshakeTime) > SECONDS.toMillis(2))
            Logger.error(
                this,
                "afterHandshakeTime is more than 2 seconds past beforeHandshakeTime ("
                    + (afterHandshakeTime - beforeHandshakeTime)
                    + ") in PacketSender working with "
                    + pn.userToString());
        }
      }
    }

    long oldNow = now;

    // Send may have taken some time
    now = System.currentTimeMillis();

    if ((now - oldNow) > SECONDS.toMillis(10))
      Logger.error(
          this, "now is more than 10 seconds past oldNow (" + (now - oldNow) + ") in PacketSender");

    long sleepTime = nextActionTime - now;

    // Cap the sleep time at MAX_COALESCING_DELAY ms - the maximum coalescing delay.
    sleepTime = Math.min(sleepTime, MAX_COALESCING_DELAY);

    if (now - node.startupTime > MINUTES.toMillis(5))
      if (now - lastReceivedPacketFromAnyNode > Node.ALARM_TIME) {
        Logger.error(
            this,
            "Have not received any packets from any node in last "
                + SECONDS.convert(Node.ALARM_TIME, MILLISECONDS)
                + " seconds");
        lastReportedNoPackets = now;
      }

    if (sleepTime > 0) {
      // Update logging only when we have time to do so
      try {
        if (logMINOR) Logger.minor(this, "Sleeping for " + sleepTime);
        synchronized (this) {
          wait(sleepTime);
        }
      } catch (InterruptedException e) {
        // Ignore, just wake up. Probably we got interrupt()ed
        // because a new packet came in.
      }
    } else {
      if (logDEBUG)
        Logger.debug(this, "Next urgent time is " + (now - nextActionTime) + "ms in the past");
    }
  }
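
To make the selection order in the strategy comment easier to follow, here is a condensed, hypothetical sketch of just the "what do we send next" decision from the middle of realRun(). The chooseNext() and pickOne() names are made up; the real method interleaves this with throttling, disconnection handling and nextActionTime bookkeeping, and distinguishes full packets, ack-only packets and handshakes when it acts on the choice.

   // Illustrative only: the core peer-selection order from realRun(), stripped down.
   private PeerNode chooseNext(long now,
       long lowestUrgentSendTime, ArrayList<PeerNode> urgentSendPeers,
       long lowestFullPacketSendTime, ArrayList<PeerNode> urgentFullPacketPeers,
       long lowestAckTime, ArrayList<PeerNode> ackPeers,
       long lowestHandshakeTime, ArrayList<PeerNode> handshakePeers) {
     PeerNode chosen = null;
     long t = Long.MAX_VALUE;
     if (lowestUrgentSendTime <= now) {
       // 1. Overdue data wins: pick among the most overdue peers.
       chosen = pickOne(urgentSendPeers);
       t = lowestUrgentSendTime;
     } else if (lowestFullPacketSendTime < Long.MAX_VALUE) {
       // 2. Otherwise a peer with a full packet's worth of queued data.
       chosen = pickOne(urgentFullPacketPeers);
       t = lowestFullPacketSendTime;
     } else if (lowestAckTime <= now) {
       // 3. Otherwise a peer with overdue acks (ack-only packet).
       chosen = pickOne(ackPeers);
       t = lowestAckTime;
     }
     // 4. A handshake that is due sooner than whatever was chosen above pre-empts it.
     if (lowestHandshakeTime <= now && t > lowestHandshakeTime) chosen = pickOne(handshakePeers);
     return chosen;
   }

   // Same random tie-break as realRun(); assumes the surrounding class's localRandom field.
   private PeerNode pickOne(ArrayList<PeerNode> peers) {
     return peers.get(localRandom.nextInt(peers.size()));
   }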
Example no. 30
 public void objectOnDelete(ObjectContainer container) {
   if (Logger.shouldLog(Logger.DEBUG, this))
     Logger.debug(this, "Deleting URI", new Exception("debug"));
 }