@Override
 public void run() {
   try {
     freenet.support.Logger.OSThread.logPID(this);
    // FIXME: the key is not known for inserts here.
     if (key != null) stats.reportOutgoingLocalRequestLocation(key.toNormalizedDouble());
     if (!req.send(core, sched)) {
        // Only a cancelled transient request is expected to fail here; anything
        // else is an error.
        if ((!req.isPersistent()) && req.isCancelled())
          Logger.normal(
              this, "run() not able to send a request on " + req + " - request was cancelled");
        else
          Logger.error(this, "run() not able to send a request on " + req);
     }
     if (logMINOR) Logger.minor(this, "Finished " + req);
   } finally {
     if (req.sendIsBlocking()) {
        // Release the claim taken in startRequest().
        if (key != null) sched.removeFetchingKey(key);
        else if ((!req.isPersistent())
            && ((TransientChosenBlock) req).request instanceof SendableInsert)
          sched.removeTransientInsertFetching(
              (SendableInsert) (((TransientChosenBlock) req).request), req.token);
        // Something might be waiting for a request to complete (e.g. if we have
        // two requests for the same key), so wake the starter thread.
       wakeUp();
     }
   }
 }
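
  // The wakeUp() called above is assumed to simply notify this
  // RequestStarter's monitor so that realRun() re-checks the queue; a minimal
  // sketch under that assumption (not necessarily the actual implementation):
  //
  //   void wakeUp() {
  //     synchronized (this) {
  //       notifyAll();
  //     }
  //   }
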
  /**
   * Claims the request's key (or, for a transient insert, its token) and hands
   * the request off to a SenderThread. Returns false if the request was
   * cancelled or is already being handled.
   */
  private boolean startRequest(ChosenBlock req, boolean logMINOR) {
   if ((!req.isPersistent()) && req.isCancelled()) {
     req.onDumped();
     return false;
   }
   if (req.key != null) {
     if (!sched.addToFetching(req.key)) {
       req.onDumped();
       return false;
     }
   } else if ((!req.isPersistent())
       && ((TransientChosenBlock) req).request instanceof SendableInsert) {
     if (!sched.addTransientInsertFetching(
         (SendableInsert) (((TransientChosenBlock) req).request), req.token)) {
       req.onDumped();
       return false;
     }
   }
   if (logMINOR) Logger.minor(this, "Running request " + req + " priority " + req.getPriority());
   core.getExecutor()
       .execute(new SenderThread(req, req.key), "RequestStarter$SenderThread for " + req);
   return true;
 }
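
  // addToFetching()/addTransientInsertFetching() are assumed to act as atomic
  // claims, so that two SenderThreads never work on the same key or insert at
  // once; a minimal sketch of that contract (hypothetical names, not the real
  // scheduler code):
  //
  //   private final Set<Key> fetchingKeys = new HashSet<Key>();
  //
  //   boolean addToFetching(Key key) {
  //     synchronized (fetchingKeys) {
  //       return fetchingKeys.add(key); // false => already being fetched
  //     }
  //   }
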
  /**
   * Main loop: grabs requests from the scheduler, applies throttling and load
   * limiting, and starts them. Never returns.
   */
  void realRun() {
   ChosenBlock req = null;
   // The last time at which we sent a request or decided not to
   long cycleTime = System.currentTimeMillis();
   while (true) {
      // Give opennet up to 5 minutes to connect before we start requests that
      // would only be killed for lack of connected peers.
     OpennetManager om;
     if (core.node.peers.countConnectedPeers() < 3
         && (om = core.node.getOpennet()) != null
         && System.currentTimeMillis() - om.getCreationTime() < 5 * 60 * 1000) {
       try {
         synchronized (this) {
           wait(1000);
         }
        } catch (InterruptedException e) {
          // Ignore; we just re-check the peer count.
        }
       continue;
     }
     if (req == null) {
       req = sched.grabRequest();
     }
     if (req != null) {
       if (logMINOR) Logger.minor(this, "Running " + req + " priority " + req.getPriority());
       if (!req.localRequestOnly) {
         // Wait
          long delay = throttle.getDelay();
         if (logMINOR) Logger.minor(this, "Delay=" + delay + " from " + throttle);
         long sleepUntil = cycleTime + delay;
          if (!LOCAL_REQUESTS_COMPETE_FAIRLY) {
            // Charge the estimated bandwidth cost of one request up front;
            // blockingGrab() stalls until enough tokens are available.
            inputBucket.blockingGrab(
                (int) Math.max(0, averageInputBytesPerRequest.currentValue()));
            outputBucket.blockingGrab(
                (int) Math.max(0, averageOutputBytesPerRequest.currentValue()));
          }
         long now;
         do {
           now = System.currentTimeMillis();
           if (now < sleepUntil)
             try {
               Thread.sleep(sleepUntil - now);
               if (logMINOR) Logger.minor(this, "Slept: " + (sleepUntil - now) + "ms");
             } catch (InterruptedException e) {
               // Ignore
             }
         } while (now < sleepUntil);
       }
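
        // The delay above paces requests in time, while the bucket grabs charge
        // each request's estimated bandwidth cost up front. A minimal sketch of
        // the token-bucket contract assumed here (hypothetical, not the real
        // class):
        //
        //   class TokenBucket {
        //     private long tokens;
        //     synchronized void blockingGrab(int n) {
        //       while (tokens < n) {
        //         try { wait(); } catch (InterruptedException e) { /* retry */ }
        //       }
        //       tokens -= n;
        //     }
        //     synchronized void refill(long n) { tokens += n; notifyAll(); }
        //   }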
        // if (!doAIMD) {
        //   // Arbitrary limit on the number of local requests waiting for slots.
        //   // Firstly, they use threads. This could be a serious problem for faster nodes.
        //   // Secondly, it may help to prevent wider problems:
        //   // if all queues are full, the network will die.
        //   int[] waiting = core.node.countRequestsWaitingForSlots();
        //   int localRequestsWaitingForSlots = waiting[0];
        //   int maxWaitingForSlots = MAX_WAITING_FOR_SLOTS;
        //   // FIXME calibrate this by the number of local timeouts.
        //   // FIXME consider an AIMD, or some similar mechanism.
        //   // Local timeout-waiting-for-slots is largely dependent on the number of
        //   // requests running, due to strict round-robin, so we can probably do
        //   // something even simpler than an AIMD. For now we'll just have a fixed
        //   // number. This should partially address the problem. Note that while
        //   // waitFor() is blocking, we need such a limit anyway.
        //   if (localRequestsWaitingForSlots > maxWaitingForSlots) continue;
        // }
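
        // The FIXMEs above suggest replacing the fixed limit with an AIMD
        // (additive-increase/multiplicative-decrease) controller; a minimal
        // sketch of that idea (hypothetical, not part of this class):
        //
        //   class AIMDLimit {
        //     private double limit = MAX_WAITING_FOR_SLOTS;
        //     synchronized void onSuccess() { limit += 1.0; }      // additive increase
        //     synchronized void onLocalTimeout() { limit *= 0.5; } // multiplicative decrease
        //     synchronized int current() { return (int) Math.max(1.0, limit); }
        //   }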
       RejectReason reason;
       assert (req.realTimeFlag == realTime);
       if (LOCAL_REQUESTS_COMPETE_FAIRLY && !req.localRequestOnly) {
         if ((reason =
                 stats.shouldRejectRequest(
                     true,
                     isInsert,
                     isSSK,
                     true,
                     false,
                     null,
                     false,
                     isInsert && Node.PREFER_INSERT_DEFAULT,
                     req.realTimeFlag,
                     null))
             != null) {
           if (logMINOR) Logger.minor(this, "Not sending local request: " + reason);
           // Wait one throttle-delay before trying again
           cycleTime = System.currentTimeMillis();
           continue; // Let local requests compete with all the others
         }
       } else {
         stats.waitUntilNotOverloaded(isInsert);
       }
     } else {
       if (logMINOR) Logger.minor(this, "Waiting...");
        // Always take the lock on RequestStarter first. AFAICS we don't
        // synchronize on RequestStarter anywhere else.
        // Re-checking grabRequest() while holding the monitor closes the race
        // with wakeUp(): a request queued after the unlocked grab above will
        // still notify us before we call wait(), so no wakeup can be missed.
       synchronized (this) {
         req = sched.grabRequest();
         if (req == null) {
           try {
              // This can happen when most but not all requests are already
              // running but there is still work to fetch, so don't wait *too* long.
              // FIXME increase when we can be *sure* there is nothing left in the
              // queue (especially for transient requests).
              wait(1000);
           } catch (InterruptedException e) {
             // Ignore
           }
         }
       }
     }
     if (req == null) continue;
     if (!startRequest(req, logMINOR)) {
        // Don't log if it's a cancelled transient request.
        if (!((!req.isPersistent()) && req.isCancelled()))
          Logger.normal(this, "Did not start request " + req);
     }
     if (!req.localRequestOnly) cycleTime = System.currentTimeMillis();
     req = null;
   }
 }
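
  // realRun() loops forever. The enclosing run() is assumed to be a thin
  // wrapper that logs and restarts on an unexpected Throwable; a sketch under
  // that assumption:
  //
  //   public void run() {
  //     while (true) {
  //       try {
  //         realRun();
  //       } catch (Throwable t) {
  //         Logger.error(this, "Caught " + t, t);
  //       }
  //     }
  //   }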