  /** Counts everything not yet started on the starter queue: the sum of sizeNotStarted() over all queued PersistentChosenRequests. */
  private int starterQueueLength() {
   int length = 0;
   synchronized (starterQueue) {
     for (PersistentChosenRequest request : starterQueue) length += request.sizeNotStarted();
   }
   return length;
 }
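  /**
   * Trim the starter queue back towards MAX_STARTER_QUEUE_SIZE by repeatedly dropping the
   * lowest-priority (highest prio value) PersistentChosenRequest, but never trimming below the
   * limit. Dumped requests are notified via onDumped() outside the starterQueue lock.
   */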
 private void trimStarterQueue(ObjectContainer container) {
   ArrayList<PersistentChosenRequest> dumped = null;
   synchronized (starterQueue) {
     int length = starterQueueLength();
     while (length > MAX_STARTER_QUEUE_SIZE) {
       // Find the lowest priority/retry count request.
       // If we can dump it without going below the limit, then do so.
       // If we can't, return.
       PersistentChosenRequest worst = null;
       short worstPrio = -1;
       int worstIndex = -1;
       int worstLength = -1;
       if (starterQueue.isEmpty()) {
         break;
       }
       length = 0;
        // Recompute the total while scanning for the worst entry: a higher prio value
        // means a lower priority, so the worst request is the one with the largest prio.
        for (int i = 0; i < starterQueue.size(); i++) {
          PersistentChosenRequest req = starterQueue.get(i);
          short prio = req.prio;
          int size = req.sizeNotStarted();
          length += size;
          if (prio > worstPrio) {
            worstPrio = prio;
            worst = req;
            worstIndex = i;
            worstLength = size;
          }
        }
        int lengthAfter = length - worstLength;
        if (lengthAfter >= MAX_STARTER_QUEUE_SIZE) {
          if (dumped == null) dumped = new ArrayList<PersistentChosenRequest>(2);
          dumped.add(worst);
          starterQueue.remove(worstIndex);
          length = lengthAfter;
          if (lengthAfter == MAX_STARTER_QUEUE_SIZE) break;
       } else {
         // Can't remove any more.
         break;
       }
     }
   }
   if (dumped == null) return;
   for (PersistentChosenRequest req : dumped) {
     req.onDumped(schedCore, container, false);
   }
 }
  /**
   * Wrap a persistent request in a PersistentChosenRequest and add it to the starter queue,
   * unless it is already queued, in which case the new wrapper is dumped.
   *
   * @param request The persistent SendableRequest to queue; it is briefly activated to build the
   *     PersistentChosenRequest.
   * @param container The database container.
   * @return True if the queue is now full/over-full.
   */
 boolean addToStarterQueue(SendableRequest request, ObjectContainer container) {
   if (logMINOR) Logger.minor(this, "Adding to starter queue: " + request);
   container.activate(request, 1);
   PersistentChosenRequest chosen;
   try {
     chosen =
         new PersistentChosenRequest(
             request,
             request.getPriorityClass(container),
             container,
             ClientRequestScheduler.this,
             clientContext);
   } catch (NoValidBlocksException e) {
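      // Nothing valid left to send for this request, so there is nothing to queue.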
     return false;
   }
   if (logMINOR) Logger.minor(this, "Created PCR: " + chosen);
   container.deactivate(request, 1);
   boolean dumpNew = false;
   synchronized (starterQueue) {
     for (PersistentChosenRequest req : starterQueue) {
       if (req.request == request) {
         Logger.error(
             this, "Already on starter queue: " + req + " for " + request, new Exception("debug"));
         dumpNew = true;
         break;
       }
     }
     if (!dumpNew) {
       starterQueue.add(chosen);
        // starterQueueLength() already includes the newly added chosen request, so don't
        // count chosen.sizeNotStarted() a second time.
        int length = starterQueueLength();
       runningPersistentRequests.add(request);
       if (logMINOR)
         Logger.minor(
             this,
             "Added to running persistent requests, size now "
                 + runningPersistentRequests.size()
                 + " : "
                 + request);
       return length > MAX_STARTER_QUEUE_SIZE;
     }
   }
   if (dumpNew) chosen.onDumped(schedCore, container, false);
   return false;
 }
  /**
   * Compare a recently registered SendableRequest to what is already on the starter queue. If the
   * queue is under the size limit, or the new request is better (numerically lower priority class)
   * than something already queued, add it and then trim the queue until we are just over the limit.
   *
   * @param req The newly registered request.
   * @param container The database container.
   * @param mightBeActive Requests which have only just been registered and so may legitimately
   *     still be active in the database; activation warnings are suppressed for these.
   */
 public void maybeAddToStarterQueue(
     SendableRequest req, ObjectContainer container, SendableRequest[] mightBeActive) {
   short prio = req.getPriorityClass(container);
   if (logMINOR) Logger.minor(this, "Maybe adding to starter queue: prio=" + prio);
   synchronized (starterQueue) {
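      // Walk the queue: total up the not-yet-started size and check whether the new request
      // outranks (has a numerically lower prio than) anything already queued.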
     boolean betterThanSome = false;
     int size = 0;
     PersistentChosenRequest prev = null;
     for (PersistentChosenRequest old : starterQueue) {
       if (old.request == req) {
         // Wait for a reselect. Otherwise we can starve other
         // requests. Note that this happens with persistent SBI's:
         // they are added at the new retry count before being
         // removed at the old retry count.
         if (logMINOR) Logger.minor(this, "Already on starter queue: " + old + " for " + req);
         return;
       }
       if (prev == old)
         Logger.error(this, "ON STARTER QUEUE TWICE: " + prev + " for " + prev.request);
       if (prev != null && prev.request == old.request)
         Logger.error(
             this,
             "REQUEST ON STARTER QUEUE TWICE: "
                 + prev
                 + " for "
                 + prev.request
                 + " vs "
                 + old
                 + " for "
                 + old.request);
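        // Requests listed in mightBeActive were only just registered, so their still being
        // active in the database is expected and not worth a warning.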
       boolean ignoreActive = false;
       if (mightBeActive != null) {
         for (SendableRequest tmp : mightBeActive) if (tmp == old.request) ignoreActive = true;
       }
       if (!ignoreActive) {
         if (container.ext().isActive(old.request))
           Logger.warning(
               this,
               "REQUEST ALREADY ACTIVATED: "
                   + old.request
                   + " for "
                   + old
                   + " while checking request queue in maybeAddToStarterQueue for "
                   + req);
          else if (logDEBUG)
            Logger.debug(
                this,
                "Not already activated for "
                    + old
                    + " while checking request queue in maybeAddToStarterQueue for "
                    + req);
       } else if (logMINOR)
         Logger.minor(
             this,
             "Ignoring active because just registered: "
                 + old.request
                 + " in maybeAddToStarterQueue for "
                 + req);
       size += old.sizeNotStarted();
       if (old.prio > prio) betterThanSome = true;
       prev = old;
     }
     if (size >= MAX_STARTER_QUEUE_SIZE && !betterThanSome) {
       if (logMINOR)
         Logger.minor(
             this,
             "Not adding to starter queue: over limit and req not better than any queued requests");
       return;
     }
   }
   addToStarterQueue(req, container);
   trimStarterQueue(container);
 }
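   /**
    * Refill the starter queue: check the cooldown queues if due, then keep pulling requests from
    * the priority selector until the queue is full or nothing more can be started. Only one thread
    * fills the queue at a time (guarded by fillingRequestStarterQueue).
    */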
  private void fillRequestStarterQueue(ObjectContainer container, ClientContext context) {
    synchronized (this) {
      if (fillingRequestStarterQueue) return;
      fillingRequestStarterQueue = true;
    }
    long now = System.currentTimeMillis();
    try {
       if (logMINOR)
         Logger.minor(
             this,
             "Filling request queue... (SSK=" + isSSKScheduler + " insert=" + isInsertScheduler + ")");
      long noLaterThan = Long.MAX_VALUE;
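       // For fetch schedulers, when the cooldown check is due, move keys whose cooldown has
       // expired back onto the main queues and note the earliest time a still-cooling key
       // becomes fetchable again.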
      boolean checkCooldownQueue = now > nextQueueFillRequestStarterQueue;
      if ((!isInsertScheduler) && checkCooldownQueue) {
        if (persistentCooldownQueue != null)
          noLaterThan = moveKeysFromCooldownQueue(persistentCooldownQueue, true, container);
        noLaterThan =
            Math.min(
                noLaterThan, moveKeysFromCooldownQueue(transientCooldownQueue, false, container));
      }
      // If anything has been re-added, the request starter will have been woken up.
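       // Choose the priority fuzz according to the configured priority scheduling policy.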
      short fuzz = -1;
      if (PRIORITY_SOFT.equals(choosenPriorityScheduler)) fuzz = -1;
      else if (PRIORITY_HARD.equals(choosenPriorityScheduler)) fuzz = 0;
      boolean added = false;
      synchronized (starterQueue) {
        if (logMINOR && (!isSSKScheduler) && (!isInsertScheduler)) {
          Logger.minor(this, "Scheduling CHK fetches...");
          for (SendableRequest req : runningPersistentRequests) {
            boolean wasActive = container.ext().isActive(req);
            if (!wasActive) container.activate(req, 1);
            Logger.minor(this, "Running persistent request: " + req);
            if (!wasActive) container.deactivate(req, 1);
          }
        }
        // Recompute starterQueueLength
        int length = 0;
        PersistentChosenRequest old = null;
        for (PersistentChosenRequest req : starterQueue) {
          if (old == req) Logger.error(this, "DUPLICATE CHOSEN REQUESTS ON QUEUE: " + req);
          if (old != null && old.request == req.request)
            Logger.error(
                this, "DUPLICATE REQUEST ON QUEUE: " + old + " vs " + req + " both " + req.request);
           // Unlike maybeAddToStarterQueue there is no just-registered list to check here, so
           // any request still active in the database at this point is worth a warning.
           if (container.ext().isActive(req.request))
             Logger.warning(
                 this,
                 "REQUEST ALREADY ACTIVATED: "
                     + req.request
                     + " for "
                     + req
                     + " while checking request queue in filling request queue");
           else if (logMINOR)
             Logger.minor(
                 this,
                 "Not already activated for "
                     + req
                     + " while checking request queue in filling request queue");
          req.pruneDuplicates(ClientRequestScheduler.this);
          old = req;
          length += req.sizeNotStarted();
        }
        if (logMINOR)
          Logger.minor(
              this,
              "Queue size: " + length + " SSK=" + isSSKScheduler + " insert=" + isInsertScheduler);
         // Only keep filling if we are well below the limit: bail out if the queue is already
         // over 3/4 full, and complain loudly if it has hit the warning threshold.
         if (length > MAX_STARTER_QUEUE_SIZE * 3 / 4) {
          if (length >= WARNING_STARTER_QUEUE_SIZE)
            Logger.error(this, "Queue already full: " + length);
          return;
        }
      }

       if (logMINOR && (!isSSKScheduler) && (!isInsertScheduler)) {
         Logger.minor(this, "Scheduling CHK fetches...");
       }
      boolean addedMore = false;
      while (true) {
        SelectorReturn r;
        // Must synchronize on scheduler to avoid problems with cooldown queue. See notes on
        // CooldownTracker.clearCachedWakeup, which also applies to other cooldown operations.
        synchronized (this) {
          r =
              selector.removeFirstInner(
                  fuzz,
                  random,
                  offeredKeys,
                  starter,
                  schedCore,
                  schedTransient,
                  false,
                  true,
                  Short.MAX_VALUE,
                  isRTScheduler,
                  context,
                  container,
                  now);
        }
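         // Pull out the chosen request if there is one; otherwise note the earliest cooldown
         // wakeup time so we do not sleep past it.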
        SendableRequest request = null;
        if (r != null && r.req != null) request = r.req;
        else {
          if (r != null && r.wakeupTime > 0 && noLaterThan > r.wakeupTime) {
            noLaterThan = r.wakeupTime;
            if (logMINOR)
              Logger.minor(
                  this,
                  "Waking up in " + TimeUtil.formatTime(noLaterThan - now) + " for cooldowns");
          }
        }
        if (request == null) {
          synchronized (ClientRequestScheduler.this) {
            // Don't wake up for a while, but no later than the time we expect the next item to come
            // off the cooldown queue
            if (checkCooldownQueue && !added) {
              nextQueueFillRequestStarterQueue =
                  System.currentTimeMillis() + WAIT_AFTER_NOTHING_TO_START;
              if (nextQueueFillRequestStarterQueue > noLaterThan)
                nextQueueFillRequestStarterQueue = noLaterThan + 1;
            }
          }
          if (addedMore) starter.wakeUp();
          return;
        }
        boolean full = addToStarterQueue(request, container);
        container.deactivate(request, 1);
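         // Wake the starter as soon as the first request goes on the queue; later additions set
         // addedMore so we wake it again before returning.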
        if (!added) starter.wakeUp();
        else addedMore = true;
        added = true;
        if (full) {
          if (addedMore) starter.wakeUp();
          return;
        }
      }
    } finally {
      synchronized (this) {
        fillingRequestStarterQueue = false;
      }
    }
  }