/**
  * After this finishes running, the status of this Search object will be correct. If all
  * subrequests are complete and the result has not yet been made, this triggers creation of the
  * result. The cases below intentionally fall through, so a single call advances the status as
  * far as the completed work allows.
  *
  * @throws plugins.Library.util.exec.TaskAbortException
  */
 private synchronized void setStatus() throws TaskAbortException {
   switch (status) {
     case Unstarted: // If Unstarted, status -> Busy
       status = SearchStatus.Busy;
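       // falls through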
     case Busy:
       if (!isSubRequestsComplete())
         for (Execution<Set<TermEntry>> request : subsearches)
           if (request != null
               && (!(request instanceof Search) || ((Search) request).status == SearchStatus.Busy))
             return; // If Busy & still waiting for subrequests to complete, status remains Busy
       // If Busy and no subrequests are still busy, ready to combine: status -> Combining_First
       status = SearchStatus.Combining_First;
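       // falls through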
     case Combining_First: // for when subrequests are combining
       // If combining first and subsearches still haven't completed, remain in this state
       if (!isSubRequestsComplete()) return;
       // If subrequests have completed start process to combine results
       resultset =
           new ResultSet(subject, resultOperation, subsearches, innerCanFailAndStillComplete());
       if (executor != null) executor.execute(resultset, "Library.Search : combining results");
       else (new Thread(resultset, "Library.Search : combining results")).start();
       status = SearchStatus.Combining_Last;
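       // falls through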
     case Combining_Last: // for when this is combining
       if (!resultset.isDone())
         return; // If Combining & combine not finished, status remains as Combining
       subsearches = null; // clear the subrequests after they have been combined
       // If finished Combining and asked to generate resultnode, start that process
       if (formatResult) {
         // Subrequests have been combined into resultset, so start generating the result node
         resultNodeGenerator =
             new ResultNodeGenerator(resultset, htmlgroupusk, htmlshowold, htmljs);
         if (executor != null)
           executor.execute(resultNodeGenerator, "Library.Search : formatting results");
         else (new Thread(resultNodeGenerator, "Library.Search : formatting results")).start();
         status = SearchStatus.Formatting; // status -> Formatting
       } else {
         // If not asked to format output, status -> Done
         status = SearchStatus.Done;
       }
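       // falls through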
     case Formatting:
       if (formatResult) {
         // If asked to generate resultnode and still doing that, status remains as Formatting
         if (!resultNodeGenerator.isDone()) return;
         // If finished Formatting or not asked to do so, status -> Done
         pageEntryNode = resultNodeGenerator.getPageEntryNode();
         resultNodeGenerator = null;
       }
       status = SearchStatus.Done;
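       // falls through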
     case Done:
       // Done: nothing more to do
   }
 }
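 /*
  * Illustrative sketch only: how calling code might drive the state machine above. Because the
  * switch falls through, each call to setStatus() advances the status as far as the completed
  * work allows. Search.startSearch(), isDone() and getResult() are assumptions about the
  * surrounding class, not confirmed API.
  *
  *   Search search = Search.startSearch("freenet wiki", index); // assumed factory method
  *   while (!search.isDone()) {   // polling ends up in setStatus(), which walks Unstarted ->
  *     Thread.sleep(500);         // Busy -> Combining_First -> Combining_Last -> Formatting ->
  *   }                            // Done (InterruptedException handling omitted)
  *   Set<TermEntry> entries = search.getResult(); // assumed accessor for the combined result
  */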
 /**
  * Puts the request into the dependency list of either the main index or the appropriate
  * subindex, depending on whether the main index has been fetched yet.
  *
  * @param request the find request to route
  * @throws freenet.client.FetchException
  * @throws java.net.MalformedURLException
  */
 private synchronized void setdependencies(FindRequest request)
     throws FetchException, MalformedURLException {
   //		Logger.normal(this, "setting dependencies for "+request+" on "+this.toString());
   if (fetchStatus != FetchStatus.FETCHED) {
     waitingOnMainIndex.add(request);
     request.setStage(FindRequest.Stages.FETCHROOT);
     startFetch(false);
   } else {
     request.setStage(FindRequest.Stages.FETCHSUBINDEX);
     SubIndex subindex = getSubIndex(request.getSubject());
     subindex.addRequest(request);
      //      Logger.normal(this, "Starting "+getSubIndex(request.getSubject())+" to look for "+request.getSubject());
     if (executor != null) executor.execute(subindex, "Subindex:" + subindex.getFileName());
     else (new Thread(subindex, "Subindex:" + subindex.getFileName())).start();
   }
 }
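 // Sketch of a possible helper, not part of the original class: the
 // "executor != null ? execute(...) : new Thread(...).start()" fallback above recurs throughout
 // this file and could be centralised like this (the name runAsync is made up here).
 private void runAsync(Runnable job, String name) {
   // Prefer the provided executor when there is one; otherwise fall back to a plain thread.
   if (executor != null) executor.execute(job, name);
   else new Thread(job, name).start();
 }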
 /**
  * Subscribe to a given USK. Callback will be notified when it is updated. Note that this does not
  * imply that the USK will be checked on a regular basis, unless runBackgroundFetch=true.
  */
 public void subscribe(
     USK origUSK,
     USKCallback cb,
     boolean runBackgroundFetch,
     boolean ignoreUSKDatehints,
     RequestClient client) {
   if (logMINOR) Logger.minor(this, "Subscribing to " + origUSK + " for " + cb);
   if (client.persistent())
     throw new UnsupportedOperationException("USKManager subscriptions cannot be persistent");
   USKFetcher sched = null;
   long ed = origUSK.suggestedEdition;
   if (ed < 0) {
     Logger.error(this, "Subscribing to USK with negative edition number: " + ed);
     ed = -ed;
   }
    long curEd = lookupLatestSlot(origUSK);
    long goodEd = lookupKnownGood(origUSK);
   synchronized (this) {
     USK clear = origUSK.clearCopy();
     USKCallback[] callbacks = subscribersByClearUSK.get(clear);
     if (callbacks == null) {
       callbacks = new USKCallback[] {cb};
     } else {
       boolean mustAdd = true;
       for (USKCallback callback : callbacks) {
         if (callback == cb) {
           // Already subscribed.
           // But it may still be waiting for the callback.
           if (!(curEd > ed || goodEd > ed)) return;
           mustAdd = false;
         }
       }
       if (mustAdd) {
         callbacks = Arrays.copyOf(callbacks, callbacks.length + 1);
         callbacks[callbacks.length - 1] = cb;
       }
     }
     subscribersByClearUSK.put(clear, callbacks);
     if (runBackgroundFetch) {
       USKFetcher f = backgroundFetchersByClearUSK.get(clear);
       if (f == null) {
         f =
             new USKFetcher(
                 origUSK,
                 this,
                 ignoreUSKDatehints ? backgroundFetchContextIgnoreDBR : backgroundFetchContext,
                 new USKFetcherWrapper(origUSK, RequestStarter.UPDATE_PRIORITY_CLASS, client),
                 3,
                 true,
                 false,
                 false);
         sched = f;
         backgroundFetchersByClearUSK.put(clear, f);
       }
       f.addSubscriber(cb, origUSK.suggestedEdition);
     }
   }
   if (goodEd > ed)
     cb.onFoundEdition(
         goodEd, origUSK.copy(curEd), null, context, false, (short) -1, null, true, curEd > ed);
   else if (curEd > ed)
     cb.onFoundEdition(
         curEd, origUSK.copy(curEd), null, context, false, (short) -1, null, false, false);
   final USKFetcher fetcher = sched;
   if (fetcher != null) {
     executor.execute(
         new Runnable() {
           @Override
           public void run() {
             if (logMINOR) Logger.minor(this, "Starting " + fetcher);
             fetcher.schedule(null, context);
           }
         },
         "USKManager.schedule for " + fetcher);
   }
 }
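  /*
   * A minimal usage sketch, assuming the caller already holds a USKManager instance, a
   * non-persistent RequestClient and a USKCallback implementation; uskManager, uri, myCallback
   * and nonPersistentClient are placeholder names, not fields of this class:
   *
   *   USK usk = USK.create(uri);               // build the key from a FreenetURI (assumed factory)
   *   uskManager.subscribe(usk, myCallback,
   *       true,   // runBackgroundFetch: poll the USK in the background
   *       false,  // ignoreUSKDatehints: keep using date-hint (DBR) based lookups
   *       nonPersistentClient);                // persistent clients are rejected above
   *
   * If a newer edition is already known, onFoundEdition() fires immediately, as the tail of the
   * method above shows.
   */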
  public void startTemporaryBackgroundFetcher(
      USK usk,
      ClientContext context,
      final FetchContext fctx,
      boolean prefetchContent,
      boolean realTimeFlag) {
    final USK clear = usk.clearCopy();
    USKFetcher sched = null;
    ArrayList<USKFetcher> toCancel = null;
    synchronized (this) {
      //			int x = 0;
      //			for(USK key: backgroundFetchersByClearUSK.keySet()) {
      //				System.err.println("Fetcher "+x+": "+key);
      //				x++;
      //			}
      USKFetcher f = temporaryBackgroundFetchersLRU.get(clear);
      if (f == null) {
        f =
            new USKFetcher(
                usk,
                this,
                fctx.ignoreUSKDatehints ? backgroundFetchContextIgnoreDBR : backgroundFetchContext,
                new USKFetcherWrapper(
                    usk, RequestStarter.UPDATE_PRIORITY_CLASS, realTimeFlag ? rcRT : rcBulk),
                3,
                false,
                false,
                false);
        sched = f;
        temporaryBackgroundFetchersLRU.push(clear, f);
      } else {
        f.addHintEdition(usk.suggestedEdition);
      }
      if (prefetchContent) {
        long fetchTime = -1;
        // If nothing in 60 seconds, try fetching the last known slot.
        long slot = lookupLatestSlot(clear);
        long good = lookupKnownGood(clear);
        if (slot > -1 && good != slot) fetchTime = System.currentTimeMillis();
        temporaryBackgroundFetchersPrefetch.put(clear, fetchTime);
        if (logMINOR) Logger.minor(this, "Prefetch: set " + fetchTime + " for " + clear);
        schedulePrefetchChecker();
      }
      temporaryBackgroundFetchersLRU.push(clear, f);
      while (temporaryBackgroundFetchersLRU.size() > NodeClientCore.getMaxBackgroundUSKFetchers()) {
        USKFetcher fetcher = temporaryBackgroundFetchersLRU.popValue();
        temporaryBackgroundFetchersPrefetch.remove(fetcher.getOriginalUSK().clearCopy());
        if (!fetcher.hasSubscribers()) {
          if (toCancel == null) toCancel = new ArrayList<USKFetcher>(2);
          toCancel.add(fetcher);
        } else {
          if (logMINOR)
            Logger.minor(
                this,
                "Allowing temporary background fetcher to continue as it has subscribers... "
                    + fetcher);
        }
      }
    }
    final ArrayList<USKFetcher> cancelled = toCancel;
    final USKFetcher scheduleMe = sched;
    // This is just a prefetching method. so it should not unnecessarily delay the parent, nor
    // should it take important locks.
    // So we should do the actual schedule/cancels off-thread.
    // However, the above is done on-thread because a lot of the time it will already be running.
    if (cancelled != null || sched != null) {
      executor.execute(
          new Runnable() {

            @Override
            public void run() {
              if (cancelled != null) {
                for (int i = 0; i < cancelled.size(); i++) {
                  USKFetcher fetcher = cancelled.get(i);
                  fetcher.cancel(null, USKManager.this.context);
                }
              }
              if (scheduleMe != null) scheduleMe.schedule(null, USKManager.this.context);
            }
          });
    }
  }
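   /*
    * A minimal usage sketch, assuming a caller that wants a short-lived background poll for a
    * USK it is about to fetch; uskManager, usk, context and fctx stand for objects the caller
    * already holds:
    *
    *   uskManager.startTemporaryBackgroundFetcher(
    *       usk, context, fctx,
    *       true,    // prefetchContent: also try the content once a newer slot is known
    *       false);  // realTimeFlag: false selects the bulk RequestClient (rcBulk)
    *
    * The LRU is capped at NodeClientCore.getMaxBackgroundUSKFetchers(); evicted fetchers without
    * subscribers are cancelled off-thread by the Runnable above.
    */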