/** Get an ArchiveHandler by key, promoting it to the top of the LRU. */
 // Synchronized so the get()+push() promotion is atomic with respect to putCached().
 synchronized ArchiveStoreContext getCached(FreenetURI key) {
   if (logMINOR) Logger.minor(this, "Get cached AH for " + key);
   ArchiveStoreContext handler = archiveHandlers.get(key);
   if (handler == null) return null;
   archiveHandlers.push(key, handler);
   return handler;
 }
 /**
  * Get a cached, previously extracted, file from an archive.
  *
  * @param key The key used to fetch the archive.
  * @param filename The name of the file within the archive.
   * @return A Bucket containing the data requested, or null if it is not cached.
   * @throws ArchiveFailureException If the stored data could not be made readable.
   */
 public Bucket getCached(FreenetURI key, String filename) throws ArchiveFailureException {
   if (logMINOR) Logger.minor(this, "Fetch cached: " + key + ' ' + filename);
   ArchiveKey k = new ArchiveKey(key, filename);
   ArchiveStoreItem asi = null;
   synchronized (this) {
     asi = storedData.get(k);
     if (asi == null) return null;
     // Promote to top of LRU
     storedData.push(k, asi);
   }
   if (logMINOR) Logger.minor(this, "Found data");
   return asi.getReaderBucket();
 }
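  // A hedged usage sketch, not part of the original class: a caller that consults the
  // cache before scheduling a fresh extraction. "manager", "uri" and "filename" are
  // assumed to exist; getCached(), Bucket.size() and Bucket.free() come from the code
  // above and the Freenet Bucket API.
  static boolean printCachedSize(ArchiveManager manager, FreenetURI uri, String filename)
      throws ArchiveFailureException {
    Bucket data = manager.getCached(uri, filename);
    if (data == null) return false; // cache miss: fetch and extract the archive, then retry
    try {
      System.out.println(filename + ": " + data.size() + " bytes cached");
      return true;
    } finally {
      data.free(); // release the read-only view once consumed
    }
  }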
  /**
   * Add a store element.
   *
   * @param ctx The ArchiveStoreContext to which the new element belongs.
   * @param key The key from which the archive was fetched.
   * @param name The name of the file within the archive.
   * @param temp A Bucket containing the extracted data.
   * @param gotElement Flag indicating whether we've already found the file for the callback. If so
   *     we must not call it again.
   * @param callbackName If set, the name of the file for which we must call the callback if this
   *     file happens to match.
   * @param callback Callback to be called if we do find it. We must getReaderBucket() before adding
   *     the data to the LRU, otherwise it may be deleted before it reaches the client.
   * @param context The ClientContext to pass to the callback.
   * @throws ArchiveFailureException If a failure occurred resulting in the data not being readable.
   *     Only happens if callback != null.
   */
 private ArchiveStoreItem addStoreElement(
     ArchiveStoreContext ctx,
     FreenetURI key,
     String name,
     Bucket temp,
     MutableBoolean gotElement,
     String callbackName,
     ArchiveExtractCallback callback,
     ClientContext context)
     throws ArchiveFailureException {
   RealArchiveStoreItem element = new RealArchiveStoreItem(ctx, key, name, temp);
   element.addToContext();
   if (logMINOR)
     Logger.minor(
         this,
         "Adding store element: "
             + element
             + " ( "
             + key
             + ' '
             + name
             + " size "
             + element.spaceUsed()
             + " )");
    ArchiveStoreItem oldItem;
    Bucket matchBucket = null;
    // Acquire the reader bucket before pushing the element into the LRU, so the data cannot be
    // evicted before it reaches the client. Let getReaderBucket() throw; if it does, something
    // is drastically wrong.
    if ((!gotElement.value) && name.equals(callbackName)) {
      matchBucket = element.getReaderBucket();
    }
   synchronized (this) {
     oldItem = storedData.get(element.key);
     storedData.push(element.key, element);
     cachedData += element.spaceUsed();
     if (oldItem != null) {
       cachedData -= oldItem.spaceUsed();
       if (logMINOR)
         Logger.minor(this, "Dropping old store element from archive cache: " + oldItem);
       oldItem.close();
     }
   }
   if (matchBucket != null) {
     callback.gotBucket(matchBucket, context);
     gotElement.value = true;
   }
   return element;
 }
 /**
  * Add an error element to the cache. This happens when a single file in the archive is invalid
  * (usually because it is too large).
  *
  * @param ctx The ArchiveStoreContext which must be notified about this element's creation.
  * @param key The key from which the archive was fetched.
  * @param name The name of the file within the archive.
   * @param error The error message to be included on the eventual exception thrown, if anyone tries
   *     to extract the data for this element.
   * @param tooBig True if the file was rejected for exceeding the size limit (the usual case).
   */
 private void addErrorElement(
     ArchiveStoreContext ctx, FreenetURI key, String name, String error, boolean tooBig) {
   ErrorArchiveStoreItem element = new ErrorArchiveStoreItem(ctx, key, name, error, tooBig);
   element.addToContext();
   if (logMINOR)
     Logger.minor(this, "Adding error element: " + element + " for " + key + ' ' + name);
   ArchiveStoreItem oldItem;
   synchronized (this) {
     oldItem = storedData.get(element.key);
     storedData.push(element.key, element);
     if (oldItem != null) {
       oldItem.close();
       cachedData -= oldItem.spaceUsed();
       if (logMINOR)
         Logger.minor(this, "Dropping old store element from archive cache: " + oldItem);
     }
   }
 }
 /**
  * Remove a file from the cache. Called after it has been removed from its ArchiveHandler.
  *
  * @param item The ArchiveStoreItem to remove.
  */
 synchronized void removeCachedItem(ArchiveStoreItem item) {
   long size = item.spaceUsed();
   storedData.removeKey(item.key);
    // Hard disk space limit: delete the file here, inside the lock. A soft limit would delete
    // outside the lock, at the cost of significantly exceeding the limit when overloaded.
   cachedData -= size;
   if (logMINOR) Logger.minor(this, "removeCachedItem: " + item);
   item.close();
 }
Example #6
 public void onFinished(USKFetcher fetcher, boolean ignoreError) {
   USK orig = fetcher.getOriginalUSK();
   USK clear = orig.clearCopy();
   synchronized (this) {
     if (backgroundFetchersByClearUSK.get(clear) == fetcher) {
       backgroundFetchersByClearUSK.remove(clear);
       if (!ignoreError) {
         // This shouldn't happen, it's a sanity check: the only way we get cancelled is from
         // USKManager, which removes us before calling cancel().
         Logger.error(
             this,
             "onCancelled for " + fetcher + " - was still registered, how did this happen??",
             new Exception("debug"));
       }
     }
     if (temporaryBackgroundFetchersLRU.get(clear) == fetcher) {
       temporaryBackgroundFetchersLRU.removeKey(clear);
       temporaryBackgroundFetchersPrefetch.remove(clear);
     }
   }
 }
  /** Drop any stored data beyond the configured limits. Synchronizes on this. */
 private void trimStoredData() {
   synchronized (this) {
     while (true) {
       ArchiveStoreItem item;
       if (cachedData <= maxCachedData && storedData.size() <= maxCachedElements) return;
       if (storedData.isEmpty()) {
         // Race condition? cachedData out of sync?
         Logger.error(
             this,
             "storedData is empty but still over limit: cachedData="
                 + cachedData
                 + " / "
                 + maxCachedData);
         return;
       }
       item = storedData.popValue();
       long space = item.spaceUsed();
       cachedData -= space;
        // Hard limit: delete the file inside the lock (a soft limit would delete outside it).
       if (logMINOR)
         Logger.minor(
             this,
             "Dropping "
                 + item
                 + " : cachedData="
                 + cachedData
                 + " of "
                 + maxCachedData
                 + " stored items : "
                 + storedData.size()
                 + " of "
                 + maxCachedElements);
       item.close();
     }
   }
 }
  /**
   * Create an ArchiveManager.
   *
   * @param maxHandlers The maximum number of cached ArchiveHandlers, i.e. the maximum number of
   *     containers to track.
   * @param maxCachedData The maximum size of the cache directory, in bytes.
   * @param maxArchivedFileSize The maximum extracted size of a single file in any archive.
   * @param maxCachedElements The maximum number of cached elements (an element is a file extracted
   *     from an archive; it is stored, encrypted and padded, in a single file).
   * @param tempBucketFactory The BucketFactory used to create temporary storage for extracted
   *     files.
   */
 public ArchiveManager(
     int maxHandlers,
     long maxCachedData,
     long maxArchivedFileSize,
     int maxCachedElements,
     BucketFactory tempBucketFactory) {
   maxArchiveHandlers = maxHandlers;
   // FIXME PERFORMANCE I'm assuming there isn't much locality here, so it's faster to use the
   // FAST_COMPARATOR.
   // This may not be true if there are a lot of sites with many containers all inserted as
   // individual SSKs?
   archiveHandlers = LRUMap.createSafeMap(FreenetURI.FAST_COMPARATOR);
   this.maxCachedElements = maxCachedElements;
   this.maxCachedData = maxCachedData;
   storedData = new LRUMap<ArchiveKey, ArchiveStoreItem>();
   this.maxArchivedFileSize = maxArchivedFileSize;
   this.tempBucketFactory = tempBucketFactory;
   logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
 }
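  // A hedged construction sketch: the limit values below are illustrative only, not
  // Freenet's actual defaults, and "tempBucketFactory" is assumed to be supplied by
  // the surrounding node setup.
  static ArchiveManager createExampleManager(BucketFactory tempBucketFactory) {
    return new ArchiveManager(
        200, // maxHandlers: containers to track
        200L * 1024 * 1024, // maxCachedData: 200 MiB cache directory
        4L * 1024 * 1024, // maxArchivedFileSize: 4 MiB per extracted file
        1024, // maxCachedElements: extracted files to keep
        tempBucketFactory);
  }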
Example #9
 public USKManager(NodeClientCore core) {
   HighLevelSimpleClient client =
       core.makeClient(RequestStarter.UPDATE_PRIORITY_CLASS, false, false);
   client.setMaxIntermediateLength(FProxyToadlet.MAX_LENGTH_NO_PROGRESS);
   client.setMaxLength(FProxyToadlet.MAX_LENGTH_NO_PROGRESS);
   backgroundFetchContext = client.getFetchContext();
   backgroundFetchContext.followRedirects = false;
   backgroundFetchContextIgnoreDBR = backgroundFetchContext.clone();
   backgroundFetchContextIgnoreDBR.ignoreUSKDatehints = true;
   realFetchContext = client.getFetchContext();
   // Performance: I'm pretty sure there is no spatial locality in the underlying data, so it's
   // okay to use the FAST_COMPARATOR here.
   // That is, even if two USKs are by the same author, they won't necessarily be updated or polled
   // at the same time.
   latestKnownGoodByClearUSK = new TreeMap<USK, Long>(USK.FAST_COMPARATOR);
   latestSlotByClearUSK = new TreeMap<USK, Long>(USK.FAST_COMPARATOR);
   subscribersByClearUSK = new TreeMap<USK, USKCallback[]>(USK.FAST_COMPARATOR);
   backgroundFetchersByClearUSK = new TreeMap<USK, USKFetcher>(USK.FAST_COMPARATOR);
   temporaryBackgroundFetchersLRU = LRUMap.createSafeMap(USK.FAST_COMPARATOR);
   temporaryBackgroundFetchersPrefetch = new WeakHashMap<USK, Long>();
   executor = core.getExecutor();
 }
Example #10
 /** Add an ArchiveHandler by key */
 private synchronized void putCached(FreenetURI key, ArchiveStoreContext zip) {
   if (logMINOR) Logger.minor(this, "Put cached AH for " + key + " : " + zip);
   archiveHandlers.push(key, zip);
   while (archiveHandlers.size() > maxArchiveHandlers) archiveHandlers.popKey(); // dump it
 }
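  // The bounded-LRU idiom used above (push, then pop while over capacity), shown in
  // isolation as a minimal sketch; the key/value types and capacity parameter are
  // illustrative only. Only LRUMap methods already used in this class appear here.
  static void boundedPush(LRUMap<String, String> cache, String key, String value, int maxEntries) {
    cache.push(key, value); // insert, or promote to most-recently-used
    while (cache.size() > maxEntries) {
      cache.popKey(); // evict the least-recently-used entry
    }
  }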
Example #11
  /**
   * The result of this method is displayed on the statistics toadlet: it will help with catching
   * bug #1147. Afterwards it should be removed, as it is not otherwise useful. :)
   *
   * @return the size of temporaryBackgroundFetchersLRU
   */
 public int getTemporaryBackgroundFetchersLRU() {
   return temporaryBackgroundFetchersLRU.size();
 }
Example #12
  public void startTemporaryBackgroundFetcher(
      USK usk,
      ClientContext context,
      final FetchContext fctx,
      boolean prefetchContent,
      boolean realTimeFlag) {
    final USK clear = usk.clearCopy();
    USKFetcher sched = null;
    ArrayList<USKFetcher> toCancel = null;
    synchronized (this) {
      USKFetcher f = temporaryBackgroundFetchersLRU.get(clear);
      if (f == null) {
        f =
            new USKFetcher(
                usk,
                this,
                fctx.ignoreUSKDatehints ? backgroundFetchContextIgnoreDBR : backgroundFetchContext,
                new USKFetcherWrapper(
                    usk, RequestStarter.UPDATE_PRIORITY_CLASS, realTimeFlag ? rcRT : rcBulk),
                3,
                false,
                false,
                false);
         sched = f;
       } else {
         f.addHintEdition(usk.suggestedEdition);
       }
      if (prefetchContent) {
        long fetchTime = -1;
        // If nothing in 60 seconds, try fetching the last known slot.
        long slot = lookupLatestSlot(clear);
        long good = lookupKnownGood(clear);
        if (slot > -1 && good != slot) fetchTime = System.currentTimeMillis();
        temporaryBackgroundFetchersPrefetch.put(clear, fetchTime);
        if (logMINOR) Logger.minor(this, "Prefetch: set " + fetchTime + " for " + clear);
        schedulePrefetchChecker();
      }
       temporaryBackgroundFetchersLRU.push(clear, f); // insert, or promote to most-recently-used
      while (temporaryBackgroundFetchersLRU.size() > NodeClientCore.getMaxBackgroundUSKFetchers()) {
        USKFetcher fetcher = temporaryBackgroundFetchersLRU.popValue();
        temporaryBackgroundFetchersPrefetch.remove(fetcher.getOriginalUSK().clearCopy());
        if (!fetcher.hasSubscribers()) {
          if (toCancel == null) toCancel = new ArrayList<USKFetcher>(2);
          toCancel.add(fetcher);
        } else {
          if (logMINOR)
            Logger.minor(
                this,
                "Allowing temporary background fetcher to continue as it has subscribers... "
                    + fetcher);
        }
      }
    }
    final ArrayList<USKFetcher> cancelled = toCancel;
    final USKFetcher scheduleMe = sched;
     // This is just a prefetching method, so it should not unnecessarily delay the parent, nor
     // should it take important locks; hence the actual schedule/cancels are done off-thread.
     // The lookup above, however, is done on-thread because much of the time the fetcher will
     // already be running.
    if (cancelled != null || sched != null) {
      executor.execute(
          new Runnable() {

            @Override
            public void run() {
              if (cancelled != null) {
                for (int i = 0; i < cancelled.size(); i++) {
                  USKFetcher fetcher = cancelled.get(i);
                  fetcher.cancel(null, USKManager.this.context);
                }
              }
              if (scheduleMe != null) scheduleMe.schedule(null, USKManager.this.context);
            }
          });
    }
  }
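  // A hedged usage sketch, not part of the original class: kick off background polling
  // for newer editions of a USK. "uskManager", "usk", "clientContext" and "fetchContext"
  // are assumed to be available from the surrounding code; the flag choices are
  // illustrative, not prescribed defaults.
  static void pollInBackground(
      USKManager uskManager, USK usk, ClientContext clientContext, FetchContext fetchContext) {
    uskManager.startTemporaryBackgroundFetcher(
        usk,
        clientContext,
        fetchContext,
        true, // prefetchContent: fetch the data once a newer edition is found
        false); // realTimeFlag: bulk priority is fine for background polling
  }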