/** Get an ArchiveHandler by key */
ArchiveStoreContext getCached(FreenetURI key) {
    if (logMINOR) Logger.minor(this, "Get cached AH for " + key);
    ArchiveStoreContext handler = archiveHandlers.get(key);
    if (handler == null) return null;
    archiveHandlers.push(key, handler);
    return handler;
}
/**
 * Get a cached, previously extracted, file from an archive.
 *
 * @param key The key used to fetch the archive.
 * @param filename The name of the file within the archive.
 * @return A Bucket containing the data requested, or null.
 * @throws ArchiveFailureException
 */
public Bucket getCached(FreenetURI key, String filename) throws ArchiveFailureException {
    if (logMINOR) Logger.minor(this, "Fetch cached: " + key + ' ' + filename);
    ArchiveKey k = new ArchiveKey(key, filename);
    ArchiveStoreItem asi = null;
    synchronized (this) {
        asi = storedData.get(k);
        if (asi == null) return null;
        // Promote to top of LRU
        storedData.push(k, asi);
    }
    if (logMINOR) Logger.minor(this, "Found data");
    return asi.getReaderBucket();
}
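/*
 * Usage sketch (illustrative only, not part of the original class): how a caller might look up a
 * previously extracted file. "manager" is an assumed ArchiveManager reference and the URI string
 * is hypothetical; getCached() returns null on a cache miss, so the result must be checked.
 * Exception handling (MalformedURLException, IOException) is omitted for brevity.
 *
 *   FreenetURI archiveKey = new FreenetURI("CHK@example"); // hypothetical archive key
 *   Bucket data = manager.getCached(archiveKey, "index.html");
 *   if (data != null) {
 *       InputStream in = data.getInputStream();
 *       try {
 *           // read the extracted file
 *       } finally {
 *           in.close();
 *       }
 *   }
 */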
/**
 * Add a store element.
 *
 * @param callbackName If set, the name of the file for which we must call the callback if this
 *     file happens to match.
 * @param gotElement Flag indicating whether we've already found the file for the callback. If so
 *     we must not call it again.
 * @param callback Callback to be called if we do find it. We must getReaderBucket() before adding
 *     the data to the LRU, otherwise it may be deleted before it reaches the client.
 * @throws ArchiveFailureException If a failure occurred resulting in the data not being readable.
 *     Only happens if callback != null.
 */
private ArchiveStoreItem addStoreElement(
        ArchiveStoreContext ctx,
        FreenetURI key,
        String name,
        Bucket temp,
        MutableBoolean gotElement,
        String callbackName,
        ArchiveExtractCallback callback,
        ClientContext context)
        throws ArchiveFailureException {
    RealArchiveStoreItem element = new RealArchiveStoreItem(ctx, key, name, temp);
    element.addToContext();
    if (logMINOR)
        Logger.minor(
                this,
                "Adding store element: " + element + " ( " + key + ' ' + name + " size "
                        + element.spaceUsed() + " )");
    ArchiveStoreItem oldItem;
    // Let it throw; if it does, something is drastically wrong
    Bucket matchBucket = null;
    if ((!gotElement.value) && name.equals(callbackName)) {
        matchBucket = element.getReaderBucket();
    }
    synchronized (this) {
        oldItem = storedData.get(element.key);
        storedData.push(element.key, element);
        cachedData += element.spaceUsed();
        if (oldItem != null) {
            cachedData -= oldItem.spaceUsed();
            if (logMINOR)
                Logger.minor(this, "Dropping old store element from archive cache: " + oldItem);
            oldItem.close();
        }
    }
    if (matchBucket != null) {
        callback.gotBucket(matchBucket, context);
        gotElement.value = true;
    }
    return element;
}
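/*
 * Ordering note (added commentary, not from the original source): the callback's bucket is taken
 * via getReaderBucket() *before* the element is pushed into the LRU, and the callback itself runs
 * outside the synchronized block. Once the element is in the LRU it can be evicted and its data
 * freed at any time, so pinning the reader bucket first keeps the matched data alive until
 * callback.gotBucket() has handed it to the client.
 */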
/**
 * Add an error element to the cache. This happens when a single file in the archive is invalid
 * (usually because it is too large).
 *
 * @param ctx The ArchiveStoreContext which must be notified about this element's creation.
 * @param key The key from which the archive was fetched.
 * @param name The name of the file within the archive.
 * @param error The error message to be included on the eventual exception thrown, if anyone tries
 *     to extract the data for this element.
 * @param tooBig Whether the error was caused by the file being too big.
 */
private void addErrorElement(
        ArchiveStoreContext ctx, FreenetURI key, String name, String error, boolean tooBig) {
    ErrorArchiveStoreItem element = new ErrorArchiveStoreItem(ctx, key, name, error, tooBig);
    element.addToContext();
    if (logMINOR)
        Logger.minor(this, "Adding error element: " + element + " for " + key + ' ' + name);
    ArchiveStoreItem oldItem;
    synchronized (this) {
        oldItem = storedData.get(element.key);
        storedData.push(element.key, element);
        if (oldItem != null) {
            oldItem.close();
            cachedData -= oldItem.spaceUsed();
            if (logMINOR)
                Logger.minor(this, "Dropping old store element from archive cache: " + oldItem);
        }
    }
}
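/*
 * Illustrative consequence (based on the javadoc above; not code from the original class): once an
 * error element is cached, a later attempt to read that entry surfaces the recorded message as an
 * exception instead of returning data. Roughly, with hypothetical names:
 *
 *   try {
 *       manager.getCached(archiveKey, "oversized-file.dat");
 *   } catch (ArchiveFailureException e) {
 *       // the exception message is based on the "error" string recorded by addErrorElement()
 *   }
 */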
/** Add an ArchiveHandler by key */
private synchronized void putCached(FreenetURI key, ArchiveStoreContext zip) {
    if (logMINOR) Logger.minor(this, "Put cached AH for " + key + " : " + zip);
    archiveHandlers.push(key, zip);
    while (archiveHandlers.size() > maxArchiveHandlers)
        archiveHandlers.popKey(); // dump it
}
public void startTemporaryBackgroundFetcher(
        USK usk,
        ClientContext context,
        final FetchContext fctx,
        boolean prefetchContent,
        boolean realTimeFlag) {
    final USK clear = usk.clearCopy();
    USKFetcher sched = null;
    ArrayList<USKFetcher> toCancel = null;
    synchronized (this) {
        // int x = 0;
        // for(USK key: backgroundFetchersByClearUSK.keySet()) {
        //     System.err.println("Fetcher "+x+": "+key);
        //     x++;
        // }
        USKFetcher f = temporaryBackgroundFetchersLRU.get(clear);
        if (f == null) {
            f = new USKFetcher(
                    usk,
                    this,
                    fctx.ignoreUSKDatehints
                            ? backgroundFetchContextIgnoreDBR
                            : backgroundFetchContext,
                    new USKFetcherWrapper(
                            usk,
                            RequestStarter.UPDATE_PRIORITY_CLASS,
                            realTimeFlag ? rcRT : rcBulk),
                    3,
                    false,
                    false,
                    false);
            sched = f;
            temporaryBackgroundFetchersLRU.push(clear, f);
        } else {
            f.addHintEdition(usk.suggestedEdition);
        }
        if (prefetchContent) {
            long fetchTime = -1; // If nothing in 60 seconds, try fetching the last known slot.
            long slot = lookupLatestSlot(clear);
            long good = lookupKnownGood(clear);
            if (slot > -1 && good != slot) fetchTime = System.currentTimeMillis();
            temporaryBackgroundFetchersPrefetch.put(clear, fetchTime);
            if (logMINOR) Logger.minor(this, "Prefetch: set " + fetchTime + " for " + clear);
            schedulePrefetchChecker();
        }
        temporaryBackgroundFetchersLRU.push(clear, f);
        while (temporaryBackgroundFetchersLRU.size()
                > NodeClientCore.getMaxBackgroundUSKFetchers()) {
            USKFetcher fetcher = temporaryBackgroundFetchersLRU.popValue();
            temporaryBackgroundFetchersPrefetch.remove(fetcher.getOriginalUSK().clearCopy());
            if (!fetcher.hasSubscribers()) {
                if (toCancel == null) toCancel = new ArrayList<USKFetcher>(2);
                toCancel.add(fetcher);
            } else {
                if (logMINOR)
                    Logger.minor(
                            this,
                            "Allowing temporary background fetcher to continue as it has subscribers... "
                                    + fetcher);
            }
        }
    }
    final ArrayList<USKFetcher> cancelled = toCancel;
    final USKFetcher scheduleMe = sched;
    // This is just a prefetching method, so it should not unnecessarily delay the parent,
    // nor should it take important locks. So we should do the actual schedule/cancels
    // off-thread. However, the above is done on-thread because a lot of the time it will
    // already be running.
    if (cancelled != null || sched != null) {
        executor.execute(
                new Runnable() {
                    @Override
                    public void run() {
                        if (cancelled != null) {
                            for (int i = 0; i < cancelled.size(); i++) {
                                USKFetcher fetcher = cancelled.get(i);
                                fetcher.cancel(null, USKManager.this.context);
                            }
                        }
                        if (scheduleMe != null)
                            scheduleMe.schedule(null, USKManager.this.context);
                    }
                });
    }
}
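/*
 * Usage sketch (hypothetical caller; "uskManager", "usk", "clientContext" and "fctx" are assumed
 * to be in scope): kick off a short-lived background poll for newer editions of a USK, with
 * prefetchContent=true and realTimeFlag=false, i.e. prefetching any newer content on the bulk
 * (non-realtime) queue.
 *
 *   uskManager.startTemporaryBackgroundFetcher(usk, clientContext, fctx, true, false);
 */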