/**
 * Convert a ClientKeyBlock to a Bucket. If an error occurs, report it via onFailure and return
 * null.
 */
protected Bucket extract(
    ClientKeyBlock block, Object token, ObjectContainer container, ClientContext context) {
  Bucket data;
  try {
    data =
        block.decode(
            context.getBucketFactory(persistent),
            (int) (Math.min(ctx.maxOutputLength, Integer.MAX_VALUE)),
            false);
  } catch (KeyDecodeException e1) {
    if (Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Decode failure: " + e1, e1);
    onFailure(
        new FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage()),
        token,
        container,
        context);
    return null;
  } catch (TooBigException e) {
    onFailure(
        new FetchException(FetchException.TOO_BIG, e.getMessage()), token, container, context);
    return null;
  } catch (IOException e) {
    Logger.error(this, "Could not capture data - disk full?: " + e, e);
    onFailure(new FetchException(FetchException.BUCKET_ERROR, e), token, container, context);
    return null;
  }
  if (Logger.shouldLog(Logger.MINOR, this))
    Logger.minor(
        this, data == null ? "Could not decode: null" : ("Decoded " + data.size() + " bytes"));
  return data;
}
@Override
public void requeueAfterCooldown(
    Key key, long time, ObjectContainer container, ClientContext context) {
  MyCooldownTrackerItem tracker = makeCooldownTrackerItem(container, context);
  if (tracker.cooldownWakeupTime > time) {
    if (logMINOR) Logger.minor(this, "Not requeueing as deadline has not passed yet");
    return;
  }
  if (isEmpty(container)) {
    if (logMINOR) Logger.minor(this, "Not requeueing as cancelled or finished");
    return;
  }
  if (persistent) container.activate(this.key, 5);
  if (!(key.equals(this.key.getNodeKey(false)))) {
    Logger.error(
        this,
        "Got requeueAfterCooldown for wrong key: "
            + key
            + " but mine is "
            + this.key.getNodeKey(false)
            + " for "
            + this.key);
    return;
  }
  if (logMINOR) Logger.minor(this, "Requeueing after cooldown " + key + " for " + this);
  reschedule(container, context);
  if (persistent) container.deactivate(this.key, 5);
}
/** Called on failed/canceled fetch */
public void onFailure(FetchException e, ClientGetter state, ObjectContainer container) {
  fetchFailures++;
  if (fetchFailures < 20 && e.newURI != null) {
    try {
      if (logMINOR) Logger.minor(this, "Trying new URI: " + e.newURI);
      indexuri = e.newURI.setMetaString(new String[] {""}).toString();
      if (origEdition != -1 && e.newURI.getEdition() < origEdition) {
        Logger.error(
            this,
            "Redirect to earlier edition?!?!?!?: "
                + e.newURI.getEdition()
                + " from "
                + origEdition);
      } else {
        if (logMINOR) Logger.minor(this, "Trying new URI: " + e.newURI + " : " + indexuri);
        startFetch(true);
        if (updateHook != null) updateHook.update(updateContext, indexuri);
        return;
      }
    } catch (FetchException ex) {
      e = ex;
    } catch (MalformedURLException ex) {
      Logger.error(this, "what?", ex);
    }
  }
  fetchStatus = FetchStatus.FAILED;
  for (FindRequest findRequest : waitingOnMainIndex) {
    findRequest.setError(
        new TaskAbortException("Failure fetching rootindex of " + toString(), e));
  }
  Logger.error(this, "Fetch failed on " + toString() + " -- state = " + state, e);
}
static {
  Logger.minor(NativeThread.class, "Running init()");
  // Loading the NativeThread library isn't useful on macOS.
  boolean maybeLoadNative =
      ("Linux".equalsIgnoreCase(System.getProperty("os.name")))
          && (NodeStarter.extBuildNumber > 18);
  Logger.debug(NativeThread.class, "Run init(): should loadNative=" + maybeLoadNative);
  if (maybeLoadNative && LibraryLoader.loadNative("/freenet/support/io/", "NativeThread")) {
    NATIVE_PRIORITY_BASE = getLinuxPriority();
    NATIVE_PRIORITY_RANGE = 20 - NATIVE_PRIORITY_BASE;
    System.out.println(
        "Using the NativeThread implementation (base nice level is "
            + NATIVE_PRIORITY_BASE
            + ')');
    // There are 3 main priority levels.
    HAS_THREE_NICE_LEVELS = NATIVE_PRIORITY_RANGE >= 3;
    HAS_ENOUGH_NICE_LEVELS = NATIVE_PRIORITY_RANGE >= ENOUGH_NICE_LEVELS;
    HAS_PLENTY_NICE_LEVELS = NATIVE_PRIORITY_RANGE >= JAVA_PRIORITY_RANGE;
    if (!(HAS_ENOUGH_NICE_LEVELS && HAS_THREE_NICE_LEVELS))
      System.err.println(
          "WARNING!!! The JVM has been niced down to a level which won't allow it to schedule"
              + " threads properly! LOWER THE NICE LEVEL!!");
    _loadNative = true;
  } else {
    // Unused anyway.
    NATIVE_PRIORITY_BASE = 0;
    NATIVE_PRIORITY_RANGE = 19;
    HAS_THREE_NICE_LEVELS = true;
    HAS_ENOUGH_NICE_LEVELS = true;
    HAS_PLENTY_NICE_LEVELS = true;
    _loadNative = false;
  }
  Logger.minor(NativeThread.class, "Run init(): _loadNative = " + _loadNative);
}
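/*
 * A minimal sketch (not part of the original class) of how a Java-style priority could be
 * mapped linearly onto the nice range detected above. The mapping is an illustrative
 * assumption, not the project's actual scheme: it spreads Java priorities 1..10 across
 * NATIVE_PRIORITY_BASE..19, where a *lower* nice value means a larger CPU share.
 */
static int javaPriorityToNiceSketch(int javaPriority) {
  int clamped = Math.max(1, Math.min(10, javaPriority));
  // Fraction of the way from the lowest (1) to the highest (10) Java priority.
  double fraction = (clamped - 1) / 9.0;
  // Highest Java priority -> lowest nice value (NATIVE_PRIORITY_BASE); lowest -> 19.
  return (int) Math.round(19 - fraction * (19 - NATIVE_PRIORITY_BASE));
}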
/**
 * Fetch the main index: process it directly if it is a local file, otherwise fetch it from
 * Freenet in the background with this object as the callback.
 *
 * @throws freenet.client.FetchException
 * @throws java.net.MalformedURLException
 */
private synchronized void startFetch(boolean retry)
    throws FetchException, MalformedURLException {
  if ((!retry) && (fetchStatus != FetchStatus.UNFETCHED && fetchStatus != FetchStatus.FAILED))
    return;
  fetchStatus = FetchStatus.FETCHING;
  String uri = indexuri + DEFAULT_FILE;

  // Try a local file first.
  File file = new File(uri);
  if (file.exists() && file.canRead()) {
    processRequests(new FileBucket(file, true, false, false, false, false));
    return;
  }

  if (logMINOR) Logger.minor(this, "Fetching " + uri);
  // Not a local file: treat it as a FreenetURI and fetch from Freenet.
  FreenetURI u = new FreenetURI(uri);
  while (true) {
    try {
      rootGetter = hlsc.fetch(u, -1, this, this, hlsc.getFetchContext().clone());
      Logger.normal(this, "Fetch started : " + toString());
      break;
    } catch (FetchException e) {
      if (e.newURI != null) {
        u = e.newURI;
        if (logMINOR) Logger.minor(this, "New URI: " + u);
        continue;
      } else throw e;
    }
  }
}
public void onGeneratedURI(FreenetURI uri, BaseClientPutter state, ObjectContainer container) {
  if (logMINOR) Logger.minor(this, "Generated URI for " + darknetOpennetString + " ARK: " + uri);
  long l = uri.getSuggestedEdition();
  if (l < crypto.myARKNumber) {
    Logger.error(
        this,
        "Inserted "
            + darknetOpennetString
            + " ARK edition # lower than attempted: "
            + l
            + " expected "
            + crypto.myARKNumber);
  } else if (l > crypto.myARKNumber) {
    if (logMINOR)
      Logger.minor(
          this,
          darknetOpennetString + " ARK number moving from " + crypto.myARKNumber + " to " + l);
    crypto.myARKNumber = l;
    if (crypto.isOpennet) node.writeOpennetFile();
    else node.writeNodeFile();

    // We'll broadcast the new ARK edition to our connected peers via a differential node
    // reference.
    SimpleFieldSet fs = new SimpleFieldSet(true);
    fs.putSingle("ark.number", Long.toString(crypto.myARKNumber));
    node.peers.locallyBroadcastDiffNodeRef(fs, !crypto.isOpennet, crypto.isOpennet);
  }
}
/**
 * A non-authoritative hint that a specific edition *might* exist. At the moment, we just fetch
 * the block. We do not fetch the contents, and it is possible that USKFetchers are also
 * fetching the block. FIXME would it be more efficient to pass it along to a USKFetcher?
 *
 * @param context
 * @throws MalformedURLException If the URI passed in is not a USK.
 */
public void hintUpdate(FreenetURI uri, ClientContext context, short priority)
    throws MalformedURLException {
  if (uri.getSuggestedEdition() < lookupLatestSlot(USK.create(uri))) {
    if (logMINOR)
      Logger.minor(
          this,
          "Ignoring hint because edition is "
              + uri.getSuggestedEdition()
              + " but latest is "
              + lookupLatestSlot(USK.create(uri)));
    return;
  }
  uri = uri.sskForUSK();
  if (logMINOR) Logger.minor(this, "Doing hint fetch for " + uri);
  final ClientGetter get =
      new ClientGetter(
          new NullClientCallback(),
          uri,
          new FetchContext(backgroundFetchContext, FetchContext.IDENTICAL_MASK, false, null),
          priority,
          rcBulk,
          new NullBucket(),
          null,
          null);
  try {
    get.start(null, context);
  } catch (FetchException e) {
    if (logMINOR) Logger.minor(this, "Cannot start hint fetch for " + uri + " : " + e, e);
    // Ignore
  }
}
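/*
 * Hypothetical usage sketch: feeding an out-of-band edition hint to the manager. The key
 * material in the URI and the priority constant are illustrative assumptions; any standard
 * request priority would do.
 */
static void hintUpdateExampleSketch(USKManager uskManager, ClientContext clientContext)
    throws MalformedURLException {
  FreenetURI hinted = new FreenetURI("USK@routingKey,cryptoKey,extra/mysite/42/");
  uskManager.hintUpdate(hinted, clientContext, RequestStarter.PREFETCH_PRIORITY_CLASS);
}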
void updateSlot(final USK origUSK, final long number, final ClientContext context) {
  if (logMINOR) Logger.minor(this, "Updating (slot) " + origUSK.getURI() + " : " + number);
  USK clear = origUSK.clearCopy();
  final USKCallback[] callbacks;
  synchronized (this) {
    Long l = latestSlotByClearUSK.get(clear);
    if (logMINOR) Logger.minor(this, "Old slot: " + l);
    if ((l == null) || (number > l.longValue())) {
      l = Long.valueOf(number);
      latestSlotByClearUSK.put(clear, l);
      if (logMINOR) Logger.minor(this, "Put " + number);
    } else return;

    callbacks = subscribersByClearUSK.get(clear);
    if (temporaryBackgroundFetchersPrefetch.containsKey(clear)) {
      temporaryBackgroundFetchersPrefetch.put(clear, System.currentTimeMillis());
      schedulePrefetchChecker();
    }
  }
  if (callbacks != null) {
    // Run off-thread, because of locking, and because client callbacks may take some time.
    final USK usk = origUSK.copy(number);
    for (final USKCallback callback : callbacks)
      context.mainExecutor.execute(
          new Runnable() {
            @Override
            public void run() {
              callback.onFoundEdition(
                  number,
                  usk,
                  null, // non-persistent
                  context,
                  false,
                  (short) -1,
                  null,
                  false,
                  false);
            }
          },
          "USKManager callback executor for " + callback);
  }
}
public SimpleFieldSet exportFieldSet(Config.RequestType configRequestType, boolean withDefaults) {
  SimpleFieldSet fs = new SimpleFieldSet(true);
  @SuppressWarnings("unchecked")
  Map.Entry<String, Option<?>>[] entries = new Map.Entry[map.size()];
  // FIXME is any locking at all necessary here? After it has finished init, it's constant...
  synchronized (this) {
    entries = map.entrySet().toArray(entries);
  }
  if (logMINOR) Logger.minor(this, "Prefix=" + prefix);
  for (Map.Entry<String, Option<?>> entry : entries) {
    String key = entry.getKey();
    Option<?> o = entry.getValue();
    if (logMINOR)
      Logger.minor(
          this, "Key=" + key + " value=" + o.getValueString() + " default=" + o.isDefault());
    if (configRequestType == Config.RequestType.CURRENT_SETTINGS
        && (!withDefaults)
        && o.isDefault()
        && (!o.forceWrite)) {
      if (logMINOR) Logger.minor(this, "Skipping " + key + " - " + o.isDefault());
      continue;
    }
    switch (configRequestType) {
      case CURRENT_SETTINGS:
        fs.putSingle(key, o.getValueString());
        break;
      case DEFAULT_SETTINGS:
        fs.putSingle(key, o.getDefault());
        break;
      case SORT_ORDER:
        fs.put(key, o.getSortOrder());
        break;
      case EXPERT_FLAG:
        fs.put(key, o.isExpert());
        break;
      case FORCE_WRITE_FLAG:
        fs.put(key, o.isForcedWrite());
        break;
      case SHORT_DESCRIPTION:
        fs.putSingle(key, o.getLocalisedShortDesc());
        break;
      case LONG_DESCRIPTION:
        fs.putSingle(key, o.getLocalisedLongDesc());
        break;
      case DATA_TYPE:
        fs.putSingle(key, o.getDataTypeStr());
        break;
      default:
        Logger.error(this, "Unknown config request type value: " + configRequestType);
        break;
    }
    if (logMINOR)
      Logger.minor(this, "Key=" + prefix + '.' + key + " value=" + o.getValueString());
  }
  return fs;
}
public void onFailure(
    BulkCallFailureItem[] items, ObjectContainer container, ClientContext context) {
  FetchException[] fetchExceptions = new FetchException[items.length];
  int countFatal = 0;
  for (int i = 0; i < items.length; i++) {
    fetchExceptions[i] = translateException(items[i].e);
    if (fetchExceptions[i].isFatal()) countFatal++;
  }
  if (persistent) {
    container.activate(segment, 1);
    container.activate(parent, 1);
    container.activate(segment.errors, 1);
  }
  if (parent.isCancelled()) {
    if (logMINOR) Logger.minor(this, "Failing: cancelled");
    // Fail the segment.
    segment.fail(new FetchException(FetchException.CANCELLED), container, context, false);
    // FIXME do we need to free the keyNums??? Or will that happen later anyway?
    return;
  }
  for (int i = 0; i < fetchExceptions.length; i++)
    segment.errors.inc(fetchExceptions[i].getMode());
  if (persistent) segment.errors.storeTo(container);
  int nonFatalExceptions = items.length - countFatal;
  int[] blockNumbers = new int[nonFatalExceptions];
  if (countFatal > 0) {
    FetchException[] newFetchExceptions = new FetchException[items.length - countFatal];
    // Call the fatal callbacks directly.
    int x = 0;
    for (int i = 0; i < items.length; i++) {
      int blockNum = ((SplitFileFetcherSegmentSendableRequestItem) items[i].token).blockNum;
      if (fetchExceptions[i].isFatal()) {
        segment.onFatalFailure(fetchExceptions[i], blockNum, container, context);
      } else {
        blockNumbers[x] = blockNum;
        newFetchExceptions[x] = fetchExceptions[i];
        x++;
      }
    }
    fetchExceptions = newFetchExceptions;
  } else {
    for (int i = 0; i < blockNumbers.length; i++)
      blockNumbers[i] = ((SplitFileFetcherSegmentSendableRequestItem) items[i].token).blockNum;
  }
  if (logMINOR)
    Logger.minor(
        this,
        "Calling segment.onNonFatalFailure with " + blockNumbers.length + " failed fetches");
  segment.onNonFatalFailure(fetchExceptions, blockNumbers, container, context);
  if (persistent) {
    container.deactivate(segment, 1);
    container.deactivate(parent, 1);
    container.deactivate(segment.errors, 1);
  }
}
@Override
public boolean canRestart() {
  if (!finished) {
    Logger.minor(this, "Cannot restart because not finished for " + identifier);
    return false;
  }
  if (succeeded) {
    Logger.minor(this, "Cannot restart because succeeded for " + identifier);
    return false;
  }
  return getter.canRestart();
}
/**
 * Restore keys from the given cooldown queue. Find any keys that are due to be restored, and
 * restore all requests, both persistent and non-persistent, for those keys.
 *
 * @param queue
 * @param persistent
 * @param container
 * @return Long.MAX_VALUE if nothing is queued in the next WAIT_AFTER_NOTHING_TO_START millis;
 *     otherwise, the time at which the next key is due to be restored, if there are keys queued
 *     to be restarted in the near future.
 */
private long moveKeysFromCooldownQueue(
    CooldownQueue queue, boolean persistent, ObjectContainer container) {
  if (queue == null) return Long.MAX_VALUE;
  long now = System.currentTimeMillis();
  if (logMINOR) Logger.minor(this, "Moving keys from cooldown queue persistent=" + persistent);
  /*
   * Only go around once. We will be called again. If there are keys to move, then RequestStarter
   * will not sleep, because it will start them. Then it will come back here. If we are off-thread
   * i.e. on the database thread, then we will wake it up if we find keys... and we'll be
   * scheduled again.
   *
   * FIXME: I think we need to restore all the listeners for a single key simultaneously to
   * avoid some kind of race condition? Or could we just restore the one request on the queue?
   * Maybe it's just a misguided optimisation? IIRC we had some severe problems when we didn't
   * have this, related to requests somehow being lost altogether... Is it essential? We can
   * save a query if it's not... Is this about requests or about keys? Should we limit all
   * requests across any SendableRequest's to 3 every half hour for a specific key? Probably
   * yes...? In which case, can the cooldown queue be entirely in RAM, and would it be useful
   * for it to be? Less disk, more RAM... for fast nodes with little RAM it would be bad...
   */
  final int MAX_KEYS = 20;
  Object ret;
  ClientRequestScheduler otherScheduler =
      ((!isSSKScheduler)
          ? this.clientContext.getSskFetchScheduler(isRTScheduler)
          : this.clientContext.getChkFetchScheduler(isRTScheduler));
  if (queue instanceof PersistentCooldownQueue) {
    ret =
        ((PersistentCooldownQueue) queue)
            .removeKeyBefore(
                now,
                WAIT_AFTER_NOTHING_TO_START,
                container,
                MAX_KEYS,
                (PersistentCooldownQueue) otherScheduler.persistentCooldownQueue);
  } else ret = queue.removeKeyBefore(now, WAIT_AFTER_NOTHING_TO_START, container, MAX_KEYS);
  if (ret == null) return Long.MAX_VALUE;
  if (ret instanceof Long) {
    return (Long) ret;
  }
  Key[] keys = (Key[]) ret;
  for (int j = 0; j < keys.length; j++) {
    Key key = keys[j];
    if (persistent) container.activate(key, 5);
    if (logMINOR) Logger.minor(this, "Restoring key: " + key);
    if (key instanceof NodeSSK == isSSKScheduler) restoreKey(key, container, now);
    else otherScheduler.restoreKey(key, container, now);
    if (persistent) container.deactivate(key, 5);
  }
  return Long.MAX_VALUE;
}
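/*
 * A minimal, self-contained sketch of the all-in-RAM cooldown queue the FIXME above
 * speculates about. It is an illustrative assumption, not the project's implementation:
 * removeKeyBefore() returns the due keys, or the wake-up time of the earliest pending key
 * if one falls inside the window, or null if there is nothing relevant, mirroring the
 * three-way return convention of the API used above.
 */
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;

final class RamCooldownQueueSketch<K> {
  private static final class Entry<K> {
    final K key;
    final long wakeupTime;

    Entry(K key, long wakeupTime) {
      this.key = key;
      this.wakeupTime = wakeupTime;
    }
  }

  private final ArrayDeque<Entry<K>> queue = new ArrayDeque<Entry<K>>();
  private final long cooldownMillis;

  RamCooldownQueueSketch(long cooldownMillis) {
    this.cooldownMillis = cooldownMillis;
  }

  /** Entries get a fixed cooldown on insertion, so the deque stays sorted by wake-up time. */
  synchronized void add(K key, long now) {
    queue.addLast(new Entry<K>(key, now + cooldownMillis));
  }

  /** Returns a List of due keys, a Long wake-up time within the window, or null. */
  synchronized Object removeKeyBefore(long now, long window, int maxKeys) {
    List<K> due = new ArrayList<K>();
    while (!queue.isEmpty() && due.size() < maxKeys && queue.peekFirst().wakeupTime <= now) {
      due.add(queue.removeFirst().key);
    }
    if (!due.isEmpty()) return due;
    if (queue.isEmpty()) return null;
    long next = queue.peekFirst().wakeupTime;
    return next < now + window ? Long.valueOf(next) : null;
  }
}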
/**
 * Get a cached, previously extracted, file from an archive.
 *
 * @param key The key used to fetch the archive.
 * @param filename The name of the file within the archive.
 * @return A Bucket containing the data requested, or null.
 * @throws ArchiveFailureException
 */
public Bucket getCached(FreenetURI key, String filename) throws ArchiveFailureException {
  if (logMINOR) Logger.minor(this, "Fetch cached: " + key + ' ' + filename);
  ArchiveKey k = new ArchiveKey(key, filename);
  ArchiveStoreItem asi = null;
  synchronized (this) {
    asi = storedData.get(k);
    if (asi == null) return null;
    // Promote to top of LRU
    storedData.push(k, asi);
  }
  if (logMINOR) Logger.minor(this, "Found data");
  return asi.getReaderBucket();
}
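/*
 * The get-then-push pattern above is LRU promotion on the storedData map. A self-contained
 * sketch of the same technique using only the standard library: LinkedHashMap in access
 * order re-links an entry to the tail on every get(), and evicts the head entry when the
 * map outgrows its capacity. Illustrative only; the project's LRU map is its own class.
 */
import java.util.LinkedHashMap;
import java.util.Map;

final class LruCacheSketch<K, V> extends LinkedHashMap<K, V> {
  private final int capacity;

  LruCacheSketch(int capacity) {
    super(16, 0.75f, true); // true = access order, i.e. get() promotes to most-recently-used
    this.capacity = capacity;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    return size() > capacity; // drop the least-recently-used entry on overflow
  }
}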
public void onGotKey(Key key, KeyBlock block, ObjectContainer container, ClientContext context) {
  if (persistent) {
    container.activate(this, 1);
    container.activate(segment, 1);
    container.activate(blockNums, 1);
  }
  if (logMINOR) Logger.minor(this, "onGotKey(" + key + ")");
  // Find and remove block if it is on this subsegment. However it may have been
  // removed already.
  int blockNo;
  synchronized (segment) {
    for (int i = 0; i < blockNums.size(); i++) {
      Integer token = blockNums.get(i);
      int num = token;
      Key k = segment.getBlockNodeKey(num, container);
      if (k != null && k.equals(key)) {
        blockNums.remove(i);
        if (persistent) container.delete(token);
        break;
      }
    }
    blockNo = segment.getBlockNumber(key, container);
  }
  if (blockNo == -1) {
    Logger.minor(this, "No block found for key " + key + " on " + this);
    return;
  }
  Integer token = Integer.valueOf(blockNo);
  ClientCHK ckey = segment.getBlockKey(blockNo, container);
  ClientCHKBlock cb;
  try {
    cb = new ClientCHKBlock((CHKBlock) block, ckey);
  } catch (CHKVerifyException e) {
    onFailure(
        new FetchException(FetchException.BLOCK_DECODE_ERROR, e), token, container, context);
    return;
  }
  Bucket data = extract(cb, token, container, context);
  if (data == null) return;
  if (!cb.isMetadata()) {
    onSuccess(data, false, token, (token).intValue(), cb, container, context);
  } else {
    onFailure(
        new FetchException(FetchException.INVALID_METADATA, "Metadata where expected data"),
        token,
        container,
        context);
  }
}
private boolean forward(
    Message m,
    long id,
    PeerNode pn,
    short htl,
    double target,
    RoutedContext ctx,
    byte[] targetIdentity) {
  if (logMINOR) Logger.minor(this, "Should forward");
  // Forward
  m = preForward(m, htl);
  while (true) {
    PeerNode next = node.peers.getByIdentity(targetIdentity);
    if (next != null && !next.isConnected()) {
      Logger.error(this, "Found target but disconnected!: " + next);
      next = null;
    }
    if (next == null)
      next =
          node.peers.closerPeer(
              pn, ctx.routedTo, target, true, node.isAdvancedModeEnabled(), -1, null, null, htl);
    if (logMINOR) Logger.minor(this, "Next: " + next + " message: " + m);
    if (next != null) {
      // next is connected, or at least has been => next.getPeer() CANNOT be null.
      if (logMINOR)
        Logger.minor(this, "Forwarding " + m.getSpec() + " to " + next.getPeer().getPort());
      ctx.addSent(next);
      try {
        next.sendAsync(m, null, nodeStats.routedMessageCtr);
      } catch (NotConnectedException e) {
        continue;
      }
    } else {
      if (logMINOR)
        Logger.minor(
            this, "Reached dead end for " + m.getSpec() + " on " + node.getDarknetPortNumber());
      // Reached a dead end...
      Message reject = DMT.createFNPRoutedRejected(id, htl);
      if (pn != null)
        try {
          pn.sendAsync(reject, null, nodeStats.routedMessageCtr);
        } catch (NotConnectedException e) {
          Logger.error(this, "Cannot send reject message back to source " + pn);
          return true;
        }
    }
    return true;
  }
}
void updateKnownGood(final USK origUSK, final long number, final ClientContext context) {
  if (logMINOR) Logger.minor(this, "Updating (known good) " + origUSK.getURI() + " : " + number);
  USK clear = origUSK.clearCopy();
  final USKCallback[] callbacks;
  boolean newSlot = false;
  synchronized (this) {
    Long l = latestKnownGoodByClearUSK.get(clear);
    if (logMINOR) Logger.minor(this, "Old known good: " + l);
    if ((l == null) || (number > l.longValue())) {
      l = Long.valueOf(number);
      latestKnownGoodByClearUSK.put(clear, l);
      if (logMINOR) Logger.minor(this, "Put " + number);
    } else return;

    // If it's in KnownGood, it will also be in Slot
    l = latestSlotByClearUSK.get(clear);
    if (logMINOR) Logger.minor(this, "Old slot: " + l);
    if ((l == null) || (number > l.longValue())) {
      l = Long.valueOf(number);
      latestSlotByClearUSK.put(clear, l);
      if (logMINOR) Logger.minor(this, "Put " + number);
      newSlot = true;
    }

    callbacks = subscribersByClearUSK.get(clear);
  }
  if (callbacks != null) {
    // Run off-thread, because of locking, and because client callbacks may take some time.
    final USK usk = origUSK.copy(number);
    final boolean newSlotToo = newSlot;
    for (final USKCallback callback : callbacks)
      context.mainExecutor.execute(
          new Runnable() {
            @Override
            public void run() {
              callback.onFoundEdition(
                  number,
                  usk,
                  null, // non-persistent
                  context,
                  false,
                  (short) -1,
                  null,
                  true,
                  newSlotToo);
            }
          },
          "USKManager callback executor for " + callback);
  }
}
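/*
 * Worked example of the two watermarks maintained by updateSlot() and updateKnownGood()
 * (edition numbers illustrative). Suppose slot 7 has been observed to exist but only
 * edition 5 has actually been fetched and verified; then, assuming the obvious lookup
 * accessors:
 *
 *   lookupLatestSlot(usk) -> 7   // highest edition seen to exist
 *   lookupKnownGood(usk)  -> 5   // highest edition whose content was verified
 *
 * A later updateKnownGood(usk, 8, ...) would advance both maps and notify subscribers
 * with newKnownGood = true and newSlotToo = true.
 */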
/**
 * Simply check whether the block exists, in such a way that we don't fetch the full content. If
 * it does exist then the USK tracker, and therefore any fetchers, will be updated. You can pass
 * either an SSK or a USK.
 */
public void hintCheck(
    FreenetURI uri,
    final Object token,
    ClientContext context,
    short priority,
    final HintCallback cb)
    throws MalformedURLException {
  final FreenetURI origURI = uri;
  if (uri.isUSK()) uri = uri.sskForUSK();
  if (logMINOR) Logger.minor(this, "Doing hint fetch for " + uri);
  final ClientGetter get =
      new ClientGetter(
          new ClientGetCallback() {

            @Override
            public void onMajorProgress(ObjectContainer container) {
              // Ignore
            }

            @Override
            public void onSuccess(
                FetchResult result, ClientGetter state, ObjectContainer container) {
              cb.success(origURI, token);
            }

            @Override
            public void onFailure(
                FetchException e, ClientGetter state, ObjectContainer container) {
              if (e.isDataFound()) cb.success(origURI, token);
              else if (e.isDNF()) cb.dnf(origURI, token, e);
              else cb.failed(origURI, token, e);
            }
          },
          uri,
          new FetchContext(backgroundFetchContext, FetchContext.IDENTICAL_MASK, false, null),
          priority,
          rcBulk,
          new NullBucket(),
          null,
          null);
  try {
    get.start(null, context);
  } catch (FetchException e) {
    if (logMINOR) Logger.minor(this, "Cannot start hint fetch for " + uri + " : " + e, e);
    if (e.isDataFound()) cb.success(origURI, token);
    else if (e.isDNF()) cb.dnf(origURI, token, e);
    else cb.failed(origURI, token, e);
  }
}
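/*
 * Hypothetical caller sketch for hintCheck(). The callback method signatures follow the
 * calls made in the method above; the priority constant and the exact declaration site of
 * HintCallback are assumptions for illustration.
 */
static void hintCheckExampleSketch(
    USKManager uskManager, FreenetURI uri, ClientContext clientContext)
    throws MalformedURLException {
  uskManager.hintCheck(
      uri,
      null, // token: opaque, handed back to the callback unchanged
      clientContext,
      RequestStarter.UPDATE_PRIORITY_CLASS,
      new HintCallback() {
        @Override
        public void success(FreenetURI origURI, Object token) {
          Logger.minor(this, "Edition exists: " + origURI);
        }

        @Override
        public void dnf(FreenetURI origURI, Object token, FetchException e) {
          Logger.minor(this, "Not found (may simply not have propagated yet): " + origURI);
        }

        @Override
        public void failed(FreenetURI origURI, Object token, FetchException e) {
          Logger.minor(this, "Hint check failed for " + origURI + " : " + e);
        }
      });
}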
@Override
public List<PersistentChosenBlock> makeBlocks(
    PersistentChosenRequest request,
    RequestScheduler sched,
    ObjectContainer container,
    ClientContext context) {
  if (persistent) {
    container.activate(segment, 1);
    container.activate(blockNums, 1);
  }
  Integer[] blockNumbers;
  synchronized (this) {
    blockNumbers = blockNums.toArray(new Integer[blockNums.size()]);
  }
  ArrayList<PersistentChosenBlock> blocks = new ArrayList<PersistentChosenBlock>();
  Arrays.sort(blockNumbers);
  int prevBlockNumber = -1;
  for (int i = 0; i < blockNumbers.length; i++) {
    int blockNumber = blockNumbers[i];
    if (blockNumber == prevBlockNumber) {
      Logger.error(
          this,
          "Duplicate block number in makeBlocks() in "
              + this
              + ": two copies of "
              + blockNumber);
      continue;
    }
    prevBlockNumber = blockNumber;
    ClientKey key = segment.getBlockKey(blockNumber, container);
    if (key == null) {
      if (logMINOR) Logger.minor(this, "Block " + blockNumber + " is null, maybe race condition");
      continue;
    }
    key = key.cloneKey();
    Key k = key.getNodeKey(true);
    PersistentChosenBlock block =
        new PersistentChosenBlock(
            false, request, new MySendableRequestItem(blockNumber), k, key, sched);
    if (logMINOR)
      Logger.minor(
          this, "Created block " + block + " for block number " + blockNumber + " on " + this);
    blocks.add(block);
  }
  blocks.trimToSize();
  if (persistent) {
    container.deactivate(segment, 1);
    container.deactivate(blockNums, 1);
  }
  return blocks;
}
@Override
public void onSuccess(Object keyNum, ObjectContainer container, ClientContext context) {
  if (logMINOR) Logger.minor(this, "Succeeded (" + this + "): " + token);
  if (persistent) container.activate(parent, 1);
  if (parent.isCancelled()) {
    fail(new InsertException(InsertException.CANCELLED), container, context);
    return;
  }
  synchronized (this) {
    if (extraInserts > 0) {
      if (++completedInserts <= extraInserts) {
        if (logMINOR)
          Logger.minor(
              this,
              "Completed inserts "
                  + completedInserts
                  + " of extra inserts "
                  + extraInserts
                  + " on "
                  + this);
        if (persistent) container.store(this);
        return; // Let it repeat until we've done enough inserts. It hasn't been unregistered yet.
      }
    }
    if (finished) {
      // Normal with persistence.
      Logger.normal(this, "Block already completed: " + this);
      return;
    }
    finished = true;
  }
  if (persistent) {
    container.store(this);
    container.activate(sourceData, 1);
  }
  if (freeData) {
    sourceData.free();
    if (persistent) sourceData.removeFrom(container);
    sourceData = null;
    if (persistent) container.store(this);
  }
  parent.completedBlock(false, container, context);
  unregister(container, context, getPriorityClass(container));
  if (persistent) container.activate(cb, 1);
  if (logMINOR) Logger.minor(this, "Calling onSuccess for " + cb);
  cb.onSuccess(this, container, context);
  if (persistent) container.deactivate(cb, 1);
}
public static boolean writeTo(InputStream input, File target)
    throws FileNotFoundException, IOException {
  DataInputStream dis = null;
  FileOutputStream fos = null;
  File file = File.createTempFile("temp", ".tmp", target.getParentFile());
  if (logMINOR)
    Logger.minor(FileUtil.class, "Writing to " + file + " to be renamed to " + target);
  try {
    dis = new DataInputStream(input);
    fos = new FileOutputStream(file);
    int len = 0;
    byte[] buffer = new byte[4096];
    while ((len = dis.read(buffer)) > 0) {
      fos.write(buffer, 0, len);
    }
  } finally {
    if (dis != null) dis.close();
    if (fos != null) fos.close();
  }
  if (FileUtil.renameTo(file, target)) return true;
  else {
    file.delete();
    return false;
  }
}
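/*
 * Hypothetical usage sketch: writeTo() stages the data in a temp file created in the
 * target's own directory (so the final rename stays on one filesystem) and only then
 * renames it over the target, so readers never observe a half-written file. The path and
 * payload below are illustrative.
 */
static void atomicWriteExampleSketch() throws IOException {
  InputStream in = new ByteArrayInputStream("hello".getBytes("UTF-8"));
  if (!FileUtil.writeTo(in, new File("/var/lib/app/config.dat"))) {
    Logger.error(FileUtil.class, "Atomic replace failed; old file left intact");
  }
}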
/** Get a cached ArchiveStoreContext by key, promoting it in the LRU. */
ArchiveStoreContext getCached(FreenetURI key) {
  if (logMINOR) Logger.minor(this, "Get cached AH for " + key);
  ArchiveStoreContext handler = archiveHandlers.get(key);
  if (handler == null) return null;
  archiveHandlers.push(key, handler);
  return handler;
}
@Override
public void endElement(String namespaceURI, String localName, String qName) {
  if (processingWord && wordMatches != null && qName.equals("file")) {
    HashMap<Integer, String> termpositions = null;
    if (characters != null) {
      String[] termposs = characters.toString().split(",");
      termpositions = new HashMap<Integer, String>();
      for (String pos : termposs) {
        try {
          termpositions.put(Integer.valueOf(pos), null);
        } catch (NumberFormatException e) {
          Logger.error(this, "Position in index not an integer: " + pos, e);
        }
      }
      characters = null;
    }

    FileMatch thisFile = new FileMatch(id, termpositions, thisWordMatch);
    ArrayList<FileMatch> matchList = idToFileMatches.get(id);
    if (matchList == null) {
      matchList = new ArrayList<FileMatch>();
      idToFileMatches.put(id, matchList);
    }
    if (logMINOR) Logger.minor(this, "Match: id=" + id + " for word " + match);
    matchList.add(thisFile);
  }
}
/**
 * A request failed to a specific peer.
 *
 * @param routedTo The peer we routed to.
 * @param rfTimeout The time until we can route to the node again, for purposes of RecentlyFailed.
 * @param ftTimeout The time until we can route to the node again, for purposes of per-node
 *     failure tables.
 * @param now The current time.
 * @param htl The HTL of the request. Note that timeouts only apply to the same HTL.
 */
public synchronized void failedTo(
    PeerNodeUnlocked routedTo, int rfTimeout, int ftTimeout, long now, short htl) {
  if (logMINOR) {
    Logger.minor(
        this,
        "Failed sending request to "
            + routedTo.shortToString()
            + " : timeout "
            + rfTimeout
            + " / "
            + ftTimeout);
  }
  int idx = addRequestedFrom(routedTo, htl, now);
  if (rfTimeout > 0) {
    long curTimeoutTime = requestedTimeoutsRF[idx];
    long newTimeoutTime = now + rfTimeout;
    if (newTimeoutTime > curTimeoutTime) {
      requestedTimeoutsRF[idx] = newTimeoutTime;
      requestedTimeoutHTLs[idx] = htl;
    }
  }
  if (ftTimeout > 0) {
    long curTimeoutTime = requestedTimeoutsFT[idx];
    long newTimeoutTime = now + ftTimeout;
    if (newTimeoutTime > curTimeoutTime) {
      requestedTimeoutsFT[idx] = newTimeoutTime;
      requestedTimeoutHTLs[idx] = htl;
    }
  }
}
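/*
 * Worked example of the merge rule above (times in ms, illustrative). Timeouts are stored
 * as absolute deadlines and only ever extended, never shortened:
 *
 *   now = 1_000_000, requestedTimeoutsRF[idx] = 1_300_000 (from an earlier 300s timeout);
 *   failedTo(..., rfTimeout = 600_000, ...) computes 1_600_000 > 1_300_000, so the stored
 *     deadline advances to 1_600_000;
 *   a later failedTo(..., rfTimeout = 100_000, ...) at now = 1_050_000 computes 1_150_000,
 *     which is smaller, so the stored deadline stays at 1_600_000.
 */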
/**
 * Deal with a routed-to-node message that landed on this node. This is where
 * message-type-specific code executes.
 *
 * @param m
 * @return True if the message was handled here, false otherwise.
 */
private boolean dispatchRoutedMessage(Message m, PeerNode src, long id) {
  if (m.getSpec() == DMT.FNPRoutedPing) {
    if (logMINOR) Logger.minor(this, "RoutedPing reached other side! (" + id + ")");
    int x = m.getInt(DMT.COUNTER);
    Message reply = DMT.createFNPRoutedPong(id, x);
    if (logMINOR) Logger.minor(this, "Replying - counter = " + x + " for " + id);
    try {
      src.sendAsync(reply, null, nodeStats.routedMessageCtr);
    } catch (NotConnectedException e) {
      if (logMINOR)
        Logger.minor(this, "Lost connection replying to " + m + " in dispatchRoutedMessage");
    }
    return true;
  }
  return false;
}
/**
 * Creates a search for any number of indices, starts it, and returns the associated Search
 * object. TODO startSearch with array of indexes
 *
 * @param search string to be searched
 * @param indexuri URI(s) of the index(es) to be used, separated by spaces or semicolons
 * @return existing Search for this if it exists, a new one otherwise, or null if the query is
 *     for a stopword or stop query
 * @throws InvalidSearchException if any part of the search is invalid
 */
public static Search startSearch(String search, String indexuri)
    throws InvalidSearchException, TaskAbortException {
  search = search.toLowerCase(Locale.US).trim();
  if (search.length() == 0) throw new InvalidSearchException("Blank search");
  search = fixCJK(search);

  // See if the same search exists
  if (hasSearch(search, indexuri)) return getSearch(search, indexuri);

  if (logMINOR)
    Logger.minor(Search.class, "Starting new search for " + search + " in " + indexuri);

  String[] indices = indexuri.split("[ ;]");
  if (indices.length < 1 || search.trim().length() < 1)
    throw new InvalidSearchException("Attempt to start search with no index or terms");
  else if (indices.length == 1) {
    Search newSearch = splitQuery(search, indexuri);
    return newSearch;
  } else {
    // Create a search for multiple terms over multiple indices.
    ArrayList<Execution<Set<TermEntry>>> indexrequests =
        new ArrayList<Execution<Set<TermEntry>>>(indices.length);
    for (String index : indices) {
      Search indexsearch = startSearch(search, index);
      if (indexsearch == null) return null;
      indexrequests.add(indexsearch);
    }
    Search newSearch =
        new Search(search, indexuri, indexrequests, ResultOperation.DIFFERENTINDEXES);
    return newSearch;
  }
}
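/*
 * Hypothetical usage sketch: starting one search over two indices. The index URIs are
 * illustrative placeholders; a space- or semicolon-separated list fans out into one
 * recursive sub-search per index, merged with ResultOperation.DIFFERENTINDEXES.
 */
static void searchExampleSketch() throws InvalidSearchException, TaskAbortException {
  Search s = Search.startSearch("free software", "index-one-uri index-two-uri");
  if (s == null) {
    // The query reduced to a stopword/stop query; there is nothing to wait for.
  }
}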
public FreenetURI(FreenetURI uri) {
  // this.uniqueHashCode = super.hashCode();
  keyType = uri.keyType;
  docName = uri.docName;
  if (uri.metaStr != null) {
    metaStr = new String[uri.metaStr.length];
    System.arraycopy(uri.metaStr, 0, metaStr, 0, metaStr.length);
  } else metaStr = null;
  if (uri.routingKey != null) {
    routingKey = new byte[uri.routingKey.length];
    System.arraycopy(uri.routingKey, 0, routingKey, 0, routingKey.length);
  } else routingKey = null;
  if (uri.cryptoKey != null) {
    cryptoKey = new byte[uri.cryptoKey.length];
    System.arraycopy(uri.cryptoKey, 0, cryptoKey, 0, cryptoKey.length);
  } else cryptoKey = null;
  if (uri.extra != null) {
    extra = new byte[uri.extra.length];
    System.arraycopy(uri.extra, 0, extra, 0, extra.length);
  } else extra = null;
  this.suggestedEdition = uri.suggestedEdition;
  if (logMINOR)
    Logger.minor(
        this, "Copied: " + toString() + " from " + uri.toString(), new Exception("debug"));
}
public void onFailure(
    FetchException e, Object token, ObjectContainer container, ClientContext context) {
  if (persistent) {
    container.activate(segment, 1);
    container.activate(parent, 1);
    container.activate(segment.errors, 1);
  }
  boolean forceFatal = false;
  if (parent.isCancelled()) {
    if (logMINOR) Logger.minor(this, "Failing: cancelled");
    e = new FetchException(FetchException.CANCELLED);
    forceFatal = true;
  }
  segment.errors.inc(e.getMode());
  if (persistent) segment.errors.storeTo(container);
  if (e.isFatal() && token == null) {
    segment.fail(e, container, context, false);
  } else if (e.isFatal() || forceFatal) {
    segment.onFatalFailure(
        e, ((SplitFileFetcherSegmentSendableRequestItem) token).blockNum, container, context);
  } else {
    segment.onNonFatalFailure(
        e, ((SplitFileFetcherSegmentSendableRequestItem) token).blockNum, container, context);
  }
  if (persistent) {
    container.deactivate(segment, 1);
    container.deactivate(parent, 1);
    container.deactivate(segment.errors, 1);
  }
}
public String toString(boolean prefix, boolean pureAscii) {
  if (keyType == null) {
    // Not activated or something...
    if (logMINOR)
      Logger.minor(this, "Not activated?? in toString(" + prefix + "," + pureAscii + ")");
    return null;
  }
  StringBuilder b;
  if (prefix) b = new StringBuilder("freenet:");
  else b = new StringBuilder();
  b.append(keyType).append('@');

  if (!"KSK".equals(keyType)) {
    if (routingKey != null) b.append(Base64.encode(routingKey));
    if (cryptoKey != null) b.append(',').append(Base64.encode(cryptoKey));
    if (extra != null) b.append(',').append(Base64.encode(extra));
    if (docName != null) b.append('/');
  }

  if (docName != null) b.append(URLEncoder.encode(docName, "/", pureAscii));
  if (keyType.equals("USK")) {
    b.append('/');
    b.append(suggestedEdition);
  }

  if (metaStr != null)
    for (int i = 0; i < metaStr.length; i++) {
      b.append('/').append(URLEncoder.encode(metaStr[i], "/", pureAscii));
    }
  return b.toString();
}
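/*
 * Illustrative output of the assembly above (the key material is a placeholder, not a real
 * key). The overall layout is:
 *
 *   keyType '@' routingKey ',' cryptoKey ',' extra '/' docName [ '/' edition ] ( '/' metaString )*
 *
 * so, for a USK:
 *
 *   toString(true, false)  -> "freenet:USK@ROUTING...,CRYPTO...,AQACAAE/mysite/5/activelink.png"
 *   toString(false, false) ->         "USK@ROUTING...,CRYPTO...,AQACAAE/mysite/5/activelink.png"
 */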
@Override
public void run() {
  try {
    freenet.support.Logger.OSThread.logPID(this);
    // FIXME ? key is not known for inserts here
    if (key != null) stats.reportOutgoingLocalRequestLocation(key.toNormalizedDouble());
    if (!req.send(core, sched)) {
      if (!((!req.isPersistent()) && req.isCancelled()))
        Logger.error(this, "run() not able to send a request on " + req);
      else
        Logger.normal(
            this, "run() not able to send a request on " + req + " - request was cancelled");
    }
    if (logMINOR) Logger.minor(this, "Finished " + req);
  } finally {
    if (req.sendIsBlocking()) {
      if (key != null) sched.removeFetchingKey(key);
      else if ((!req.isPersistent())
          && ((TransientChosenBlock) req).request instanceof SendableInsert)
        sched.removeTransientInsertFetching(
            (SendableInsert) (((TransientChosenBlock) req).request), req.token);
      // Something might be waiting for a request to complete (e.g. if we have two requests
      // for the same key), so wake the starter thread.
      wakeUp();
    }
  }
}
public static FreenetURI readFullBinaryKeyWithLength(DataInputStream dis) throws IOException {
  int len = dis.readShort();
  byte[] buf = new byte[len];
  dis.readFully(buf);
  if (logMINOR) Logger.minor(FreenetURI.class, "Read " + len + " bytes for key");
  return fromFullBinaryKey(buf);
}
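/*
 * A hedged sketch of the matching writer for the length-prefixed format read above: a
 * 16-bit big-endian length (DataOutputStream.writeShort) followed by the raw key bytes.
 * The method name and its existence are assumptions for illustration; only the wire
 * layout is taken from the reader.
 */
public static void writeFullBinaryKeyWithLengthSketch(byte[] keyBytes, DataOutputStream dos)
    throws IOException {
  if (keyBytes.length > Short.MAX_VALUE)
    throw new IOException("Key too long for a short length prefix: " + keyBytes.length);
  dos.writeShort(keyBytes.length);
  dos.write(keyBytes);
}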