/** Search totally failed */ protected void fail() { if (isLocal()) { if (_log.shouldLog(Log.ERROR)) _log.error( getJobId() + ": why did we fail if the target is local?: " + _state.getTarget().toBase64(), new Exception("failure cause")); succeed(); return; } if (_log.shouldLog(Log.INFO)) _log.info(getJobId() + ": Failed search for key " + _state.getTarget()); if (_log.shouldLog(Log.DEBUG)) _log.debug(getJobId() + ": State of failed search: " + _state); long time = getContext().clock().now() - _state.getWhenStarted(); int attempted = _state.getAttempted().size(); getContext().statManager().addRateData("netDb.failedAttemptedPeers", attempted, time); if (_keepStats) { getContext().statManager().addRateData("netDb.failedTime", time, 0); // _facade.fail(_state.getTarget()); } if (_onFailure != null) getContext().jobQueue().addJob(_onFailure); _facade.searchComplete(_state.getTarget()); handleDeferred(false); }
public int addDeferred(Job onFind, Job onFail, long expiration, boolean isLease) { Search search = new Search(onFind, onFail, expiration, isLease); boolean ok = true; int deferred = 0; synchronized (_deferredSearches) { if (_deferredCleared) ok = false; else _deferredSearches.add(search); deferred = _deferredSearches.size(); } if (!ok) { // race between adding deferred and search completing if (_log.shouldLog(Log.WARN)) _log.warn( "Race deferred before searchCompleting? our onFind=" + _onSuccess + " new one: " + onFind); // the following /shouldn't/ be necessary, but it doesnt hurt _facade.searchComplete(_state.getTarget()); _facade.search( _state.getTarget(), onFind, onFail, expiration - getContext().clock().now(), isLease); return 0; } else { return deferred; } }
/**
 * Handle total search success: log it, record stats, fire the success
 * callback, notify the facade and deferred searchers, then rebroadcast the
 * found data to heal the network.
 */
private void succeed() {
  if (_log.shouldLog(Log.INFO))
    _log.info(
        getJobId()
            + ": Succeeded search for key "
            + _state.getTarget()
            + " after querying "
            + _state.getAttempted().size());
  if (_log.shouldLog(Log.DEBUG))
    _log.debug(getJobId() + ": State of successful search: " + _state);

  if (_keepStats) {
    long searchTime = getContext().clock().now() - _state.getWhenStarted();
    getContext().statManager().addRateData("netDb.successTime", searchTime, 0);
    getContext()
        .statManager()
        .addRateData("netDb.successPeers", _state.getAttempted().size(), searchTime);
  }

  if (_onSuccess != null) getContext().jobQueue().addJob(_onSuccess);

  _facade.searchComplete(_state.getTarget());
  handleDeferred(true);
  resend();
}
/** Send a search to the given peer */ protected void sendSearch(RouterInfo router) { if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) { // don't search ourselves if (_log.shouldLog(Log.ERROR)) _log.error(getJobId() + ": Dont send search to ourselves - why did we try?"); return; } else { if (_log.shouldLog(Log.INFO)) _log.info( getJobId() + ": Send search to " + router.getIdentity().getHash().toBase64() + " for " + _state.getTarget().toBase64() + " w/ timeout " + getPerPeerTimeoutMs(router.getIdentity().calculateHash())); } getContext().statManager().addRateData("netDb.searchMessageCount", 1, 0); // if (_isLease || true) // always send searches out tunnels sendLeaseSearch(router); // else // sendRouterSearch(router); }
/** * Build the database search message * * @param replyTunnelId tunnel to receive replies through * @param replyGateway gateway for the reply tunnel * @param expiration when the search should stop */ protected DatabaseLookupMessage buildMessage( TunnelId replyTunnelId, Hash replyGateway, long expiration) { DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true); msg.setSearchKey(_state.getTarget()); // msg.setFrom(replyGateway.getIdentity().getHash()); msg.setFrom(replyGateway); msg.setDontIncludePeers(_state.getClosestAttempted(MAX_CLOSEST)); msg.setMessageExpiration(expiration); msg.setReplyTunnel(replyTunnelId); return msg; }
/** * After we get the data we were searching for, rebroadcast it to the peers we would query first * if we were to search for it again (healing the network). */ private void resend() { DatabaseEntry ds = _facade.lookupLeaseSetLocally(_state.getTarget()); if (ds == null) { if (SHOULD_RESEND_ROUTERINFO) { ds = _facade.lookupRouterInfoLocally(_state.getTarget()); if (ds != null) _facade.sendStore( _state.getTarget(), ds, null, null, RESEND_TIMEOUT, _state.getSuccessful()); } } else { Set sendTo = _state.getRepliedPeers(); // _state.getFailed(); sendTo.addAll(_state.getPending()); int numSent = 0; for (Iterator iter = sendTo.iterator(); iter.hasNext(); ) { Hash peer = (Hash) iter.next(); RouterInfo peerInfo = _facade.lookupRouterInfoLocally(peer); if (peerInfo == null) continue; if (resend(peerInfo, (LeaseSet) ds)) numSent++; if (numSent >= MAX_LEASE_RESEND) break; } getContext().statManager().addRateData("netDb.republishQuantity", numSent, numSent); } }
/**
 * Send a series of searches to the next available peers as selected by the routing table, but
 * making sure no more than SEARCH_BREDTH are outstanding at any time.
 *
 * <p>Loops until at least one search is actually sent, the search fails (no peers left and none
 * pending), or we bail out to wait on pending replies. Side effects: marks selected peers as
 * attempted/pending in _state, may set _floodfillPeersExhausted, and may trigger lookups for
 * peers we only know by hash.
 */
protected void continueSearch() {
  // nothing to do if some earlier reply already finished the search
  if (_state.completed()) {
    if (_log.shouldLog(Log.DEBUG))
      _log.debug(getJobId() + ": Search already completed", new Exception("already completed"));
    return;
  }
  // how many more queries we may have in flight at once
  int toCheck = getBredth() - _state.getPending().size();
  if (toCheck <= 0) {
    // too many already pending
    if (_log.shouldLog(Log.INFO))
      _log.info(
          getJobId()
              + ": Too many searches already pending (pending: "
              + _state.getPending().size()
              + " max: "
              + getBredth()
              + ")");
    requeuePending();
    return;
  }
  int sent = 0;
  Set attempted = _state.getAttempted();
  // keep selecting candidates until we actually send at least one query
  while (sent <= 0) {
    // boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext());
    boolean onlyFloodfill = true;
    // all remaining candidates are non-floodfill and nothing is pending: give up
    if (_floodfillPeersExhausted && onlyFloodfill && _state.getPending().isEmpty()) {
      if (_log.shouldLog(Log.WARN))
        _log.warn(
            getJobId()
                + ": no non-floodfill peers left, and no more pending. Searched: "
                + _state.getAttempted().size()
                + " failed: "
                + _state.getFailed().size());
      fail();
      return;
    }
    List closestHashes = getClosestRouters(_state.getTarget(), toCheck, attempted);
    if ((closestHashes == null) || (closestHashes.isEmpty())) {
      if (_state.getPending().isEmpty()) {
        // we tried to find some peers, but there weren't any and no one else is going to answer
        if (_log.shouldLog(Log.INFO))
          _log.info(
              getJobId()
                  + ": No peers left, and none pending! Already searched: "
                  + _state.getAttempted().size()
                  + " failed: "
                  + _state.getFailed().size());
        fail();
      } else {
        // no more to try, but we might get data or close peers from some outstanding requests
        if (_log.shouldLog(Log.INFO))
          _log.info(
              getJobId()
                  + ": No peers left, but some are pending! Pending: "
                  + _state.getPending().size()
                  + " attempted: "
                  + _state.getAttempted().size()
                  + " failed: "
                  + _state.getFailed().size());
        requeuePending();
      }
      return;
    } else {
      // mark all candidates as attempted up front so we never select them again
      attempted.addAll(closestHashes);
      for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) {
        Hash peer = (Hash) iter.next();
        DatabaseEntry ds = _facade.getDataStore().get(peer);
        if (ds == null) {
          // we only know this peer's hash - look up its router info first
          if (_log.shouldLog(Log.INFO))
            _log.info(
                "Next closest peer "
                    + peer
                    + " was only recently referred to us, sending a search for them");
          getContext().netDb().lookupRouterInfo(peer, null, null, _timeoutMs);
        } else if (!(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
          // the data store handed us something that isn't a router info - drop the candidate
          if (_log.shouldLog(Log.WARN))
            _log.warn(
                getJobId()
                    + ": Error selecting closest hash that wasnt a router! "
                    + peer
                    + " : "
                    + ds.getClass().getName());
          _state.replyTimeout(peer);
        } else {
          RouterInfo ri = (RouterInfo) ds;
          if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
            // remember we've hit non-floodfill territory; skip the peer when floodfill-only
            _floodfillPeersExhausted = true;
            if (onlyFloodfill) continue;
          }
          if (ri.isHidden()) {
            // || // allow querying shitlisted, since its indirect
            // getContext().shitlist().isShitlisted(peer)) {
            // dont bother
          } else {
            _state.addPending(peer);
            sendSearch((RouterInfo) ds);
            sent++;
          }
        }
      }
      /*
      if (sent <= 0) {
          // the (potentially) last peers being searched for could not be,
          // er, searched for, so lets retry ASAP (causing either another
          // peer to be selected, or the whole search to fail)
          if (_log.shouldLog(Log.INFO))
              _log.info(getJobId() + ": No new peer queued up, so we are going to requeue " +
                        "ourselves in our search for " + _state.getTarget().toBase64());
          requeuePending(0);
      }
      */
    }
  }
}
/**
 * @return true if the target's data is already present in the local data store
 */
private boolean isLocal() {
  Hash target = _state.getTarget();
  return _facade.getDataStore().isKnown(target);
}
public void runJob() { if (_startedOn <= 0) _startedOn = getContext().clock().now(); if (_log.shouldLog(Log.INFO)) _log.info(getJobId() + ": Searching for " + _state.getTarget()); // , getAddedBy()); searchNext(); }