/**
 * Allow the choice as to whether failed searches should count against the peer (such as if we
 * search for a random key).
 *
 * @param enclosingContext router context supplying the job base class and the clock
 * @param peer the router that we queried; only its identity hash is retained
 * @param penalizePeer if true, a timeout counts against the peer's profile
 */
public FailedJob(RouterContext enclosingContext, RouterInfo peer, boolean penalizePeer) {
    super(enclosingContext);
    // Keep only the hash - the full RouterInfo is not needed after construction.
    _peer = peer.getIdentity().getHash();
    _isFloodfill = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
    _penalizePeer = penalizePeer;
    // Remember when the query went out so the failure latency can be measured later.
    _sentOn = enclosingContext.clock().now();
}
/** * we're (probably) searching for a LeaseSet, so to be (overly) cautious, we're sending the * request out through a tunnel w/ reply back through another tunnel. */ protected void sendLeaseSearch(RouterInfo router) { Hash to = router.getIdentity().getHash(); TunnelInfo inTunnel = getContext().tunnelManager().selectInboundExploratoryTunnel(to); if (inTunnel == null) { _log.warn("No tunnels to get search replies through! wtf!"); getContext().jobQueue().addJob(new FailedJob(getContext(), router)); return; } TunnelId inTunnelId = inTunnel.getReceiveTunnelId(0); // this will fail if we've shitlisted our inbound gateway, but the gw may not necessarily // be shitlisted by whomever needs to contact them, so we don't need to check this // RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0)); // if (inGateway == null) { // _log.error("We can't find the gateway to our inbound tunnel?! wtf"); // getContext().jobQueue().addJob(new FailedJob(getContext(), router)); // return; // } int timeout = getPerPeerTimeoutMs(to); long expiration = getContext().clock().now() + timeout; DatabaseLookupMessage msg = buildMessage(inTunnelId, inTunnel.getPeer(0), expiration); TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to); if (outTunnel == null) { _log.warn("No tunnels to send search out through! 
wtf!"); getContext().jobQueue().addJob(new FailedJob(getContext(), router)); return; } TunnelId outTunnelId = outTunnel.getSendTunnelId(0); if (_log.shouldLog(Log.DEBUG)) _log.debug( getJobId() + ": Sending search to " + to + " for " + msg.getSearchKey().toBase64() + " w/ replies through [" + msg.getFrom().toBase64() + "] via tunnel [" + msg.getReplyTunnel() + "]"); SearchMessageSelector sel = new SearchMessageSelector(getContext(), router, _expiration, _state); SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob( getContext(), router, _state, _facade, this, outTunnel, inTunnel); if (FloodfillNetworkDatabaseFacade.isFloodfill(router)) _floodfillSearchesOutstanding++; getContext() .messageRegistry() .registerPending(sel, reply, new FailedJob(getContext(), router), timeout); getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnelId, to); }
/** * Send a series of searches to the next available peers as selected by the routing table, but * making sure no more than SEARCH_BREDTH are outstanding at any time */ protected void continueSearch() { if (_state.completed()) { if (_log.shouldLog(Log.DEBUG)) _log.debug(getJobId() + ": Search already completed", new Exception("already completed")); return; } int toCheck = getBredth() - _state.getPending().size(); if (toCheck <= 0) { // too many already pending if (_log.shouldLog(Log.INFO)) _log.info( getJobId() + ": Too many searches already pending (pending: " + _state.getPending().size() + " max: " + getBredth() + ")"); requeuePending(); return; } int sent = 0; Set attempted = _state.getAttempted(); while (sent <= 0) { // boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext()); boolean onlyFloodfill = true; if (_floodfillPeersExhausted && onlyFloodfill && _state.getPending().isEmpty()) { if (_log.shouldLog(Log.WARN)) _log.warn( getJobId() + ": no non-floodfill peers left, and no more pending. Searched: " + _state.getAttempted().size() + " failed: " + _state.getFailed().size()); fail(); return; } List closestHashes = getClosestRouters(_state.getTarget(), toCheck, attempted); if ((closestHashes == null) || (closestHashes.isEmpty())) { if (_state.getPending().isEmpty()) { // we tried to find some peers, but there weren't any and no one else is going to answer if (_log.shouldLog(Log.INFO)) _log.info( getJobId() + ": No peers left, and none pending! Already searched: " + _state.getAttempted().size() + " failed: " + _state.getFailed().size()); fail(); } else { // no more to try, but we might get data or close peers from some outstanding requests if (_log.shouldLog(Log.INFO)) _log.info( getJobId() + ": No peers left, but some are pending! 
Pending: " + _state.getPending().size() + " attempted: " + _state.getAttempted().size() + " failed: " + _state.getFailed().size()); requeuePending(); } return; } else { attempted.addAll(closestHashes); for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) { Hash peer = (Hash) iter.next(); DatabaseEntry ds = _facade.getDataStore().get(peer); if (ds == null) { if (_log.shouldLog(Log.INFO)) _log.info( "Next closest peer " + peer + " was only recently referred to us, sending a search for them"); getContext().netDb().lookupRouterInfo(peer, null, null, _timeoutMs); } else if (!(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) { if (_log.shouldLog(Log.WARN)) _log.warn( getJobId() + ": Error selecting closest hash that wasnt a router! " + peer + " : " + ds.getClass().getName()); _state.replyTimeout(peer); } else { RouterInfo ri = (RouterInfo) ds; if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) { _floodfillPeersExhausted = true; if (onlyFloodfill) continue; } if (ri.isHidden()) { // || // allow querying shitlisted, since its indirect // getContext().shitlist().isShitlisted(peer)) { // dont bother } else { _state.addPending(peer); sendSearch((RouterInfo) ds); sent++; } } } /* if (sent <= 0) { // the (potentially) last peers being searched for could not be, // er, searched for, so lets retry ASAP (causing either another // peer to be selected, or the whole search to fail) if (_log.shouldLog(Log.INFO)) _log.info(getJobId() + ": No new peer queued up, so we are going to requeue " + "ourselves in our search for " + _state.getTarget().toBase64()); requeuePending(0); } */ } } }