/** Send a search to the given peer */
protected void sendSearch(RouterInfo router) {
    if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) {
        // don't search ourselves
        if (_log.shouldLog(Log.ERROR))
            _log.error(getJobId() + ": Don't send search to ourselves - why did we try?");
        return;
    } else {
        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": Send search to " + router.getIdentity().getHash().toBase64()
                      + " for " + _state.getTarget().toBase64()
                      + " w/ timeout " + getPerPeerTimeoutMs(router.getIdentity().calculateHash()));
    }

    getContext().statManager().addRateData("netDb.searchMessageCount", 1, 0);

    // if (_isLease || true) // always send searches out tunnels
    sendLeaseSearch(router);
    // else
    //     sendRouterSearch(router);
}
public void runJob() {
    RouterInfo updated = lookupRouterInfoLocally(_peer);
    if ((updated != null) && (updated.getPublished() > _info.getPublished())) {
        // great, a legitimate update
    } else {
        // they just sent us what we already had. kill 'em both
        dropAfterLookupFailed(_peer);
    }
}
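/**
 * Illustrative sketch (not part of the original source): the freshness test used in
 * runJob() above, pulled out for clarity. A reply only counts as a legitimate update
 * when its publish date is strictly newer than the entry we already hold; anything
 * equal or older triggers the drop. The method name and placement are assumptions.
 */
private static boolean isLegitimateUpdate(RouterInfo updated, RouterInfo existing) {
    // strictly-newer comparison: a resend of the same old data does not save the peer
    return (updated != null) && (updated.getPublished() > existing.getPublished());
}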
/**
 * Allow the choice as to whether failed searches should count against
 * the peer (such as if we search for a random key)
 */
public FailedJob(RouterContext enclosingContext, RouterInfo peer, boolean penalizePeer) {
    super(enclosingContext);
    _penalizePeer = penalizePeer;
    _peer = peer.getIdentity().getHash();
    _sentOn = enclosingContext.clock().now();
    _isFloodfill = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
}
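/**
 * Illustrative caller sketch (an assumption, not original source): when probing with a
 * random key that almost certainly has no netDb entry, pass penalizePeer = false so the
 * inevitable miss does not count against the peer's profile. The method name is
 * hypothetical; the key construction uses the standard net.i2p.data.Hash API.
 */
private void startRandomKeyProbe(RouterContext ctx, RouterInfo peer) {
    // build a random key that almost certainly has no netDb entry
    byte[] keyBytes = new byte[Hash.HASH_LENGTH];
    ctx.random().nextBytes(keyBytes);
    Hash randomKey = new Hash(keyBytes);
    // false: the expected miss should not count against the peer's profile
    Job onTimeout = new FailedJob(ctx, peer, false);
    // ... hand randomKey and onTimeout to the search machinery ...
}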
@Override
public String toString() {
    StringBuilder buf = new StringBuilder(256);
    buf.append("[OutNetMessage containing ");
    if (_message == null) {
        buf.append("*no message*");
    } else {
        buf.append("a ").append(_messageSize).append(" byte ");
        buf.append(getMessageType());
    }
    buf.append(" expiring on ").append(new Date(_expiration));
    if (_failedTransports != null)
        buf.append(" failed delivery on transports ").append(_failedTransports);
    if (_target == null)
        buf.append(" targeting no one in particular...");
    else
        buf.append(" targeting ").append(_target.getIdentity().getHash().toBase64());
    if (_onReply != null)
        buf.append(" with onReply job: ").append(_onReply);
    if (_onSend != null)
        buf.append(" with onSend job: ").append(_onSend);
    if (_onFailedReply != null)
        buf.append(" with onFailedReply job: ").append(_onFailedReply);
    if (_onFailedSend != null)
        buf.append(" with onFailedSend job: ").append(_onFailedSend);
    if (_timestamps != null && _timestampOrder != null && _log.shouldLog(Log.INFO)) {
        buf.append(" {timestamps: \n");
        renderTimestamps(buf);
        buf.append("}");
    }
    buf.append("]");
    return buf.toString();
}
/**
 * We're (probably) searching for a LeaseSet, so to be (overly) cautious, we're sending the
 * request out through a tunnel w/ reply back through another tunnel.
 */
protected void sendLeaseSearch(RouterInfo router) {
    Hash to = router.getIdentity().getHash();
    TunnelInfo inTunnel = getContext().tunnelManager().selectInboundExploratoryTunnel(to);
    if (inTunnel == null) {
        _log.warn("No tunnels to get search replies through!");
        getContext().jobQueue().addJob(new FailedJob(getContext(), router));
        return;
    }
    TunnelId inTunnelId = inTunnel.getReceiveTunnelId(0);

    // this will fail if we've shitlisted our inbound gateway, but the gw may not necessarily
    // be shitlisted by whoever needs to contact them, so we don't need to check this
    // RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
    // if (inGateway == null) {
    //     _log.error("We can't find the gateway to our inbound tunnel?!");
    //     getContext().jobQueue().addJob(new FailedJob(getContext(), router));
    //     return;
    // }

    int timeout = getPerPeerTimeoutMs(to);
    long expiration = getContext().clock().now() + timeout;

    DatabaseLookupMessage msg = buildMessage(inTunnelId, inTunnel.getPeer(0), expiration);

    TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to);
    if (outTunnel == null) {
        _log.warn("No tunnels to send search out through!");
        getContext().jobQueue().addJob(new FailedJob(getContext(), router));
        return;
    }
    TunnelId outTunnelId = outTunnel.getSendTunnelId(0);

    if (_log.shouldLog(Log.DEBUG))
        _log.debug(getJobId() + ": Sending search to " + to
                   + " for " + msg.getSearchKey().toBase64()
                   + " w/ replies through [" + msg.getFrom().toBase64()
                   + "] via tunnel [" + msg.getReplyTunnel() + "]");

    SearchMessageSelector sel = new SearchMessageSelector(getContext(), router, _expiration, _state);
    SearchUpdateReplyFoundJob reply =
        new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade, this, outTunnel, inTunnel);
    if (FloodfillNetworkDatabaseFacade.isFloodfill(router))
        _floodfillSearchesOutstanding++;
    getContext().messageRegistry().registerPending(sel, reply, new FailedJob(getContext(), router), timeout);
    getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnelId, to);
}
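/**
 * A minimal sketch of the register-then-dispatch pattern used above (the variable names
 * here are illustrative): the reply selector and its jobs must be registered with the
 * message registry *before* the message is dispatched, otherwise a fast reply could
 * arrive before the registry knows what to match it against.
 */
// 1. register: who matches the reply, what runs on success, what runs on timeout
getContext().messageRegistry().registerPending(sel, onReply, onTimeout, timeoutMs);
// 2. only then hand the message to the outbound tunnel
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnelId, to);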
/**
 * Send our RI to the closest floodfill.
 *
 * @throws IllegalArgumentException if the local router info is invalid
 */
@Override
public void publish(RouterInfo localRouterInfo) throws IllegalArgumentException {
    if (localRouterInfo == null)
        throw new IllegalArgumentException("null localRouterInfo?");
    // should this be after super? why not publish locally?
    if (_context.router().isHidden())
        return; // DE-nied!
    super.publish(localRouterInfo);
    // wait until we've read in the RI's so we can find the closest floodfill
    if (!isInitialized())
        return;
    // no use sending if we have no addresses
    // (unless maybe we used to have addresses? not worth it)
    if (localRouterInfo.getAddresses().isEmpty())
        return;
    _log.info("Publishing our RI");
    // Don't delay, helps IB tunnel builds
    // if (_context.router().getUptime() > PUBLISH_JOB_DELAY)
    sendStore(localRouterInfo.getIdentity().calculateHash(), localRouterInfo, null, null,
              PUBLISH_TIMEOUT, null);
}
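/**
 * Illustrative caller sketch (an assumption, not original source): republishing our own
 * RouterInfo through the facade, e.g. after our address list changes. Per the javadoc
 * above, an invalid RouterInfo surfaces as an IllegalArgumentException. `facade`, `ctx`,
 * and `log` are assumed to be in scope.
 */
try {
    facade.publish(ctx.router().getRouterInfo());
} catch (IllegalArgumentException iae) {
    // our own RouterInfo failed validation; log and investigate rather than retry blindly
    log.error("Local RouterInfo rejected for publication", iae);
}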
/**
 * Resend the leaseSet to the peer who had previously failed to provide us with the data
 * when we asked them.
 */
private boolean resend(RouterInfo toPeer, LeaseSet ls) {
    Hash to = toPeer.getIdentity().getHash();
    DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
    msg.setEntry(ls);
    msg.setMessageExpiration(getContext().clock().now() + RESEND_TIMEOUT);

    TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to);
    if (outTunnel != null) {
        TunnelId targetTunnelId = null; // not needed
        Job onSend = null; // not wanted
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("resending leaseSet out to " + to + " through " + outTunnel + ": " + msg);
        getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), null, to);
        return true;
    } else {
        if (_log.shouldLog(Log.WARN))
            _log.warn("unable to resend a leaseSet - no outbound exploratory tunnels!");
        return false;
    }
}
/**
 * Search for a newer router info, drop it from the db if the search fails, unless just
 * started up or have bigger problems.
 */
@Override
protected void lookupBeforeDropping(Hash peer, RouterInfo info) {
    // following are some special situations, we don't want to
    // drop the peer in these cases
    // yikes don't do this - stack overflow
    //     getFloodfillPeers().size() == 0 ||
    // yikes2 don't do this either - deadlock!
    //     getKnownRouters() < MIN_REMAINING_ROUTERS ||
    if (info.getNetworkId() == Router.NETWORK_ID
            && (getKBucketSetSize() < MIN_REMAINING_ROUTERS
                || _context.router().getUptime() < DONT_FAIL_PERIOD
                || _context.commSystem().countActivePeers() <= MIN_ACTIVE_PEERS)) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Not failing " + peer.toBase64() + " as we are just starting up or have problems");
        return;
    }

    // should we skip the search?
    if (_floodfillEnabled
            || _context.jobQueue().getMaxLag() > 500
            || getKBucketSetSize() > MAX_DB_BEFORE_SKIPPING_SEARCH) {
        // don't try to overload ourselves (e.g. failing 3000 router refs at
        // once, and then firing off 3000 netDb lookup tasks)
        // Also don't queue a search if we have plenty of routerinfos
        // (KBucketSetSize() includes leasesets but avoids locking)
        super.lookupBeforeDropping(peer, info);
        return;
    }

    // this sends out the search to the floodfill peers even if we already have the
    // entry locally, firing no job if it gets a reply with an updated value (meaning
    // we shouldn't drop them but instead use the new data), or if they all time out,
    // firing the dropLookupFailedJob, which actually removes our local reference
    search(peer,
           new DropLookupFoundJob(_context, peer, info),
           new DropLookupFailedJob(_context, peer, info),
           10 * 1000,
           false);
}
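/**
 * The same verification search as above, annotated for clarity (a restatement, not new
 * behavior; the meaning of the final boolean is an assumption based on the lease/router
 * distinction elsewhere in this code):
 */
search(peer,
       new DropLookupFoundJob(_context, peer, info),  // newer data arrived: keep the peer
       new DropLookupFailedJob(_context, peer, info), // every query timed out: drop our reference
       10 * 1000,                                     // 10 second overall timeout
       false);                                        // presumably: not a LeaseSet lookup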
/** @param peer may be null, returns false if null */
public static boolean isFloodfill(RouterInfo peer) {
    if (peer == null)
        return false;
    String caps = peer.getCapabilities();
    return caps.indexOf(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL) >= 0;
}
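/**
 * Illustrative usage sketch (not original source): because isFloodfill() is null-safe,
 * callers can filter candidate lists without a separate null check. Method name is
 * hypothetical; assumes the usual java.util collection imports.
 */
private static List filterFloodfills(Collection candidates) {
    List ffs = new ArrayList();
    for (Iterator iter = candidates.iterator(); iter.hasNext(); ) {
        RouterInfo ri = (RouterInfo) iter.next();
        if (FloodfillNetworkDatabaseFacade.isFloodfill(ri)) // safe even if ri is null
            ffs.add(ri);
    }
    return ffs;
}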
/**
 * Send a series of searches to the next available peers as selected by the routing table,
 * but making sure no more than SEARCH_BREDTH are outstanding at any time
 */
protected void continueSearch() {
    if (_state.completed()) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": Search already completed", new Exception("already completed"));
        return;
    }
    int toCheck = getBredth() - _state.getPending().size();
    if (toCheck <= 0) {
        // too many already pending
        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": Too many searches already pending (pending: "
                      + _state.getPending().size() + " max: " + getBredth() + ")");
        requeuePending();
        return;
    }
    int sent = 0;
    Set attempted = _state.getAttempted();
    while (sent <= 0) {
        // boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext());
        boolean onlyFloodfill = true;
        if (_floodfillPeersExhausted && onlyFloodfill && _state.getPending().isEmpty()) {
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": no non-floodfill peers left, and no more pending. Searched: "
                          + _state.getAttempted().size() + " failed: " + _state.getFailed().size());
            fail();
            return;
        }
        List closestHashes = getClosestRouters(_state.getTarget(), toCheck, attempted);
        if ((closestHashes == null) || (closestHashes.isEmpty())) {
            if (_state.getPending().isEmpty()) {
                // we tried to find some peers, but there weren't any and no one else is going to answer
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": No peers left, and none pending! Already searched: "
                              + _state.getAttempted().size() + " failed: " + _state.getFailed().size());
                fail();
            } else {
                // no more to try, but we might get data or close peers from some outstanding requests
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": No peers left, but some are pending! Pending: "
                              + _state.getPending().size() + " attempted: " + _state.getAttempted().size()
                              + " failed: " + _state.getFailed().size());
                requeuePending();
            }
            return;
        } else {
            attempted.addAll(closestHashes);
            for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) {
                Hash peer = (Hash) iter.next();
                DatabaseEntry ds = _facade.getDataStore().get(peer);
                if (ds == null) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Next closest peer " + peer
                                  + " was only recently referred to us, sending a search for them");
                    getContext().netDb().lookupRouterInfo(peer, null, null, _timeoutMs);
                } else if (ds.getType() != DatabaseEntry.KEY_TYPE_ROUTERINFO) {
                    if (_log.shouldLog(Log.WARN))
                        _log.warn(getJobId() + ": Error selecting closest hash that wasn't a router! "
                                  + peer + " : " + ds.getClass().getName());
                    _state.replyTimeout(peer);
                } else {
                    RouterInfo ri = (RouterInfo) ds;
                    if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
                        _floodfillPeersExhausted = true;
                        if (onlyFloodfill)
                            continue;
                    }
                    if (ri.isHidden()) {
                        // || // allow querying shitlisted, since it's indirect
                        // getContext().shitlist().isShitlisted(peer)) {
                        // don't bother
                    } else {
                        _state.addPending(peer);
                        sendSearch(ri);
                        sent++;
                    }
                }
            }
            /*
            if (sent <= 0) {
                // the (potentially) last peers being searched for could not be,
                // er, searched for, so let's retry ASAP (causing either another
                // peer to be selected, or the whole search to fail)
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": No new peer queued up, so we are going to requeue "
                              + "ourselves in our search for " + _state.getTarget().toBase64());
                requeuePending(0);
            }
            */
        }
    }
}
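/**
 * Worked example of the breadth arithmetic at the top of continueSearch() (the numbers
 * are illustrative, not from the source): with a breadth of 3 and 1 query still pending,
 * at most 2 new searches start this pass; with 3 pending, toCheck is 0 and the job
 * simply requeues itself.
 */
int breadth = 3;                 // getBredth()
int pending = 1;                 // _state.getPending().size()
int toCheck = breadth - pending; // 2: up to two new peers get a search this pass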
/**
 * Convenience method for getting the router hash. Equivalent to
 * context.router().getRouterInfo().getIdentity().getHash()
 *
 * @return may be null if called very early
 */
public Hash routerHash() {
    if (_router == null)
        return null;
    RouterInfo ri = _router.getRouterInfo();
    if (ri == null)
        return null;
    return ri.getIdentity().getHash();
}
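/**
 * Illustrative usage (an assumption, not original source): callers must tolerate the
 * early-startup null rather than assuming a router identity exists. Method name is
 * hypothetical; ctx is a RouterContext in scope.
 */
private void exampleNeedsRouterHash(RouterContext ctx) {
    Hash us = ctx.routerHash();
    if (us == null)
        return; // router identity not established yet; try again later
    // ... safe to use `us` from here on ...
}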