public List<RouterInfo> getKnownRouterData() {
    List<RouterInfo> rv = new ArrayList<RouterInfo>();
    DataStore ds = getDataStore();
    if (ds != null) {
        for (DatabaseEntry o : ds.getEntries()) {
            if (o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
                rv.add((RouterInfo) o);
        }
    }
    return rv;
}
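/*
 * Illustrative usage sketch, not part of the original source: callers get a
 * point-in-time snapshot from getKnownRouterData(), e.g. to count how many
 * known routers advertise the floodfill capability. isFloodfill() is the
 * static helper this facade already exposes (see continueSearch() below);
 * the method name countKnownFloodfills is invented for the example.
 */
private int countKnownFloodfills() {
    int ff = 0;
    for (RouterInfo ri : getKnownRouterData()) {
        if (FloodfillNetworkDatabaseFacade.isFloodfill(ri))
            ff++;
    }
    return ff;
}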
/**
 * Send to a subset of all floodfill peers. We do this to implement Kademlia
 * within the floodfills, i.e. we flood to those closest to the key.
 */
public void flood(DatabaseEntry ds) {
    Hash key = ds.getHash();
    Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
    FloodfillPeerSelector sel = (FloodfillPeerSelector) getPeerSelector();
    List<Hash> peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
    int flooded = 0;
    for (int i = 0; i < peers.size(); i++) {
        Hash peer = peers.get(i);
        RouterInfo target = lookupRouterInfoLocally(peer);
        if ((target == null) || (_context.banlist().isBanlisted(peer)))
            continue;
        // Don't flood a RI back to itself
        // Not necessary, a ff will do its own flooding (reply token == 0)
        // if (peer.equals(target.getIdentity().getHash()))
        //     continue;
        if (peer.equals(_context.routerHash()))
            continue;
        DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
        msg.setEntry(ds);
        OutNetMessage m = new OutNetMessage(_context, msg,
                                            _context.clock().now() + FLOOD_TIMEOUT,
                                            FLOOD_PRIORITY, target);
        // note send failure but don't give credit on success
        // might need to change this
        Job floodFail = new FloodFailedJob(_context, peer);
        m.setOnFailedSendJob(floodFail);
        _context.commSystem().processMessage(m);
        flooded++;
        if (_log.shouldLog(Log.INFO))
            _log.info("Flooding the entry for " + key.toBase64() + " to " + peer.toBase64());
    }
    if (_log.shouldLog(Log.INFO))
        _log.info("Flooded the data to " + flooded + " of " + peers.size() + " peers");
}
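/*
 * Illustrative sketch, not the actual FloodfillPeerSelector: the Kademlia
 * "closest to the key" ordering that selectFloodfillParticipants() relies on
 * is assumed here to be the standard XOR metric over the hash bytes. Plain
 * byte[] arrays stand in for I2P's Hash; java.util imports (List, ArrayList,
 * Collections, Comparator) are assumed, and all hashes are assumed to be the
 * same length as the routing key.
 */
private static final class XorOrderSketch {
    /** Order candidate hashes by XOR distance from the routing key. */
    static Comparator<byte[]> byXorDistanceFrom(final byte[] key) {
        return new Comparator<byte[]>() {
            public int compare(byte[] a, byte[] b) {
                // Compare byte-by-byte; the first differing XOR byte decides.
                for (int i = 0; i < key.length; i++) {
                    int da = (a[i] ^ key[i]) & 0xff;
                    int db = (b[i] ^ key[i]) & 0xff;
                    if (da != db)
                        return da - db;
                }
                return 0;
            }
        };
    }

    /** Return up to 'max' peers closest to 'key' under the XOR metric. */
    static List<byte[]> closest(List<byte[]> peers, byte[] key, int max) {
        List<byte[]> sorted = new ArrayList<byte[]>(peers);
        Collections.sort(sorted, byXorDistanceFrom(key));
        return sorted.subList(0, Math.min(max, sorted.size()));
    }
}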
@Override
public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure,
                      long sendTimeout, Set toIgnore) {
    // if we are a part of the floodfill netDb, don't send out our own leaseSets as part
    // of the flooding - instead, send them to a random floodfill peer so *they* can
    // flood 'em out.
    // perhaps statistically adjust this so we are the source every 1/N times... or something.
    if (floodfillEnabled() && (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
        flood(ds);
        if (onSuccess != null)
            _context.jobQueue().addJob(onSuccess);
    } else {
        _context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds,
                                                         onSuccess, onFailure,
                                                         sendTimeout, toIgnore));
    }
}
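/*
 * Illustrative usage sketch, not in the original source: from a caller's
 * point of view sendStore() is fire-and-forget with optional callback jobs.
 * JobImpl is I2P's convenience base class for Job; the callback bodies, the
 * 60-second timeout, the method name, and passing null for toIgnore are all
 * assumptions made for the example.
 */
private void storeWithLogging(final Hash key, DatabaseEntry entry) {
    Job onSuccess = new JobImpl(_context) {
        public String getName() { return "Store succeeded"; }
        public void runJob() { _log.info("Store of " + key.toBase64() + " succeeded"); }
    };
    Job onFailure = new JobImpl(_context) {
        public String getName() { return "Store failed"; }
        public void runJob() { _log.warn("Store of " + key.toBase64() + " failed"); }
    };
    sendStore(key, entry, onSuccess, onFailure, 60*1000, null);
}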
/**
 * Send a series of searches to the next available peers as selected by the
 * routing table, making sure no more than SEARCH_BREDTH searches are
 * outstanding at any time.
 */
protected void continueSearch() {
    if (_state.completed()) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": Search already completed", new Exception("already completed"));
        return;
    }
    int toCheck = getBredth() - _state.getPending().size();
    if (toCheck <= 0) {
        // too many already pending
        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": Too many searches already pending (pending: "
                      + _state.getPending().size() + " max: " + getBredth() + ")");
        requeuePending();
        return;
    }
    int sent = 0;
    Set attempted = _state.getAttempted();
    while (sent <= 0) {
        // boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext());
        boolean onlyFloodfill = true;
        if (_floodfillPeersExhausted && onlyFloodfill && _state.getPending().isEmpty()) {
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": no non-floodfill peers left, and no more pending. Searched: "
                          + _state.getAttempted().size() + " failed: " + _state.getFailed().size());
            fail();
            return;
        }
        List closestHashes = getClosestRouters(_state.getTarget(), toCheck, attempted);
        if ((closestHashes == null) || (closestHashes.isEmpty())) {
            if (_state.getPending().isEmpty()) {
                // we tried to find some peers, but there weren't any and no one else is going to answer
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": No peers left, and none pending! Already searched: "
                              + _state.getAttempted().size() + " failed: " + _state.getFailed().size());
                fail();
            } else {
                // no more to try, but we might get data or close peers from some outstanding requests
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": No peers left, but some are pending! Pending: "
                              + _state.getPending().size() + " attempted: " + _state.getAttempted().size()
                              + " failed: " + _state.getFailed().size());
                requeuePending();
            }
            return;
        } else {
            attempted.addAll(closestHashes);
            for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) {
                Hash peer = (Hash) iter.next();
                DatabaseEntry ds = _facade.getDataStore().get(peer);
                if (ds == null) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Next closest peer " + peer
                                  + " was only recently referred to us, sending a search for them");
                    getContext().netDb().lookupRouterInfo(peer, null, null, _timeoutMs);
                } else if (!(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
                    if (_log.shouldLog(Log.WARN))
                        _log.warn(getJobId() + ": Error selecting closest hash that wasn't a router! "
                                  + peer + " : " + ds.getClass().getName());
                    _state.replyTimeout(peer);
                } else {
                    RouterInfo ri = (RouterInfo) ds;
                    if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
                        _floodfillPeersExhausted = true;
                        if (onlyFloodfill)
                            continue;
                    }
                    if (ri.isHidden()) { // ||
                        // allow querying shitlisted, since it's indirect
                        // getContext().shitlist().isShitlisted(peer)) {
                        // don't bother
                    } else {
                        _state.addPending(peer);
                        sendSearch((RouterInfo) ds);
                        sent++;
                    }
                }
            }
            /*
            if (sent <= 0) {
                // the (potentially) last peers being searched for could not be,
                // er, searched for, so let's retry ASAP (causing either another
                // peer to be selected, or the whole search to fail)
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": No new peer queued up, so we are going to requeue "
                              + "ourselves in our search for " + _state.getTarget().toBase64());
                requeuePending(0);
            }
            */
        }
    }
}
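/*
 * Illustrative sketch, not part of the original job: the control flow above
 * is a bounded-parallelism loop - keep at most SEARCH_BREDTH queries pending
 * and top the window up as replies or timeouts retire them. The stripped-down
 * dispatcher below shows the same pattern with plain strings standing in for
 * peer hashes; all names are invented, and java.util imports (Queue,
 * ArrayDeque) are assumed.
 */
private static final class BoundedSearchSketch {
    private final int _breadth;
    private int _pending;
    private final Queue<String> _candidates = new ArrayDeque<String>();

    BoundedSearchSketch(int breadth) { _breadth = breadth; }

    /** Queue a candidate peer and launch it if the window has room. */
    synchronized void addCandidate(String peer) {
        _candidates.offer(peer);
        topUp();
    }

    /** Launch queries until the in-flight window is full. */
    private synchronized void topUp() {
        while (_pending < _breadth && !_candidates.isEmpty()) {
            String peer = _candidates.poll();
            _pending++;
            send(peer);
        }
    }

    /** A reply or timeout frees a slot, so top the window back up. */
    synchronized void onComplete() {
        _pending--;
        topUp();
    }

    private void send(String peer) { /* actual query elided */ }
}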