    /**
     *  Send to a subset of all floodfill peers.
     *  We do this to implement Kademlia within the floodfills,
     *  i.e. we flood to those closest to the key.
     */
    public void flood(DatabaseEntry ds) {
        Hash key = ds.getHash();
        Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
        FloodfillPeerSelector sel = (FloodfillPeerSelector) getPeerSelector();
        List<Hash> peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
        int flooded = 0;
        for (int i = 0; i < peers.size(); i++) {
            Hash peer = peers.get(i);
            RouterInfo target = lookupRouterInfoLocally(peer);
            if ((target == null) || (_context.banlist().isBanlisted(peer)))
                continue;
            // Don't flood a RI back to itself
            // Not necessary, a ff will do its own flooding (reply token == 0)
            //if (peer.equals(target.getIdentity().getHash()))
            //    continue;
            if (peer.equals(_context.routerHash()))
                continue;
            DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
            msg.setEntry(ds);
            OutNetMessage m = new OutNetMessage(_context, msg,
                                                _context.clock().now() + FLOOD_TIMEOUT,
                                                FLOOD_PRIORITY, target);
            // note send failure but don't give credit on success
            // might need to change this
            Job floodFail = new FloodFailedJob(_context, peer);
            m.setOnFailedSendJob(floodFail);
            _context.commSystem().processMessage(m);
            flooded++;
            if (_log.shouldLog(Log.INFO))
                _log.info("Flooding the entry for " + key.toBase64() + " to " + peer.toBase64());
        }

        if (_log.shouldLog(Log.INFO))
            _log.info("Flooded the data to " + flooded + " of " + peers.size() + " peers");
    }
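    // --------------------------------------------------------------------
    // Illustrative sketch only, not part of the original class: one way the
    // "closest to the key" selection described above could work in a
    // Kademlia-style metric. Each candidate hash is XORed with the routing
    // key and the numerically smallest distances win. The helper names
    // (kademliaClosest, xorDistance) are hypothetical, and reading Hash bytes
    // via getData() is an assumption here; selectFloodfillParticipants() may
    // be implemented differently.
    private static List<Hash> kademliaClosest(Hash rkey, java.util.Collection<Hash> candidates, int max) {
        final byte[] key = rkey.getData();
        List<Hash> sorted = new java.util.ArrayList<Hash>(candidates);
        java.util.Collections.sort(sorted, new java.util.Comparator<Hash>() {
            public int compare(Hash a, Hash b) {
                // smaller XOR distance to the routing key sorts first
                return xorDistance(key, a.getData()).compareTo(xorDistance(key, b.getData()));
            }
        });
        return sorted.subList(0, Math.min(max, sorted.size()));
    }

    /** XOR of the two equal-length byte arrays, interpreted as an unsigned magnitude. */
    private static java.math.BigInteger xorDistance(byte[] key, byte[] peer) {
        byte[] d = new byte[key.length];
        for (int i = 0; i < key.length; i++)
            d[i] = (byte) (key[i] ^ peer[i]);
        return new java.math.BigInteger(1, d);
    }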
    /**
     *  List of the Hashes of currently known floodfill peers.
     *  Returned list will not include our own hash.
     *  List is not sorted and not shuffled.
     */
    public List<Hash> getFloodfillPeers() {
        FloodfillPeerSelector sel = (FloodfillPeerSelector) getPeerSelector();
        return sel.selectFloodfillParticipants(getKBuckets());
    }
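    // --------------------------------------------------------------------
    // Hypothetical usage sketch, not part of the original class: because the
    // list returned by getFloodfillPeers() is neither sorted nor shuffled, a
    // caller that wants a random known floodfill should shuffle a copy first.
    // The method name pickRandomFloodfill is made up for illustration.
    private Hash pickRandomFloodfill() {
        List<Hash> ffs = new java.util.ArrayList<Hash>(getFloodfillPeers());
        if (ffs.isEmpty())
            return null;
        java.util.Collections.shuffle(ffs);
        return ffs.get(0);
    }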