/**
 * Initialize the message history according to the router's configuration. Call this whenever the
 * router identity changes.
 *
 * @param forceReinitialize no-op unless true
 */
public synchronized void initialize(boolean forceReinitialize) {
    if (!forceReinitialize) return;
    if (_context.router().getRouterInfo() == null) {
        // router identity isn't built yet - retry this init via a job in 15 seconds
        _reinitializeJob.getTiming().setStartAfter(_context.clock().now() + 15 * 1000);
        _context.jobQueue().addJob(_reinitializeJob);
    } else {
        _localIdent = getName(_context.routerHash());
        // _unwrittenEntries = new ArrayList(64);
        updateSettings();
        // clear the history file on startup and schedule the periodic flush job;
        // only done once per process lifetime (_firstPass)
        if (_firstPass) {
            File f = new File(_historyFile);
            // relative paths are resolved against the log directory
            if (!f.isAbsolute()) f = new File(_context.getLogDir(), _historyFile);
            f.delete();
            _writeJob.getTiming().setStartAfter(_context.clock().now() + WRITE_DELAY);
            _context.jobQueue().addJob(_writeJob);
            _firstPass = false;
        }
        if (_doLog)
            addEntry(getPrefix() + "** Router initialized (started up or changed identities)");
        // _submitMessageHistoryJob.getTiming().setStartAfter(_context.clock().now() + 2*60*1000);
        // _context.jobQueue().addJob(_submitMessageHistoryJob);
    }
}
/**
 * Load every persisted peer profile from the profile directory.
 *
 * <p>Files that fail to parse are skipped (readProfile returns null for those).
 *
 * @return the successfully parsed profiles; never null, possibly empty
 */
public Set<PeerProfile> readProfiles() {
    long start = _context.clock().now();
    List<File> files = selectFiles();
    // was a raw HashSet - parameterize to avoid the unchecked-conversion warning
    Set<PeerProfile> profiles = new HashSet<PeerProfile>(files.size());
    for (File f : files) {
        PeerProfile profile = readProfile(f);
        if (profile != null) profiles.add(profile);
    }
    long duration = _context.clock().now() - start;
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Loading " + profiles.size() + " took " + duration + "ms");
    return profiles;
}
/**
 * Enqueue the specified job.
 *
 * <p>A job already in the ready queue is left alone. A job already in the timed queue is
 * removed and re-added so the TreeSet re-sorts it by its (possibly changed) start time.
 * Under overload (see shouldDrop()) the job is dropped instead of queued.
 *
 * @param job the job to run; ignored if null or if the queue is shut down
 */
public void addJob(Job job) {
    if (job == null || !_alive) return;
    // This does nothing
    // if (job instanceof JobImpl)
    // ((JobImpl)job).addedToQueue();
    long numReady = 0;
    boolean alreadyExists = false;
    boolean dropped = false;
    // getNext() is now outside the jobLock, is that ok?
    synchronized (_jobLock) {
        if (_readyJobs.contains(job)) alreadyExists = true;
        numReady = _readyJobs.size();
        if (!alreadyExists) {
            // if (_timedJobs.contains(job))
            // alreadyExists = true;
            // Always remove and re-add, since it needs to be
            // re-sorted in the TreeSet.
            boolean removed = _timedJobs.remove(job);
            if (removed && _log.shouldLog(Log.WARN)) _log.warn("Rescheduling job: " + job);
        }
        if ((!alreadyExists) && shouldDrop(job, numReady)) {
            // overloaded - reject rather than queue
            job.dropped();
            dropped = true;
        } else {
            if (!alreadyExists) {
                if (job.getTiming().getStartAfter() <= _context.clock().now()) {
                    // don't skew us - its 'start after' its been queued, or later
                    job.getTiming().setStartAfter(_context.clock().now());
                    if (job instanceof JobImpl) ((JobImpl) job).madeReady();
                    _readyJobs.offer(job);
                } else {
                    _timedJobs.add(job);
                    // only notify for _timedJobs, as _readyJobs does not use that lock
                    // only notify if sooner, to reduce contention
                    if (job.getTiming().getStartAfter() < _nextPumperRun) _jobLock.notifyAll();
                }
            }
        }
    }
    // stats are recorded outside the lock
    _context.statManager().addRateData("jobQueue.readyJobs", numReady, 0);
    if (dropped) {
        _context.statManager().addRateData("jobQueue.droppedJobs", 1, 0);
        _log.logAlways(
            Log.WARN,
            "Dropping job due to overload! # ready jobs: " + numReady + ": job = " + job);
    }
}
/**
 * Allow the choice as to whether failed searches should count against the peer (such as if we
 * search for a random key).
 *
 * @param penalizePeer if false, this failure should not count against the peer
 */
public FailedJob(RouterContext enclosingContext, RouterInfo peer, boolean penalizePeer) {
    super(enclosingContext);
    _sentOn = enclosingContext.clock().now();
    _peer = peer.getIdentity().getHash();
    _isFloodfill = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
    _penalizePeer = penalizePeer;
}
/** * Note that we successfully stored to a floodfill peer and verified the result by asking another * floodfill peer */ public void storeSuccessful() { // Fixme, redefined this to include both lookup and store fails, // need to fix the javadocs _failedLookupRate.addData(0, 0); _context.statManager().addRateData("peer.failedLookupRate", 0, 0); _lastStoreSuccessful = _context.clock().now(); }
/**
 * @param claimedAddress an IP/port based RemoteHostId, or null if unknown
 * @param remoteHostId non-null, == claimedAddress if direct, or a hash-based one if indirect
 * @param addr non-null
 */
public OutboundEstablishState(
    RouterContext ctx,
    RemoteHostId claimedAddress,
    RemoteHostId remoteHostId,
    RouterIdentity remotePeer,
    SessionKey introKey,
    UDPAddress addr,
    DHSessionKeyBuilder.Factory dh) {
    _context = ctx;
    _log = ctx.logManager().getLog(OutboundEstablishState.class);
    _claimedAddress = claimedAddress;
    _remoteHostId = remoteHostId;
    _remotePeer = remotePeer;
    _introKey = introKey;
    _remoteAddress = addr;
    _keyFactory = dh;
    _queuedMessages = new LinkedBlockingQueue<OutNetMessage>();
    _establishBegin = ctx.clock().now();
    _introductionNonce = -1;
    if (claimedAddress != null) {
        _bobIP = claimedAddress.getIP();
        _bobPort = claimedAddress.getPort();
    } else {
        // _bobIP stays null
        _bobPort = -1;
    }
    // if the address advertises introducers we must go through one first
    if (addr.getIntroducerCount() > 0) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(
                "new outbound establish to " + remotePeer.calculateHash() + ", with address: " + addr);
        _currentState = OutboundState.OB_STATE_PENDING_INTRO;
    } else {
        _currentState = OutboundState.OB_STATE_UNKNOWN;
    }
}
public long getMaxLag() { // first job is the one that has been waiting the longest Job j = _readyJobs.peek(); if (j == null) return 0; JobTiming jt = j.getTiming(); // PoisonJob timing is null, prevent NPE at shutdown if (jt == null) return 0; long startAfter = jt.getStartAfter(); return _context.clock().now() - startAfter; }
/**
 * Write out the data from the profile to its gzipped file on disk.
 * Silently skips profiles whose last successful send is older than EXPIRE_AGE.
 *
 * @param profile the profile to persist
 */
public void writeProfile(PeerProfile profile) {
    if (isExpired(profile.getLastSendSuccessful())) return;
    File f = pickFile(profile);
    long before = _context.clock().now();
    OutputStream fos = null;
    try {
        fos = new BufferedOutputStream(new GZIPOutputStream(new SecureFileOutputStream(f)));
        writeProfile(profile, fos);
    } catch (IOException ioe) {
        // log the cause too, so write failures are actually diagnosable
        _log.error("Error writing profile to " + f, ioe);
    } finally {
        if (fos != null)
            try {
                fos.close();
            } catch (IOException ignored) {
                // best-effort close; the write error (if any) was already logged
            }
    }
    long delay = _context.clock().now() - before;
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Writing the profile to " + f.getName() + " took " + delay + "ms");
}
/** The SessionCreated validation failed */ public synchronized void fail() { _receivedY = null; _aliceIP = null; _receivedRelayTag = 0; _receivedSignedOnTime = -1; _receivedEncryptedSignature = null; _receivedIV = null; _receivedSignature = null; // sure, there's a chance the packet was corrupted, but in practice // this means that Bob doesn't know his external port, so give up. _currentState = OutboundState.OB_STATE_VALIDATION_FAILED; _nextSend = _context.clock().now(); }
public void renderStatusHTML(Writer out) throws IOException { StringBuilder buf = new StringBuilder(1024); // move to the jsp //buf.append("<h2>Banned Peers</h2>"); Map<Hash, Banlist.Entry> entries = new TreeMap<Hash, Banlist.Entry>(new HashComparator()); entries.putAll(_context.banlist().getEntries()); if (entries.isEmpty()) { buf.append("<i>").append(_("none")).append("</i>"); out.write(buf.toString()); return; } buf.append("<ul>"); for (Map.Entry<Hash, Banlist.Entry> e : entries.entrySet()) { Hash key = e.getKey(); Banlist.Entry entry = e.getValue(); long expires = entry.expireOn-_context.clock().now(); if (expires <= 0) continue; buf.append("<li>").append(_context.commSystem().renderPeerHTML(key)); buf.append(' '); String expireString = DataHelper.formatDuration2(expires); if (key.equals(Hash.FAKE_HASH)) buf.append(_("Permanently banned")); else if (expires < 5l*24*60*60*1000) buf.append(_("Temporary ban expiring in {0}", expireString)); else buf.append(_("Banned until restart or in {0}", expireString)); Set<String> transports = entry.transports; if ( (transports != null) && (!transports.isEmpty()) ) buf.append(" on the following transport: ").append(transports); if (entry.cause != null) { buf.append("<br>\n"); if (entry.causeCode != null) buf.append(_(entry.cause, entry.causeCode)); else buf.append(_(entry.cause)); } if (!key.equals(Hash.FAKE_HASH)) { buf.append(" (<a href=\"configpeer?peer=").append(key.toBase64()) .append("#unsh\">").append(_("unban now")).append("</a>)"); } buf.append("</li>\n"); } buf.append("</ul>\n"); out.write(buf.toString()); out.flush(); }
/** Create an empty outbound message wrapper; priority and expiration start unset (-1). */
public OutNetMessage(RouterContext context) {
    _context = context;
    _log = context.logManager().getLog(OutNetMessage.class);
    _created = context.clock().now();
    _priority = -1;
    _expiration = -1;
    // _createdBy = new Exception("Created by");
    if (_log.shouldLog(Log.INFO)) timestamp("Created");
    // _context.messageStateMonitor().outboundMessageAdded();
    // _context.statManager().createRateStat("outNetMessage.timeToDiscard",
    // "How long until we discard an outbound msg?",
    // "OutNetMessage", new long[] { 5*60*1000, 30*60*1000,
    // 60*60*1000 });
}
/** * Blocking, may take a while */ public synchronized void start() { if (_log.shouldLog(Log.DEBUG)) _log.debug("UPnP Start"); _shouldBeRunning = true; if (!_isRunning) { long b = _context.clock().now(); try { _isRunning = _upnp.runPlugin(); if (_log.shouldLog(Log.INFO)) _log.info("UPnP runPlugin took " + (_context.clock().now() - b)); } catch (Exception e) { // NPE in UPnP (ticket #728), can't let it bring us down if (!_errorLogged) { _log.error("UPnP error, please report", e); _errorLogged = true; } } } if (_isRunning) { _rescanner.schedule(RESCAN_LONG_DELAY); if (_log.shouldLog(Log.DEBUG)) _log.debug("UPnP Start Done"); } else { _rescanner.schedule(RESCAN_SHORT_DELAY); // Do we have a non-loopback, non-broadcast address? // If not, that's why it failed (HTTPServer won't start) if (!Addresses.isConnected()) { if (!_disconLogged) { _log.logAlways(Log.WARN, "UPnP start failed - no network connection?"); _disconLogged = true; } } else { _log.error("UPnP start failed - port conflict?"); } } }
/** Note that the peer sent us a lookup; maintain a rough running average of inter-lookup gaps. */
public void lookupReceived() {
    long now = _context.clock().now();
    long gap = now - _lastLookupReceived;
    _lastLookupReceived = now;
    _lookupsReceived++;
    if (_avgDelayBetweenLookupsReceived <= 0) {
        // first sample seeds the average
        _avgDelayBetweenLookupsReceived = gap;
    } else {
        // nudge the average toward the new sample, weighted by total lookup count
        long step = gap / _lookupsReceived;
        if (gap > _avgDelayBetweenLookupsReceived)
            _avgDelayBetweenLookupsReceived = _avgDelayBetweenLookupsReceived + step;
        else
            _avgDelayBetweenLookupsReceived = _avgDelayBetweenLookupsReceived - step;
    }
}
/** * Stamp the message's progress. Only useful if log level is INFO or DEBUG * * @param eventName what occurred * @return how long this message has been 'in flight' */ public long timestamp(String eventName) { long now = _context.clock().now(); if (_log.shouldLog(Log.INFO)) { // only timestamp if we are debugging synchronized (this) { locked_initTimestamps(); // ??? // while (_timestamps.containsKey(eventName)) { // eventName = eventName + '.'; // } _timestamps.put(eventName, Long.valueOf(now)); _timestampOrder.add(eventName); } } return now - _created; }
/** We received another message we weren't waiting for and don't know how to handle. */
public void droppedOtherMessage(I2NPMessage message, Hash from) {
    if (!_doLog || message == null) return;
    StringBuilder buf = new StringBuilder(512);
    buf.append(getPrefix());
    buf.append("dropped [").append(message.getClass().getName()).append("] ");
    buf.append(message.getUniqueId());
    buf.append(" [").append(message.toString()).append("] from [");
    if (from != null) buf.append(from.toBase64());
    else buf.append("unknown");
    buf.append("] expiring in ");
    buf.append(message.getMessageExpiration() - _context.clock().now());
    buf.append("ms");
    addEntry(buf.toString());
}
/** * We've done what we need to do with the data from this message, though we may keep the object * around for a while to use its ID, jobs, etc. */ public void discardData() { if ((_message != null) && (_messageSize <= 0)) _messageSize = _message.getMessageSize(); if (_log.shouldLog(Log.DEBUG)) { long timeToDiscard = _context.clock().now() - _created; _log.debug( "Discard " + _messageSize + "byte " + getMessageType() + " message after " + timeToDiscard); } _message = null; // _context.statManager().addRateData("outNetMessage.timeToDiscard", timeToDiscard, // timeToDiscard); // _context.messageStateMonitor().outboundMessageDiscarded(); }
/** note that we just sent the RelayRequest packet */
public synchronized void introSent() {
    _lastSend = _context.clock().now();
    if (_introSentCount == 0) _introSentTime = _lastSend;
    // exponential backoff, capped so the retransmit never runs past the overall timeout
    long delay =
        (_introSentCount == 0)
            ? RETRANSMIT_DELAY
            : Math.min(
                RETRANSMIT_DELAY << _introSentCount,
                _introSentTime + EstablishmentManager.OB_MESSAGE_TIMEOUT - _lastSend);
    _introSentCount++;
    _nextSend = _lastSend + delay;
    if (_currentState == OutboundState.OB_STATE_UNKNOWN)
        _currentState = OutboundState.OB_STATE_PENDING_INTRO;
}
/** * This changes the remoteHostId from a hash-based one or possibly incorrect IP/port to what the * introducer told us. All params are for the remote end (NOT the introducer) and must have been * validated already. */ public synchronized void introduced(byte bobIP[], int bobPort) { if (_currentState != OutboundState.OB_STATE_PENDING_INTRO) return; // we've already successfully been introduced, so don't overwrite old settings _nextSend = _context.clock().now() + 500; // wait briefly for the hole punching _currentState = OutboundState.OB_STATE_INTRODUCED; if (_claimedAddress != null && bobPort == _bobPort && DataHelper.eq(bobIP, _bobIP)) { // he's who he said he was _remoteHostId = _claimedAddress; } else { // no IP/port or wrong IP/port in RI _bobIP = bobIP; _bobPort = bobPort; _remoteHostId = new RemoteHostId(bobIP, bobPort); } if (_log.shouldLog(Log.INFO)) _log.info("Introduced to " + _remoteHostId + ", now lets get on with establishing"); }
/** note that we just sent the SessionRequest packet */
public synchronized void requestSent() {
    _lastSend = _context.clock().now();
    if (_requestSentCount == 0) _requestSentTime = _lastSend;
    // exponential backoff, capped so the retransmit never runs past the overall timeout
    long delay =
        (_requestSentCount == 0)
            ? RETRANSMIT_DELAY
            : Math.min(
                RETRANSMIT_DELAY << _requestSentCount,
                _requestSentTime + EstablishmentManager.OB_MESSAGE_TIMEOUT - _lastSend);
    _requestSentCount++;
    _nextSend = _lastSend + delay;
    if (_log.shouldLog(Log.DEBUG)) _log.debug("Send a request packet, nextSend in " + delay);
    if (_currentState == OutboundState.OB_STATE_UNKNOWN
        || _currentState == OutboundState.OB_STATE_INTRODUCED)
        _currentState = OutboundState.OB_STATE_REQUEST_SENT;
}
/** note that we just sent the SessionConfirmed packet */
public synchronized void confirmedPacketsSent() {
    _lastSend = _context.clock().now();
    if (_confirmedSentCount == 0) _confirmedSentTime = _lastSend;
    // exponential backoff, capped so the retransmit never runs past the overall timeout
    long delay =
        (_confirmedSentCount == 0)
            ? RETRANSMIT_DELAY
            : Math.min(
                RETRANSMIT_DELAY << _confirmedSentCount,
                _confirmedSentTime + EstablishmentManager.OB_MESSAGE_TIMEOUT - _lastSend);
    _confirmedSentCount++;
    _nextSend = _lastSend + delay;
    if (_log.shouldLog(Log.DEBUG)) _log.debug("Send confirm packets, nextSend in " + delay);
    if (_currentState == OutboundState.OB_STATE_UNKNOWN
        || _currentState == OutboundState.OB_STATE_PENDING_INTRO
        || _currentState == OutboundState.OB_STATE_INTRODUCED
        || _currentState == OutboundState.OB_STATE_REQUEST_SENT
        || _currentState == OutboundState.OB_STATE_CREATED_RECEIVED)
        _currentState = OutboundState.OB_STATE_CONFIRMED_PARTIALLY;
}
/**
 * Let's sign everything so we can fragment properly.
 *
 * <p>Note that while a SessionConfirmed could in theory be fragmented, in practice a
 * RouterIdentity is 387 bytes and a single fragment is 512 bytes max, so it will never be
 * fragmented.
 */
public synchronized void prepareSessionConfirmed() {
    // already prepared - _sentSignedOnTime doubles as the "done" flag
    if (_sentSignedOnTime > 0) return;
    // buffer layout: X | Y | aliceIP | alicePort(2) | bobIP | bobPort(2) | relayTag(4) | signedOn(4)
    byte signed[] =
        new byte
            [256 + 256 // X + Y
                + _aliceIP.length + 2 + _bobIP.length + 2 + 4 // Alice's relay key
                + 4 // signed on time
            ];
    // seconds, not ms - matches the 4-byte field below
    _sentSignedOnTime = _context.clock().now() / 1000;
    int off = 0;
    System.arraycopy(_sentX, 0, signed, off, _sentX.length);
    off += _sentX.length;
    System.arraycopy(_receivedY, 0, signed, off, _receivedY.length);
    off += _receivedY.length;
    System.arraycopy(_aliceIP, 0, signed, off, _aliceIP.length);
    off += _aliceIP.length;
    DataHelper.toLong(signed, off, 2, _alicePort);
    off += 2;
    System.arraycopy(_bobIP, 0, signed, off, _bobIP.length);
    off += _bobIP.length;
    DataHelper.toLong(signed, off, 2, _bobPort);
    off += 2;
    DataHelper.toLong(signed, off, 4, _receivedRelayTag);
    off += 4;
    DataHelper.toLong(signed, off, 4, _sentSignedOnTime);
    // BUG - if SigningPrivateKey is null, _sentSignature will be null, leading to NPE later
    // should we throw something from here?
    _sentSignature = _context.dsa().sign(signed, _context.keyManager().getSigningPrivateKey());
}
/** @return true if the given last-successful-send time is more than EXPIRE_AGE ms ago */
private boolean isExpired(long lastSentToSuccessfully) {
    return _context.clock().now() - lastSentToSuccessfully > EXPIRE_AGE;
}
/** Note that floodfill verify failed */ public void storeFailed() { // Fixme, redefined this to include both lookup and store fails, // need to fix the javadocs _failedLookupRate.addData(1, 0); _lastStoreFailed = _context.clock().now(); }
/** Note that the peer failed to respond to the db lookup in any way. */
public void lookupFailed() {
    _lastLookupFailed = _context.clock().now();
    _failedLookups++;
    _failedLookupRate.addData(1, 0);
    _context.statManager().addRateData("peer.failedLookupRate", 1, 0);
}
/**
 * Note that the peer was not only able to respond to the lookup, but sent us the data we wanted!
 */
public void lookupSuccessful() {
    _lastLookupSuccessful = _context.clock().now();
    _successfulLookups++;
    _failedLookupRate.addData(0, 0);
    _context.statManager().addRateData("peer.failedLookupRate", 0, 0);
}
/** @return "&lt;time&gt; &lt;localIdent&gt;: " prefix for a history entry */
private final String getPrefix() {
    return getTime(_context.clock().now()) + " " + _localIdent + ": ";
}
/** time the transport tries to send the message (including any queueing) */
public long getSendTime() {
    long now = _context.clock().now();
    return now - _sendBegin;
}
/**
 * Send out a build request message.
 *
 * <p>Zero/one-hop configs are built locally via buildZeroHop(). Otherwise a "paired" tunnel
 * in the opposite direction is selected to carry the request (falling back to exploratory
 * or any tunnel), the TunnelBuildMessage is created, and it is dispatched either through the
 * paired tunnel (inbound builds) or directly to the first hop (outbound builds).
 *
 * @param cfg ReplyMessageId must be set
 * @return success
 */
public static boolean request(
    RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
    // new style crypto fills in all the blanks, while the old style waits for replies to fill in
    // the next hop, etc
    prepare(ctx, cfg);
    if (cfg.getLength() <= 1) {
        buildZeroHop(ctx, pool, cfg, exec);
        return true;
    }
    Log log = ctx.logManager().getLog(BuildRequestor.class);
    cfg.setTunnelPool(pool);
    TunnelInfo pairedTunnel = null;
    Hash farEnd = cfg.getFarEnd();
    TunnelManagerFacade mgr = ctx.tunnelManager();
    boolean isInbound = pool.getSettings().isInbound();
    if (pool.getSettings().isExploratory() || !usePairedTunnels(ctx)) {
        // exploratory build (or pairing disabled): use an exploratory tunnel going the other way
        if (isInbound) pairedTunnel = mgr.selectOutboundExploratoryTunnel(farEnd);
        else pairedTunnel = mgr.selectInboundExploratoryTunnel(farEnd);
    } else {
        // building a client tunnel
        if (isInbound) pairedTunnel = mgr.selectOutboundTunnel(pool.getSettings().getDestination(), farEnd);
        else pairedTunnel = mgr.selectInboundTunnel(pool.getSettings().getDestination(), farEnd);
        if (pairedTunnel == null) {
            // no client tunnel available - fall back, but refuse zero-hop fallbacks
            // when the pool is configured for more hops
            if (isInbound) {
                // random more reliable than closest ??
                // pairedTunnel = mgr.selectOutboundExploratoryTunnel(farEnd);
                pairedTunnel = mgr.selectOutboundTunnel();
                if (pairedTunnel != null
                    && pairedTunnel.getLength() <= 1
                    && mgr.getOutboundSettings().getLength() > 0
                    && mgr.getOutboundSettings().getLength()
                            + mgr.getOutboundSettings().getLengthVariance()
                        > 0) {
                    // don't build using a zero-hop expl.,
                    // as it is both very bad for anonomyity,
                    // and it takes a build slot away from exploratory
                    pairedTunnel = null;
                }
            } else {
                // random more reliable than closest ??
                // pairedTunnel = mgr.selectInboundExploratoryTunnel(farEnd);
                pairedTunnel = mgr.selectInboundTunnel();
                if (pairedTunnel != null
                    && pairedTunnel.getLength() <= 1
                    && mgr.getInboundSettings().getLength() > 0
                    && mgr.getInboundSettings().getLength()
                            + mgr.getInboundSettings().getLengthVariance()
                        > 0) {
                    // ditto
                    pairedTunnel = null;
                }
            }
            if (pairedTunnel != null && log.shouldLog(Log.INFO))
                log.info("Couldn't find a paired tunnel for " + cfg + ", using exploratory tunnel");
        }
    }
    if (pairedTunnel == null) {
        if (log.shouldLog(Log.WARN))
            log.warn("Tunnel build failed, as we couldn't find a paired tunnel for " + cfg);
        exec.buildComplete(cfg, pool);
        // Not even an exploratory tunnel? We are in big trouble.
        // Let's not spin through here too fast.
        // But don't let a client tunnel waiting for exploratories slow things down too much,
        // as there may be other tunnel pools who can build
        int ms = pool.getSettings().isExploratory() ? 250 : 25;
        try {
            Thread.sleep(ms);
        } catch (InterruptedException ie) {
        }
        return false;
    }
    // long beforeCreate = System.currentTimeMillis();
    TunnelBuildMessage msg = createTunnelBuildMessage(ctx, pool, cfg, pairedTunnel, exec);
    // long createTime = System.currentTimeMillis()-beforeCreate;
    if (msg == null) {
        if (log.shouldLog(Log.WARN))
            log.warn("Tunnel build failed, as we couldn't create the tunnel build message for " + cfg);
        exec.buildComplete(cfg, pool);
        return false;
    }
    // cfg.setPairedTunnel(pairedTunnel);
    // long beforeDispatch = System.currentTimeMillis();
    if (cfg.isInbound()) {
        if (log.shouldLog(Log.INFO))
            log.info(
                "Sending the tunnel build request "
                    + msg.getUniqueId()
                    + " out the tunnel "
                    + pairedTunnel
                    + " to "
                    + cfg.getPeer(0)
                    + " for "
                    + cfg
                    + " waiting for the reply of "
                    + cfg.getReplyMessageId());
        // send it out a tunnel targetting the first hop
        // TODO - would be nice to have a TunnelBuildFirstHopFailJob queued if the
        // pairedTunnel is zero-hop, but no way to do that?
        ctx.tunnelDispatcher().dispatchOutbound(msg, pairedTunnel.getSendTunnelId(0), cfg.getPeer(0));
    } else {
        if (log.shouldLog(Log.INFO))
            log.info(
                "Sending the tunnel build request directly to "
                    + cfg.getPeer(1)
                    + " for "
                    + cfg
                    + " waiting for the reply of "
                    + cfg.getReplyMessageId()
                    + " with msgId="
                    + msg.getUniqueId());
        // send it directly to the first hop
        // Add some fuzz to the TBM expiration to make it harder to guess how many hops
        // or placement in the tunnel
        msg.setMessageExpiration(
            ctx.clock().now() + BUILD_MSG_TIMEOUT + ctx.random().nextLong(20 * 1000));
        // We set the OutNetMessage expiration much shorter, so that the
        // TunnelBuildFirstHopFailJob fires before the 13s build expiration.
        RouterInfo peer = ctx.netDb().lookupRouterInfoLocally(cfg.getPeer(1));
        if (peer == null) {
            if (log.shouldLog(Log.WARN))
                log.warn("Could not find the next hop to send the outbound request to: " + cfg);
            exec.buildComplete(cfg, pool);
            return false;
        }
        OutNetMessage outMsg =
            new OutNetMessage(ctx, msg, ctx.clock().now() + FIRST_HOP_TIMEOUT, PRIORITY, peer);
        outMsg.setOnFailedSendJob(new TunnelBuildFirstHopFailJob(ctx, pool, cfg, exec));
        ctx.outNetMessagePool().add(outMsg);
    }
    // if (log.shouldLog(Log.DEBUG))
    // log.debug("Tunnel build message " + msg.getUniqueId() + " created in " + createTime
    // + "ms and dispatched in " + (System.currentTimeMillis()-beforeDispatch));
    return true;
}
private boolean shouldBeFloodfill() { if (!SigType.ECDSA_SHA256_P256.isAvailable()) return false; // Hidden trumps netDb.floodfillParticipant=true if (getContext().router().isHidden()) return false; String enabled = getContext().getProperty(PROP_FLOODFILL_PARTICIPANT, "auto"); if ("true".equals(enabled)) return true; if ("false".equals(enabled)) return false; // auto from here down // Only if not shutting down... if (getContext().router().gracefulShutdownInProgress()) return false; // ARM ElG decrypt is too slow if (SystemVersion.isARM() || SystemVersion.isAndroid()) return false; if (getContext().getBooleanProperty(UDPTransport.PROP_LAPTOP_MODE)) return false; if (getContext().commSystem().isInBadCountry()) return false; String country = getContext().commSystem().getOurCountry(); // anonymous proxy, satellite provider (not in bad country list) if ("a1".equals(country) || "a2".equals(country)) return false; // Only if up a while... if (getContext().router().getUptime() < MIN_UPTIME) return false; RouterInfo ri = getContext().router().getRouterInfo(); if (ri == null) return false; char bw = ri.getBandwidthTier().charAt(0); // Only if class M, N, O, P, X if (bw != Router.CAPABILITY_BW64 && bw != Router.CAPABILITY_BW128 && bw != Router.CAPABILITY_BW256 && bw != Router.CAPABILITY_BW512 && bw != Router.CAPABILITY_BW_UNLIMITED) return false; // This list will not include ourselves... List<Hash> floodfillPeers = _facade.getFloodfillPeers(); long now = getContext().clock().now(); // We know none at all! Must be our turn... if (floodfillPeers == null || floodfillPeers.isEmpty()) { _lastChanged = now; return true; } // Only change status every so often boolean wasFF = _facade.floodfillEnabled(); if (_lastChanged + MIN_CHANGE_DELAY > now) return wasFF; // This is similar to the qualification we do in FloodOnlySearchJob.runJob(). // Count the "good" ff peers. // // Who's not good? // the unheard-from, unprofiled, failing, unreachable and banlisted ones. 
// We should hear from floodfills pretty frequently so set a 60m time limit. // If unprofiled we haven't talked to them in a long time. // We aren't contacting the peer directly, so banlist doesn't strictly matter, // but it's a bad sign, and we often banlist a peer before we fail it... // // Future: use Integration calculation // int ffcount = floodfillPeers.size(); int failcount = 0; long before = now - 60 * 60 * 1000; for (Hash peer : floodfillPeers) { PeerProfile profile = getContext().profileOrganizer().getProfile(peer); if (profile == null || profile.getLastHeardFrom() < before || profile.getIsFailing() || getContext().banlist().isBanlisted(peer) || getContext().commSystem().wasUnreachable(peer)) failcount++; } if (wasFF) ffcount++; int good = ffcount - failcount; boolean happy = getContext().router().getRouterInfo().getCapabilities().indexOf('R') >= 0; // TODO - limit may still be too high // For reference, the avg lifetime job lag on my Pi is 6. // Should we consider avg. dropped ff jobs? RateStat lagStat = getContext().statManager().getRate("jobQueue.jobLag"); RateStat queueStat = getContext().statManager().getRate("router.tunnelBacklog"); happy = happy && lagStat.getRate(60 * 60 * 1000L).getAvgOrLifetimeAvg() < 25; happy = happy && queueStat.getRate(60 * 60 * 1000L).getAvgOrLifetimeAvg() < 5; // Only if we're pretty well integrated... 
happy = happy && _facade.getKnownRouters() >= 400; happy = happy && getContext().commSystem().countActivePeers() >= 50; happy = happy && getContext().tunnelManager().getParticipatingCount() >= 25; happy = happy && Math.abs(getContext().clock().getOffset()) < 10 * 1000; // We need an address and no introducers if (happy) { RouterAddress ra = getContext().router().getRouterInfo().getTargetAddress("SSU"); if (ra == null) happy = false; else { if (ra.getOption("ihost0") != null) happy = false; } } double elG = 0; RateStat stat = getContext().statManager().getRate("crypto.elGamal.decrypt"); if (stat != null) { Rate rate = stat.getRate(60 * 60 * 1000L); if (rate != null) { elG = rate.getAvgOrLifetimeAvg(); happy = happy && elG <= 40.0d; } } if (_log.shouldLog(Log.DEBUG)) { final RouterContext rc = getContext(); final String log = String.format( "FF criteria breakdown: happy=%b, capabilities=%s, maxLag=%d, known=%d, " + "active=%d, participating=%d, offset=%d, ssuAddr=%s ElG=%f", happy, rc.router().getRouterInfo().getCapabilities(), rc.jobQueue().getMaxLag(), _facade.getKnownRouters(), rc.commSystem().countActivePeers(), rc.tunnelManager().getParticipatingCount(), Math.abs(rc.clock().getOffset()), rc.router().getRouterInfo().getTargetAddress("SSU").toString(), elG); _log.debug(log); } // Too few, and we're reachable, let's volunteer if (good < MIN_FF && happy) { if (!wasFF) { _lastChanged = now; _log.logAlways( Log.INFO, "Only " + good + " ff peers and we want " + MIN_FF + " so we are becoming floodfill"); } return true; } // Too many, or we aren't reachable, let's stop if (good > MAX_FF || (good > MIN_FF && !happy)) { if (wasFF) { _lastChanged = now; _log.logAlways( Log.INFO, "Have " + good + " ff peers and we need only " + MIN_FF + " to " + MAX_FF + " so we are disabling floodfill; reachable? " + happy); } return false; } if (_log.shouldLog(Log.INFO)) _log.info( "Have " + good + " ff peers, not changing, enabled? " + wasFF + "; reachable? " + happy); return wasFF; }
/** time since the message was created */
public long getLifetime() {
    long now = _context.clock().now();
    return now - _created;
}