Example #1
 public void runJob() {
   boolean wasFF = _facade.floodfillEnabled();
   boolean ff = shouldBeFloodfill();
   _facade.setFloodfillEnabled(ff);
   if (ff != wasFF) {
     if (ff) {
       getContext().router().eventLog().addEvent(EventLog.BECAME_FLOODFILL);
     } else {
       getContext().router().eventLog().addEvent(EventLog.NOT_FLOODFILL);
     }
     getContext().router().rebuildRouterInfo(true);
     Job routerInfoFlood = new FloodfillRouterInfoFloodJob(getContext(), _facade);
     if (getContext().router().getUptime() < 5 * 60 * 1000) {
       // Needed to prevent race if router.floodfillParticipant=true (not auto)
       routerInfoFlood.getTiming().setStartAfter(getContext().clock().now() + 5 * 60 * 1000);
       getContext().jobQueue().addJob(routerInfoFlood);
        if (_log.shouldLog(Log.DEBUG)) {
          _log.debug("Deferring our FloodfillRouterInfoFloodJob run because of low uptime.");
        }
     } else {
       routerInfoFlood.runJob();
        if (_log.shouldLog(Log.DEBUG)) {
          _log.debug("Running FloodfillRouterInfoFloodJob");
        }
     }
   }
   if (_log.shouldLog(Log.INFO)) _log.info("Should we be floodfill? " + ff);
   int delay = (REQUEUE_DELAY / 2) + getContext().random().nextInt(REQUEUE_DELAY);
   // there's a lot of eligible non-floodfills, keep them from all jumping in at once
   // TODO: somehow assess the size of the network to make this adaptive?
   if (!ff) delay *= 4; // this was 7, reduced for moar FFs --zab
   requeue(delay);
 }
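
A note on the closing lines: rather than requeueing at a fixed interval, the job picks a random delay in [REQUEUE_DELAY/2, REQUEUE_DELAY/2 + REQUEUE_DELAY) so the many eligible non-floodfill routers do not all re-evaluate at once. Here is a minimal standalone sketch of that jitter pattern; the REQUEUE_DELAY value below is an illustrative assumption, not the router's actual constant:

 // Jittered requeue delay, isolating the pattern from runJob() above.
 private static final int REQUEUE_DELAY = 60 * 60 * 1000; // hypothetical: 1 hour

 static int nextRequeueDelay(java.util.Random random, boolean floodfill) {
   // uniform in [REQUEUE_DELAY/2, REQUEUE_DELAY/2 + REQUEUE_DELAY)
   int delay = (REQUEUE_DELAY / 2) + random.nextInt(REQUEUE_DELAY);
   // non-floodfills re-check 4x less often; there are far more of them
   if (!floodfill) delay *= 4;
   return delay;
 }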
Example #2
 /**
  * Allows the caller to choose whether a failed search should count against the peer's profile
  * (for example, not when we searched for a random key, since the peer was never expected to
  * have it).
  */
 public FailedJob(RouterContext enclosingContext, RouterInfo peer, boolean penalizePeer) {
   super(enclosingContext);
   _penalizePeer = penalizePeer;
   _peer = peer.getIdentity().getHash();
   _sentOn = enclosingContext.clock().now();
   _isFloodfill = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
 }
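
A hypothetical call site for the flag above (the method name and randomKey parameter are invented for illustration): a search for a random key can fail through no fault of the peer, so such failures should not lower its profile.

 // Usage sketch: penalize only when the peer could plausibly have had the key.
 void onSearchFailed(RouterContext ctx, RouterInfo peer, boolean randomKey) {
   boolean penalize = !randomKey;
   ctx.jobQueue().addJob(new FailedJob(ctx, peer, penalize));
 }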
Example #3
  /**
   * We're (probably) searching for a LeaseSet, so to be (overly) cautious we send the request
   * out through a tunnel, with the reply coming back through another tunnel.
   */
  protected void sendLeaseSearch(RouterInfo router) {
    Hash to = router.getIdentity().getHash();
    TunnelInfo inTunnel = getContext().tunnelManager().selectInboundExploratoryTunnel(to);
    if (inTunnel == null) {
      _log.warn("No tunnels to get search replies through!  wtf!");
      getContext().jobQueue().addJob(new FailedJob(getContext(), router));
      return;
    }
    TunnelId inTunnelId = inTunnel.getReceiveTunnelId(0);

    // this will fail if we've shitlisted our inbound gateway, but the gw may not necessarily
    // be shitlisted by whoever needs to contact them, so we don't need to check this

    // RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
    // if (inGateway == null) {
    //    _log.error("We can't find the gateway to our inbound tunnel?! wtf");
    //    getContext().jobQueue().addJob(new FailedJob(getContext(), router));
    //    return;
    // }

    int timeout = getPerPeerTimeoutMs(to);
    long expiration = getContext().clock().now() + timeout;

    DatabaseLookupMessage msg = buildMessage(inTunnelId, inTunnel.getPeer(0), expiration);

    TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to);
    if (outTunnel == null) {
      _log.warn("No tunnels to send search out through!  wtf!");
      getContext().jobQueue().addJob(new FailedJob(getContext(), router));
      return;
    }
    TunnelId outTunnelId = outTunnel.getSendTunnelId(0);

    if (_log.shouldLog(Log.DEBUG))
      _log.debug(
          getJobId()
              + ": Sending search to "
              + to
              + " for "
              + msg.getSearchKey().toBase64()
              + " w/ replies through ["
              + msg.getFrom().toBase64()
              + "] via tunnel ["
              + msg.getReplyTunnel()
              + "]");

    SearchMessageSelector sel =
        new SearchMessageSelector(getContext(), router, _expiration, _state);
    SearchUpdateReplyFoundJob reply =
        new SearchUpdateReplyFoundJob(
            getContext(), router, _state, _facade, this, outTunnel, inTunnel);

    if (FloodfillNetworkDatabaseFacade.isFloodfill(router)) _floodfillSearchesOutstanding++;
    getContext()
        .messageRegistry()
        .registerPending(sel, reply, new FailedJob(getContext(), router), timeout);
    getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnelId, to);
  }
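
One detail worth calling out: the reply selector and handler are registered with the message registry before the message is dispatched, so a fast reply can never arrive while no handler is in place. Below is a library-agnostic sketch of that ordering invariant; all types are stand-ins, not I2P APIs:

 // Register-before-dispatch pattern, with stand-in types.
 interface Registry {
   void registerPending(Object selector, Runnable onReply, Runnable onTimeout, int timeoutMs);
 }

 interface Dispatcher {
   void dispatch(Object msg);
 }

 static void sendWithReplyHandler(Registry reg, Dispatcher disp, Object selector, Object msg,
                                  Runnable onReply, Runnable onTimeout, int timeoutMs) {
   reg.registerPending(selector, onReply, onTimeout, timeoutMs); // handler first...
   disp.dispatch(msg);                                           // ...then send
 }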
Example #4
 /**
  * This is now misnamed, as it is only used to determine whether to return floodfill peers.
  */
 static boolean onlyQueryFloodfillPeers(RouterContext ctx) {
   // if (isCongested(ctx))
   //    return true;
   // If we are floodfill, we want the FloodfillPeerSelector (in add()) to include
   // non-ff peers (if required) in DatabaseSearchReplyMessage responses
   // so that Exploration works.
   // ExploreJob is disabled if we are floodfill.
   // The other two places this was called (one below and one in FNDF)
   // have been commented out.
   // Returning false essentially enables kademlia as a backup to floodfill for search responses.
   if (FloodfillNetworkDatabaseFacade.floodfillEnabled(ctx)) return false;
    return Boolean.parseBoolean(
        ctx.getProperty("netDb.floodfillOnly", String.valueOf(DEFAULT_FLOODFILL_ONLY)));
 }
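
A condensed sketch of the decision above, with illustrative accessors and an assumed default (neither is the actual API): a floodfill router never restricts its own replies to floodfill peers, so exploration keeps working and kademlia remains a backup; everyone else follows the netDb.floodfillOnly property.

 // Illustrative distillation of onlyQueryFloodfillPeers(); not the real signature.
 static boolean onlyQueryFloodfillPeers(boolean weAreFloodfill, java.util.Properties props) {
   if (weAreFloodfill) return false; // keep non-ff peers in our search replies
   return Boolean.parseBoolean(props.getProperty("netDb.floodfillOnly", "true")); // default assumed
 }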
Example #5
 /**
  * Send a series of searches to the next available peers as selected by the routing table,
  * making sure no more than SEARCH_BREDTH are outstanding at any time.
  */
 protected void continueSearch() {
   if (_state.completed()) {
     if (_log.shouldLog(Log.DEBUG))
       _log.debug(getJobId() + ": Search already completed", new Exception("already completed"));
     return;
   }
   int toCheck = getBredth() - _state.getPending().size();
   if (toCheck <= 0) {
     // too many already pending
     if (_log.shouldLog(Log.INFO))
       _log.info(
           getJobId()
               + ": Too many searches already pending (pending: "
               + _state.getPending().size()
               + " max: "
               + getBredth()
               + ")");
     requeuePending();
     return;
   }
   int sent = 0;
    Set<Hash> attempted = _state.getAttempted();
   while (sent <= 0) {
     // boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext());
     boolean onlyFloodfill = true;
     if (_floodfillPeersExhausted && onlyFloodfill && _state.getPending().isEmpty()) {
       if (_log.shouldLog(Log.WARN))
         _log.warn(
             getJobId()
                 + ": no non-floodfill peers left, and no more pending.  Searched: "
                 + _state.getAttempted().size()
                 + " failed: "
                 + _state.getFailed().size());
       fail();
       return;
     }
      List<Hash> closestHashes = getClosestRouters(_state.getTarget(), toCheck, attempted);
     if ((closestHashes == null) || (closestHashes.isEmpty())) {
       if (_state.getPending().isEmpty()) {
         // we tried to find some peers, but there weren't any and no one else is going to answer
         if (_log.shouldLog(Log.INFO))
           _log.info(
               getJobId()
                   + ": No peers left, and none pending!  Already searched: "
                   + _state.getAttempted().size()
                   + " failed: "
                   + _state.getFailed().size());
         fail();
       } else {
         // no more to try, but we might get data or close peers from some outstanding requests
         if (_log.shouldLog(Log.INFO))
           _log.info(
               getJobId()
                   + ": No peers left, but some are pending!  Pending: "
                   + _state.getPending().size()
                   + " attempted: "
                   + _state.getAttempted().size()
                   + " failed: "
                   + _state.getFailed().size());
         requeuePending();
       }
       return;
     } else {
       attempted.addAll(closestHashes);
        for (Hash peer : closestHashes) {
         DatabaseEntry ds = _facade.getDataStore().get(peer);
         if (ds == null) {
           if (_log.shouldLog(Log.INFO))
             _log.info(
                 "Next closest peer "
                     + peer
                     + " was only recently referred to us, sending a search for them");
           getContext().netDb().lookupRouterInfo(peer, null, null, _timeoutMs);
          } else if (ds.getType() != DatabaseEntry.KEY_TYPE_ROUTERINFO) {
           if (_log.shouldLog(Log.WARN))
             _log.warn(
                 getJobId()
                     + ": Error selecting closest hash that wasnt a router! "
                     + peer
                     + " : "
                     + ds.getClass().getName());
           _state.replyTimeout(peer);
         } else {
           RouterInfo ri = (RouterInfo) ds;
           if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
             _floodfillPeersExhausted = true;
             if (onlyFloodfill) continue;
           }
            if (ri.isHidden()) { // || // allow querying shitlisted, since it's indirect
              // getContext().shitlist().isShitlisted(peer)) {
              // don't bother
           } else {
             _state.addPending(peer);
              sendSearch(ri);
             sent++;
           }
         }
       }
       /*
       if (sent <= 0) {
           // the (potentially) last peers being searched for could not be,
           // er, searched for, so lets retry ASAP (causing either another
           // peer to be selected, or the whole search to fail)
           if (_log.shouldLog(Log.INFO))
               _log.info(getJobId() + ": No new peer queued up, so we are going to requeue " +
                         "ourselves in our search for " + _state.getTarget().toBase64());
           requeuePending(0);
       }
        */
     }
   }
 }
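
The breadth accounting at the top of continueSearch() is what enforces the concurrency limit: each pass may start only (breadth - pending) new lookups, so at most SEARCH_BREDTH are ever outstanding. A minimal sketch of that bound, with illustrative names:

 // How many new searches this pass may start; never negative.
 static int newSearchesAllowed(int breadth, int pending) {
   return Math.max(0, breadth - pending);
 }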
Example #6
  private boolean shouldBeFloodfill() {
    if (!SigType.ECDSA_SHA256_P256.isAvailable()) return false;

    // Hidden trumps netDb.floodfillParticipant=true
    if (getContext().router().isHidden()) return false;

    String enabled = getContext().getProperty(PROP_FLOODFILL_PARTICIPANT, "auto");
    if ("true".equals(enabled)) return true;
    if ("false".equals(enabled)) return false;

    // auto from here down

    // Only if not shutting down...
    if (getContext().router().gracefulShutdownInProgress()) return false;

    // ARM ElG decrypt is too slow
    if (SystemVersion.isARM() || SystemVersion.isAndroid()) return false;

    if (getContext().getBooleanProperty(UDPTransport.PROP_LAPTOP_MODE)) return false;

    if (getContext().commSystem().isInBadCountry()) return false;
    String country = getContext().commSystem().getOurCountry();
    // anonymous proxy, satellite provider (not in bad country list)
    if ("a1".equals(country) || "a2".equals(country)) return false;

    // Only if up a while...
    if (getContext().router().getUptime() < MIN_UPTIME) return false;

    RouterInfo ri = getContext().router().getRouterInfo();
    if (ri == null) return false;
    char bw = ri.getBandwidthTier().charAt(0);
    // Only if class M, N, O, P, X
    if (bw != Router.CAPABILITY_BW64
        && bw != Router.CAPABILITY_BW128
        && bw != Router.CAPABILITY_BW256
        && bw != Router.CAPABILITY_BW512
        && bw != Router.CAPABILITY_BW_UNLIMITED) return false;

    // This list will not include ourselves...
    List<Hash> floodfillPeers = _facade.getFloodfillPeers();
    long now = getContext().clock().now();
    // We know none at all! Must be our turn...
    if (floodfillPeers == null || floodfillPeers.isEmpty()) {
      _lastChanged = now;
      return true;
    }

    // Only change status every so often
    boolean wasFF = _facade.floodfillEnabled();
    if (_lastChanged + MIN_CHANGE_DELAY > now) return wasFF;

    // This is similar to the qualification we do in FloodOnlySearchJob.runJob().
    // Count the "good" ff peers.
    //
    // Who's not good?
    // the unheard-from, unprofiled, failing, unreachable and banlisted ones.
    // We should hear from floodfills pretty frequently so set a 60m time limit.
    // If unprofiled we haven't talked to them in a long time.
    // We aren't contacting the peer directly, so banlist doesn't strictly matter,
    // but it's a bad sign, and we often banlist a peer before we fail it...
    //
    // Future: use Integration calculation
    //
    int ffcount = floodfillPeers.size();
    int failcount = 0;
    long before = now - 60 * 60 * 1000;
    for (Hash peer : floodfillPeers) {
      PeerProfile profile = getContext().profileOrganizer().getProfile(peer);
      if (profile == null
          || profile.getLastHeardFrom() < before
          || profile.getIsFailing()
          || getContext().banlist().isBanlisted(peer)
          || getContext().commSystem().wasUnreachable(peer)) failcount++;
    }

    if (wasFF) ffcount++;
    int good = ffcount - failcount;
    boolean happy = getContext().router().getRouterInfo().getCapabilities().indexOf('R') >= 0;
    // TODO - limit may still be too high
    // For reference, the avg lifetime job lag on my Pi is 6.
    // Should we consider avg. dropped ff jobs?
     RateStat lagStat = getContext().statManager().getRate("jobQueue.jobLag");
     RateStat queueStat = getContext().statManager().getRate("router.tunnelBacklog");
     // null-check the stats before use, as the ElGamal check below does
     Rate lagRate = lagStat != null ? lagStat.getRate(60 * 60 * 1000L) : null;
     Rate queueRate = queueStat != null ? queueStat.getRate(60 * 60 * 1000L) : null;
     happy = happy && lagRate != null && lagRate.getAvgOrLifetimeAvg() < 25;
     happy = happy && queueRate != null && queueRate.getAvgOrLifetimeAvg() < 5;
    // Only if we're pretty well integrated...
    happy = happy && _facade.getKnownRouters() >= 400;
    happy = happy && getContext().commSystem().countActivePeers() >= 50;
    happy = happy && getContext().tunnelManager().getParticipatingCount() >= 25;
    happy = happy && Math.abs(getContext().clock().getOffset()) < 10 * 1000;
    // We need an address and no introducers
    if (happy) {
      RouterAddress ra = getContext().router().getRouterInfo().getTargetAddress("SSU");
      if (ra == null) happy = false;
      else {
        if (ra.getOption("ihost0") != null) happy = false;
      }
    }

    double elG = 0;
    RateStat stat = getContext().statManager().getRate("crypto.elGamal.decrypt");
    if (stat != null) {
      Rate rate = stat.getRate(60 * 60 * 1000L);
      if (rate != null) {
        elG = rate.getAvgOrLifetimeAvg();
        happy = happy && elG <= 40.0d;
      }
    }

    if (_log.shouldLog(Log.DEBUG)) {
      final RouterContext rc = getContext();
      final String log =
          String.format(
              "FF criteria breakdown: happy=%b, capabilities=%s, maxLag=%d, known=%d, "
                  + "active=%d, participating=%d, offset=%d, ssuAddr=%s ElG=%f",
              happy,
              rc.router().getRouterInfo().getCapabilities(),
              rc.jobQueue().getMaxLag(),
              _facade.getKnownRouters(),
              rc.commSystem().countActivePeers(),
              rc.tunnelManager().getParticipatingCount(),
              Math.abs(rc.clock().getOffset()),
               rc.router().getRouterInfo().getTargetAddress("SSU"), // may be null; %s prints "null"
              elG);
      _log.debug(log);
    }

    // Too few, and we're reachable, let's volunteer
    if (good < MIN_FF && happy) {
      if (!wasFF) {
        _lastChanged = now;
        _log.logAlways(
            Log.INFO,
            "Only " + good + " ff peers and we want " + MIN_FF + " so we are becoming floodfill");
      }
      return true;
    }

    // Too many, or we aren't reachable, let's stop
    if (good > MAX_FF || (good > MIN_FF && !happy)) {
      if (wasFF) {
        _lastChanged = now;
        _log.logAlways(
            Log.INFO,
            "Have "
                + good
                + " ff peers and we need only "
                + MIN_FF
                + " to "
                + MAX_FF
                + " so we are disabling floodfill; reachable? "
                + happy);
      }
      return false;
    }

    if (_log.shouldLog(Log.INFO))
      _log.info(
          "Have " + good + " ff peers, not changing, enabled? " + wasFF + "; reachable? " + happy);
    return wasFF;
  }
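
The three final branches form a hysteresis band: the router volunteers only when the good-floodfill count falls below MIN_FF (and it is "happy"), drops out only above MAX_FF or when unhappy, and otherwise holds its current state, preventing flapping near the thresholds. A condensed sketch; the constants below are illustrative assumptions, not the router's actual values:

  // Hysteresis over the "good" floodfill count; constants are hypothetical.
  static final int MIN_FF = 500;  // assumed lower bound
  static final int MAX_FF = 2000; // assumed upper bound

  static boolean decideFloodfill(int good, boolean happy, boolean wasFF) {
    if (good < MIN_FF && happy) return true;                      // too few: volunteer
    if (good > MAX_FF || (good > MIN_FF && !happy)) return false; // too many, or unfit: stop
    return wasFF;                                                 // in the band: keep state
  }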