Example #1
 /** Create a new search for the routingKey specified */
 public SearchJob(
     RouterContext context,
     KademliaNetworkDatabaseFacade facade,
     Hash key,
     Job onSuccess,
     Job onFailure,
     long timeoutMs,
     boolean keepStats,
     boolean isLease) {
   super(context);
   if ((key == null) || (key.getData() == null))
      throw new IllegalArgumentException("Search for null key");
   _log = getContext().logManager().getLog(getClass());
   _facade = facade;
   _state = new SearchState(getContext(), key);
   _onSuccess = onSuccess;
   _onFailure = onFailure;
   _timeoutMs = timeoutMs;
   _keepStats = keepStats;
   _isLease = isLease;
    _deferredSearches = new ArrayList<>(0);
   _peerSelector = facade.getPeerSelector();
   _startedOn = -1;
   _expiration = getContext().clock().now() + timeoutMs;
   getContext().statManager().addRateData("netDb.searchCount", 1, 0);
   if (_log.shouldLog(Log.DEBUG))
     _log.debug(
         "Search (" + getClass().getName() + " for " + key.toBase64(),
         new Exception("Search enqueued by"));
 }
  /**
   * Send to a subset of all floodfill peers. We do this to implement Kademlia within the
   * floodfills, i.e. we flood to those closest to the key.
   */
  public void flood(DatabaseEntry ds) {
    Hash key = ds.getHash();
    Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
    FloodfillPeerSelector sel = (FloodfillPeerSelector) getPeerSelector();
    List<Hash> peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
    int flooded = 0;
    for (int i = 0; i < peers.size(); i++) {
      Hash peer = peers.get(i);
      RouterInfo target = lookupRouterInfoLocally(peer);
      if ((target == null) || (_context.banlist().isBanlisted(peer))) continue;
      // Don't flood a RI back to itself
      // Not necessary, a ff will do its own flooding (reply token == 0)
      // if (peer.equals(target.getIdentity().getHash()))
      //    continue;
      if (peer.equals(_context.routerHash())) continue;
      DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
      msg.setEntry(ds);
      OutNetMessage m =
          new OutNetMessage(
              _context, msg, _context.clock().now() + FLOOD_TIMEOUT, FLOOD_PRIORITY, target);
      // note send failure but don't give credit on success
      // might need to change this
      Job floodFail = new FloodFailedJob(_context, peer);
      m.setOnFailedSendJob(floodFail);
      _context.commSystem().processMessage(m);
      flooded++;
      if (_log.shouldLog(Log.INFO))
        _log.info("Flooding the entry for " + key.toBase64() + " to " + peer.toBase64());
    }

    if (_log.shouldLog(Log.INFO))
      _log.info("Flooded the data to " + flooded + " of " + peers.size() + " peers");
  }
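
What makes this flooding Kademlia-style is the selection step: selectFloodfillParticipants ranks candidate floodfills by XOR distance between their hash and the routing key and keeps the MAX_TO_FLOOD closest. A minimal sketch of that metric, assuming raw 32-byte hashes and ignoring the real selector's kbucket traversal and exclusion lists (class and method names here are illustrative, not from the original):

 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;

 /** Illustration only: rank candidates by XOR distance to the routing key. */
 class XorDistanceSketch {
   /** Orders equal-length hashes by XOR distance to the target key, nearest first. */
   static Comparator<byte[]> closestTo(final byte[] key) {
     return new Comparator<byte[]>() {
       public int compare(byte[] a, byte[] b) {
         for (int i = 0; i < key.length; i++) {
           int da = (a[i] ^ key[i]) & 0xff;
           int db = (b[i] ^ key[i]) & 0xff;
           if (da != db) return da - db;
         }
         return 0;
       }
     };
   }

   /** Returns the max candidates closest to rkey. */
   static List<byte[]> select(byte[] rkey, List<byte[]> candidates, int max) {
     List<byte[]> sorted = new ArrayList<byte[]>(candidates);
     Collections.sort(sorted, closestTo(rkey));
     return sorted.subList(0, Math.min(max, sorted.size()));
   }
 }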
Example #3
    public void renderStatusHTML(Writer out) throws IOException {
        StringBuilder buf = new StringBuilder(1024);
        // move to the jsp
        //buf.append("<h2>Banned Peers</h2>");
        Map<Hash, Banlist.Entry> entries = new TreeMap<Hash, Banlist.Entry>(new HashComparator());
        
        entries.putAll(_context.banlist().getEntries());
        if (entries.isEmpty()) {
            buf.append("<i>").append(_("none")).append("</i>");
            out.write(buf.toString());
            return;
        }

        buf.append("<ul>");
        
        for (Map.Entry<Hash, Banlist.Entry> e : entries.entrySet()) {
            Hash key = e.getKey();
            Banlist.Entry entry = e.getValue();
            long expires = entry.expireOn - _context.clock().now();
            if (expires <= 0)
                continue;
            buf.append("<li>").append(_context.commSystem().renderPeerHTML(key));
            buf.append(' ');
            String expireString = DataHelper.formatDuration2(expires);
            if (key.equals(Hash.FAKE_HASH))
                buf.append(_("Permanently banned"));
            else if (expires < 5L*24*60*60*1000)
                buf.append(_("Temporary ban expiring in {0}", expireString));
            else
                buf.append(_("Banned until restart or in {0}", expireString));
            Set<String> transports = entry.transports;
            if ( (transports != null) && (!transports.isEmpty()) )
                buf.append(" on the following transport: ").append(transports);
            if (entry.cause != null) {
                buf.append("<br>\n");
                if (entry.causeCode != null)
                    buf.append(_(entry.cause, entry.causeCode));
                else
                    buf.append(_(entry.cause));
            }
            if (!key.equals(Hash.FAKE_HASH)) {
                buf.append(" (<a href=\"configpeer?peer=").append(key.toBase64())
                   .append("#unsh\">").append(_("unban now")).append("</a>)");
            }
            buf.append("</li>\n");
        }
        buf.append("</ul>\n");
        out.write(buf.toString());
        out.flush();
    }
Example #4
 public void tunnelDispatched(
     long messageId, long tunnelId, long toTunnel, Hash toPeer, String type) {
   if (!_doLog) return;
   if (toPeer != null)
     addEntry(
         getPrefix()
             + "message "
             + messageId
             + " on tunnel "
             + tunnelId
             + " / "
             + toTunnel
             + " to "
             + toPeer.toBase64()
             + " as "
             + type);
   else
     addEntry(
         getPrefix()
             + "message "
             + messageId
             + " on tunnel "
             + tunnelId
             + " / "
             + toTunnel
             + " as "
             + type);
 }
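
The log helpers elsewhere in this class (see Example #9 below) assemble the same kind of entry with a single StringBuilder rather than two long concatenation chains. An equivalent sketch in that style, reusing the original's _doLog, getPrefix(), and addEntry() members:

 public void tunnelDispatched(
     long messageId, long tunnelId, long toTunnel, Hash toPeer, String type) {
   if (!_doLog) return;
   StringBuilder buf = new StringBuilder(128);
   buf.append(getPrefix()).append("message ").append(messageId);
   buf.append(" on tunnel ").append(tunnelId).append(" / ").append(toTunnel);
   // the destination peer is optional; everything else is always logged
   if (toPeer != null) buf.append(" to ").append(toPeer.toBase64());
   buf.append(" as ").append(type);
   addEntry(buf.toString());
 }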
Example #5
  @Override
  protected void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    if (!validPassphrase()) return;

    response.setContentType("text/html");
    PrintWriter out = null;
    try {
      out = response.getWriter();
      out.println("<html><head><title>Addresses</title></head>");
      out.println("<form method=POST>");
      out.println("<table>");
      for (Record record : RecordIndex.getInstance()) {
        out.println("<tr>");
        out.println(
            "<td><input name=\"checked\" type=\"checkbox\" value=\""
                + Long.toHexString(record.id)
                + "\" /></td>");
        out.println(" <td>" + record.getName() + "</td>");
        out.println(" <td>" + formatter.format(record.getModified()) + "</td>");
        out.println(" <td>" + record.getAddress().toBase64() + "</td>");
        out.println("</tr>");
      }
      out.println("</table>");

      out.println("<input name=\"delete\" type=\"submit\" value=\"Delete Selected\" />");
      out.println("</form>");
      out.println("<h3>Deleted addresses:</h3>");
      out.println("<form method=POST>");
      out.println("<table>");
      for (Hash hash : blacklist) {
        String shash = Base32.encode(hash.getData());
        out.println("<tr>");
        out.println(
            "  <td><input name=\"checked\" type=\"checkbox\" value=\"" + shash + "\" /></td>");
        out.println("  <td>" + shash + "</td>");
        out.println("</tr>");
      }
      out.println("</table>");

      out.println("<input type=\"submit\" name=\"submit\" value=\"Undelete Selected\" />");

      out.println("</form></body></html>");
    } finally {
      if (out != null) out.close();
    }
  }
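
Note that record.getName() is written into the page unescaped. The surrounding application may sanitize names elsewhere; if not, a minimal escaping helper (hypothetical, not part of the original class) applied at that print site would prevent markup injection:

 /** Hypothetical helper: minimal HTML escaping for untrusted values. */
 private static String escapeHTML(String s) {
   if (s == null) return "";
   return s.replace("&", "&amp;")
           .replace("<", "&lt;")
           .replace(">", "&gt;")
           .replace("\"", "&quot;");
 }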
Example #6
 public void runJob() {
   if (_isFloodfill) _floodfillSearchesOutstanding--;
   if (_state.completed()) return;
   _state.replyTimeout(_peer);
   if (_penalizePeer) {
     if (_log.shouldLog(Log.INFO))
       _log.info(
           "Penalizing peer for timeout on search: "
               + _peer.toBase64()
               + " after "
               + (getContext().clock().now() - _sentOn));
     getContext().profileManager().dbLookupFailed(_peer);
   } else {
     if (_log.shouldLog(Log.ERROR))
       _log.error("NOT (!!) Penalizing peer for timeout on search: " + _peer.toBase64());
   }
   getContext().statManager().addRateData("netDb.failedPeers", 1, 0);
   searchNext();
 }
 @Override
 protected byte[] doWriteMessage() throws I2CPMessageException, IOException {
   if (_sessionId == null)
     throw new I2CPMessageException("Unable to write out the message as there is not enough data");
   ByteArrayOutputStream os = new ByteArrayOutputStream(256);
   try {
     _sessionId.writeBytes(os);
     DataHelper.writeLong(os, 1, _endpoints.size());
     for (int i = 0; i < _endpoints.size(); i++) {
       Hash router = getRouter(i);
       router.writeBytes(os);
       TunnelId tunnel = getTunnelId(i);
       tunnel.writeBytes(os);
     }
     DataHelper.writeDate(os, _end);
   } catch (DataFormatException dfe) {
     throw new I2CPMessageException("Error writing out the message data", dfe);
   }
   return os.toByteArray();
 }
 void sessionEstablished(SessionConfig config) {
   _destHashCache = config.getDestination().calculateHash();
   if (_log.shouldLog(Log.DEBUG))
     _log.debug("SessionEstablished called for destination " + _destHashCache.toBase64());
   _config = config;
   // We process a few options here, but most are handled by the tunnel manager.
   // The ones here can't be changed later.
   Properties opts = config.getOptions();
   if (opts != null) {
     _dontSendMSM =
         "none".equals(opts.getProperty(I2PClient.PROP_RELIABILITY, "").toLowerCase(Locale.US));
     _dontSendMSMOnReceive = Boolean.parseBoolean(opts.getProperty(I2PClient.PROP_FAST_RECEIVE));
   }
   // per-destination session key manager to prevent rather easy correlation
   if (_sessionKeyManager == null) {
     int tags = TransientSessionKeyManager.DEFAULT_TAGS;
     int thresh = TransientSessionKeyManager.LOW_THRESHOLD;
     if (opts != null) {
       String ptags = opts.getProperty(PROP_TAGS);
       if (ptags != null) {
         try {
           tags = Integer.parseInt(ptags);
          } catch (NumberFormatException nfe) {
            // malformed option: keep the default tag count
          }
       }
       String pthresh = opts.getProperty(PROP_THRESH);
       if (pthresh != null) {
         try {
           thresh = Integer.parseInt(pthresh);
          } catch (NumberFormatException nfe) {
            // malformed option: keep the default threshold
          }
       }
     }
     _sessionKeyManager = new TransientSessionKeyManager(_context, tags, thresh);
   } else {
     _log.error(
         "SessionEstablished called for twice for destination "
             + _destHashCache.toBase64().substring(0, 4));
   }
   _manager.destinationEstablished(this);
 }
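
The two try/parse blocks above repeat a parse-with-default pattern; a small helper (name hypothetical, not in the original class) would express it once:

 /** Hypothetical helper: parse an integer property, falling back to a default. */
 private static int getIntProperty(Properties opts, String name, int def) {
   if (opts == null) return def;
   String val = opts.getProperty(name);
   if (val == null) return def;
   try {
     return Integer.parseInt(val);
   } catch (NumberFormatException nfe) {
     return def; // malformed option: keep the default
   }
 }

With it, the tag and threshold reads collapse to single calls such as tags = getIntProperty(opts, PROP_TAGS, TransientSessionKeyManager.DEFAULT_TAGS).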
Example #9
 public void droppedInboundMessage(long messageId, Hash from, String info) {
   if (!_doLog) return;
   StringBuilder buf = new StringBuilder(512);
   buf.append(getPrefix());
   buf.append("dropped inbound message ").append(messageId);
   buf.append(" from ");
   if (from != null) buf.append(from.toBase64());
   else buf.append("unknown");
   buf.append(": ").append(info);
   addEntry(buf.toString());
   // if (_log.shouldLog(Log.ERROR))
   //    _log.error(buf.toString(), new Exception("source"));
 }
Example #10
 /** connect to the given destination */
 I2PSocket connect(PeerID peer) throws IOException {
   I2PSocketManager mgr = _manager;
   if (mgr == null) throw new IOException("No socket manager");
   Destination addr = peer.getAddress();
   if (addr == null) throw new IOException("Null address");
   if (addr.equals(getMyDestination())) throw new IOException("Attempt to connect to myself");
   Hash dest = addr.calculateHash();
   if (_banlist.contains(dest))
     throw new IOException("Not trying to contact " + dest.toBase64() + ", as they are banlisted");
   try {
     // TODO opts.setPort(xxx); connect(addr, opts)
     // DHT moved above 6881 in 0.9.9
     I2PSocket rv = _manager.connect(addr);
     if (rv != null) _banlist.remove(dest);
     return rv;
   } catch (I2PException ie) {
     _banlist.add(dest);
     _context.simpleScheduler().addEvent(new Unbanlist(dest), 10 * 60 * 1000);
     IOException ioe = new IOException("Unable to reach the peer " + peer);
     ioe.initCause(ie);
     throw ioe;
   }
 }
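
The Unbanlist event scheduled above (to fire after ten minutes) is not part of this excerpt. A sketch of what it presumably does, assuming I2P's SimpleTimer.TimedEvent interface and the surrounding class's _banlist set:

 /** Sketch of the scheduled event: lifts the temporary ban when it fires. */
 private class Unbanlist implements SimpleTimer.TimedEvent {
   private final Hash _dest;

   public Unbanlist(Hash dest) {
     _dest = dest;
   }

   public void timeReached() {
     // the ban placed in connect() was only meant to last ~10 minutes
     _banlist.remove(_dest);
   }
 }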
 private Hash getHash(String name) {
   String key = name.substring(PREFIX.length());
   key = key.substring(0, 44);
   // Hash h = new Hash();
   try {
     // h.fromBase64(key);
     byte[] b = Base64.decode(key);
     if (b == null) return null;
     Hash h = Hash.create(b);
     return h;
   } catch (Exception dfe) {
     _log.warn("Invalid base64 [" + key + "]", dfe);
     return null;
   }
 }
Example #12
 /** We received another message we weren't waiting for and don't know how to handle */
 public void droppedOtherMessage(I2NPMessage message, Hash from) {
   if (!_doLog) return;
   if (message == null) return;
   StringBuilder buf = new StringBuilder(512);
   buf.append(getPrefix());
   buf.append("dropped [")
       .append(message.getClass().getName())
       .append("] ")
       .append(message.getUniqueId());
   buf.append(" [").append(message.toString()).append("] from [");
   if (from != null) buf.append(from.toBase64());
   else buf.append("unknown");
   buf.append("] expiring in ")
       .append(message.getMessageExpiration() - _context.clock().now())
       .append("ms");
   addEntry(buf.toString());
 }
  /**
   * Begin a kademlia style search for the key specified, which can take up to timeoutMs and will
   * fire the appropriate jobs on success or timeout (or if the kademlia search completes without
   * any match)
   *
   * @return null always
   */
  @Override
  SearchJob search(
      Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
    // if (true) return super.search(key, onFindJob, onFailedLookupJob, timeoutMs, isLease);
    if (key == null) throw new IllegalArgumentException("searchin for nothin, eh?");
    boolean isNew = false;
    FloodSearchJob searchJob = null;
    synchronized (_activeFloodQueries) {
      searchJob = _activeFloodQueries.get(key);
      if (searchJob == null) {
        // if (SearchJob.onlyQueryFloodfillPeers(_context)) {
        // searchJob = new FloodOnlySearchJob(_context, this, key, onFindJob, onFailedLookupJob,
        // (int)timeoutMs, isLease);
        searchJob =
            new IterativeSearchJob(
                _context, this, key, onFindJob, onFailedLookupJob, (int) timeoutMs, isLease);
        // } else {
        //    searchJob = new FloodSearchJob(_context, this, key, onFindJob, onFailedLookupJob,
        // (int)timeoutMs, isLease);
        // }
        _activeFloodQueries.put(key, searchJob);
        isNew = true;
      }
    }

    if (isNew) {
      if (_log.shouldLog(Log.DEBUG))
        _log.debug("this is the first search for that key, fire off the FloodSearchJob");
      _context.jobQueue().addJob(searchJob);
    } else {
      if (_log.shouldLog(Log.INFO))
        _log.info(
            "Deferring flood search for "
                + key.toBase64()
                + " with "
                + _activeFloodQueries.size()
                + " in progress");
      searchJob.addDeferred(onFindJob, onFailedLookupJob, timeoutMs, isLease);
      // not necessarily LS
      _context
          .statManager()
          .addRateData(
              "netDb.lookupDeferred", 1, searchJob.getExpiration() - _context.clock().now());
    }
    return null;
  }
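
Callers hand search() two one-shot jobs that fire when the lookup succeeds or times out. A hedged sketch of a caller-side success job (class name and body hypothetical; JobImpl is the router's job base class):

 /** Hypothetical callback: runs once the searched-for entry is in the local netDb. */
 private class FoundJob extends JobImpl {
   public FoundJob(RouterContext ctx) {
     super(ctx);
   }

   public String getName() {
     return "NetDb lookup succeeded";
   }

   public void runJob() {
     // the entry can now be read from the facade's local store
   }
 }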
Example #14
  @Override
  public String toString() {
    StringBuilder buf = new StringBuilder(64);
    if (_receiveTunnelId != null) {
      buf.append("recv on ");
      buf.append(DataHelper.fromLong(_receiveTunnelId, 0, 4));
      buf.append(" ");
    }

    if (_sendTo != null) {
      buf.append("send to ").append(_sendTo.toBase64().substring(0, 4)).append(":");
      if (_sendTunnelId != null) buf.append(DataHelper.fromLong(_sendTunnelId, 0, 4));
    }

    buf.append(" expiring on ").append(TunnelCreatorConfig.format(_expiration));
    buf.append(" having transferred ").append(_messagesProcessed).append("KB");
    return buf.toString();
  }
Example #15
 /**
  * Add a message to be sent down the tunnel (immediately forwarding it to the {@link
  * InboundMessageDistributor} or {@link OutboundMessageDistributor}, as necessary).
  *
  * @param msg message to be sent through the tunnel
  * @param toRouter router to send to after the endpoint (or null for endpoint processing)
  * @param toTunnel tunnel to send to after the endpoint (or null for endpoint or router
  *     processing)
  */
 @Override
 public void add(I2NPMessage msg, Hash toRouter, TunnelId toTunnel) {
   if (_log.shouldLog(Log.DEBUG))
     _log.debug(
         "zero hop gateway: distribute "
              + (_config.isInbound() ? "inbound" : "outbound")
             + " to "
             + (toRouter != null ? toRouter.toBase64().substring(0, 4) : "")
             + "."
             + (toTunnel != null ? toTunnel.getTunnelId() + "" : "")
             + ": "
             + msg);
   if (_config.isInbound()) {
     _inDistributor.distribute(msg, toRouter, toTunnel);
   } else {
     _outDistributor.distribute(msg, toRouter, toTunnel);
   }
   _config.incrementProcessedMessages();
 }
Example #16
 @Override
 protected void doReadMessage(InputStream in, int size) throws I2CPMessageException, IOException {
   try {
     _sessionId = new SessionId();
     _sessionId.readBytes(in);
     int numTunnels = (int) DataHelper.readLong(in, 1);
     _endpoints.clear();
     for (int i = 0; i < numTunnels; i++) {
       // Hash router = new Hash();
       // router.readBytes(in);
       Hash router = Hash.create(in);
       TunnelId tunnel = new TunnelId();
       tunnel.readBytes(in);
       _endpoints.add(new TunnelEndpoint(router, tunnel));
     }
     _end = DataHelper.readDate(in);
   } catch (DataFormatException dfe) {
     throw new I2CPMessageException("Unable to load the message data", dfe);
   }
 }
Example #17
 /** Base64 Hash or Hash.i2p or name.i2p using naming service */
 Destination getDestination(String ip) {
   if (ip == null) return null;
   if (ip.endsWith(".i2p")) {
     if (ip.length() < 520) { // key + ".i2p"
       if (_manager != null && ip.length() == BASE32_HASH_LENGTH + 8 && ip.endsWith(".b32.i2p")) {
         // Use existing I2PSession for b32 lookups if we have it
         // This is much more efficient than using the naming service
         I2PSession sess = _manager.getSession();
         if (sess != null) {
           byte[] b = Base32.decode(ip.substring(0, BASE32_HASH_LENGTH));
           if (b != null) {
             // Hash h = new Hash(b);
             Hash h = Hash.create(b);
             if (_log.shouldLog(Log.INFO)) _log.info("Using existing session for lookup of " + ip);
             try {
               return sess.lookupDest(h, 15 * 1000);
              } catch (I2PSessionException ise) {
                // session lookup failed; fall through to the naming service
              }
           }
         }
       }
       if (_log.shouldLog(Log.INFO)) _log.info("Using naming service for lookup of " + ip);
       return _context.namingService().lookup(ip);
     }
     if (_log.shouldLog(Log.INFO)) _log.info("Creating Destination for " + ip);
     try {
       return new Destination(ip.substring(0, ip.length() - 4)); // sans .i2p
     } catch (DataFormatException dfe) {
       return null;
     }
   } else {
     if (_log.shouldLog(Log.INFO)) _log.info("Creating Destination for " + ip);
     try {
       return new Destination(ip);
     } catch (DataFormatException dfe) {
       return null;
     }
   }
 }
  /**
    * Search for a newer router info, drop it from the db if the search fails, unless we just
    * started up or have bigger problems.
   */
  @Override
  protected void lookupBeforeDropping(Hash peer, RouterInfo info) {
    // following are some special situations, we don't want to
    // drop the peer in these cases
    // yikes don't do this - stack overflow //  getFloodfillPeers().size() == 0 ||
    // yikes2 don't do this either - deadlock! // getKnownRouters() < MIN_REMAINING_ROUTERS ||
    if (info.getNetworkId() == Router.NETWORK_ID
        && (getKBucketSetSize() < MIN_REMAINING_ROUTERS
            || _context.router().getUptime() < DONT_FAIL_PERIOD
            || _context.commSystem().countActivePeers() <= MIN_ACTIVE_PEERS)) {
      if (_log.shouldLog(Log.WARN))
        _log.warn(
            "Not failing " + peer.toBase64() + " as we are just starting up or have problems");
      return;
    }

    // should we skip the search?
    if (_floodfillEnabled
        || _context.jobQueue().getMaxLag() > 500
        || getKBucketSetSize() > MAX_DB_BEFORE_SKIPPING_SEARCH) {
      // don't try to overload ourselves (e.g. failing 3000 router refs at
      // once, and then firing off 3000 netDb lookup tasks)
      // Also don't queue a search if we have plenty of routerinfos
      // (KBucketSetSize() includes leasesets but avoids locking)
      super.lookupBeforeDropping(peer, info);
      return;
    }
    // this sends out the search to the floodfill peers even if we already have the
    // entry locally, firing no job if it gets a reply with an updated value (meaning
    // we shouldn't drop them but instead use the new data), or if they all time out,
     // firing the dropLookupFailedJob, which actually removes our local reference
    search(
        peer,
        new DropLookupFoundJob(_context, peer, info),
        new DropLookupFailedJob(_context, peer, info),
        10 * 1000,
        false);
  }
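
DropLookupFoundJob and DropLookupFailedJob are not shown in this excerpt. A sketch of what the failure job presumably does (body assumed; fail() is taken to be the facade's method for dropping an entry):

 /** Sketch: on lookup failure, remove the stale RouterInfo from the local db. */
 private class DropLookupFailedJob extends JobImpl {
   private final Hash _peer;

   public DropLookupFailedJob(RouterContext ctx, Hash peer, RouterInfo info) {
     super(ctx);
     _peer = peer;
   }

   public String getName() {
     return "Drop netDb entry on failed lookup";
   }

   public void runJob() {
     fail(_peer); // assumed drop call on the enclosing facade
   }
 }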
  public PeerProfile readProfile(File file) {
    Hash peer = getHash(file.getName());
    try {
      if (peer == null) {
        _log.error("The file " + file.getName() + " is not a valid hash");
        return null;
      }
      PeerProfile profile = new PeerProfile(_context, peer);
      Properties props = new Properties();

      loadProps(props, file);

      long lastSentToSuccessfully = getLong(props, "lastSentToSuccessfully");
      if (isExpired(lastSentToSuccessfully)) {
        if (_log.shouldLog(Log.INFO))
          _log.info(
              "Dropping old profile "
                  + file.getName()
                  + ", since we haven't heard from them in a long time");
        file.delete();
        return null;
      } else if (file.getName().endsWith(OLD_SUFFIX)) {
        // migrate to new file name, ignore failure
        String newName = file.getAbsolutePath();
        newName = newName.substring(0, newName.length() - OLD_SUFFIX.length()) + SUFFIX;
        boolean success = file.renameTo(new File(newName));
        if (!success)
          // new file exists and on Windows?
          file.delete();
      }

      profile.setCapacityBonus(getLong(props, "capacityBonus"));
      profile.setIntegrationBonus(getLong(props, "integrationBonus"));
      profile.setSpeedBonus(getLong(props, "speedBonus"));

      profile.setLastHeardAbout(getLong(props, "lastHeardAbout"));
      profile.setFirstHeardAbout(getLong(props, "firstHeardAbout"));
      profile.setLastSendSuccessful(getLong(props, "lastSentToSuccessfully"));
      profile.setLastSendFailed(getLong(props, "lastFailedSend"));
      profile.setLastHeardFrom(getLong(props, "lastHeardFrom"));
      profile.setTunnelTestTimeAverage(getDouble(props, "tunnelTestTimeAverage"));
      profile.setPeakThroughputKBps(getDouble(props, "tunnelPeakThroughput"));
      profile.setPeakTunnelThroughputKBps(getDouble(props, "tunnelPeakTunnelThroughput"));
      profile.setPeakTunnel1mThroughputKBps(getDouble(props, "tunnelPeakTunnel1mThroughput"));

      profile.getTunnelHistory().load(props);

      // In the interest of keeping the in-memory profiles small,
      // don't load the DB info at all unless there is something interesting there
      // (i.e. floodfills)
      // It seems like we do one or two lookups as a part of handshaking?
      // Not sure, to be researched.
      if (getLong(props, "dbHistory.successfulLookups") > 1
          || getLong(props, "dbHistory.failedlLokups") > 1) {
        profile.expandDBProfile();
        profile.getDBHistory().load(props);
        profile.getDbIntroduction().load(props, "dbIntroduction", true);
        profile.getDbResponseTime().load(props, "dbResponseTime", true);
      }

      // profile.getReceiveSize().load(props, "receiveSize", true);
      // profile.getSendSuccessSize().load(props, "sendSuccessSize", true);
      profile.getTunnelCreateResponseTime().load(props, "tunnelCreateResponseTime", true);
      profile.getTunnelTestResponseTime().load(props, "tunnelTestResponseTime", true);

      if (_log.shouldLog(Log.DEBUG))
        _log.debug("Loaded the profile for " + peer.toBase64() + " from " + file.getName());

      return profile;
    } catch (Exception e) {
      if (_log.shouldLog(Log.WARN))
        _log.warn("Error loading properties from " + file.getAbsolutePath(), e);
      file.delete();
      return null;
    }
  }
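
readProfile leans on getLong/getDouble helpers that are not part of this excerpt. A minimal sketch of getLong, under the assumption that absent or malformed values default to 0:

 /** Sketch: read a long property, returning 0 when absent or malformed. */
 private static long getLong(Properties props, String key) {
   String val = props.getProperty(key);
   if (val == null) return 0;
   try {
     return Long.parseLong(val);
   } catch (NumberFormatException nfe) {
     return 0;
   }
 }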
Example #20
  private void runTest() {
    I2PAppContext ctx = I2PAppContext.getGlobalContext();
    Log log = ctx.logManager().getLog(getClass());

     List<Integer> order = pickOrder(ctx);

    TunnelCreatorConfig cfg = createConfig(ctx);
    _replyRouter = new Hash();
     byte[] h = new byte[Hash.HASH_LENGTH];
    Arrays.fill(h, (byte) 0xFF);
    _replyRouter.setData(h);
    _replyTunnel = 42;

    // populate and encrypt the message
    BuildMessageGenerator gen = new BuildMessageGenerator();
    TunnelBuildMessage msg = new TunnelBuildMessage(ctx);
    for (int i = 0; i < BuildMessageGenerator.ORDER.length; i++) {
       int hop = order.get(i).intValue();
      PublicKey key = null;
      if (hop < _pubKeys.length) key = _pubKeys[hop];
      gen.createRecord(i, hop, msg, cfg, _replyRouter, _replyTunnel, ctx, key);
    }
    gen.layeredEncrypt(ctx, msg, cfg, order);

    log.debug(
        "\n================================================================"
            + "\nMessage fully encrypted"
            + "\n================================================================");

    // now msg is fully encrypted, so lets go through the hops, decrypting and replying
    // as necessary

    BuildMessageProcessor proc = new BuildMessageProcessor(ctx);
    for (int i = 0; i < cfg.getLength(); i++) {
      // this not only decrypts the current hop's record, but encrypts the other records
      // with the reply key
      BuildRequestRecord req = proc.decrypt(ctx, msg, _peers[i], _privKeys[i]);
      if (req == null) {
        // no records matched the _peers[i], or the decryption failed
        throw new RuntimeException("foo @ " + i);
      }
      long ourId = req.readReceiveTunnelId();
       byte[] replyIV = req.readReplyIV();
      long nextId = req.readNextTunnelId();
      Hash nextPeer = req.readNextIdentity();
      boolean isInGW = req.readIsInboundGateway();
      boolean isOutEnd = req.readIsOutboundEndpoint();
      long time = req.readRequestTime();
       long now = (ctx.clock().now() / (60L * 60L * 1000L)) * (60L * 60L * 1000L);
      int ourSlot = -1;

      BuildResponseRecord resp = new BuildResponseRecord();
       byte[] reply = resp.create(ctx, 0, req.readReplyKey(), req.readReplyIV(), -1);
      for (int j = 0; j < TunnelBuildMessage.RECORD_COUNT; j++) {
        if (msg.getRecord(j) == null) {
          ourSlot = j;
          msg.setRecord(j, new ByteArray(reply));
          break;
        }
      }

      log.debug(
          "Read slot "
              + ourSlot
              + " containing hop "
              + i
              + " @ "
              + _peers[i].toBase64()
              + " receives on "
              + ourId
              + " w/ replyIV "
              + Base64.encode(replyIV)
              + " sending to "
              + nextId
              + " on "
              + nextPeer.toBase64()
              + " inGW? "
              + isInGW
              + " outEnd? "
              + isOutEnd
              + " time difference "
              + (now - time));
    }

    log.debug(
        "\n================================================================"
            + "\nAll hops traversed and replies gathered"
            + "\n================================================================");

    // now all of the replies are populated, toss 'em into a reply message and handle it
    TunnelBuildReplyMessage reply = new TunnelBuildReplyMessage(ctx);
    for (int i = 0; i < TunnelBuildMessage.RECORD_COUNT; i++) reply.setRecord(i, msg.getRecord(i));

    BuildReplyHandler handler = new BuildReplyHandler();
     int[] statuses = handler.decrypt(ctx, reply, cfg, order);
     if (statuses == null) throw new RuntimeException("Reply decryption failed");
    boolean allAgree = true;
    for (int i = 0; i < cfg.getLength(); i++) {
      Hash peer = cfg.getPeer(i);
       int record = order.get(i).intValue();
      if (statuses[record] != 0) allAgree = false;
      // else
      //    penalize peer according to the rejection cause
    }

    log.debug(
        "\n================================================================"
            + "\nAll peers agree? "
            + allAgree
            + "\n================================================================");
  }
  /** write out the data from the profile to the stream */
  public void writeProfile(PeerProfile profile, OutputStream out) throws IOException {
    String groups = null;
    if (_context.profileOrganizer().isFailing(profile.getPeer())) {
      groups = "Failing";
    } else if (!_context.profileOrganizer().isHighCapacity(profile.getPeer())) {
      groups = "Standard";
    } else {
      if (_context.profileOrganizer().isFast(profile.getPeer())) groups = "Fast, High Capacity";
      else groups = "High Capacity";

      if (_context.profileOrganizer().isWellIntegrated(profile.getPeer()))
        groups = groups + ", Integrated";
    }

    StringBuilder buf = new StringBuilder(512);
    buf.append("########################################################################")
        .append(NL);
    buf.append("# Profile for peer ").append(profile.getPeer().toBase64()).append(NL);
    if (_us != null) buf.append("# as calculated by ").append(_us.toBase64()).append(NL);
    buf.append("#").append(NL);
    buf.append("# Speed: ").append(profile.getSpeedValue()).append(NL);
    buf.append("# Capacity: ").append(profile.getCapacityValue()).append(NL);
    buf.append("# Integration: ").append(profile.getIntegrationValue()).append(NL);
    buf.append("# Groups: ").append(groups).append(NL);
    buf.append("#").append(NL);
    buf.append("########################################################################")
        .append(NL);
    buf.append("##").append(NL);
    add(buf, "speedBonus", profile.getSpeedBonus(), "Manual adjustment to the speed score");
    add(
        buf,
        "capacityBonus",
        profile.getCapacityBonus(),
        "Manual adjustment to the capacity score");
    add(
        buf,
        "integrationBonus",
        profile.getIntegrationBonus(),
        "Manual adjustment to the integration score");
    addDate(
        buf,
        "firstHeardAbout",
        profile.getFirstHeardAbout(),
        "When did we first get a reference to this peer?");
    addDate(
        buf,
        "lastHeardAbout",
        profile.getLastHeardAbout(),
        "When did we last get a reference to this peer?");
    addDate(
        buf,
        "lastHeardFrom",
        profile.getLastHeardFrom(),
        "When did we last get a message from the peer?");
    addDate(
        buf,
        "lastSentToSuccessfully",
        profile.getLastSendSuccessful(),
        "When did we last send the peer a message successfully?");
    addDate(
        buf,
        "lastFailedSend",
        profile.getLastSendFailed(),
        "When did we last fail to send a message to the peer?");
    add(
        buf,
        "tunnelTestTimeAverage",
        profile.getTunnelTestTimeAverage(),
        "Moving average as to how fast the peer replies");
    add(buf, "tunnelPeakThroughput", profile.getPeakThroughputKBps(), "KBytes/sec");
    add(buf, "tunnelPeakTunnelThroughput", profile.getPeakTunnelThroughputKBps(), "KBytes/sec");
    add(buf, "tunnelPeakTunnel1mThroughput", profile.getPeakTunnel1mThroughputKBps(), "KBytes/sec");
    buf.append(NL);

    out.write(buf.toString().getBytes());

    if (profile.getIsExpanded()) {
      // only write out expanded data if, uh, we've got it
      profile.getTunnelHistory().store(out);
      // profile.getReceiveSize().store(out, "receiveSize");
      // profile.getSendSuccessSize().store(out, "sendSuccessSize");
      profile.getTunnelCreateResponseTime().store(out, "tunnelCreateResponseTime");
      profile.getTunnelTestResponseTime().store(out, "tunnelTestResponseTime");
    }

    if (profile.getIsExpandedDB()) {
      profile.getDBHistory().store(out);
      profile.getDbIntroduction().store(out, "dbIntroduction");
      profile.getDbResponseTime().store(out, "dbResponseTime");
    }
  }
Example #22
  public int compare(Hash l, Hash r) {
    return l.toBase64().compareTo(r.toBase64());
  }
Example #23
 /** We shitlisted the peer */
 public void shitlist(Hash peer, String reason) {
   if (!_doLog) return;
   if (peer == null) return;
   addEntry("Shitlist " + peer.toBase64() + ": " + reason);
 }
Example #24
 /** We unshitlisted the peer */
 public void unshitlist(Hash peer) {
   if (!_doLog) return;
   if (peer == null) return;
   addEntry("Unshitlist " + peer.toBase64());
 }
Example #25
 /** Prettify the hash by doing a base64 and returning the first 6 characters */
 private static String getName(Hash router) {
   if (router == null) return "unknown";
   String str = router.toBase64();
   if ((str == null) || (str.length() < 6)) return "invalid";
   return str.substring(0, 6);
 }