Example #1
1
 /**
  * Wait for the username/password message and verify or throw SOCKSException on failure
  *
  * @since 0.8.2
  */
 private void verifyPassword(DataInputStream in, DataOutputStream out)
     throws IOException, SOCKSException {
   int c = in.readUnsignedByte();
   if (c != AUTH_VERSION) throw new SOCKSException("Unsupported authentication version");
   c = in.readUnsignedByte();
   if (c <= 0) throw new SOCKSException("Bad authentication");
   byte[] user = new byte[c];
   in.readFully(user);
   c = in.readUnsignedByte();
   if (c <= 0) throw new SOCKSException("Bad authentication");
   byte[] pw = new byte[c];
   in.readFully(pw);
   // Hopefully these are in UTF-8, since that's what our config file is in
   // the String constructors below may throw UnsupportedEncodingException, which is an IOException
   String u = new String(user, "UTF-8");
   String p = new String(pw, "UTF-8");
   String configUser = props.getProperty(I2PTunnelHTTPClientBase.PROP_USER);
   String configPW = props.getProperty(I2PTunnelHTTPClientBase.PROP_PW);
   if ((!u.equals(configUser)) || (!p.equals(configPW))) {
     _log.error("SOCKS authorization failure");
     sendAuthReply(AUTH_FAILURE, out);
     throw new SOCKSException("SOCKS authorization failure");
   }
   if (_log.shouldLog(Log.INFO)) _log.info("SOCKS authorization success, user: " + u);
   sendAuthReply(AUTH_SUCCESS, out);
 }
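  /*
   * Illustrative sketch, not part of the original sources: the client-side message that
   * verifyPassword() above parses, laid out per RFC 1929 as
   * VER (0x01) | ULEN | UNAME | PLEN | PASSWD, with UTF-8 strings to match the server side.
   * Uses java.io.ByteArrayOutputStream; the helper name is hypothetical.
   */
  private static byte[] buildAuthRequest(String user, String password) throws IOException {
    byte[] u = user.getBytes("UTF-8");
    byte[] p = password.getBytes("UTF-8");
    if (u.length == 0 || u.length > 255 || p.length == 0 || p.length > 255)
      throw new IOException("username and password must each be 1-255 bytes");
    ByteArrayOutputStream baos = new ByteArrayOutputStream(3 + u.length + p.length);
    baos.write(0x01); // auth sub-negotiation version (RFC 1929)
    baos.write(u.length); // ULEN
    baos.write(u, 0, u.length); // UNAME
    baos.write(p.length); // PLEN
    baos.write(p, 0, p.length); // PASSWD
    return baos.toByteArray();
  }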
 private void loadProps(Properties props, File file) throws IOException {
   InputStream fin = null;
   try {
     fin = new BufferedInputStream(new FileInputStream(file), 1);
     fin.mark(1);
     int c = fin.read();
     fin.reset();
     if (c == '#') {
       // uncompressed
       if (_log.shouldLog(Log.INFO))
         _log.info("Loading uncompressed profile data from " + file.getName());
       DataHelper.loadProps(props, fin);
     } else {
       // compressed (or corrupt...)
       if (_log.shouldLog(Log.INFO))
         _log.info("Loading compressed profile data from " + file.getName());
       DataHelper.loadProps(props, new GZIPInputStream(fin));
     }
   } finally {
     try {
       if (fin != null) fin.close();
     } catch (IOException e) {
     }
   }
 }
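  /*
   * Illustrative counterpart, not from the original sources: a writer whose output the peek in
   * loadProps() above can distinguish. A plain java.util.Properties file starts with a '#'
   * comment line while gzip data starts with the magic bytes 0x1f 0x8b, so inspecting the first
   * byte is enough. The real code presumably uses DataHelper.storeProps(); Properties.store()
   * is used here only to keep the sketch self-contained.
   */
  private void storeProps(Properties props, File file, boolean compress) throws IOException {
    OutputStream fout = new BufferedOutputStream(new FileOutputStream(file));
    try {
      if (compress) fout = new GZIPOutputStream(fout);
      // Properties.store() writes a leading '#' timestamp comment, which is what
      // loadProps() looks for in the uncompressed case
      props.store(fout, "profile data");
      fout.flush();
    } finally {
      try {
        fout.close();
      } catch (IOException e) {
      }
    }
  }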
 /** Die a horrible death. Cannot be restarted. */
 public synchronized void stopRunning() {
   if (_dead) return;
   if (_context.router().isAlive() && _log.shouldLog(Log.WARN))
     _log.warn(
         "Stop the I2CP connection!  current leaseSet: " + _currentLeaseSet,
         new Exception("Stop client connection"));
   _dead = true;
   // we need these keys to unpublish the leaseSet
   if (_reader != null) _reader.stopReading();
   if (_writer != null) _writer.stopWriting();
   if (_socket != null)
     try {
       _socket.close();
     } catch (IOException ioe) {
     }
   _messages.clear();
   _acceptedPending.clear();
   if (_sessionKeyManager != null) _sessionKeyManager.shutdown();
   _manager.unregisterConnection(this);
   if (_currentLeaseSet != null) _context.netDb().unpublish(_currentLeaseSet);
   _leaseRequest = null;
   synchronized (_alreadyProcessed) {
     _alreadyProcessed.clear();
   }
   // _config = null;
   // _manager = null;
 }
Example #4
0
  private void connectWithPeers() {
    if (_peerDestFiles != null) {
      for (int i = 0; i < _peerDestFiles.length; i++) {
        try {
          FileInputStream fin = new FileInputStream(_peerDestFiles[i]);
          byte dest[] = new byte[1024];
          int read = DataHelper.read(fin, dest);
          // close the file once the destination is read so the descriptor is not leaked
          fin.close();

          String remDest = new String(dest, 0, read);
          int con = 0;
          Flooder flooder = null;
          synchronized (_remotePeers) {
            con = _remotePeers.size() + 1;
            flooder = new Flooder(con, remDest);
            _remotePeers.put(Integer.valueOf(con), flooder);
          }

          byte msg[] =
              (DataHelper.getUTF8("STREAM CONNECT ID=" + con + " DESTINATION=" + remDest + "\n"));
          synchronized (_samOut) {
            _samOut.write(msg);
            _samOut.flush();
          }
          I2PThread flood = new I2PThread(flooder, "Flood " + con);
          flood.start();
          _log.debug("Starting flooder with peer from " + _peerDestFiles[i] + ": " + con);
        } catch (IOException ioe) {
          _log.error("Unable to read the peer from " + _peerDestFiles[i]);
        }
      }
    }
  }
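  /*
   * Illustrative sketch, not from the original sources: reading the whole destination file
   * rather than into a fixed 1024-byte buffer. A base64 I2P destination typically fits well
   * within 1024 bytes, but reading to EOF is more robust if the file ever grows. The helper
   * name is hypothetical.
   */
  private static String readDestination(String path) throws IOException {
    FileInputStream fin = new FileInputStream(path);
    try {
      ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
      byte[] buf = new byte[256];
      int read;
      while ((read = fin.read(buf)) >= 0) baos.write(buf, 0, read);
      return new String(baos.toByteArray(), "UTF-8").trim();
    } finally {
      try {
        fin.close();
      } catch (IOException e) {
      }
    }
  }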
  /**
   * Destroy the socket manager, freeing all the associated resources. This method will block until
   * all the managed sockets are closed.
   *
   * <p>CANNOT be restarted.
   */
  public void destroySocketManager() {
    if (!_isDestroyed.compareAndSet(false, true)) {
      // shouldn't happen, log a stack trace to find out why it happened
      _log.logCloseLoop("I2PSocketManager", getName());
      return;
    }
    _connectionManager.setAllowIncomingConnections(false);
    _connectionManager.shutdown();
    if (!_subsessions.isEmpty()) {
      for (I2PSession sess : _subsessions) {
        removeSubsession(sess);
      }
    }

    // should we destroy the _session too?
    // yes, since the old lib did (and SAM wants it to, and I don't know why not)
    if ((_session != null) && (!_session.isClosed())) {
      try {
        _session.destroySession();
      } catch (I2PSessionException ise) {
        _log.warn("Unable to destroy the session", ise);
      }
      PcapWriter pcap = null;
      synchronized (_pcapInitLock) {
        pcap = pcapWriter;
      }
      if (pcap != null) pcap.flush();
    }
  }
Example #6
0
 /**
  * Deferred deletion of plugins that we failed to delete before.
  *
  * @since 0.9.13
  */
 private static void deferredDeletePlugins(RouterContext ctx) {
   Log log = ctx.logManager().getLog(PluginStarter.class);
   boolean changed = false;
   Properties props = pluginProperties();
   for (Iterator<Map.Entry<Object, Object>> iter = props.entrySet().iterator(); iter.hasNext(); ) {
     Map.Entry<Object, Object> e = iter.next();
     String name = (String) e.getKey();
     if (name.startsWith(PREFIX) && name.endsWith(ENABLED)) {
       // deferred deletion of a plugin
       if (e.getValue().equals(DELETED)) {
         String app = name.substring(PREFIX.length(), name.lastIndexOf(ENABLED));
         // shouldn't happen, this is run early
         if (isPluginRunning(app, ctx)) continue;
         File pluginDir = new File(ctx.getConfigDir(), PLUGIN_DIR + '/' + app);
         boolean deleted = FileUtil.rmdir(pluginDir, false);
         if (deleted) {
           log.logAlways(Log.WARN, "Deferred deletion of " + pluginDir + " successful");
           iter.remove();
           changed = true;
         } else {
           if (log.shouldLog(Log.WARN)) log.warn("Deferred deletion of " + pluginDir + " failed");
         }
       }
     }
   }
   if (changed) storePluginProperties(props);
 }
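  /*
   * Illustrative sketch, not from the original sources: the marking side that
   * deferredDeletePlugins() above acts on. When an immediate delete fails (e.g. files are
   * still locked), the plugin's enabled property is set to DELETED so the deletion is retried
   * at the next startup. The helper name is hypothetical and the real PluginStarter may do
   * more than this.
   */
  private static void markForDeferredDeletion(String appName) {
    Properties props = pluginProperties();
    props.setProperty(PREFIX + appName + ENABLED, DELETED);
    storePluginProperties(props);
  }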
Example #7
0
  public long receiveEncrypted(byte encrypted[]) {
    TunnelDataMessage msg = new TunnelDataMessage(_context);
    msg.setData(encrypted);
    msg.setTunnelId(_config.getConfig(0).getSendTunnel());

    if (_log.shouldLog(Log.DEBUG))
      _log.debug("received encrypted, sending out " + _config + ": " + msg);
    RouterInfo ri = _nextHopCache;
    if (ri == null) ri = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1));
    if (ri != null) {
      _nextHopCache = ri;
      send(msg, ri);
      return msg.getUniqueId();
    } else {
      // It should be rare to forget the router info for a peer in our own tunnel.
      if (_log.shouldLog(Log.WARN))
        _log.warn("lookup of " + _config.getPeer(1) + " required for " + msg);
      _context
          .netDb()
          .lookupRouterInfo(
              _config.getPeer(1),
              new SendJob(_context, msg),
              new FailedJob(_context),
              MAX_LOOKUP_TIME);
      return -1;
    }
  }
Example #8
0
  /** Search was totally successful */
  private void succeed() {
    if (_log.shouldLog(Log.INFO))
      _log.info(
          getJobId()
              + ": Succeeded search for key "
              + _state.getTarget()
              + " after querying "
              + _state.getAttempted().size());
    if (_log.shouldLog(Log.DEBUG))
      _log.debug(getJobId() + ": State of successful search: " + _state);

    if (_keepStats) {
      long time = getContext().clock().now() - _state.getWhenStarted();
      getContext().statManager().addRateData("netDb.successTime", time, 0);
      getContext()
          .statManager()
          .addRateData("netDb.successPeers", _state.getAttempted().size(), time);
    }
    if (_onSuccess != null) getContext().jobQueue().addJob(_onSuccess);

    _facade.searchComplete(_state.getTarget());

    handleDeferred(true);

    resend();
  }
Example #9
0
 /** Create a new search for the routingKey specified */
 public SearchJob(
     RouterContext context,
     KademliaNetworkDatabaseFacade facade,
     Hash key,
     Job onSuccess,
     Job onFailure,
     long timeoutMs,
     boolean keepStats,
     boolean isLease) {
   super(context);
   if ((key == null) || (key.getData() == null))
     throw new IllegalArgumentException("Search for null key?  wtf");
   _log = getContext().logManager().getLog(getClass());
   _facade = facade;
   _state = new SearchState(getContext(), key);
   _onSuccess = onSuccess;
   _onFailure = onFailure;
   _timeoutMs = timeoutMs;
   _keepStats = keepStats;
   _isLease = isLease;
   _deferredSearches = new ArrayList<Search>(0);
   _peerSelector = facade.getPeerSelector();
   _startedOn = -1;
   _expiration = getContext().clock().now() + timeoutMs;
   getContext().statManager().addRateData("netDb.searchCount", 1, 0);
   if (_log.shouldLog(Log.DEBUG))
     _log.debug(
         "Search (" + getClass().getName() + ") for " + key.toBase64(),
         new Exception("Search enqueued by"));
 }
Example #10
0
 /**
  * Send the specified reply during SOCKS5 authorization
  *
  * @since 0.8.2
  */
 private void sendAuthReply(int replyCode, DataOutputStream out) throws IOException {
   byte[] reply = new byte[2];
   reply[0] = AUTH_VERSION;
   reply[1] = (byte) replyCode;
   if (_log.shouldLog(Log.DEBUG)) _log.debug("Sending auth reply:\n" + HexDump.dump(reply));
   out.write(reply);
 }
Example #11
0
 /**
  * Hashes of the routers we want to check next. This is the 'interesting' part of the
  * algorithm, but to keep you on your toes, it has been refactored into
  * PeerSelector.selectNearestExplicit.
  *
  * @return ordered list of Hash objects
  */
 private List<Hash> getClosestRouters(Hash key, int numClosest, Set alreadyChecked) {
   Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
   if (_log.shouldLog(Log.DEBUG))
     _log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey);
   return _peerSelector.selectNearestExplicit(
       rkey, numClosest, alreadyChecked, _facade.getKBuckets());
 }
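  /*
   * Illustrative sketch, not the actual PeerSelector.selectNearestExplicit implementation:
   * Kademlia-style ordering of peers by XOR distance to the routing key. The XOR of two
   * 32-byte hashes, read as an unsigned big-endian integer, is the distance. Uses
   * java.math.BigInteger and the java.util collection classes; the helper name is
   * hypothetical.
   */
  private static List<Hash> sortByXorDistance(Hash rkey, Collection<Hash> peers) {
    final byte[] target = rkey.getData();
    List<Hash> rv = new ArrayList<Hash>(peers);
    Collections.sort(
        rv,
        new Comparator<Hash>() {
          public int compare(Hash a, Hash b) {
            return distance(a.getData()).compareTo(distance(b.getData()));
          }

          private BigInteger distance(byte[] h) {
            byte[] xor = new byte[target.length];
            for (int i = 0; i < xor.length; i++) xor[i] = (byte) (h[i] ^ target[i]);
            return new BigInteger(1, xor); // unsigned interpretation
          }
        });
    return rv;
  }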
  /**
   * There is no more data coming from the I2P side. Does NOT clear pending data. messageReceived()
   * MUST have been called previously with the messageId of the CLOSE packet.
   */
  public void closeReceived() {
    synchronized (_dataLock) {
      if (_log.shouldLog(Log.DEBUG)) {
        StringBuilder buf = new StringBuilder(128);
        buf.append("Close received, ready bytes: ");
        long available = 0;
        for (int i = 0; i < _readyDataBlocks.size(); i++)
          available += _readyDataBlocks.get(i).getValid();
        available -= _readyDataBlockIndex;
        buf.append(available);
        buf.append(" blocks: ").append(_readyDataBlocks.size());

        buf.append(" not ready blocks: ");
        long notAvailable = 0;
        for (Long id : _notYetReadyBlocks.keySet()) {
          ByteArray ba = _notYetReadyBlocks.get(id);
          buf.append(id).append(" ");

          if (ba != null) notAvailable += ba.getValid();
        }

        buf.append("not ready bytes: ").append(notAvailable);
        buf.append(" highest ready block: ").append(_highestReadyBlockId);

        _log.debug(buf.toString(), new Exception("closed"));
      }
      _closeReceived = true;
      _dataLock.notifyAll();
    }
  }
Example #13
0
 /**
  * Can't find a published standard for this anywhere. See the libtorrent code. Here we use the
  * "added" key as a single string of concatenated 32-byte peer hashes. The added.f and dropped
  * keys are unsupported.
  *
  * @since 0.8.4
  */
 private static void handlePEX(Peer peer, PeerListener listener, byte[] bs, Log log) {
   if (log.shouldLog(Log.DEBUG)) log.debug("Got PEX msg from " + peer);
   try {
     InputStream is = new ByteArrayInputStream(bs);
     BDecoder dec = new BDecoder(is);
     BEValue bev = dec.bdecodeMap();
     Map<String, BEValue> map = bev.getMap();
     bev = map.get("added");
     if (bev == null) return;
     byte[] ids = bev.getBytes();
     if (ids.length < HASH_LENGTH) return;
     int len = Math.min(ids.length, (I2PSnarkUtil.MAX_CONNECTIONS - 1) * HASH_LENGTH);
     List<PeerID> peers = new ArrayList<PeerID>(len / HASH_LENGTH);
     for (int off = 0; off < len; off += HASH_LENGTH) {
       byte[] hash = new byte[HASH_LENGTH];
       System.arraycopy(ids, off, hash, 0, HASH_LENGTH);
       if (DataHelper.eq(hash, peer.getPeerID().getDestHash())) continue;
       PeerID pID = new PeerID(hash, listener.getUtil());
       peers.add(pID);
     }
     // could include ourselves, listener must remove
     listener.gotPeers(peer, peers);
   } catch (Exception e) {
     if (log.shouldLog(Log.INFO)) log.info("PEX msg exception from " + peer, e);
     // peer.disconnect(false);
   }
 }
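  /*
   * Illustrative counterpart, not from the original sources: building the "added" value that
   * handlePEX() above parses - the 32-byte destination hashes of the peers to advertise,
   * concatenated into one byte string. The sender would then bencode a map containing this
   * value under the "added" key. The helper name is hypothetical.
   */
  private static byte[] buildPexAdded(List<Peer> peersToAdvertise) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream(peersToAdvertise.size() * HASH_LENGTH);
    for (Peer p : peersToAdvertise) {
      byte[] hash = p.getPeerID().getDestHash();
      if (hash != null && hash.length == HASH_LENGTH) baos.write(hash, 0, HASH_LENGTH);
    }
    return baos.toByteArray();
  }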
Example #14
0
  protected boolean execStreamClose(Properties props) {
    if (props == null) {
      _log.debug("No parameters specified in STREAM CLOSE message");
      return false;
    }

    int id;
    {
      String strid = props.getProperty("ID");
      if (strid == null) {
        _log.debug("ID not specified in STREAM CLOSE message");
        return false;
      }
      try {
        id = Integer.parseInt(strid);
      } catch (NumberFormatException e) {
        _log.debug("Invalid STREAM CLOSE ID specified: " + strid);
        return false;
      }
    }

    boolean closed = getStreamSession().closeConnection(id);
    if ((!closed) && (_log.shouldLog(Log.WARN)))
      _log.warn("Stream unable to be closed, but this is non-fatal");
    return true;
  }
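  /*
   * Illustrative sketch, not the SAM bridge's actual parser: a raw line such as
   * "STREAM CLOSE ID=42" is split into its domain/opcode pair and the key=value Properties
   * consumed by execStreamClose() above. Simplified: quoting and escaping of values are not
   * handled, and the helper name is hypothetical.
   */
  static Properties parseSamLine(String line) {
    Properties props = new Properties();
    String[] tokens = line.trim().split(" ");
    // tokens[0] and tokens[1] are the domain and opcode, e.g. "STREAM" and "CLOSE"
    for (int i = 2; i < tokens.length; i++) {
      int eq = tokens[i].indexOf('=');
      if (eq > 0) props.setProperty(tokens[i].substring(0, eq), tokens[i].substring(eq + 1));
    }
    return props;
  }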
  /**
   * Blocking call (run in the establisher thread) to determine if the session was created properly.
   * If it wasn't, all the SessionCreated remnants are dropped (perhaps they were spoofed, etc) so
   * that we can receive another one
   *
   * <p>Generates session key and mac key.
   *
   * @return true if valid
   */
  public synchronized boolean validateSessionCreated() {
    if (_currentState == OutboundState.OB_STATE_VALIDATION_FAILED) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Session created already failed");
      return false;
    }
    if (_receivedSignature != null) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Session created already validated");
      return true;
    }

    boolean valid = true;
    try {
      generateSessionKey();
    } catch (DHSessionKeyBuilder.InvalidPublicParameterException ippe) {
      if (_log.shouldLog(Log.WARN))
        _log.warn("Peer " + getRemoteHostId() + " sent us an invalid DH parameter", ippe);
      valid = false;
    }
    if (valid) decryptSignature();

    if (valid && verifySessionCreated()) {
      if (_log.shouldLog(Log.DEBUG)) _log.debug("Session created passed validation");
      return true;
    } else {
      if (_log.shouldLog(Log.WARN))
        _log.warn(
            "Session created failed validation, clearing state for " + _remoteHostId.toString());
      fail();
      return false;
    }
  }
 /**
  * @param claimedAddress an IP/port based RemoteHostId, or null if unknown
  * @param remoteHostId non-null, == claimedAddress if direct, or a hash-based one if indirect
  * @param addr non-null
  */
 public OutboundEstablishState(
     RouterContext ctx,
     RemoteHostId claimedAddress,
     RemoteHostId remoteHostId,
     RouterIdentity remotePeer,
     SessionKey introKey,
     UDPAddress addr,
     DHSessionKeyBuilder.Factory dh) {
   _context = ctx;
   _log = ctx.logManager().getLog(OutboundEstablishState.class);
   if (claimedAddress != null) {
     _bobIP = claimedAddress.getIP();
     _bobPort = claimedAddress.getPort();
   } else {
     // _bobIP = null;
     _bobPort = -1;
   }
   _claimedAddress = claimedAddress;
   _remoteHostId = remoteHostId;
   _remotePeer = remotePeer;
   _introKey = introKey;
   _queuedMessages = new LinkedBlockingQueue<OutNetMessage>();
   _establishBegin = ctx.clock().now();
   _remoteAddress = addr;
   _introductionNonce = -1;
   _keyFactory = dh;
   if (addr.getIntroducerCount() > 0) {
     if (_log.shouldLog(Log.DEBUG))
       _log.debug(
           "new outbound establish to " + remotePeer.calculateHash() + ", with address: " + addr);
     _currentState = OutboundState.OB_STATE_PENDING_INTRO;
   } else {
     _currentState = OutboundState.OB_STATE_UNKNOWN;
   }
 }
Example #17
0
    /**
     * Call when the ports might have changed.
     * The transports can call this pretty quickly at startup,
     * which can result in multiple UPnP threads running at once, but
     * that should be ok.
     */
    public void update(Set<TransportManager.Port> ports) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("UPnP Update with " + ports.size() + " ports");

        //synchronized(this) {
            // TODO
            // called too often and may block for too long
            // may not have started if net was disconnected previously
            //if (!_isRunning && !ports.isEmpty())
            //    start();
            if (!_isRunning)
                return;
        //}

        Set<ForwardPort> forwards = new HashSet<ForwardPort>(ports.size());
        for (TransportManager.Port entry : ports) {
            String style = entry.style;
            int port = entry.port;
            int protocol = -1;
            if ("SSU".equals(style))
                protocol = ForwardPort.PROTOCOL_UDP_IPV4;
            else if ("NTCP".equals(style))
                protocol = ForwardPort.PROTOCOL_TCP_IPV4;
            else
                continue;
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Adding: " + style + " " + port);
            ForwardPort fp = new ForwardPort(style, false, protocol, port);
            forwards.add(fp);
        }
        // non-blocking
        _upnp.onChangePublicPorts(forwards, _upnpCallback);
    }
Example #18
0
 public int allocPage() throws IOException {
   if (freeListStart != 0) {
     try {
       if (flb == null) flb = new FreeListBlock(file, freeListStart);
       if (!flb.isEmpty()) {
         if (log.shouldLog(Log.DEBUG)) log.debug("Alloc from " + flb);
         return flb.takePage();
       } else {
         if (log.shouldLog(Log.DEBUG)) log.debug("Alloc returning empty " + flb);
         freeListStart = flb.getNextPage();
         writeSuperBlock();
         int rv = flb.page;
         flb = null;
         return rv;
       }
     } catch (IOException ioe) {
       log.error("Discarding corrupt free list block page " + freeListStart, ioe);
       freeListStart = 0;
     }
   }
   long offset = file.length();
   fileLen = offset + PAGESIZE;
   file.setLength(fileLen);
   writeSuperBlock();
   return (int) ((offset / PAGESIZE) + 1);
 }
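  /*
   * Illustrative helpers, not from the original sources: page numbers in this file are
   * 1-based, as the return statement above implies (offset / PAGESIZE + 1), so the
   * page-to-offset and offset-to-page mappings are:
   */
  static long pageToOffset(int page) {
    return ((long) (page - 1)) * PAGESIZE;
  }

  static int offsetToPage(long offset) {
    return (int) ((offset / PAGESIZE) + 1);
  }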
Example #19
0
  /**
   * SOCKS5 connection initialization. This method assumes that SOCKS "VER" field has been stripped
   * from the input stream.
   */
  private void init(DataInputStream in, DataOutputStream out) throws IOException, SOCKSException {
    int nMethods = in.readUnsignedByte();
    int method = Method.NO_ACCEPTABLE_METHODS;

    for (int i = 0; i < nMethods; ++i) {
      int meth = in.readUnsignedByte();
      if (((!authRequired) && meth == Method.NO_AUTH_REQUIRED)
          || (authRequired && meth == Method.USERNAME_PASSWORD)) {
        // That's fine, we do support this method
        method = meth;
      }
    }

    switch (method) {
      case Method.USERNAME_PASSWORD:
        _log.debug("username/password authentication required");
        sendInitReply(Method.USERNAME_PASSWORD, out);
        verifyPassword(in, out);
        return;
      case Method.NO_AUTH_REQUIRED:
        _log.debug("no authentication required");
        sendInitReply(Method.NO_AUTH_REQUIRED, out);
        return;
      default:
        _log.debug(
            "no suitable authentication methods found (" + Integer.toHexString(method) + ")");
        sendInitReply(Method.NO_ACCEPTABLE_METHODS, out);
        throw new SOCKSException("Unsupported authentication method");
    }
  }
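  /*
   * Illustrative sketch, not from the original sources: the client greeting that init() above
   * parses. Per RFC 1928 the client sends VER (0x05) | NMETHODS | METHODS, and the caller of
   * init() has already consumed the VER byte. The helper name is hypothetical.
   */
  private static byte[] buildClientGreeting(int[] methods) {
    byte[] msg = new byte[2 + methods.length];
    msg[0] = 0x05; // VER, stripped before init() is called
    msg[1] = (byte) methods.length; // NMETHODS
    for (int i = 0; i < methods.length; i++)
      msg[2 + i] = (byte) methods[i]; // e.g. Method.NO_AUTH_REQUIRED or Method.USERNAME_PASSWORD
    return msg;
  }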
  /**
   * Distribute the message. If the dest is local, it blocks until it's passed to the target
   * ClientConnectionRunner (which then fires it into a MessageReceivedJob). If the dest is remote,
   * it blocks until it is added into the ClientMessagePool
   */
  MessageId distributeMessage(SendMessageMessage message) {
    Payload payload = message.getPayload();
    Destination dest = message.getDestination();
    MessageId id = new MessageId();
    id.setMessageId(getNextMessageId());
    long expiration = 0;
    int flags = 0;
    if (message.getType() == SendMessageExpiresMessage.MESSAGE_TYPE) {
      SendMessageExpiresMessage msg = (SendMessageExpiresMessage) message;
      expiration = msg.getExpirationTime();
      flags = msg.getFlags();
    }
    if (message.getNonce() != 0 && !_dontSendMSM) _acceptedPending.add(id);

    if (_log.shouldLog(Log.DEBUG))
      _log.debug(
          "** Receiving message "
              + id.getMessageId()
              + " with payload of size "
              + payload.getSize()
              + " for session "
              + _sessionId.getSessionId());
    // long beforeDistribute = _context.clock().now();
    // the following blocks as described above
    SessionConfig cfg = _config;
    if (cfg != null)
      _manager.distributeMessage(cfg.getDestination(), dest, payload, id, expiration, flags);
    // else log error?
    // long timeToDistribute = _context.clock().now() - beforeDistribute;
    // if (_log.shouldLog(Log.DEBUG))
    //    _log.warn("Time to distribute in the manager to "
    //              + dest.calculateHash().toBase64() + ": "
    //              + timeToDistribute);
    return id;
  }
Example #21
0
  /** Send a search to the given peer */
  protected void sendSearch(RouterInfo router) {
    if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) {
      // don't search ourselves
      if (_log.shouldLog(Log.ERROR))
        _log.error(getJobId() + ": Don't send search to ourselves - why did we try?");
      return;
    } else {
      if (_log.shouldLog(Log.INFO))
        _log.info(
            getJobId()
                + ": Send search to "
                + router.getIdentity().getHash().toBase64()
                + " for "
                + _state.getTarget().toBase64()
                + " w/ timeout "
                + getPerPeerTimeoutMs(router.getIdentity().calculateHash()));
    }

    getContext().statManager().addRateData("netDb.searchMessageCount", 1, 0);

    // if (_isLease || true) // always send searches out tunnels
    sendLeaseSearch(router);
    // else
    //    sendRouterSearch(router);
  }
Example #22
0
  /**
   * @return lowest offset of any request for the piece
   * @since 0.8.2
   */
  private synchronized Request getLowestOutstandingRequest(int piece) {
    Request rv = null;
    int lowest = Integer.MAX_VALUE;
    for (Request r : outstandingRequests) {
      if (r.getPiece() == piece && r.off < lowest) {
        lowest = r.off;
        rv = r;
      }
    }
    if (pendingRequest != null && pendingRequest.getPiece() == piece && pendingRequest.off < lowest)
      rv = pendingRequest;

    if (_log.shouldLog(Log.DEBUG))
      _log.debug(
          peer
              + " lowest for "
              + piece
              + " is "
              + rv
              + " out of "
              + pendingRequest
              + " and "
              + outstandingRequests);
    return rv;
  }
Example #23
0
  public int addDeferred(Job onFind, Job onFail, long expiration, boolean isLease) {
    Search search = new Search(onFind, onFail, expiration, isLease);
    boolean ok = true;
    int deferred = 0;
    synchronized (_deferredSearches) {
      if (_deferredCleared) ok = false;
      else _deferredSearches.add(search);
      deferred = _deferredSearches.size();
    }

    if (!ok) {
      // race between adding deferred and search completing
      if (_log.shouldLog(Log.WARN))
        _log.warn(
            "Race deferred before searchCompleting?  our onFind="
                + _onSuccess
                + " new one: "
                + onFind);

      // the following /shouldn't/ be necessary, but it doesn't hurt
      _facade.searchComplete(_state.getTarget());
      _facade.search(
          _state.getTarget(), onFind, onFail, expiration - getContext().clock().now(), isLease);
      return 0;
    } else {
      return deferred;
    }
  }
Example #24
0
 synchronized void setChoking(boolean choke) {
   if (choking != choke) {
     if (_log.shouldLog(Log.DEBUG)) _log.debug(peer + " setChoking(" + choke + ")");
     choking = choke;
     out.sendChoke(choke);
   }
 }
Example #25
0
 private void receivePing(Packet packet) {
   boolean ok = packet.verifySignature(_context, packet.getOptionalFrom(), null);
   if (!ok) {
     if (_log.shouldLog(Log.WARN)) {
       if (packet.getOptionalFrom() == null)
         _log.warn(
             "Ping with no from (flagged? " + packet.isFlagSet(Packet.FLAG_FROM_INCLUDED) + ")");
       else if (packet.getOptionalSignature() == null)
         _log.warn(
             "Ping with no signature (flagged? "
                 + packet.isFlagSet(Packet.FLAG_SIGNATURE_INCLUDED)
                 + ")");
       else
         _log.warn(
             "Forged ping, discard (from="
                 + packet.getOptionalFrom().calculateHash().toBase64()
                 + " sig="
                 + packet.getOptionalSignature().toBase64()
                 + ")");
     }
   } else {
     PacketLocal pong = new PacketLocal(_context, packet.getOptionalFrom());
     pong.setFlag(Packet.FLAG_ECHO, true);
     pong.setFlag(Packet.FLAG_SIGNATURE_INCLUDED, false);
     pong.setReceiveStreamId(packet.getSendStreamId());
     _manager.getPacketQueue().enqueue(pong);
   }
 }
Example #26
0
 /**
  * Non-SYN packets with a zero SendStreamID may also be queued here so that they don't get thrown
  * away while the SYN packet that precedes them is still queued.
  *
  * <p>Additional overload protection may be required here... We don't have a 3-way handshake, so
  * the SYN fully opens a connection. Does that make us more or less vulnerable to SYN flooding?
  */
 public void receiveNewSyn(Packet packet) {
   if (!_active) {
     if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) {
       if (_log.shouldLog(Log.WARN)) _log.warn("Dropping new SYN request, as we're not listening");
       sendReset(packet);
     } else {
       if (_log.shouldLog(Log.WARN)) _log.warn("Dropping non-SYN packet - not listening");
     }
     return;
   }
   if (_manager.wasRecentlyClosed(packet.getSendStreamId())) {
     if (_log.shouldLog(Log.WARN))
       _log.warn("Dropping packet for recently closed stream: " + packet);
     return;
   }
   if (_log.shouldLog(Log.INFO))
     _log.info("Receive new SYN: " + packet + ": timeout in " + _acceptTimeout);
   // also check if expiration of the head is long past for overload detection with peek() ?
   boolean success = _synQueue.offer(packet); // fail immediately if full
   if (success) {
     _context.simpleScheduler().addEvent(new TimeoutSyn(packet), _acceptTimeout);
   } else {
     if (_log.shouldLog(Log.WARN)) _log.warn("Dropping new SYN request, as the queue is full");
     if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) sendReset(packet);
   }
 }
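  /*
   * Illustrative sketch, not from the original sources: the overload protection above relies
   * on a bounded queue whose offer() returns false immediately when full instead of blocking.
   * The capacity used by the real connection manager is not shown here; 64 below is an
   * arbitrary illustrative value, and the field and method names are hypothetical.
   */
  private final BlockingQueue<Packet> _synQueueSketch = new LinkedBlockingQueue<Packet>(64);

  private boolean tryEnqueueSyn(Packet packet) {
    return _synQueueSketch.offer(packet); // non-blocking; false means "queue full, drop the SYN"
  }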
Example #27
0
  /**
   * Returns <code>true</code> if one or more client threads are running in a given plugin.
   *
   * @param pluginName the name of the plugin to check
   * @return true if running
   */
  private static boolean isClientThreadRunning(String pluginName, RouterContext ctx) {
    ThreadGroup group = pluginThreadGroups.get(pluginName);
    if (group == null) return false;
    boolean rv = group.activeCount() > 0;

    // Plugins start before the eepsite, and will create the static Timer thread
    // in RolloverFileOutputStream, which never stops. Don't count it.
    if (rv) {
      Log log = ctx.logManager().getLog(PluginStarter.class);
      Thread[] activeThreads = new Thread[128];
      int count = group.enumerate(activeThreads);
      boolean notRollover = false;
      for (int i = 0; i < count; i++) {
        if (activeThreads[i] != null) {
          String name = activeThreads[i].getName();
          if (!"org.eclipse.jetty.util.RolloverFileOutputStream".equals(name)) notRollover = true;
          if (log.shouldLog(Log.DEBUG))
            log.debug(
                "Found " + activeThreads[i].getState() + " thread for " + pluginName + ": " + name);
        }
      }
      rv = notRollover;
    }

    return rv;
  }
 /**
  * Update the options on a running socket manager. Parameters in the I2PSocketOptions interface
  * may be changed directly with the setters; no need to use this method for those. This does NOT
  * update the underlying I2CP or tunnel options; use getSession().updateOptions() for that.
  *
  * <p>TODO There is no way to update the options on a subsession.
  *
  * @param options as created from a call to buildOptions(properties), non-null
  */
 public void setDefaultOptions(I2PSocketOptions options) {
   if (!(options instanceof ConnectionOptions)) throw new IllegalArgumentException();
   if (_log.shouldLog(Log.WARN))
     _log.warn("Changing options from:\n " + _defaultOptions + "\nto:\n " + options);
   _defaultOptions.updateAll((ConnectionOptions) options);
   _connectionManager.updateOptions();
 }
Example #29
0
  public void load(Properties props) {
    _successfulLookups = getLong(props, "dbHistory.successfulLookups");
    _failedLookups = getLong(props, "dbHistory.failedLookups");
    _lookupsReceived = getLong(props, "dbHistory.lookupsReceived");
    _lookupReplyDuplicate = getLong(props, "dbHistory.lookupReplyDuplicate");
    _lookupReplyInvalid = getLong(props, "dbHistory.lookupReplyInvalid");
    _lookupReplyNew = getLong(props, "dbHistory.lookupReplyNew");
    _lookupReplyOld = getLong(props, "dbHistory.lookupReplyOld");
    _unpromptedDbStoreNew = getLong(props, "dbHistory.unpromptedDbStoreNew");
    _unpromptedDbStoreOld = getLong(props, "dbHistory.unpromptedDbStoreOld");
    _lastLookupReceived = getLong(props, "dbHistory.lastLookupReceived");
    _avgDelayBetweenLookupsReceived = getLong(props, "dbHistory.avgDelayBetweenLookupsReceived");
    try {
      _failedLookupRate.load(props, "dbHistory.failedLookupRate", true);
      _log.debug("Loading dbHistory.failedLookupRate");
    } catch (IllegalArgumentException iae) {
      _log.warn("DB History failed lookup rate is corrupt, resetting", iae);
    }

    try {
      _invalidReplyRate.load(props, "dbHistory.invalidReplyRate", true);
    } catch (IllegalArgumentException iae) {
      _log.warn("DB History invalid reply rate is corrupt, resetting", iae);
      createRates(_statGroup);
    }
  }
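  /*
   * Illustrative sketch, not necessarily the real helper: getLong() above presumably parses a
   * numeric property, falling back to 0 when the key is missing or malformed.
   */
  private static long getLong(Properties props, String key) {
    String val = props.getProperty(key);
    if (val == null) return 0;
    try {
      return Long.parseLong(val.trim());
    } catch (NumberFormatException nfe) {
      return 0;
    }
  }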
Example #30
0
  /**
   * Verifies the DSA signature of a signed update file.
   *
   * @param signedFile The signed update file to check.
   * @param signingPublicKey An instance of {@link net.i2p.data.SigningPublicKey} to use for
   *     verification.
   * @return <code>true</code> if the file has a valid signature, otherwise <code>false</code>.
   */
  public boolean verify(File signedFile, SigningPublicKey signingPublicKey) {
    FileInputStream fileInputStream = null;

    try {
      fileInputStream = new FileInputStream(signedFile);
      Signature signature = new Signature();

      signature.readBytes(fileInputStream);

      return _context.dsa().verifySignature(signature, fileInputStream, signingPublicKey);
    } catch (IOException ioe) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Error reading " + signedFile + " to verify", ioe);

      return false;
    } catch (DataFormatException dfe) {
      if (_log.shouldLog(Log.ERROR)) _log.error("Error reading the signature", dfe);

      return false;
    } finally {
      if (fileInputStream != null)
        try {
          fileInputStream.close();
        } catch (IOException ioe) {
        }
    }
  }
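  /*
   * Illustrative counterpart, a sketch rather than the project's actual signer: verify()
   * above expects the file to be a Signature followed immediately by the signed content, so
   * the writing side signs the content stream and then prepends the signature. The
   * DSAEngine.sign(InputStream, SigningPrivateKey) and Signature.writeBytes(OutputStream)
   * calls are assumptions inferred from the verify() code, and the method name is
   * hypothetical.
   */
  public boolean sign(File contentFile, File signedFile, SigningPrivateKey signingPrivateKey) {
    InputStream in = null;
    OutputStream out = null;
    try {
      in = new FileInputStream(contentFile);
      Signature signature = _context.dsa().sign(in, signingPrivateKey);
      in.close();

      out = new BufferedOutputStream(new FileOutputStream(signedFile));
      signature.writeBytes(out); // the signature header that verify() reads first
      in = new FileInputStream(contentFile);
      byte[] buf = new byte[4096];
      int read;
      while ((read = in.read(buf)) >= 0) out.write(buf, 0, read);
      out.flush();
      return true;
    } catch (IOException ioe) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Error signing " + contentFile, ioe);
      return false;
    } catch (DataFormatException dfe) {
      if (_log.shouldLog(Log.ERROR)) _log.error("Error writing the signature", dfe);
      return false;
    } finally {
      if (in != null)
        try {
          in.close();
        } catch (IOException ioe) {
        }
      if (out != null)
        try {
          out.close();
        } catch (IOException ioe) {
        }
    }
  }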