private void loadProps(Properties props, File file) throws IOException {
   InputStream fin = null;
   try {
     fin = new BufferedInputStream(new FileInputStream(file), 1);
     fin.mark(1);
     int c = fin.read();
     fin.reset();
     if (c == '#') {
       // uncompressed
       if (_log.shouldLog(Log.INFO))
         _log.info("Loading uncompressed profile data from " + file.getName());
       DataHelper.loadProps(props, fin);
     } else {
       // compressed (or corrupt...)
       if (_log.shouldLog(Log.INFO))
         _log.info("Loading compressed profile data from " + file.getName());
       DataHelper.loadProps(props, new GZIPInputStream(fin));
     }
   } finally {
     try {
       if (fin != null) fin.close();
     } catch (IOException e) {
     }
   }
 }
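The single-byte peek above works because the two on-disk formats cannot collide on their first byte: a gzip stream always begins with the magic bytes 0x1f 0x8b, while the uncompressed variant is assumed (from the check itself) to start with a '#' comment line (0x23). A standalone illustration of that assumption, not part of the I2P code:

public class FormatPeekExample {
  public static void main(String[] args) {
    int gzipMagic0 = 0x1f; // first byte of every gzip stream (0x1f 0x8b)
    int comment = '#';     // 0x23, assumed first byte of an uncompressed profile file
    System.out.println(gzipMagic0 == comment); // false - the one-byte peek is unambiguous
  }
}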
Example #2
 public int allocPage() throws IOException {
   if (freeListStart != 0) {
     try {
       if (flb == null) flb = new FreeListBlock(file, freeListStart);
       if (!flb.isEmpty()) {
         if (log.shouldLog(Log.DEBUG)) log.debug("Alloc from " + flb);
         return flb.takePage();
       } else {
         if (log.shouldLog(Log.DEBUG)) log.debug("Alloc returning empty " + flb);
         freeListStart = flb.getNextPage();
         writeSuperBlock();
         int rv = flb.page;
         flb = null;
         return rv;
       }
     } catch (IOException ioe) {
       log.error("Discarding corrupt free list block page " + freeListStart, ioe);
       freeListStart = 0;
     }
   }
   long offset = file.length();
   fileLen = offset + PAGESIZE;
   file.setLength(fileLen);
   writeSuperBlock();
   return (int) ((offset / PAGESIZE) + 1);
 }
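A quick sanity check of the page-number arithmetic used above, as a standalone sketch. The PAGESIZE value below is assumed for illustration only (the real constant lives in the blockfile class); the point is that page numbers are 1-based, so the page starting at byte offset off gets number off / PAGESIZE + 1.

public class PageMathExample {
  static final int PAGESIZE = 1024; // assumed value, for illustration only

  static int pageAt(long offset) { return (int) ((offset / PAGESIZE) + 1); }
  static long offsetOf(int page) { return (long) (page - 1) * PAGESIZE; }

  public static void main(String[] args) {
    System.out.println(pageAt(0));        // 1
    System.out.println(pageAt(PAGESIZE)); // 2
    System.out.println(offsetOf(3));      // 2048 = 2 * PAGESIZE
  }
}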
Example #3
  private void x_startTorrent() {
    boolean ok = _util.connect();
    if (!ok) fatal("Unable to connect to I2P");
    if (coordinator == null) {
      I2PServerSocket serversocket = _util.getServerSocket();
      if (serversocket == null) fatal("Unable to listen for I2P connections");
      else {
        Destination d = serversocket.getManager().getSession().getMyDestination();
        if (_log.shouldLog(Log.INFO))
          _log.info(
              "Listening on I2P destination "
                  + d.toBase64()
                  + " / "
                  + d.calculateHash().toBase64());
      }
      if (_log.shouldLog(Log.INFO))
        _log.info("Starting PeerCoordinator, ConnectionAcceptor, and TrackerClient");
      activity = "Collecting pieces";
      coordinator = new PeerCoordinator(_util, id, infoHash, meta, storage, this, this);
      coordinator.setUploaded(savedUploaded);
      if (_peerCoordinatorSet != null) {
        // multitorrent
        _peerCoordinatorSet.add(coordinator);
      } else {
        // single torrent
        acceptor = new ConnectionAcceptor(_util, new PeerAcceptor(coordinator));
      }
      // TODO pass saved closest DHT nodes to the tracker? or direct to the coordinator?
      trackerclient = new TrackerClient(_util, meta, additionalTrackerURL, coordinator, this);
    }
    // ensure acceptor is running when in multitorrent
    if (_peerCoordinatorSet != null && acceptor != null) {
      acceptor.startAccepting();
    }

    stopped = false;
    if (coordinator.halted()) {
      coordinator.restart();
      if (_peerCoordinatorSet != null) _peerCoordinatorSet.add(coordinator);
    }
    if (!trackerclient.started()) {
      trackerclient.start();
    } else if (trackerclient.halted()) {
      if (storage != null) {
        try {
          storage.reopen();
        } catch (IOException ioe) {
          try {
            storage.close();
          } catch (IOException ioee) {
            ioee.printStackTrace();
          }
          fatal("Could not reopen storage", ioe);
        }
      }
      trackerclient.start();
    } else {
      if (_log.shouldLog(Log.INFO)) _log.info("NOT starting TrackerClient???");
    }
  }
Example #4
  /**
   * Verifies the DSA signature of a signed update file.
   *
   * @param signedFile The signed update file to check.
   * @param signingPublicKey An instance of {@link net.i2p.data.SigningPublicKey} to use for
   *     verification.
   * @return <code>true</code> if the file has a valid signature, otherwise <code>false</code>.
   */
  public boolean verify(File signedFile, SigningPublicKey signingPublicKey) {
    FileInputStream fileInputStream = null;

    try {
      fileInputStream = new FileInputStream(signedFile);
      Signature signature = new Signature();

      signature.readBytes(fileInputStream);

      return _context.dsa().verifySignature(signature, fileInputStream, signingPublicKey);
    } catch (IOException ioe) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Error reading " + signedFile + " to verify", ioe);

      return false;
    } catch (DataFormatException dfe) {
      if (_log.shouldLog(Log.ERROR)) _log.error("Error reading the signature", dfe);

      return false;
    } finally {
      if (fileInputStream != null)
        try {
          fileInputStream.close();
        } catch (IOException ioe) {
        }
    }
  }
Example #5
  /**
   * Called when a full chunk (i.e. a piece message) has been received by PeerConnectionIn.
   *
   * <p>This may block quite a while if it is the last chunk for a piece, as it calls the listener,
   * who stores the piece and then calls havePiece for every peer on the torrent (including us).
   */
  void pieceMessage(Request req) {
    int size = req.len;
    peer.downloaded(size);
    listener.downloaded(peer, size);

    if (_log.shouldLog(Log.DEBUG))
      _log.debug(
          "got end of Chunk(" + req.getPiece() + "," + req.off + "," + req.len + ") from " + peer);

    // Last chunk needed for this piece?
    // FIXME if priority changed to skip, we will think we're done when we aren't
    if (getFirstOutstandingRequest(req.getPiece()) == -1) {
      // warning - may block here for a while
      if (listener.gotPiece(peer, req.getPartialPiece())) {
        if (_log.shouldLog(Log.DEBUG)) _log.debug("Got " + req.getPiece() + ": " + peer);
      } else {
        if (_log.shouldLog(Log.WARN)) _log.warn("Got BAD " + req.getPiece() + " from " + peer);
      }
    }

    // ok done with this one
    synchronized (this) {
      pendingRequest = null;
    }
  }
Example #6
  /**
   * Uses the given private key to sign the given input file along with its version string using
   * DSA. The output will be a signed update file where the first 40 bytes are the resulting DSA
   * signature, the next 16 bytes are the input file's version string encoded in UTF-8 (padded with
   * trailing <code>0h</code> characters if necessary), and the remaining bytes are the raw bytes of
   * the input file.
   *
   * @param inputFile The file to be signed.
   * @param signedFile The signed update file to write.
   * @param privateKeyFile The name of the file containing the private key to sign <code>inputFile
   *     </code> with.
   * @param version The version string of the input file. If this is longer than 16 characters it
   *     will be truncated.
   * @return An instance of {@link net.i2p.data.Signature}, or <code>null</code> if there was an
   *     error.
   */
  public Signature sign(
      String inputFile, String signedFile, String privateKeyFile, String version) {
    FileInputStream fileInputStream = null;
    SigningPrivateKey signingPrivateKey = new SigningPrivateKey();

    try {
      fileInputStream = new FileInputStream(privateKeyFile);
      signingPrivateKey.readBytes(fileInputStream);
    } catch (IOException ioe) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Unable to load the signing key", ioe);

      return null;
    } catch (DataFormatException dfe) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Unable to load the signing key", dfe);

      return null;
    } finally {
      if (fileInputStream != null)
        try {
          fileInputStream.close();
        } catch (IOException ioe) {
        }
    }

    return sign(inputFile, signedFile, signingPrivateKey, version);
  }
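The Javadoc above fixes the on-disk layout of a signed update file: a 40-byte DSA signature, then a 16-byte UTF-8 version field padded with trailing 0h bytes, then the raw bytes of the input file. A minimal hand-rolled sketch of reading that header (not part of the I2P API; field sizes are taken directly from the Javadoc):

import java.io.DataInputStream;
import java.io.IOException;

class SignedUpdateHeader {
  static final int SIG_LEN = 40;     // DSA signature length, per the Javadoc above
  static final int VERSION_LEN = 16; // zero-padded version field, per the Javadoc above

  final byte[] signature = new byte[SIG_LEN];
  String version;

  /** Reads the header; the stream is left positioned at the payload bytes. */
  void readFrom(DataInputStream in) throws IOException {
    in.readFully(signature);
    byte[] v = new byte[VERSION_LEN];
    in.readFully(v);
    int end = 0;
    while (end < VERSION_LEN && v[end] != 0) end++; // strip trailing 0h padding
    version = new String(v, 0, end, "UTF-8");
  }
}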
Example #7
 public void runJob() {
   boolean wasFF = _facade.floodfillEnabled();
   boolean ff = shouldBeFloodfill();
   _facade.setFloodfillEnabled(ff);
   if (ff != wasFF) {
     if (ff) {
       getContext().router().eventLog().addEvent(EventLog.BECAME_FLOODFILL);
     } else {
       getContext().router().eventLog().addEvent(EventLog.NOT_FLOODFILL);
     }
     getContext().router().rebuildRouterInfo(true);
     Job routerInfoFlood = new FloodfillRouterInfoFloodJob(getContext(), _facade);
     if (getContext().router().getUptime() < 5 * 60 * 1000) {
       // Needed to prevent race if router.floodfillParticipant=true (not auto)
       routerInfoFlood.getTiming().setStartAfter(getContext().clock().now() + 5 * 60 * 1000);
       getContext().jobQueue().addJob(routerInfoFlood);
       if (_log.shouldLog(Log.DEBUG)) {
         _log.logAlways(
             Log.DEBUG, "Deferring our FloodfillRouterInfoFloodJob run because of low uptime.");
       }
     } else {
       routerInfoFlood.runJob();
       if (_log.shouldLog(Log.DEBUG)) {
         _log.logAlways(Log.DEBUG, "Running FloodfillRouterInfoFloodJob");
       }
     }
   }
   if (_log.shouldLog(Log.INFO)) _log.info("Should we be floodfill? " + ff);
   int delay = (REQUEUE_DELAY / 2) + getContext().random().nextInt(REQUEUE_DELAY);
   // there's a lot of eligible non-floodfills, keep them from all jumping in at once
   // TODO: somehow assess the size of the network to make this adaptive?
   if (!ff) delay *= 4; // this was 7, reduced for moar FFs --zab
   requeue(delay);
 }
Example #8
  /** Send a search to the given peer */
  protected void sendSearch(RouterInfo router) {
    if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) {
      // don't search ourselves
      if (_log.shouldLog(Log.ERROR))
        _log.error(getJobId() + ": Don't send search to ourselves - why did we try?");
      return;
    } else {
      if (_log.shouldLog(Log.INFO))
        _log.info(
            getJobId()
                + ": Send search to "
                + router.getIdentity().getHash().toBase64()
                + " for "
                + _state.getTarget().toBase64()
                + " w/ timeout "
                + getPerPeerTimeoutMs(router.getIdentity().calculateHash()));
    }

    getContext().statManager().addRateData("netDb.searchMessageCount", 1, 0);

    // if (_isLease || true) // always send searches out tunnels
    sendLeaseSearch(router);
    // else
    //    sendRouterSearch(router);
  }
Example #9
    /**
     * Call when the ports might have changed
     * The transports can call this pretty quickly at startup,
     * which can have multiple UPnP threads running at once, but
     * that should be ok.
     */
    public void update(Set<TransportManager.Port> ports) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("UPnP Update with " + ports.size() + " ports");

        //synchronized(this) {
            // TODO
            // called too often and may block for too long
            // may not have started if net was disconnected previously
            //if (!_isRunning && !ports.isEmpty())
            //    start();
            if (!_isRunning)
                return;
        //}

        Set<ForwardPort> forwards = new HashSet<ForwardPort>(ports.size());
        for (TransportManager.Port entry : ports) {
            String style = entry.style;
            int port = entry.port;
            int protocol = -1;
            if ("SSU".equals(style))
                protocol = ForwardPort.PROTOCOL_UDP_IPV4;
            else if ("NTCP".equals(style))
                protocol = ForwardPort.PROTOCOL_TCP_IPV4;
            else
                continue;
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Adding: " + style + " " + port);
            ForwardPort fp = new ForwardPort(style, false, protocol, port);
            forwards.add(fp);
        }
        // non-blocking
        _upnp.onChangePublicPorts(forwards, _upnpCallback);
    }
Example #10
  /**
   * Verifies the DSA signature of a signed update file.
   *
   * @param signedFile The signed update file to check.
   * @param publicKeyFile A file containing the public key to use for verification.
   * @return <code>true</code> if the file has a valid signature, otherwise <code>false</code>.
   */
  public boolean verify(String signedFile, String publicKeyFile) {
    SigningPublicKey signingPublicKey = new SigningPublicKey();
    FileInputStream fileInputStream = null;

    try {
      fileInputStream = new FileInputStream(signedFile);
      signingPublicKey.readBytes(fileInputStream);
    } catch (IOException ioe) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Unable to load the signature", ioe);

      return false;
    } catch (DataFormatException dfe) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Unable to load the signature", dfe);

      return false;
    } finally {
      if (fileInputStream != null)
        try {
          fileInputStream.close();
        } catch (IOException ioe) {
        }
    }

    return verify(new File(signedFile), signingPublicKey);
  }
Example #11
  /** Search was totally successful */
  private void succeed() {
    if (_log.shouldLog(Log.INFO))
      _log.info(
          getJobId()
              + ": Succeeded search for key "
              + _state.getTarget()
              + " after querying "
              + _state.getAttempted().size());
    if (_log.shouldLog(Log.DEBUG))
      _log.debug(getJobId() + ": State of successful search: " + _state);

    if (_keepStats) {
      long time = getContext().clock().now() - _state.getWhenStarted();
      getContext().statManager().addRateData("netDb.successTime", time, 0);
      getContext()
          .statManager()
          .addRateData("netDb.successPeers", _state.getAttempted().size(), time);
    }
    if (_onSuccess != null) getContext().jobQueue().addJob(_onSuccess);

    _facade.searchComplete(_state.getTarget());

    handleDeferred(true);

    resend();
  }
Example #12
  void bitfieldMessage(byte[] bitmap) {
    synchronized (this) {
      if (_log.shouldLog(Log.DEBUG)) _log.debug(peer + " rcv bitfield");
      if (bitfield != null) {
        // XXX - Be liberal in what you accept?
        if (_log.shouldLog(Log.WARN)) _log.warn("Got unexpected bitfield message from " + peer);
        return;
      }

      // XXX - Check for weird bitfield and disconnect?
      // FIXME will have to regenerate the bitfield after we know exactly
      // how many pieces there are, as we don't know how many spare bits there are.
      if (metainfo == null) bitfield = new BitField(bitmap, bitmap.length * 8);
      else bitfield = new BitField(bitmap, metainfo.getPieces());
    }
    if (metainfo == null) return;
    boolean interest = listener.gotBitField(peer, bitfield);
    setInteresting(interest);
    if (bitfield.complete() && !interest) {
      // They are seeding and we are seeding,
      // why did they contact us? (robert)
      // Dump them quick before we send our whole bitmap
      if (_log.shouldLog(Log.WARN)) _log.warn("Disconnecting seed that connects to seeds: " + peer);
      peer.disconnect(true);
    }
  }
Example #13
  /**
   * This is the callback that PeerConnectionOut calls
   *
   * @return bytes or null for errors
   * @since 0.8.2
   */
  public ByteArray loadData(int piece, int begin, int length) {
    ByteArray pieceBytes = listener.gotRequest(peer, piece, begin, length);
    if (pieceBytes == null) {
      // XXX - Protocol error-> disconnect?
      if (_log.shouldLog(Log.WARN)) _log.warn("Got request for unknown piece: " + piece);
      return null;
    }

    // More sanity checks
    if (length != pieceBytes.getData().length) {
      // XXX - Protocol error-> disconnect?
      if (_log.shouldLog(Log.WARN))
        _log.warn(
            "Got out of range 'request: "
                + piece
                + ", "
                + begin
                + ", "
                + length
                + "' message from "
                + peer);
      return null;
    }

    if (_log.shouldLog(Log.DEBUG))
      _log.debug("Sending (" + piece + ", " + begin + ", " + length + ")" + " to " + peer);
    return pieceBytes;
  }
Example #14
  /** Search totally failed */
  protected void fail() {
    if (isLocal()) {
      if (_log.shouldLog(Log.ERROR))
        _log.error(
            getJobId()
                + ": why did we fail if the target is local?: "
                + _state.getTarget().toBase64(),
            new Exception("failure cause"));
      succeed();
      return;
    }

    if (_log.shouldLog(Log.INFO))
      _log.info(getJobId() + ": Failed search for key " + _state.getTarget());
    if (_log.shouldLog(Log.DEBUG)) _log.debug(getJobId() + ": State of failed search: " + _state);

    long time = getContext().clock().now() - _state.getWhenStarted();
    int attempted = _state.getAttempted().size();
    getContext().statManager().addRateData("netDb.failedAttemptedPeers", attempted, time);

    if (_keepStats) {
      getContext().statManager().addRateData("netDb.failedTime", time, 0);
      // _facade.fail(_state.getTarget());
    }
    if (_onFailure != null) getContext().jobQueue().addJob(_onFailure);

    _facade.searchComplete(_state.getTarget());
    handleDeferred(false);
  }
Example #15
 /**
  * Not thread-safe. Blocking. Only used for external sockets. ClientWriterRunner thread is the
  * only caller. Others must use doSend().
  */
 void writeMessage(I2CPMessage msg) {
   // long before = _context.clock().now();
   try {
     // We don't need synchronization here, ClientWriterRunner is the only writer.
     // synchronized (_out) {
     msg.writeMessage(_out);
     _out.flush();
     // }
     // if (_log.shouldLog(Log.DEBUG))
     //    _log.debug("after writeMessage("+ msg.getClass().getName() + "): "
     //               + (_context.clock().now()-before) + "ms");
   } catch (I2CPMessageException ime) {
     _log.error("Error sending I2CP message to client", ime);
     stopRunning();
   } catch (EOFException eofe) {
     // only warn if client went away
     if (_log.shouldLog(Log.WARN))
       _log.warn("Error sending I2CP message - client went away", eofe);
     stopRunning();
   } catch (IOException ioe) {
     if (_log.shouldLog(Log.ERROR)) _log.error("IO Error sending I2CP message to client", ioe);
     stopRunning();
   } catch (Throwable t) {
     _log.log(Log.CRIT, "Unhandled exception sending I2CP message to client", t);
     stopRunning();
     // } finally {
     //    long after = _context.clock().now();
     //    long lag = after - before;
     //    if (lag > 300) {
     //        if (_log.shouldLog(Log.WARN))
     //            _log.warn("synchronization on the i2cp message send took too long (" + lag
     //                      + "ms): " + msg);
     //    }
   }
 }
Example #16
  public long receiveEncrypted(byte encrypted[]) {
    TunnelDataMessage msg = new TunnelDataMessage(_context);
    msg.setData(encrypted);
    msg.setTunnelId(_config.getConfig(0).getSendTunnel());

    if (_log.shouldLog(Log.DEBUG))
      _log.debug("received encrypted, sending out " + _config + ": " + msg);
    RouterInfo ri = _nextHopCache;
    if (ri == null) ri = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1));
    if (ri != null) {
      _nextHopCache = ri;
      send(msg, ri);
      return msg.getUniqueId();
    } else {
      // It should be rare to forget the router info for a peer in our own tunnel.
      if (_log.shouldLog(Log.WARN))
        _log.warn("lookup of " + _config.getPeer(1) + " required for " + msg);
      _context
          .netDb()
          .lookupRouterInfo(
              _config.getPeer(1),
              new SendJob(_context, msg),
              new FailedJob(_context),
              MAX_LOOKUP_TIME);
      return -1;
    }
  }
Example #17
 /**
  * Can't find a published standard for this anywhere. See the libtorrent code. Here we use the
  * "added" key as a single string of concatenated 32-byte peer hashes. added.f and dropped
  * unsupported
  *
  * @since 0.8.4
  */
 private static void handlePEX(Peer peer, PeerListener listener, byte[] bs, Log log) {
   if (log.shouldLog(Log.DEBUG)) log.debug("Got PEX msg from " + peer);
   try {
     InputStream is = new ByteArrayInputStream(bs);
     BDecoder dec = new BDecoder(is);
     BEValue bev = dec.bdecodeMap();
     Map<String, BEValue> map = bev.getMap();
     bev = map.get("added");
     if (bev == null) return;
     byte[] ids = bev.getBytes();
     if (ids.length < HASH_LENGTH) return;
     int len = Math.min(ids.length, (I2PSnarkUtil.MAX_CONNECTIONS - 1) * HASH_LENGTH);
     List<PeerID> peers = new ArrayList<PeerID>(len / HASH_LENGTH);
     for (int off = 0; off < len; off += HASH_LENGTH) {
       byte[] hash = new byte[HASH_LENGTH];
       System.arraycopy(ids, off, hash, 0, HASH_LENGTH);
       if (DataHelper.eq(hash, peer.getPeerID().getDestHash())) continue;
       PeerID pID = new PeerID(hash, listener.getUtil());
       peers.add(pID);
     }
     // could include ourselves, listener must remove
     listener.gotPeers(peer, peers);
   } catch (Exception e) {
     if (log.shouldLog(Log.INFO)) log.info("PEX msg exception from " + peer, e);
     // peer.disconnect(false);
   }
 }
Example #18
  /**
   * Blocking call (run in the establisher thread) to determine if the session was created properly.
   * If it wasn't, all the SessionCreated remnants are dropped (perhaps they were spoofed, etc) so
   * that we can receive another one
   *
   * <p>Generates session key and mac key.
   *
   * @return true if valid
   */
  public synchronized boolean validateSessionCreated() {
    if (_currentState == OutboundState.OB_STATE_VALIDATION_FAILED) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Session created already failed");
      return false;
    }
    if (_receivedSignature != null) {
      if (_log.shouldLog(Log.WARN)) _log.warn("Session created already validated");
      return true;
    }

    boolean valid = true;
    try {
      generateSessionKey();
    } catch (DHSessionKeyBuilder.InvalidPublicParameterException ippe) {
      if (_log.shouldLog(Log.WARN))
        _log.warn("Peer " + getRemoteHostId() + " sent us an invalid DH parameter", ippe);
      valid = false;
    }
    if (valid) decryptSignature();

    if (valid && verifySessionCreated()) {
      if (_log.shouldLog(Log.DEBUG)) _log.debug("Session created passed validation");
      return true;
    } else {
      if (_log.shouldLog(Log.WARN))
        _log.warn(
            "Session created failed validation, clearing state for " + _remoteHostId.toString());
      fail();
      return false;
    }
  }
Example #19
 /**
  * Fetch to memory
  *
  * @param retries if < 0, set timeout to a few seconds
  * @param initialSize buffer size
  * @param maxSize fails if greater
  * @return null on error
  * @since 0.9.4
  */
 public byte[] get(String url, boolean rewrite, int retries, int initialSize, int maxSize) {
   if (_log.shouldLog(Log.DEBUG)) _log.debug("Fetching [" + url + "] to memory");
   String fetchURL = url;
   if (rewrite) fetchURL = rewriteAnnounce(url);
   int timeout;
   if (retries < 0) {
     if (!connected()) return null;
     timeout = EEPGET_CONNECT_TIMEOUT_SHORT;
     retries = 0;
   } else {
     timeout = EEPGET_CONNECT_TIMEOUT;
     if (!connected()) {
       if (!connect()) return null;
     }
   }
   ByteArrayOutputStream out = new ByteArrayOutputStream(initialSize);
   EepGet get = new I2PSocketEepGet(_context, _manager, retries, -1, maxSize, null, out, fetchURL);
   get.addHeader("User-Agent", EEPGET_USER_AGENT);
   if (get.fetch(timeout)) {
     if (_log.shouldLog(Log.DEBUG))
       _log.debug("Fetch successful [" + url + "]: size=" + out.size());
     return out.toByteArray();
   } else {
     if (_log.shouldLog(Log.WARN)) _log.warn("Fetch failed [" + url + "]");
     return null;
   }
 }
Example #20
 /**
  * Non-SYN packets with a zero SendStreamID may also be queued here so that they don't get thrown
  * away while the SYN packet before it is queued.
  *
  * <p>Additional overload protection may be required here... We don't have a 3-way handshake, so
  * the SYN fully opens a connection. Does that make us more or less vulnerable to SYN flooding?
  */
 public void receiveNewSyn(Packet packet) {
   if (!_active) {
     if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) {
       if (_log.shouldLog(Log.WARN)) _log.warn("Dropping new SYN request, as we're not listening");
       sendReset(packet);
     } else {
       if (_log.shouldLog(Log.WARN)) _log.warn("Dropping non-SYN packet - not listening");
     }
     return;
   }
   if (_manager.wasRecentlyClosed(packet.getSendStreamId())) {
     if (_log.shouldLog(Log.WARN))
       _log.warn("Dropping packet for recently closed stream: " + packet);
     return;
   }
   if (_log.shouldLog(Log.INFO))
     _log.info("Receive new SYN: " + packet + ": timeout in " + _acceptTimeout);
   // also check if expiration of the head is long past for overload detection with peek() ?
   boolean success = _synQueue.offer(packet); // fail immediately if full
   if (success) {
     _context.simpleScheduler().addEvent(new TimeoutSyn(packet), _acceptTimeout);
   } else {
     if (_log.shouldLog(Log.WARN)) _log.warn("Dropping new SYN request, as the queue is full");
     if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) sendReset(packet);
   }
 }
Example #21
File: Peer.java  Project: hilbix/i2p
  /**
   * Sets DataIn/OutputStreams, does the handshake and returns the id reported by the other side.
   */
  private byte[] handshake(InputStream in, OutputStream out) throws IOException {
    din = new DataInputStream(in);
    dout = new DataOutputStream(out);

    // Handshake write - header
    dout.write(19);
    dout.write("BitTorrent protocol".getBytes("UTF-8"));
    // Handshake write - options
    long myOptions = OPTION_EXTENSION;
    // FIXME get util here somehow
    // if (util.getDHT() != null)
    //    myOptions |= OPTION_I2P_DHT;
    dout.writeLong(myOptions);
    // Handshake write - metainfo hash
    dout.write(infohash);
    // Handshake write - peer id
    dout.write(my_id);
    dout.flush();

    if (_log.shouldLog(Log.DEBUG)) _log.debug("Wrote my shared hash and ID to " + toString());

    // Handshake read - header
    byte b = din.readByte();
    if (b != 19)
      throw new IOException("Handshake failure, expected 19, got " + (b & 0xff) + " on " + sock);

    byte[] bs = new byte[19];
    din.readFully(bs);
    String bittorrentProtocol = new String(bs, "UTF-8");
    if (!"BitTorrent protocol".equals(bittorrentProtocol))
      throw new IOException(
          "Handshake failure, expected "
              + "'Bittorrent protocol', got '"
              + bittorrentProtocol
              + "'");

    // Handshake read - options
    options = din.readLong();

    // Handshake read - metainfo hash
    bs = new byte[20];
    din.readFully(bs);
    if (!Arrays.equals(infohash, bs)) throw new IOException("Unexpected MetaInfo hash");

    // Handshake read - peer id
    din.readFully(bs);
    if (_log.shouldLog(Log.DEBUG))
      _log.debug("Read the remote side's hash and peerID fully from " + toString());

    if (DataHelper.eq(my_id, bs)) throw new IOException("Connected to myself");

    if (options != 0) {
      // send them something in runConnection() above
      if (_log.shouldLog(Log.DEBUG))
        _log.debug("Peer supports options 0x" + Long.toString(options, 16) + ": " + toString());
    }

    return bs;
  }
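For reference, what the method above writes and then reads back is the standard fixed-size BitTorrent handshake: 1 length byte (19) + 19 protocol-name bytes + 8 option-flag bytes + 20 infohash bytes + 20 peer-id bytes, i.e. 68 bytes in each direction. A trivial standalone check of that arithmetic:

public class HandshakeSizeExample {
  public static void main(String[] args) throws Exception {
    int len = 1 + "BitTorrent protocol".getBytes("UTF-8").length + 8 + 20 + 20;
    System.out.println(len); // 68
  }
}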
Example #22
  /**
   * Starts requesting first chunk of next piece. Returns true if something has been added to the
   * requests, false otherwise. Caller should synchronize.
   */
  private boolean requestNextPiece() {
    // Check that we already know what the other side has.
    if (bitfield != null) {
      // Check for adopting an orphaned partial piece
      PartialPiece pp = listener.getPartialPiece(peer, bitfield);
      if (pp != null) {
        // Double-check that r not already in outstandingRequests
        if (!getRequestedPieces().contains(Integer.valueOf(pp.getPiece()))) {
          Request r = pp.getRequest();
          outstandingRequests.add(r);
          if (!choked) out.sendRequest(r);
          lastRequest = r;
          return true;
        } else {
          if (_log.shouldLog(Log.WARN)) _log.warn("Got dup from coord: " + pp);
          pp.release();
        }
      }

      /**
       * ***** getPartialPiece() does it all now // Note that in addition to the bitfield,
       * PeerCoordinator uses // its request tracking and isRequesting() to determine // what piece
       * to give us next. int nextPiece = listener.wantPiece(peer, bitfield); if (nextPiece != -1 &&
       * (lastRequest == null || lastRequest.getPiece() != nextPiece)) { if
       * (_log.shouldLog(Log.DEBUG)) _log.debug(peer + " want piece " + nextPiece); // Fail safe to
       * make sure we are interested // When we transition into the end game we may not be
       * interested... if (!interesting) { if (_log.shouldLog(Log.DEBUG)) _log.debug(peer + "
       * transition to end game, setting interesting"); interesting = true; out.sendInterest(true);
       * }
       *
       * <p>int piece_length = metainfo.getPieceLength(nextPiece); //Catch a common place for OOMs
       * esp. on 1MB pieces byte[] bs; try { bs = new byte[piece_length]; } catch (OutOfMemoryError
       * oom) { _log.warn("Out of memory, can't request piece " + nextPiece, oom); return false; }
       *
       * <p>int length = Math.min(piece_length, PARTSIZE); Request req = new Request(nextPiece, bs,
       * 0, length); outstandingRequests.add(req); if (!choked) out.sendRequest(req); lastRequest =
       * req; return true; } else { if (_log.shouldLog(Log.DEBUG)) _log.debug(peer + " no more
       * pieces to request"); } *****
       */
    }

    // failsafe
    // However this is bad as it thrashes the peer when we change our mind
    // Ticket 691 cause here?
    if (outstandingRequests.isEmpty()) lastRequest = null;

    // If we are not in the end game, we may run out of things to request
    // because we are asking other peers. Set not-interesting now rather than
    // wait for those other requests to be satisfied via havePiece()
    if (interesting && lastRequest == null) {
      interesting = false;
      out.sendInterest(false);
      if (_log.shouldLog(Log.DEBUG))
        _log.debug(peer + " nothing more to request, now uninteresting");
    }
    return false;
  }
Example #23
 /**
  * Remove the subsession
  *
  * @since 0.9.21
  */
 public void removeSubsession(I2PSession session) {
   _session.removeSubsession(session);
   boolean removed = _subsessions.remove(session);
   if (removed) {
     if (_log.shouldLog(Log.WARN)) _log.warn("Removeed subsession " + session);
   } else {
     if (_log.shouldLog(Log.WARN)) _log.warn("Subsession not found to remove " + session);
   }
 }
Example #24
 public static void handleMessage(Peer peer, PeerListener listener, int id, byte[] bs) {
   Log log = I2PAppContext.getGlobalContext().logManager().getLog(ExtensionHandler.class);
   if (log.shouldLog(Log.INFO))
     log.info("Got extension msg " + id + " length " + bs.length + " from " + peer);
   if (id == ID_HANDSHAKE) handleHandshake(peer, listener, bs, log);
   else if (id == ID_METADATA) handleMetadata(peer, listener, bs, log);
   else if (id == ID_PEX) handlePEX(peer, listener, bs, log);
   else if (id == ID_DHT) handleDHT(peer, listener, bs, log);
   else if (log.shouldLog(Log.INFO)) log.info("Unknown extension msg " + id + " from " + peer);
 }
Example #25
 /**
  *  Blocking, may take a while, up to 20 seconds
  */
 public synchronized void stop() {
     if (_log.shouldLog(Log.DEBUG))
         _log.debug("UPnP Stop");
     _shouldBeRunning = false;
     _rescanner.cancel();
     if (_isRunning)
         _upnp.terminate();
     _isRunning = false;
     _detectedAddress = null;
     if (_log.shouldLog(Log.DEBUG))
         _log.debug("UPnP Stop Done");
 }
Example #26
 void displayPacket(Packet packet, String prefix, String suffix) {
   if (!_log.shouldLog(Log.INFO)) return;
   StringBuilder buf = new StringBuilder(256);
   synchronized (_fmt) {
     buf.append(_fmt.format(new Date()));
   }
   buf.append(": ").append(prefix).append(" ");
   buf.append(packet.toString());
   if (suffix != null) buf.append(" ").append(suffix);
   String str = buf.toString();
   System.out.println(str);
   if (_log.shouldLog(Log.DEBUG)) _log.debug(str);
 }
Example #27
 /**
  * Run an integrity check on the blockfile and all the skiplists in it
  *
  * @return true if the levels were modified.
  */
 public boolean bfck(boolean fix) {
   if (log.shouldLog(Log.INFO)) {
     log.info("magic bytes " + magicBytes);
     log.info("fileLen " + fileLen);
     log.info("freeListStart " + freeListStart);
     log.info("mounted " + mounted);
     log.info("spanSize " + spanSize);
     log.info("Metaindex");
     log.info("Checking meta index in blockfile " + file);
   }
   boolean rv = metaIndex.bslck(fix, true);
   if (rv) {
     if (log.shouldLog(Log.WARN)) log.warn("Repaired meta index in blockfile " + file);
   } else {
     if (log.shouldLog(Log.INFO)) log.info("No errors in meta index in blockfile " + file);
   }
   int items = 0;
   for (SkipIterator iter = metaIndex.iterator(); iter.hasNext(); ) {
     String slname = (String) iter.nextKey();
     Integer page = (Integer) iter.next();
     if (log.shouldLog(Log.INFO)) log.info("List " + slname + " page " + page);
     try {
       // This uses IdentityBytes, so the value class won't be right, but at least
       // it won't fail the out-of-order check
       Serializer keyser =
           slname.equals("%%__REVERSE__%%") ? new IntBytes() : new UTF8StringBytes();
       BSkipList bsl = getIndex(slname, keyser, new IdentityBytes());
       if (bsl == null) {
         log.error("Can't find list? " + slname);
         continue;
       }
       // The check is now done in getIndex(), no need to do here...
       // but we can't get the return value of the check here.
       items++;
     } catch (IOException ioe) {
       log.error("Error with list " + slname, ioe);
     }
   }
   log.info("Checked meta index and " + items + " skiplists");
   if (freeListStart != 0) {
     try {
       if (flb == null) flb = new FreeListBlock(file, freeListStart);
       flb.flbck(true);
     } catch (IOException ioe) {
       log.error("Free list error", ioe);
     }
   } else {
     if (log.shouldLog(Log.INFO)) log.info("No freelist");
   }
   return rv;
 }
Example #28
 /**
  * Receive the DHT port numbers
  *
  * @since DHT
  */
 private static void handleDHT(Peer peer, PeerListener listener, byte[] bs, Log log) {
   if (log.shouldLog(Log.DEBUG)) log.debug("Got DHT msg from " + peer);
   try {
     InputStream is = new ByteArrayInputStream(bs);
     BDecoder dec = new BDecoder(is);
     BEValue bev = dec.bdecodeMap();
     Map<String, BEValue> map = bev.getMap();
     int qport = map.get("port").getInt();
     int rport = map.get("rport").getInt();
     listener.gotPort(peer, qport, rport);
   } catch (Exception e) {
     if (log.shouldLog(Log.INFO)) log.info("DHT msg exception from " + peer, e);
     // peer.disconnect(false);
   }
 }
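The payload decoded above is a small bencoded dictionary with integer "port" and "rport" entries. As a hedged illustration (the port numbers are invented), the wire bytes for {"port": 6881, "rport": 6882} would be the ASCII string d4:porti6881e5:rporti6882ee, which the handler would pass to the listener as qport=6881, rport=6882:

public class DhtPayloadExample {
  public static void main(String[] args) throws Exception {
    // Bencoded dictionary {"port": 6881, "rport": 6882}; the values are examples only.
    byte[] payload = "d4:porti6881e5:rporti6882ee".getBytes("UTF-8");
    System.out.println(payload.length + " bytes"); // 27 bytes
  }
}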
Example #29
 /**
  * We found a non-SYN packet that was queued in the syn queue, check to see if it has a home now,
  * else drop it ...
  */
 private void reReceivePacket(Packet packet) {
   Connection con = _manager.getConnectionByOutboundId(packet.getReceiveStreamId());
   if (con != null) {
     // Send it through the packet handler again
     if (_log.shouldLog(Log.WARN)) _log.warn("Found con for queued non-syn packet: " + packet);
     // false -> don't requeue, fixes a race where a SYN gets dropped
     // between here and PacketHandler, causing the packet to loop forever....
     _manager.getPacketHandler().receivePacketDirect(packet, false);
   } else {
     // goodbye
     if (_log.shouldLog(Log.WARN))
       _log.warn("Did not find con for queued non-syn packet, dropping: " + packet);
     packet.releasePayload();
   }
 }
Example #30
  /**
   * Returns <code>true</code> if one or more client threads are running in a given plugin.
   *
   * @param pluginName
   * @return true if running
   */
  private static boolean isClientThreadRunning(String pluginName, RouterContext ctx) {
    ThreadGroup group = pluginThreadGroups.get(pluginName);
    if (group == null) return false;
    boolean rv = group.activeCount() > 0;

    // Plugins start before the eepsite, and will create the static Timer thread
    // in RolloverFileOutputStream, which never stops. Don't count it.
    if (rv) {
      Log log = ctx.logManager().getLog(PluginStarter.class);
      Thread[] activeThreads = new Thread[128];
      int count = group.enumerate(activeThreads);
      boolean notRollover = false;
      for (int i = 0; i < count; i++) {
        if (activeThreads[i] != null) {
          String name = activeThreads[i].getName();
          if (!"org.eclipse.jetty.util.RolloverFileOutputStream".equals(name)) notRollover = true;
          if (log.shouldLog(Log.DEBUG))
            log.debug(
                "Found " + activeThreads[i].getState() + " thread for " + pluginName + ": " + name);
        }
      }
      rv = notRollover;
    }

    return rv;
  }