/**
 * Builds a receiver for the given UDP socket; inbound packets are handed off
 * to the transport's PacketHandler. Registers the two rate stats that are
 * still in use (several per-packet size/queue stats were disabled long ago).
 */
public UDPReceiver(RouterContext ctx, UDPTransport transport, DatagramSocket socket, String name) {
    _context = ctx;
    _log = ctx.logManager().getLog(UDPReceiver.class);
    _name = name;
    _socket = socket;
    _transport = transport;
    _handler = transport.getPacketHandler();
    if (_handler == null)
        throw new IllegalStateException();
    _runner = new Runner();
    _context.statManager().createRateStat(
            "udp.receiveHolePunch",
            "How often we receive a NAT hole punch",
            "udp",
            UDPTransport.RATES);
    _context.statManager().createRateStat(
            "udp.ignorePacketFromDroplist",
            "Packet lifetime for those dropped on the drop list",
            "udp",
            UDPTransport.RATES);
}
/**
 * Enqueue the specified job.
 *
 * Under the job lock: an already-ready job is left alone; a job already in
 * the timed set is removed and re-added so the TreeSet re-sorts it by its
 * (possibly updated) start time. New jobs are either dropped (overload),
 * made ready immediately, or placed in the timed set. Stat updates and
 * logging happen outside the lock.
 *
 * @param job the job to enqueue; silently ignored if null or if the queue
 *            is no longer alive
 */
public void addJob(Job job) {
    if (job == null || !_alive) return;

    // This does nothing
    // if (job instanceof JobImpl)
    //    ((JobImpl)job).addedToQueue();

    long numReady = 0;
    boolean alreadyExists = false;
    boolean dropped = false;
    // getNext() is now outside the jobLock, is that ok?
    synchronized (_jobLock) {
        if (_readyJobs.contains(job)) alreadyExists = true;
        // snapshot of the ready count, reported to stats after the lock is released
        numReady = _readyJobs.size();
        if (!alreadyExists) {
            // if (_timedJobs.contains(job))
            //    alreadyExists = true;
            // Always remove and re-add, since it needs to be
            // re-sorted in the TreeSet.
            boolean removed = _timedJobs.remove(job);
            if (removed && _log.shouldLog(Log.WARN)) _log.warn("Rescheduling job: " + job);
        }

        if ((!alreadyExists) && shouldDrop(job, numReady)) {
            // overload protection: refuse the job rather than queue it
            job.dropped();
            dropped = true;
        } else {
            if (!alreadyExists) {
                if (job.getTiming().getStartAfter() <= _context.clock().now()) {
                    // don't skew us - its 'start after' its been queued, or later
                    job.getTiming().setStartAfter(_context.clock().now());
                    if (job instanceof JobImpl) ((JobImpl) job).madeReady();
                    _readyJobs.offer(job);
                } else {
                    _timedJobs.add(job);
                    // only notify for _timedJobs, as _readyJobs does not use that lock
                    // only notify if sooner, to reduce contention
                    if (job.getTiming().getStartAfter() < _nextPumperRun) _jobLock.notifyAll();
                }
            }
        }
    }
    _context.statManager().addRateData("jobQueue.readyJobs", numReady, 0);
    if (dropped) {
        _context.statManager().addRateData("jobQueue.droppedJobs", 1, 0);
        _log.logAlways(
            Log.WARN,
            "Dropping job due to overload! # ready jobs: " + numReady + ": job = " + job);
    }
}
/** * Note that we successfully stored to a floodfill peer and verified the result by asking another * floodfill peer */ public void storeSuccessful() { // Fixme, redefined this to include both lookup and store fails, // need to fix the javadocs _failedLookupRate.addData(0, 0); _context.statManager().addRateData("peer.failedLookupRate", 0, 0); _lastStoreSuccessful = _context.clock().now(); }
public RouterDoSThrottle(RouterContext context) { super(context); context .statManager() .createRateStat( "router.throttleNetDbDoS", "How many netDb lookup messages have we received so far during a period with a DoS detected", "Throttle", new long[] {60 * 1000, 10 * 60 * 1000, 60 * 60 * 1000, 24 * 60 * 60 * 1000}); }
public JobQueue(RouterContext context) { _context = context; _log = context.logManager().getLog(JobQueue.class); _context .statManager() .createRateStat( "jobQueue.readyJobs", "How many ready and waiting jobs there are?", "JobQueue", new long[] {60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); _context .statManager() .createRateStat( "jobQueue.droppedJobs", "How many jobs do we drop due to insane overload?", "JobQueue", new long[] {60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); // following are for JobQueueRunner _context .statManager() .createRateStat( "jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] {60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); _context .statManager() .createRateStat( "jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] {60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); _context .statManager() .createRequiredRateStat( "jobQueue.jobLag", "Job run delay (ms)", "JobQueue", new long[] {60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); _context .statManager() .createRateStat( "jobQueue.jobWait", "How long does a job sit on the job queue?", "JobQueue", new long[] {60 * 60 * 1000l, 24 * 60 * 60 * 1000l}); // _context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners // inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l }); _alive = true; _readyJobs = new LinkedBlockingQueue(); _timedJobs = new TreeSet(new JobComparator()); _jobLock = new Object(); _queueRunners = new ConcurrentHashMap(RUNNERS); _jobStats = new ConcurrentHashMap(); _pumper = new QueuePumper(); I2PThread pumperThread = new I2PThread(_pumper, "Job Queue Pumper", true); // pumperThread.setPriority(I2PThread.NORM_PRIORITY+1); pumperThread.start(); }
/** * BLOCKING if queue between here and PacketHandler is full. * * @return zero (was queue size) */ private final int doReceive(UDPPacket packet) { if (!_keepRunning) return 0; if (_log.shouldLog(Log.INFO)) _log.info("Received: " + packet); RemoteHostId from = packet.getRemoteHost(); if (_transport.isInDropList(from)) { if (_log.shouldLog(Log.INFO)) _log.info("Ignoring packet from the drop-listed peer: " + from); _context.statManager().addRateData("udp.ignorePacketFromDroplist", packet.getLifetime()); packet.release(); return 0; } // drop anything apparently from our IP (any port) if (Arrays.equals(from.getIP(), _transport.getExternalIP()) && !_transport.allowLocal()) { if (_log.shouldLog(Log.WARN)) _log.warn("Dropping (spoofed?) packet from ourselves"); packet.release(); return 0; } /** * ** packet.enqueue(); boolean rejected = false; int queueSize = 0; long headPeriod = 0; * * <p>UDPPacket head = _inboundQueue.peek(); if (head != null) { headPeriod = * head.getLifetime(); if (headPeriod > MAX_QUEUE_PERIOD) { rejected = true; } } if (!rejected) * { ** */ try { _handler.queueReceived(packet); } catch (InterruptedException ie) { packet.release(); _keepRunning = false; } // return queueSize + 1; return 0; /** * ** } * * <p>// rejected packet.release(); _context.statManager().addRateData("udp.droppedInbound", * queueSize, headPeriod); if (_log.shouldLog(Log.WARN)) { queueSize = _inboundQueue.size(); * StringBuilder msg = new StringBuilder(); msg.append("Dropping inbound packet with "); * msg.append(queueSize); msg.append(" queued for "); msg.append(headPeriod); msg.append(" * packet handlers: ").append(_transport.getPacketHandlerStatus()); _log.warn(msg.toString()); } * return queueSize; ** */ }
/**
 * Lazily creates the per-peer lookup rate stats and attaches them to the
 * stat log. Safe to call more than once; existing rates are reused.
 *
 * Fixes: the garbled failedLookupRate description ("does this peer to
 * respond") now reads "fail to respond" — lookupFailed() records 1 and the
 * success paths record 0, so the rate measures failures; "nonexistant" typo
 * corrected; lowercase long suffixes replaced with {@code L}.
 *
 * @param statGroup the stat group both rates are registered under
 */
private void createRates(String statGroup) {
    if (_failedLookupRate == null) {
        _failedLookupRate = new RateStat(
                "dbHistory.failedLookupRate",
                "How often does this peer fail to respond to a lookup?",
                statGroup,
                new long[] {10 * 60 * 1000L, 60 * 60 * 1000L, 24 * 60 * 60 * 1000L});
    }
    if (_invalidReplyRate == null) {
        _invalidReplyRate = new RateStat(
                "dbHistory.invalidReplyRate",
                "How often does this peer give us a bad (nonexistent, forged, etc) peer?",
                statGroup,
                new long[] {30 * 60 * 1000L});
    }
    _failedLookupRate.setStatLog(_context.statManager().getStatLog());
    _invalidReplyRate.setStatLog(_context.statManager().getStatLog());
}
/** Note that the peer failed to respond to the db lookup in any way. */
public void lookupFailed() {
    _failedLookups++;
    long now = _context.clock().now();
    _lastLookupFailed = now;
    // a failure contributes a 1 to the failed-lookup rate
    _failedLookupRate.addData(1, 0);
    _context.statManager().addRateData("peer.failedLookupRate", 1, 0);
}
/**
 * Note that the peer was not only able to respond to the lookup, but sent us
 * the data we wanted!
 */
public void lookupSuccessful() {
    _successfulLookups++;
    long now = _context.clock().now();
    _lastLookupSuccessful = now;
    // a success contributes a 0 to the failed-lookup rate
    _failedLookupRate.addData(0, 0);
    _context.statManager().addRateData("peer.failedLookupRate", 0, 0);
}