@Override
public void writeTo(StreamOutput out) throws IOException {
  super.writeTo(out);
  out.writeVInt(nodes.length);
  for (NodeStats node : nodes) {
    node.writeTo(out);
  }
}
/**
 * Handle an incoming FNPGetOfferedKey: verify the offer authenticator, reject if we are
 * overloaded, and otherwise hand the request to the failure table to send the offered key.
 */
private boolean handleGetOfferedKey(Message m, PeerNode source) {
  Key key = (Key) m.getObject(DMT.KEY);
  byte[] authenticator = ((ShortBuffer) m.getObject(DMT.OFFER_AUTHENTICATOR)).getData();
  long uid = m.getLong(DMT.UID);
  if (!HMAC.verifyWithSHA256(
      node.failureTable.offerAuthenticatorKey, key.getFullKey(), authenticator)) {
    Logger.error(
        this, "Invalid offer request from " + source + " : authenticator did not verify");
    try {
      source.sendAsync(
          DMT.createFNPGetOfferedKeyInvalid(uid, DMT.GET_OFFERED_KEY_REJECTED_BAD_AUTHENTICATOR),
          null,
          node.failureTable.senderCounter);
    } catch (NotConnectedException e) {
      // Too bad.
    }
    return true;
  }
  if (logMINOR) Logger.minor(this, "Valid GetOfferedKey for " + key + " from " + source);
  // Do we want it? We can RejectOverload if we don't have the bandwidth...
  boolean isSSK = key instanceof NodeSSK;
  OfferReplyTag tag = new OfferReplyTag(isSSK);
  node.lockUID(uid, isSSK, false, true, false, tag);
  boolean needPubKey;
  try {
    needPubKey = m.getBoolean(DMT.NEED_PUB_KEY);
    String reject = nodeStats.shouldRejectRequest(true, false, isSSK, false, true, source, false);
    if (reject != null) {
      Logger.normal(
          this, "Rejecting FNPGetOfferedKey from " + source + " for " + key + " : " + reject);
      Message rejected = DMT.createFNPRejectedOverload(uid, true);
      try {
        source.sendAsync(rejected, null, node.failureTable.senderCounter);
      } catch (NotConnectedException e) {
        Logger.normal(
            this, "Rejecting (overload) data request from " + source.getPeer() + ": " + e);
      }
      node.unlockUID(uid, isSSK, false, false, true, false, tag);
      return true;
    }
  } catch (Error e) {
    node.unlockUID(uid, isSSK, false, false, true, false, tag);
    throw e;
  } catch (RuntimeException e) {
    node.unlockUID(uid, isSSK, false, false, true, false, tag);
    throw e;
  }
  // Otherwise, sendOfferedKey is responsible for unlocking.
  // Accept it.
  try {
    node.failureTable.sendOfferedKey(key, isSSK, needPubKey, uid, source, tag);
  } catch (NotConnectedException e) {
    // Too bad.
  }
  return true;
}
@Override
public void readFrom(StreamInput in) throws IOException {
  super.readFrom(in);
  nodes = new NodeStats[in.readVInt()];
  for (int i = 0; i < nodes.length; i++) {
    nodes[i] = NodeStats.readNodeStats(in);
  }
}
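// A sketch (not part of the original class) of the stream layout implied by the
// writeTo()/readFrom() pair above; readFrom() must consume fields in exactly the order
// writeTo() emits them:
//
//   [fields written by super.writeTo(out)]
//   vInt  n                // nodes.length
//   n x NodeStats entries  // each node.writeTo(out), read back via NodeStats.readNodeStats(in)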
/**
 * Core loop of the request starter: repeatedly grab a request from the scheduler, apply the
 * bandwidth throttle and overload checks, then start it; sleeps briefly whenever there is
 * nothing to run.
 */
void realRun() {
  ChosenBlock req = null;
  // The last time at which we sent a request or decided not to
  long cycleTime = System.currentTimeMillis();
  while (true) {
    // Allow 5 minutes before we start killing requests due to not connecting.
    OpennetManager om;
    if (core.node.peers.countConnectedPeers() < 3
        && (om = core.node.getOpennet()) != null
        && System.currentTimeMillis() - om.getCreationTime() < 5 * 60 * 1000) {
      try {
        synchronized (this) {
          wait(1000);
        }
      } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
      }
      continue;
    }
    if (req == null) {
      req = sched.grabRequest();
    }
    if (req != null) {
      if (logMINOR) Logger.minor(this, "Running " + req + " priority " + req.getPriority());
      if (!req.localRequestOnly) {
        // Wait
        long delay;
        delay = throttle.getDelay();
        if (logMINOR) Logger.minor(this, "Delay=" + delay + " from " + throttle);
        long sleepUntil = cycleTime + delay;
        if (!LOCAL_REQUESTS_COMPETE_FAIRLY) {
          inputBucket.blockingGrab(
              (int) (Math.max(0, averageInputBytesPerRequest.currentValue())));
          outputBucket.blockingGrab(
              (int) (Math.max(0, averageOutputBytesPerRequest.currentValue())));
        }
        long now;
        do {
          now = System.currentTimeMillis();
          if (now < sleepUntil)
            try {
              Thread.sleep(sleepUntil - now);
              if (logMINOR) Logger.minor(this, "Slept: " + (sleepUntil - now) + "ms");
            } catch (InterruptedException e) {
              // Ignore
            }
        } while (now < sleepUntil);
      }
      // if(!doAIMD) {
      //   // Arbitrary limit on number of local requests waiting for slots.
      //   // Firstly, they use threads. This could be a serious problem for faster nodes.
      //   // Secondly, it may help to prevent wider problems:
      //   // If all queues are full, the network will die.
      //   int[] waiting = core.node.countRequestsWaitingForSlots();
      //   int localRequestsWaitingForSlots = waiting[0];
      //   int maxWaitingForSlots = MAX_WAITING_FOR_SLOTS;
      //   // FIXME calibrate this by the number of local timeouts.
      //   // FIXME consider an AIMD, or some similar mechanism.
      //   // Local timeout-waiting-for-slots is largely dependent on
      //   // the number of requests running, due to strict round-robin,
      //   // so we can probably do something even simpler than an AIMD.
      //   // For now we'll just have a fixed number.
      //   // This should partially address the problem.
      //   // Note that while waitFor() is blocking, we need such a limit anyway.
      //   if(localRequestsWaitingForSlots > maxWaitingForSlots) continue;
      // }
      RejectReason reason;
      assert (req.realTimeFlag == realTime);
      if (LOCAL_REQUESTS_COMPETE_FAIRLY && !req.localRequestOnly) {
        if ((reason =
                stats.shouldRejectRequest(
                    true, isInsert, isSSK, true, false, null, false,
                    isInsert && Node.PREFER_INSERT_DEFAULT, req.realTimeFlag, null))
            != null) {
          if (logMINOR) Logger.minor(this, "Not sending local request: " + reason);
          // Wait one throttle-delay before trying again
          cycleTime = System.currentTimeMillis();
          continue; // Let local requests compete with all the others
        }
      } else {
        stats.waitUntilNotOverloaded(isInsert);
      }
    } else {
      if (logMINOR) Logger.minor(this, "Waiting...");
      // Always take the lock on RequestStarter first. AFAICS we don't synchronize on
      // RequestStarter anywhere else.
      // Nested locks here prevent extra latency when there is a race, and therefore allow us to
      // sleep indefinitely
      synchronized (this) {
        req = sched.grabRequest();
        if (req == null) {
          try {
            // This can happen when most but not all stuff is already running but there is still
            // stuff to fetch, so don't wait *too* long.
            // FIXME increase when we can be *sure* there is nothing left in the queue (especially
            // for transient requests).
            wait(1 * 1000);
          } catch (InterruptedException e) {
            // Ignore
          }
        }
      }
    }
    if (req == null) continue;
    if (!startRequest(req, logMINOR)) {
      // Don't log if it's a cancelled transient request.
      if (!((!req.isPersistent()) && req.isCancelled()))
        Logger.normal(this, "No requests to start on " + req);
    }
    if (!req.localRequestOnly) cycleTime = System.currentTimeMillis();
    req = null;
  }
}
/**
 * Handle an incoming insert request (CHK or SSK): reject loops and overload, lock the UID,
 * and spawn the appropriate insert handler.
 */
private boolean handleInsertRequest(Message m, PeerNode source, boolean isSSK) {
  ByteCounter ctr = isSSK ? node.nodeStats.sskInsertCtr : node.nodeStats.chkInsertCtr;
  long id = m.getLong(DMT.UID);
  if (node.recentlyCompleted(id)) {
    Message rejected = DMT.createFNPRejectedLoop(id);
    try {
      source.sendAsync(rejected, null, ctr);
    } catch (NotConnectedException e) {
      Logger.normal(this, "Rejecting insert request from " + source.getPeer() + ": " + e);
    }
    return true;
  }
  InsertTag tag = new InsertTag(isSSK, InsertTag.START.REMOTE);
  if (!node.lockUID(id, isSSK, true, false, false, tag)) {
    if (logMINOR)
      Logger.minor(this, "Could not lock ID " + id + " -> rejecting (already running)");
    Message rejected = DMT.createFNPRejectedLoop(id);
    try {
      source.sendAsync(rejected, null, ctr);
    } catch (NotConnectedException e) {
      Logger.normal(this, "Rejecting insert request from " + source.getPeer() + ": " + e);
    }
    return true;
  }
  // SSKs don't fix bwlimitDelayTime so shouldn't be accepted when overloaded.
  String rejectReason =
      nodeStats.shouldRejectRequest(!isSSK, true, isSSK, false, false, source, false);
  if (rejectReason != null) {
    Logger.normal(
        this,
        "Rejecting insert from " + source.getPeer() + " preemptively because " + rejectReason);
    Message rejected = DMT.createFNPRejectedOverload(id, true);
    try {
      source.sendAsync(rejected, null, ctr);
    } catch (NotConnectedException e) {
      Logger.normal(
          this, "Rejecting (overload) insert request from " + source.getPeer() + ": " + e);
    }
    node.unlockUID(id, isSSK, true, false, false, false, tag);
    return true;
  }
  boolean forkOnCacheable = Node.FORK_ON_CACHEABLE_DEFAULT;
  Message forkControl = m.getSubMessage(DMT.FNPSubInsertForkControl);
  if (forkControl != null)
    forkOnCacheable = forkControl.getBoolean(DMT.ENABLE_INSERT_FORK_WHEN_CACHEABLE);
  long now = System.currentTimeMillis();
  if (m.getSpec().equals(DMT.FNPSSKInsertRequest)) {
    NodeSSK key = (NodeSSK) m.getObject(DMT.FREENET_ROUTING_KEY);
    byte[] data = ((ShortBuffer) m.getObject(DMT.DATA)).getData();
    byte[] headers = ((ShortBuffer) m.getObject(DMT.BLOCK_HEADERS)).getData();
    short htl = m.getShort(DMT.HTL);
    SSKInsertHandler rh =
        new SSKInsertHandler(key, data, headers, htl, source, id, node, now, tag,
            node.canWriteDatastoreInsert(htl), forkOnCacheable);
    rh.receivedBytes(m.receivedByteCount());
    node.executor.execute(rh, "SSKInsertHandler for " + id + " on " + node.getDarknetPortNumber());
  } else if (m.getSpec().equals(DMT.FNPSSKInsertRequestNew)) {
    NodeSSK key = (NodeSSK) m.getObject(DMT.FREENET_ROUTING_KEY);
    short htl = m.getShort(DMT.HTL);
    SSKInsertHandler rh =
        new SSKInsertHandler(key, null, null, htl, source, id, node, now, tag,
            node.canWriteDatastoreInsert(htl), forkOnCacheable);
    rh.receivedBytes(m.receivedByteCount());
    node.executor.execute(rh, "SSKInsertHandler for " + id + " on " + node.getDarknetPortNumber());
  } else {
    CHKInsertHandler rh = new CHKInsertHandler(m, source, id, node, now, tag, forkOnCacheable);
    node.executor.execute(rh, "CHKInsertHandler for " + id + " on " + node.getDarknetPortNumber());
  }
  if (logMINOR) Logger.minor(this, "Started InsertHandler for " + id);
  return true;
}
/** Handle an incoming FNPDataRequest. */
private boolean handleDataRequest(Message m, PeerNode source, boolean isSSK) {
  long id = m.getLong(DMT.UID);
  ByteCounter ctr = isSSK ? node.nodeStats.sskRequestCtr : node.nodeStats.chkRequestCtr;
  if (node.recentlyCompleted(id)) {
    Message rejected = DMT.createFNPRejectedLoop(id);
    try {
      source.sendAsync(rejected, null, ctr);
    } catch (NotConnectedException e) {
      Logger.normal(this, "Rejecting data request (loop, finished): " + e);
    }
    return true;
  }
  short htl = m.getShort(DMT.HTL);
  Key key = (Key) m.getObject(DMT.FREENET_ROUTING_KEY);
  final RequestTag tag = new RequestTag(isSSK, RequestTag.START.REMOTE);
  if (!node.lockUID(id, isSSK, false, false, false, tag)) {
    if (logMINOR)
      Logger.minor(this, "Could not lock ID " + id + " -> rejecting (already running)");
    Message rejected = DMT.createFNPRejectedLoop(id);
    try {
      source.sendAsync(rejected, null, ctr);
    } catch (NotConnectedException e) {
      Logger.normal(this, "Rejecting request from " + source.getPeer() + ": " + e);
    }
    node.failureTable.onFinalFailure(key, null, htl, htl, -1, source);
    return true;
  } else {
    if (logMINOR) Logger.minor(this, "Locked " + id);
  }
  // There are at least 2 threads that call this function.
  // DO NOT reuse the meta object, unless on a per-thread basis.
  // Object allocation is pretty cheap in modern Java anyway...
  // If we do reuse it, call reset().
  BlockMetadata meta = new BlockMetadata();
  KeyBlock block = node.fetch(key, false, false, false, false, meta);
  String rejectReason =
      nodeStats.shouldRejectRequest(
          !isSSK, false, isSSK, false, false, source, block != null && !meta.isOldBlock());
  if (rejectReason != null) {
    // can accept 1 CHK request every so often, but not with SSKs because they aren't throttled so
    // won't sort out bwlimitDelayTime, which was the whole reason for accepting them when
    // overloaded...
    Logger.normal(
        this,
        "Rejecting " + (isSSK ? "SSK" : "CHK") + " request from " + source.getPeer()
            + " preemptively because " + rejectReason);
    Message rejected = DMT.createFNPRejectedOverload(id, true);
    try {
      source.sendAsync(rejected, null, ctr);
    } catch (NotConnectedException e) {
      Logger.normal(
          this, "Rejecting (overload) data request from " + source.getPeer() + ": " + e);
    }
    tag.setRejected();
    node.unlockUID(id, isSSK, false, false, false, false, tag);
    // Do not tell failure table.
    // Otherwise an attacker can flood us with requests very cheaply and purge our
    // failure table even though we didn't accept any of them.
    return true;
  }
  nodeStats.reportIncomingRequestLocation(key.toNormalizedDouble());
  // if(!node.lockUID(id)) return false;
  RequestHandler rh = new RequestHandler(m, source, id, node, htl, key, tag, block);
  node.executor.execute(rh, "RequestHandler for UID " + id + " on " + node.getDarknetPortNumber());
  return true;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
  builder.field("cluster_name", getClusterName().value());
  builder.startObject("nodes");
  for (NodeStats nodeStats : this) {
    builder.startObject(nodeStats.getNode().id(), XContentBuilder.FieldCaseConversion.NONE);
    builder.field("timestamp", nodeStats.getTimestamp());
    builder.field("name", nodeStats.getNode().name(), XContentBuilder.FieldCaseConversion.NONE);
    builder.field("transport_address", nodeStats.getNode().address().toString());
    if (nodeStats.getHostname() != null) {
      builder.field("hostname", nodeStats.getHostname(), XContentBuilder.FieldCaseConversion.NONE);
    }
    if (!nodeStats.getNode().attributes().isEmpty()) {
      builder.startObject("attributes");
      for (Map.Entry<String, String> attr : nodeStats.getNode().attributes().entrySet()) {
        builder.field(attr.getKey(), attr.getValue());
      }
      builder.endObject();
    }
    if (nodeStats.getIndices() != null) {
      nodeStats.getIndices().toXContent(builder, params);
    }
    if (nodeStats.getOs() != null) {
      nodeStats.getOs().toXContent(builder, params);
    }
    if (nodeStats.getProcess() != null) {
      nodeStats.getProcess().toXContent(builder, params);
    }
    if (nodeStats.getJvm() != null) {
      nodeStats.getJvm().toXContent(builder, params);
    }
    if (nodeStats.getThreadPool() != null) {
      nodeStats.getThreadPool().toXContent(builder, params);
    }
    if (nodeStats.getNetwork() != null) {
      nodeStats.getNetwork().toXContent(builder, params);
    }
    if (nodeStats.getFs() != null) {
      nodeStats.getFs().toXContent(builder, params);
    }
    if (nodeStats.getTransport() != null) {
      nodeStats.getTransport().toXContent(builder, params);
    }
    if (nodeStats.getHttp() != null) {
      nodeStats.getHttp().toXContent(builder, params);
    }
    builder.endObject();
  }
  builder.endObject();
  return builder;
}
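// A sketch (inferred from the builder calls above, not an authoritative schema) of the JSON
// this toXContent() produces. The enclosing root object is opened and closed by the caller;
// each per-node section appears only when the corresponding getter returns non-null:
//
//   "cluster_name": "...",
//   "nodes": {
//     "<nodeId>": {
//       "timestamp": <long>,
//       "name": "...",
//       "transport_address": "...",
//       "hostname": "...",                      // only if getHostname() != null
//       "attributes": { "<key>": "<value>" },   // only if the node has attributes
//       // plus indices, os, process, jvm, thread_pool, network, fs, transport, http sections
//     }
//   }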
/**
 * Report a completed request to the relevant throttle windows (global, by key type, by
 * operation type) and record the key's location as an outgoing request.
 */
public void requestCompleted(boolean isSSK, boolean isInsert, Key key, boolean realTime) {
  getThrottleWindow(realTime).requestCompleted();
  (isSSK ? throttleWindowSSK : throttleWindowCHK).requestCompleted();
  (isInsert ? throttleWindowInsert : throttleWindowRequest).requestCompleted();
  stats.reportOutgoingRequestLocation(key.toNormalizedDouble());
}