public String toString() {
return name;
}
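// Main loop: repeatedly grab a request from the scheduler, wait out the throttle and
// bandwidth limits for remote requests, then start it.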
void realRun() {
ChosenBlock req = null;
// The last time at which we sent a request or decided not to
long cycleTime = System.currentTimeMillis();
while(true) {
// Give a young opennet node up to 5 minutes to reach 3 connected peers before starting
// requests, rather than letting them die due to not connecting.
OpennetManager om;
if(core.node.peers.countConnectedPeers() < 3 && (om = core.node.getOpennet()) != null &&
System.currentTimeMillis() - om.getCreationTime() < MINUTES.toMillis(5)) {
try {
synchronized(this) {
wait(1000);
}
} catch (InterruptedException e) {
// Ignore; just re-check the connection count.
}
continue;
}
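// req may be left over from an iteration that was rejected for load; if so, retry it
// rather than grabbing a new one.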
if(req == null) {
req = sched.grabRequest();
}
if(req != null) {
if(logMINOR) Logger.minor(this, "Running "+req+" priority "+req.getPriority());
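// localRequestOnly requests don't go out to the network, so they bypass the throttle
// and bandwidth accounting below.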
if(!req.localRequestOnly) {
// Pace remote requests: sleep until one throttle delay has passed since the last cycle.
long delay = throttle.getDelay();
if(logMINOR) Logger.minor(this, "Delay="+delay+" from "+throttle);
long sleepUntil = cycleTime + delay;
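// When local requests don't compete fairly with remote ones (see below), reserve the
// average bandwidth cost of one request up front, blocking until the buckets can cover it.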
if(!LOCAL_REQUESTS_COMPETE_FAIRLY) {
inputBucket.blockingGrab((int)(Math.max(0, averageInputBytesPerRequest.currentValue())));
outputBucket.blockingGrab((int)(Math.max(0, averageOutputBytesPerRequest.currentValue())));
}
long now;
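// Loop around Thread.sleep() since it may be interrupted before sleepUntil is reached.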
do {
now = System.currentTimeMillis();
if(now < sleepUntil)
try {
Thread.sleep(sleepUntil - now);
if(logMINOR) Logger.minor(this, "Slept: "+(sleepUntil-now)+"ms");
} catch (InterruptedException e) {
// Ignore
}
} while(now < sleepUntil);
}
// if(!doAIMD) {
// // Arbitrary limit on number of local requests waiting for slots.
// // Firstly, they use threads. This could be a serious problem for faster nodes.
// // Secondly, it may help to prevent wider problems:
// // If all queues are full, the network will die.
// int[] waiting = core.node.countRequestsWaitingForSlots();
// int localRequestsWaitingForSlots = waiting[0];
// int maxWaitingForSlots = MAX_WAITING_FOR_SLOTS;
// // FIXME calibrate this by the number of local timeouts.
// // FIXME consider an AIMD, or some similar mechanism.
// // Local timeout-waiting-for-slots is largely dependent on
// // the number of requests running, due to strict round-robin,
// // so we can probably do something even simpler than an AIMD.
// // For now we'll just have a fixed number.
// // This should partially address the problem.
// // Note that while waitFor() is blocking, we need such a limit anyway.
// if(localRequestsWaitingForSlots > maxWaitingForSlots) continue;
// }
RejectReason reason;
assert(req.realTimeFlag == realTime);
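// Either put the local request through the same accept/reject logic as incoming remote
// requests, or just block until the node is no longer overloaded.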
if(LOCAL_REQUESTS_COMPETE_FAIRLY && !req.localRequestOnly) {
reason = stats.shouldRejectRequest(true, isInsert, isSSK, true, false, null, false,
Node.PREFER_INSERT_DEFAULT && isInsert, req.realTimeFlag, null);
if(reason != null) {
if(logMINOR)
Logger.minor(this, "Not sending local request: "+reason);
// Wait one throttle-delay before trying again
cycleTime = System.currentTimeMillis();
continue; // Let local requests compete with all the others
}
} else {
stats.waitUntilNotOverloaded(isInsert);
}
} else {
if(logMINOR) Logger.minor(this, "Waiting...");
// Always take the lock on RequestStarter first. AFAICS we don't synchronize on RequestStarter anywhere else.
// Re-checking grabRequest() while holding the lock closes the race where a request arrives (and notifies us)
// between the unlocked grab above and the wait() below; without this we could sleep through a wakeup.
synchronized(this) {
req = sched.grabRequest();
if(req == null) {
try {
wait(SECONDS.toMillis(1)); // We can get here while most, but not all, requests are already running and there is still stuff to fetch, so don't wait *too* long.
// FIXME increase when we can be *sure* there is nothing left in the queue (especially for transient requests).
} catch (InterruptedException e) {
// Ignore
}
}
}
}
if(req == null) continue;
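// Try to start the request; startRequest() returns false when there was nothing to
// start, e.g. the request was cancelled in the meantime.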
if(!startRequest(req, logMINOR)) {
// Don't log if it's a cancelled transient request.
if(!((!req.isPersistent()) && req.isCancelled()))
Logger.normal(this, "No requests to start on "+req);
}
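// Reset the throttle clock for the next cycle; local-only requests don't count against the throttle.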
if(!req.localRequestOnly)
cycleTime = System.currentTimeMillis();
req = null;
}
}