static void stoplockedm(void);
static void startlockedm(G*);
static void sysmon(void);
-static uint32 retake(uint32*);
+static uint32 retake(int64);
static void inclocked(int32);
static void checkdead(void);
static void exitsyscall0(G*);
uint32 idle, delay;
int64 now, lastpoll;
G *gp;
- uint32 ticks[MaxGomaxprocs];
	idle = 0; // how many cycles in succession we had not woken anyone up
delay = 0;
injectglist(gp);
}
// retake P's blocked in syscalls
- if(retake(ticks))
+ // and preempt long running G's
+ if(retake(now))
idle = 0;
else
idle++;
}
}
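+// Pdesc is sysmon's per-P bookkeeping: the last value of p->tick it observed
+// and when it observed it. An unchanged tick means the P has gone through no
+// scheduling event since then, i.e. it has stayed in the same syscall or kept
+// running the same G for the whole interval.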
+typedef struct Pdesc Pdesc;
+struct Pdesc
+{
+ uint32 tick;
+ int64 when;
+};
+static Pdesc pdesc[MaxGomaxprocs];
+
static uint32
-retake(uint32 *ticks)
+retake(int64 now)
{
uint32 i, s, n;
int64 t;
P *p;
+ Pdesc *pd;
n = 0;
	for(i = 0; i < runtime·gomaxprocs; i++) {
		p = runtime·allp[i];
		if(p==nil)
			continue;
t = p->tick;
- if(ticks[i] != t) {
- ticks[i] = t;
+ pd = &pdesc[i];
+ if(pd->tick != t) {
+ pd->tick = t;
+ pd->when = now;
continue;
}
s = p->status;
- if(s != Psyscall)
- continue;
- if(p->runqhead == p->runqtail && runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0) // TODO: fast atomic
- continue;
- // Need to increment number of locked M's before the CAS.
- // Otherwise the M from which we retake can exit the syscall,
- // increment nmidle and report deadlock.
- inclocked(-1);
- if(runtime·cas(&p->status, s, Pidle)) {
- n++;
- handoffp(p);
+ if(s == Psyscall) {
+ // Retake P from syscall if it's there for more than 1 sysmon tick (20us).
+ // But only if there is other work to do.
+ if(p->runqhead == p->runqtail &&
+ runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0)
+ continue;
+ // Need to increment number of locked M's before the CAS.
+ // Otherwise the M from which we retake can exit the syscall,
+ // increment nmidle and report deadlock.
+ inclocked(-1);
+ if(runtime·cas(&p->status, s, Pidle)) {
+ n++;
+ handoffp(p);
+ }
+ inclocked(1);
+ } else if(s == Prunning) {
+ // Preempt G if it's running for more than 10ms.
+ if(pd->when + 10*1000*1000 > now)
+ continue;
+ preemptone(p);
}
- inclocked(1);
}
return n;
}
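Stripped of the runtime specifics, retake is a watchdog: the worker side bumps a counter (p->tick) on every scheduling event, and the monitor treats "counter unchanged since my last observation, and more than the threshold of wall-clock time has elapsed" as "stuck". A minimal, self-contained Go sketch of that pattern follows; the worker/monitor split, the names, and the durations are illustrative only, not runtime code:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// desc mirrors Pdesc: the last tick value the monitor observed for the
// worker and the time of that observation.
type desc struct {
	tick uint32
	when time.Time
}

func main() {
	var tick uint32 // bumped by the worker whenever it makes progress

	// Worker: makes progress for ~100ms, then stalls forever.
	go func() {
		for i := 0; i < 100; i++ {
			atomic.AddUint32(&tick, 1)
			time.Sleep(time.Millisecond)
		}
		select {}
	}()

	// Monitor: the same observation retake makes. If the tick is unchanged
	// since the last look and more than 10ms have passed, declare the worker
	// stuck (the runtime would hand off the P or request a preemption here).
	d := desc{when: time.Now()}
	for range time.Tick(5 * time.Millisecond) {
		t := atomic.LoadUint32(&tick)
		now := time.Now()
		if d.tick != t {
			d.tick, d.when = t, now
			continue
		}
		if now.Sub(d.when) > 10*time.Millisecond {
			fmt.Println("no progress for >10ms; worker considered stuck")
			return
		}
	}
}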
return sum
}
+func TestPreemption(t *testing.T) {
+ t.Skip("preemption is disabled")
+ // Test that goroutines are preempted at function calls.
+ const N = 5
+ c := make(chan bool)
+ var x uint32
+ for g := 0; g < 2; g++ {
+ go func(g int) {
+ for i := 0; i < N; i++ {
+ for atomic.LoadUint32(&x) != uint32(g) {
+ preempt()
+ }
+ atomic.StoreUint32(&x, uint32(1-g))
+ }
+ c <- true
+ }(g)
+ }
+ <-c
+ <-c
+}
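TestPreemption calls a preempt helper that is not part of this hunk (its closing lines, return sum and a brace, appear as context above). At this stage preemption is honored only at function-call stack checks, so the helper merely has to perform a real, non-inlined call. A hedged sketch of one possible shape; the array size and the func-variable trick to defeat inlining are assumptions, not necessarily the file's actual definition:

// Sketch only: a possible preempt helper. Assigning the function to a
// package-level variable keeps the call site from being inlined, so calling
// preempt() performs a real function call whose stack-check prologue is
// where a pending preemption request (stackguard0 == StackPreempt) is honored.
var preempt = func() int {
	var a [128]int // size is an assumption; any non-trivial body will do
	sum := 0
	for _, v := range a {
		sum += v
	}
	return sum
}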
+
func TestPreemptionGC(t *testing.T) {
t.Skip("preemption is disabled")
// Test that pending GC preempts running goroutines.
if(gp->stackguard0 == (uintptr)StackPreempt) {
if(gp == m->g0)
runtime·throw("runtime: preempt g0");
- if(oldstatus == Grunning && (m->p == nil || m->p->status != Prunning))
+ if(oldstatus == Grunning && m->p == nil)
runtime·throw("runtime: g is running but p is not");
// Be conservative about where we preempt.
// We are interested in preempting user Go code, not runtime code.
- if(oldstatus != Grunning || m->locks || m->mallocing || m->gcing) {
+ if(oldstatus != Grunning || m->locks || m->mallocing || m->gcing || m->p->status != Prunning) {
// Let the goroutine keep running for now.
// gp->preempt is set, so it will be preempted next time.
gp->stackguard0 = gp->stackguard;