src := "package runtime; type maptype struct{}; type _type struct{}; type alg struct{};" +
" type mspan struct{}; type m struct{}; type lock struct{}; type slicetype struct{};" +
" type iface struct{}; type eface struct{}; type interfacetype struct{}; type itab struct{};" +
- " type mcache struct{}; type bucket struct{}"
+ " type mcache struct{}; type bucket struct{}; type sudog struct{}; type g struct{}"
f, err = parser.ParseFile(fset, filename, src, 0)
if err != nil {
log.Fatalf("incorrect generated file: %s", err)
sudog->list = list(sudog->list, nod(ODCLFIELD, newname(lookup("link")), typenod(ptrto(types[TUINT8]))));
sudog->list = list(sudog->list, nod(ODCLFIELD, newname(lookup("elem")), typenod(ptrto(types[TUINT8]))));
sudog->list = list(sudog->list, nod(ODCLFIELD, newname(lookup("releasetime")), typenod(types[TUINT64])));
+ sudog->list = list(sudog->list, nod(ODCLFIELD, newname(lookup("nrelease")), typenod(types[TINT32])));
typecheck(&sudog, Etype);
sudog->type->noalg = 1;
sudog->type->local = 1;
MOVL DX, 4(DI)
RET
+// func gocputicks() int64
+// Read the CPU time stamp counter with RDTSC. The 64-bit count is
+// delivered in DX:AX and stored as two 32-bit halves of the int64
+// result (low word at ret+0, high word at ret+4), per the 32-bit
+// return convention.
+TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
+ RDTSC
+ MOVL AX, ret+0(FP)
+ MOVL DX, ret+4(FP)
+ RET
+
TEXT runtime·ldt0setup(SB),NOSPLIT,$16-0
// set up ldt 7 to point at tls0
// ldt 1 would be fine on Linux, but on OS X, 7 is as low as we can go.
ADDQ DX, AX
RET
+// func gocputicks() int64
+// Read the CPU time stamp counter with RDTSC; the count comes back
+// in DX:AX and is stored as two 32-bit halves of the int64 result.
+// NOTE(review): this hunk sits amid 64-bit (ADDQ/MOVQ) context yet
+// uses the 32-bit two-MOVL store — presumably a 32-bit-pointer
+// target's asm file; confirm which file this hunk patches.
+TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
+ RDTSC
+ MOVL AX, ret+0(FP)
+ MOVL DX, ret+4(FP)
+ RET
+
TEXT runtime·stackguard(SB),NOSPLIT,$0-16
MOVQ SP, DX
MOVQ DX, sp+0(FP)
ADDQ DX, AX
RET
+// func gocputicks() int64
+// RDTSC still splits the counter across DX:AX on amd64; shift the
+// high half up by 32 and add to assemble the full 64-bit value,
+// then store it as the single 8-byte result.
+TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
+ RDTSC
+ SHLQ $32, DX
+ ADDQ DX, AX
+ MOVQ AX, ret+0(FP)
+ RET
+
TEXT runtime·stackguard(SB),NOSPLIT,$0-8
MOVL SP, DX
MOVL DX, sp+0(FP)
MOVW $0, R0
MOVW (R0), R1
+// func gocputicks() int64
+// No cheap user-mode cycle counter on ARM here: delegate to the C
+// runtime·cputicks.
+// NOTE(review): R0 = SP+8 looks like the address of the int64
+// result slot, passed at 0(SP) as the callee's first argument —
+// confirm against runtime·cputicks's calling convention.
+TEXT runtime·gocputicks(SB),NOSPLIT,$4-8
+ ADD $8, SP, R0
+ MOVW R0, 0(SP)
+ BL runtime·cputicks(SB)
+ RET
+
// bool armcas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
USED(t);
if(!block)
return false;
- runtime·park(nil, nil, "chan send (nil chan)");
+ runtime·park(nil, nil, runtime·gostringnocopy((byte*)"chan send (nil chan)"));
return false; // not reached
}
mysg.selectdone = nil;
g->param = nil;
enqueue(&c->sendq, &mysg);
- runtime·parkunlock(&c->lock, "chan send");
+ runtime·parkunlock(&c->lock, runtime·gostringnocopy((byte*)"chan send"));
if(g->param == nil) {
runtime·lock(&c->lock);
mysg.elem = nil;
mysg.selectdone = nil;
enqueue(&c->sendq, &mysg);
- runtime·parkunlock(&c->lock, "chan send");
+ runtime·parkunlock(&c->lock, runtime·gostringnocopy((byte*)"chan send"));
runtime·lock(&c->lock);
goto asynch;
USED(t);
if(!block)
return false;
- runtime·park(nil, nil, "chan receive (nil chan)");
+ runtime·park(nil, nil, runtime·gostringnocopy((byte*)"chan receive (nil chan)"));
return false; // not reached
}
mysg.selectdone = nil;
g->param = nil;
enqueue(&c->recvq, &mysg);
- runtime·parkunlock(&c->lock, "chan receive");
+ runtime·parkunlock(&c->lock, runtime·gostringnocopy((byte*)"chan receive"));
if(g->param == nil) {
runtime·lock(&c->lock);
mysg.elem = nil;
mysg.selectdone = nil;
enqueue(&c->recvq, &mysg);
- runtime·parkunlock(&c->lock, "chan receive");
+ runtime·parkunlock(&c->lock, runtime·gostringnocopy((byte*)"chan receive"));
runtime·lock(&c->lock);
goto asynch;
}
func block() {
- runtime·park(nil, nil, "select (no cases)"); // forever
+ runtime·park(nil, nil, runtime·gostringnocopy((byte*)"select (no cases)")); // forever
}
static void* selectgo(Select**);
}
g->param = nil;
- runtime·park(selparkcommit, sel, "select");
+ runtime·park(selparkcommit, sel, runtime·gostringnocopy((byte*)"select"));
sellock(sel);
sg = g->param;
SudoG* link;
byte* elem; // data element
int64 releasetime;
+ int32 nrelease; // -1 for acquire
};
struct WaitQ
dumpbool(gp->issystem);
dumpbool(gp->isbackground);
dumpint(gp->waitsince);
- dumpcstr(gp->waitreason);
+ dumpstr(gp->waitreason);
dumpint((uintptr)gp->sched.ctxt);
dumpint((uintptr)gp->m);
dumpint((uintptr)gp->defer);
// Call dump routine on M stack.
g->status = Gwaiting;
- g->waitreason = "dumping heap";
+ g->waitreason = runtime·gostringnocopy((byte*)"dumping heap");
runtime·mcall(mdump);
// Reset dump file.
uintptr size; // total size of stacks in list
};
+typedef struct SudoG SudoG;
+
// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
struct MCache
StackFreeList stackcache[NumStackOrders];
+ SudoG* sudogcache;
+
void* gcworkbuf;
// Local allocator stats, flushed during GC.
if(c != nil) {
c->tiny = nil;
c->tinysize = 0;
+ c->sudogcache = nil;
}
// clear defer pools
for(i=0; i<nelem(p->deferpool); i++)
}
sweep.parked = true;
g->isbackground = true;
- runtime·parkunlock(&gclock, "GC sweep wait");
+ runtime·parkunlock(&gclock, runtime·gostringnocopy((byte*)"GC sweep wait"));
g->isbackground = false;
}
}
// switch to g0, call gc(&a), then switch back
g->param = &a;
g->status = Gwaiting;
- g->waitreason = "garbage collection";
+ g->waitreason = runtime·gostringnocopy((byte*)"garbage collection");
runtime·mcall(mgc);
}
gp = g->m->curg;
gp->status = Gwaiting;
- gp->waitreason = "garbage collection";
+ gp->waitreason = runtime·gostringnocopy((byte*)"garbage collection");
a.start_time = (uint64)(g->m->scalararg[0]) | ((uint64)(g->m->scalararg[1]) << 32);
a.eagersweep = g->m->scalararg[2];
if(fb == nil) {
runtime·fingwait = true;
g->isbackground = true;
- runtime·parkunlock(&finlock, "finalizer wait");
+ runtime·parkunlock(&finlock, runtime·gostringnocopy((byte*)"finalizer wait"));
g->isbackground = false;
continue;
}
// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
if(waitio || checkerr(pd, mode) == 0)
- runtime·park((bool(*)(G*, void*))blockcommit, gpp, "IO wait");
+ runtime·park((bool(*)(G*, void*))blockcommit, gpp, runtime·gostringnocopy((byte*)"IO wait"));
// be careful to not lose concurrent READY notification
old = runtime·xchgp(gpp, nil);
if(old > WAIT)
static void incidlelocked(int32);
static void checkdead(void);
static void exitsyscall0(G*);
-static void park0(G*);
+void runtime·park_m(G*);
static void goexit0(G*);
static void gfput(P*, G*);
static G* gfget(P*);
// let the other goroutine finish printing the panic trace.
// Once it does, it will exit. See issue 3934.
if(runtime·panicking)
- runtime·park(nil, nil, "panicwait");
+ runtime·park(nil, nil, runtime·gostringnocopy((byte*)"panicwait"));
runtime·exit(0);
for(;;)
void
runtime·goroutineheader(G *gp)
{
- int8 *status;
+ String status;
int64 waitfor;
switch(gp->status) {
case Gidle:
- status = "idle";
+ status = runtime·gostringnocopy((byte*)"idle");
break;
case Grunnable:
- status = "runnable";
+ status = runtime·gostringnocopy((byte*)"runnable");
break;
case Grunning:
- status = "running";
+ status = runtime·gostringnocopy((byte*)"running");
break;
case Gsyscall:
- status = "syscall";
+ status = runtime·gostringnocopy((byte*)"syscall");
break;
case Gwaiting:
- if(gp->waitreason)
+ if(gp->waitreason.str != nil)
status = gp->waitreason;
else
- status = "waiting";
+ status = runtime·gostringnocopy((byte*)"waiting");
break;
default:
- status = "???";
+ status = runtime·gostringnocopy((byte*)"???");
break;
}
if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince != 0)
waitfor = (runtime·nanotime() - gp->waitsince) / (60LL*1000*1000*1000);
- runtime·printf("goroutine %D [%s", gp->goid, status);
+ runtime·printf("goroutine %D [%S", gp->goid, status);
if(waitfor >= 1)
runtime·printf(", %D minutes", waitfor);
if(gp->lockedm != nil)
g->stackguard0 = StackPreempt;
}
+// ready_m makes runnable the G that the Go side stashed in
+// m->ptrarg[0] (see goready). The handoff slot is cleared before
+// calling runtime·ready so no stale G pointer is retained.
+void
+runtime·ready_m(void)
+{
+ G *gp;
+
+ gp = g->m->ptrarg[0];
+ g->m->ptrarg[0] = nil;
+ runtime·ready(gp);
+}
+
int32
runtime·gcprocs(void)
{
// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
void
-runtime·park(bool(*unlockf)(G*, void*), void *lock, int8 *reason)
+runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
{
if(g->status != Grunning)
runtime·throw("bad g status");
g->m->waitlock = lock;
g->m->waitunlockf = unlockf;
g->waitreason = reason;
- runtime·mcall(park0);
+ runtime·mcall(runtime·park_m);
}
-static bool
-parkunlock(G *gp, void *lock)
+bool
+runtime·parkunlock_c(G *gp, void *lock)
{
USED(gp);
runtime·unlock(lock);
// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling runtime·ready(gp).
void
-runtime·parkunlock(Lock *lock, int8 *reason)
+runtime·parkunlock(Lock *lock, String reason)
{
- runtime·park(parkunlock, lock, reason);
+ runtime·park(runtime·parkunlock_c, lock, reason);
}
// runtime·park continuation on g0.
-static void
-park0(G *gp)
+void
+runtime·park_m(G *gp)
{
bool ok;
gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
gp->writenbuf = 0;
gp->writebuf = nil;
- gp->waitreason = nil;
+ gp->waitreason.str = nil;
+ gp->waitreason.len = 0;
gp->param = nil;
dropg();
gp = runtime·allg[gi];
mp = gp->m;
lockedm = gp->lockedm;
- runtime·printf(" G%D: status=%d(%s) m=%d lockedm=%d\n",
+ runtime·printf(" G%D: status=%d(%S) m=%d lockedm=%d\n",
gp->goid, gp->status, gp->waitreason, mp ? mp->id : -1,
lockedm ? lockedm->id : -1);
}
package runtime
+import "unsafe"
+
+// Go-side G status values; these appear to mirror the C scheduler's
+// status constants (Gidle, Grunnable, Grunning, Gsyscall, Gwaiting,
+// ...) and their iota order must match — verify against the C
+// definitions when changing either side.
+// NOTE(review): gStatusidle breaks the capitalization pattern of
+// its siblings (gStatusRunnable etc.) — confirm intentional.
+const (
+ gStatusidle = iota
+ gStatusRunnable
+ gStatusRunning
+ gStatusSyscall
+ gStatusWaiting
+ gStatusMoribundUnused
+ gStatusDead
+)
+
+// parkunlock_c is never read or written; only its address matters.
+// goparkunlock passes &parkunlock_c as gopark's unlockf so the C
+// side can stand in runtime·parkunlock_c — presumably matched by
+// address; verify in the C scheduler code.
+var parkunlock_c byte
+
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
mcall(&gosched_m)
}
+
+// Puts the current goroutine into a waiting state and calls unlockf.
+// If unlockf returns false, the goroutine is resumed.
+// reason is stored in gp.waitreason, where the status printers
+// (goroutineheader, schedtrace) pick it up.
+func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
+ mp := acquirem()
+ gp := mp.curg
+ if gp.status != gStatusRunning {
+ gothrow("gopark: bad g status")
+ }
+ mp.waitlock = lock
+ // unlockf arrives as a raw unsafe.Pointer and is reinterpreted
+ // in place as the C-ABI callback type held in m->waitunlockf.
+ mp.waitunlockf = *(*func(*g, unsafe.Pointer) uint8)(unsafe.Pointer(&unlockf))
+ gp.waitreason = reason
+ releasem(mp)
+ // can't do anything that might move the G between Ms here.
+ mcall(&park_m)
+}
+
+// Puts the current goroutine into a waiting state and unlocks the lock.
+// The goroutine can be made runnable again by calling goready(gp).
+// &parkunlock_c stands in for the C callback runtime·parkunlock_c —
+// Go cannot take a C function's address directly, so the C side
+// presumably recognizes this sentinel address; verify there.
+func goparkunlock(lock *lock, reason string) {
+ gopark(unsafe.Pointer(&parkunlock_c), unsafe.Pointer(lock), reason)
+}
+
+// goready marks gp runnable. The G is handed to the C side through
+// m->ptrarg[0], and runtime·ready_m runs on the M (g0) stack via
+// onM to perform the actual wakeup. acquirem/releasem pin this M
+// around the handoff.
+func goready(gp *g) {
+ mp := acquirem()
+ mp.ptrarg[0] = unsafe.Pointer(gp)
+ onM(&ready_m)
+ releasem(mp)
+}
+
+// goblockevent forwards a blocking event (presumably for the block
+// profiler — see runtime·blockevent) to the C side via onM.
+// cycles is split into two 32-bit halves because the scalararg
+// slots are uint-typed.
+func goblockevent(cycles int64, skip int32) {
+ // TODO: convert to Go when we do mprof.goc
+ mp := acquirem()
+ mp.scalararg[0] = uint(uint32(cycles))
+ mp.scalararg[1] = uint(cycles >> 32)
+ mp.scalararg[2] = uint(skip)
+ onM(&blockevent_m)
+ releasem(mp)
+}
+
+// acquireSudog returns a sudog, preferring the current P's free
+// list (mcache.sudogcache) and falling back to a fresh allocation.
+//go:nosplit
+func acquireSudog() *sudog {
+ c := gomcache()
+ s := c.sudogcache
+ if s != nil {
+ // pop the head of the singly-linked cache
+ c.sudogcache = s.link
+ return s
+ }
+ return new(sudog)
+}
+
+// releaseSudog pushes s onto the current P's free list
+// (mcache.sudogcache). No locking: the mcache is per-P.
+//go:nosplit
+func releaseSudog(s *sudog) {
+ c := gomcache()
+ s.link = c.sudogcache
+ c.sudogcache = s
+}
int16 status;
int64 goid;
int64 waitsince; // approx time when the G become blocked
- int8* waitreason; // if status==Gwaiting
+ String waitreason; // if status==Gwaiting
G* schedlink;
bool ispanic;
bool issystem; // do not output in stack dump
void runtime·gosched(void);
void runtime·gosched_m(G*);
void runtime·schedtrace(bool);
-void runtime·park(bool(*)(G*, void*), void*, int8*);
-void runtime·parkunlock(Lock*, int8*);
-void runtime·tsleep(int64, int8*);
+void runtime·park(bool(*)(G*, void*), void*, String);
+void runtime·parkunlock(Lock*, String);
+void runtime·tsleep(int64, String);
M* runtime·newm(void);
void runtime·goexit(void);
void runtime·asmcgocall(void (*fn)(void*), void*);
// Any semrelease after the cansemacquire knows we're waiting
// (we set nwait above), so go to sleep.
semqueue(root, addr, &s);
- runtime·parkunlock(&root->lock, "semacquire");
+ runtime·parkunlock(&root->lock, runtime·gostringnocopy((byte*)"semacquire"));
if(cansemacquire(addr)) {
if(t0)
runtime·blockevent(s.releasetime - t0, 3);
else
s->tail->next = &w;
s->tail = &w;
- runtime·parkunlock(&s->lock, "semacquire");
+ runtime·parkunlock(&s->lock, runtime·gostringnocopy((byte*)"semacquire"));
if(t0)
runtime·blockevent(w.releasetime - t0, 2);
}
else
s->tail->next = &w;
s->tail = &w;
- runtime·parkunlock(&s->lock, "semarelease");
+ runtime·parkunlock(&s->lock, runtime·gostringnocopy((byte*)"semarelease"));
} else
runtime·unlock(&s->lock);
}
gp->sched.ret = g->m->cret;
g->m->cret = 0; // drop reference
gp->status = Gwaiting;
- gp->waitreason = "stack unsplit";
+ gp->waitreason = runtime·gostringnocopy((byte*)"stack unsplit");
if(argsize > 0) {
sp -= argsize;
g->m->morebuf.lr = (uintptr)nil;
g->m->morebuf.sp = (uintptr)nil;
gp->status = Gwaiting;
- gp->waitreason = "stack growth";
+ gp->waitreason = runtime·gostringnocopy((byte*)"stack growth");
newstackcall = framesize==1;
if(newstackcall)
framesize = 0;
}
// in stubs.goc
+func getg() *g
func acquirem() *m
func releasem(mp *m)
func gomcache() *mcache
markallocated_m,
unrollgcprog_m,
unrollgcproginplace_m,
- gosched_m mFunction
+ gosched_m,
+ ready_m,
+ park_m,
+ blockevent_m mFunction
)
// memclr clears n bytes starting at ptr.
// gopersistentalloc allocates a permanent (not garbage collected)
// memory region of size n. Use wisely!
func gopersistentalloc(n uintptr) unsafe.Pointer
+
+func gocputicks() int64
ret = runtime·casp((void**)p, (void*)x, (void*)y);
}
+// getg returns the current goroutine's G descriptor; this is the
+// C implementation of the Go-side getg declared among the stubs.
+#pragma textflag NOSPLIT
+func runtime·getg() (ret *G) {
+ ret = g;
+}
+
#pragma textflag NOSPLIT
func runtime·acquirem() (ret *M) {
ret = g->m;
// Sleep puts the current goroutine to sleep for at least ns nanoseconds.
func Sleep(ns int64) {
- runtime·tsleep(ns, "sleep");
+ runtime·tsleep(ns, runtime·gostringnocopy((byte*)"sleep"));
}
// startTimer adds t to the timer heap.
// Put the current goroutine to sleep for ns nanoseconds.
void
-runtime·tsleep(int64 ns, int8 *reason)
+runtime·tsleep(int64 ns, String reason)
{
Timer t;
// No timers left - put goroutine to sleep.
timers.rescheduling = true;
g->isbackground = true;
- runtime·parkunlock(&timers.lock, "timer goroutine (idle)");
+ runtime·parkunlock(&timers.lock, runtime·gostringnocopy((byte*)"timer goroutine (idle)"));
g->isbackground = false;
continue;
}