MOVL gobuf_pc(BX), BX
JMP BX
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
MOVL SI, g(CX) // g = m->g0
MOVL (g_sched+gobuf_sp)(SI), SP // sp = m->g0->sched.sp
PUSHL AX
+	MOVL	DI, DX		// DX = fn, the closure context register
+	MOVL	0(DI), DI	// DI = fn.fn, the code pointer
CALL DI
POPL AX
MOVL $runtime·badmcall2(SB), AX
TEXT runtime·switchtoM(SB), NOSPLIT, $0-4
RET
-// void onM(void (*fn)())
+// func onM(fn func())
// calls fn() on the M stack.
// switches to the M stack if not already on it, and
// switches back when fn() returns.
// call target function
ARGSIZE(0)
+ MOVL DI, DX
+ MOVL 0(DI), DI
CALL DI
// switch back to g
onm:
// already on m stack, just call directly
+ MOVL DI, DX
+ MOVL 0(DI), DI
CALL DI
RET
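// Note on the two-instruction sequences above (an illustrative sketch of
// the layout being relied on): a Go func value is a pointer to a FuncVal
// whose first word is the code pointer,
//
//	struct FuncVal { void (*fn)(void); /* closure data follows */ };
//
// so the func value itself is kept in the context register (DX) while the
// entry point is loaded from 0(DI) and called.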
MOVQ gobuf_pc(BX), BX
JMP BX
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
MOVQ (g_sched+gobuf_sp)(SI), SP // sp = m->g0->sched.sp
PUSHQ AX
ARGSIZE(8)
+	MOVQ	DI, DX		// DX = fn, the closure context register
+	MOVQ	0(DI), DI	// DI = fn.fn, the code pointer
CALL DI
POPQ AX
MOVQ $runtime·badmcall2(SB), AX
TEXT runtime·switchtoM(SB), NOSPLIT, $0-8
RET
-// void onM(void (*fn)())
+// func onM(fn func())
// calls fn() on the M stack.
// switches to the M stack if not already on it, and
// switches back when fn() returns.
// call target function
ARGSIZE(0)
+ MOVQ DI, DX
+ MOVQ 0(DI), DI
CALL DI
// switch back to g
onm:
// already on m stack, just call directly
+ MOVQ DI, DX
+ MOVQ 0(DI), DI
CALL DI
RET
MOVL gobuf_pc(BX), BX
JMP BX
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
MOVL (g_sched+gobuf_sp)(SI), SP // sp = m->g0->sched.sp
PUSHQ AX
ARGSIZE(8)
+	MOVL	DI, DX		// DX = fn, the closure context register
+	MOVL	0(DI), DI	// DI = fn.fn, the code pointer
CALL DI
POPQ AX
MOVL $runtime·badmcall2(SB), AX
TEXT runtime·switchtoM(SB), NOSPLIT, $0-4
RET
-// void onM(void (*fn)())
+// func onM(fn func())
// calls fn() on the M stack.
// switches to the M stack if not already on it, and
// switches back when fn() returns.
// call target function
ARGSIZE(0)
+ MOVL DI, DX
+ MOVL 0(DI), DI
CALL DI
// switch back to g
onm:
// already on m stack, just call directly
+ MOVL DI, DX
+ MOVL 0(DI), DI
CALL DI
RET
MOVW gobuf_pc(R1), R11
B (R11)
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
MOVW (g_sched+gobuf_sp)(g), SP
SUB $8, SP
MOVW R1, 4(SP)
+	MOVW	R0, R7		// R7 = fn, the closure context register on arm
+	MOVW	0(R0), R0	// R0 = fn.fn, the code pointer
BL (R0)
B runtime·badmcall2(SB)
RET
BL (R0) // clobber lr to ensure push {lr} is kept
RET
-// void onM(void (*fn)())
+// func onM(fn func())
// calls fn() on the M stack.
// switches to the M stack if not already on it, and
// switches back when fn() returns.
// call target function
ARGSIZE(0)
+ MOVW R0, R7
+ MOVW 0(R0), R0
BL (R0)
// switch back to g
RET
onm:
+ MOVW R0, R7
+ MOVW 0(R0), R0
BL (R0)
RET
Pushcnt uintptr
}
-var (
- lfstackpush_m,
- lfstackpop_m mFunction
-)
+func lfstackpush_m()
+func lfstackpop_m()
func LFStackPush(head *uint64, node *LFNode) {
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(head)
mp.ptrarg[1] = unsafe.Pointer(node)
- onM(&lfstackpush_m)
+ onM(lfstackpush_m)
releasem(mp)
}
func LFStackPop(head *uint64) *LFNode {
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(head)
- onM(&lfstackpop_m)
+ onM(lfstackpop_m)
node := (*LFNode)(unsafe.Pointer(mp.ptrarg[0]))
mp.ptrarg[0] = nil
releasem(mp)
wait bool
}
-var (
- newparfor_m,
- parforsetup_m,
- parfordo_m,
- parforiters_m mFunction
-)
+func newparfor_m()
+func parforsetup_m()
+func parfordo_m()
+func parforiters_m()
func NewParFor(nthrmax uint32) *ParFor {
mp := acquirem()
mp.scalararg[0] = uintptr(nthrmax)
- onM(&newparfor_m)
+ onM(newparfor_m)
desc := (*ParFor)(mp.ptrarg[0])
mp.ptrarg[0] = nil
releasem(mp)
if wait {
mp.scalararg[2] = 1
}
- onM(&parforsetup_m)
+ onM(parforsetup_m)
releasem(mp)
}
func ParForDo(desc *ParFor) {
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(desc)
- onM(&parfordo_m)
+ onM(parfordo_m)
releasem(mp)
}
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(desc)
mp.scalararg[0] = uintptr(tid)
- onM(&parforiters_m)
+ onM(parforiters_m)
begin := uint32(mp.scalararg[0])
end := uint32(mp.scalararg[1])
releasem(mp)
void
runtime∕debug·WriteHeapDump(uintptr fd)
{
+ void (*fn)(G*);
+
// Stop the world.
runtime·semacquire(&runtime·worldsema, false);
g->m->gcing = 1;
// Call dump routine on M stack.
runtime·casgstatus(g, Grunning, Gwaiting);
g->waitreason = runtime·gostringnocopy((byte*)"dumping heap");
- runtime·mcall(mdump);
+ fn = mdump;
+ runtime·mcall(&fn);
// Reset dump file.
dumpfd = 0;
if v == nil {
mp := acquirem()
mp.scalararg[0] = tinySizeClass
- onM(&mcacheRefill_m)
+ onM(mcacheRefill_m)
releasem(mp)
s = c.alloc[tinySizeClass]
v = s.freelist
if v == nil {
mp := acquirem()
mp.scalararg[0] = uintptr(sizeclass)
- onM(&mcacheRefill_m)
+ onM(mcacheRefill_m)
releasem(mp)
s = c.alloc[sizeclass]
v = s.freelist
mp := acquirem()
mp.scalararg[0] = uintptr(size)
mp.scalararg[1] = uintptr(flags)
- onM(&largeAlloc_m)
+ onM(largeAlloc_m)
s = (*mspan)(mp.ptrarg[0])
mp.ptrarg[0] = nil
releasem(mp)
mp.ptrarg[1] = unsafe.Pointer(typ)
mp.scalararg[0] = uintptr(size)
mp.scalararg[1] = uintptr(size0)
- onM(&unrollgcproginplace_m)
+ onM(unrollgcproginplace_m)
releasem(mp)
goto marked
}
if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(typ)
- onM(&unrollgcprog_m)
+ onM(unrollgcprog_m)
releasem(mp)
}
ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
} else {
mp.scalararg[2] = 0
}
- onM(&gc_m)
+ onM(gc_m)
}
// all done
// switch to M stack and remove finalizer
mp := acquirem()
mp.ptrarg[0] = e.data
- onM(&removeFinalizer_m)
+ onM(removeFinalizer_m)
releasem(mp)
return
}
mp.scalararg[0] = nret
mp.ptrarg[2] = unsafe.Pointer(fint)
mp.ptrarg[3] = unsafe.Pointer(ot)
- onM(&setFinalizer_m)
+ onM(setFinalizer_m)
if mp.scalararg[0] != 1 {
gothrow("runtime.SetFinalizer: finalizer already set")
}
void
runtime·freemcache(MCache *c)
{
+ void (*fn)(G*);
+
g->m->ptrarg[0] = c;
- runtime·mcall(freemcache_m);
+ fn = freemcache_m;
+ runtime·mcall(&fn);
}
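// The C-side calling pattern (an illustrative sketch; example_m is a
// hypothetical M-stack function): runtime·mcall now expects a Go func
// value, and a pointer to a C function pointer has the same first-word
// layout, so callers fake one on the stack:
//
//	void (*fn)(G*);
//
//	fn = example_m;
//	runtime·mcall(&fn);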
// Gets a span that has a free object in it and assigns it
int32 i;
uint64 smallfree;
uint64 *src, *dst;
+ void (*fn)(G*);
if(stats)
runtime·memclr((byte*)stats, sizeof(*stats));
// Flush MCache's to MCentral.
if(g == g->m->g0)
flushallmcaches();
- else
- runtime·mcall(flushallmcaches_m);
+ else {
+ fn = flushallmcaches_m;
+ runtime·mcall(&fn);
+ }
// Aggregate local stats.
cachestats();
func freeOSMemory() {
gogc(2) // force GC and do eager sweep
- onM(&scavenge_m)
+ onM(scavenge_m)
}
var poolcleanup func()
runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
MSpan *s;
+ void (*fn)(G*);
// Don't do any operations that lock the heap on the G stack.
// It might trigger stack growth, and the stack growth code needs
g->m->scalararg[0] = npage;
g->m->scalararg[1] = sizeclass;
g->m->scalararg[2] = large;
- runtime·mcall(mheap_alloc_m);
+ fn = mheap_alloc_m;
+ runtime·mcall(&fn);
s = g->m->ptrarg[0];
g->m->ptrarg[0] = nil;
}
void
runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
+ void (*fn)(G*);
+
if(g == g->m->g0) {
mheap_free(h, s, acct);
} else {
g->m->ptrarg[0] = h;
g->m->ptrarg[1] = s;
g->m->scalararg[0] = acct;
- runtime·mcall(mheap_free_m);
+ fn = mheap_free_m;
+ runtime·mcall(&fn);
}
}
Defer *d, dabort;
Panic p;
uintptr pc, argp;
+ void (*fn)(G*);
runtime·memclr((byte*)&p, sizeof p);
p.arg = e;
// Pass information about recovering frame to recovery.
g->sigcode0 = (uintptr)argp;
g->sigcode1 = (uintptr)pc;
- runtime·mcall(recovery);
+ fn = recovery;
+ runtime·mcall(&fn);
runtime·throw("recovery failed"); // mcall should not return
}
}
void
runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
{
+ void (*fn)(G*);
+
g->m->waitlock = lock;
g->m->waitunlockf = unlockf;
g->waitreason = reason;
- runtime·mcall(runtime·park_m);
+ fn = runtime·park_m;
+ runtime·mcall(&fn);
}
bool
void
runtime·gosched(void)
{
- runtime·mcall(runtime·gosched_m);
+ void (*fn)(G*);
+
+ fn = runtime·gosched_m;
+ runtime·mcall(&fn);
}
// runtime·gosched continuation on g0.
void
runtime·goexit(void)
{
+ void (*fn)(G*);
+
if(raceenabled)
runtime·racegoend();
- runtime·mcall(goexit0);
+ fn = goexit0;
+ runtime·mcall(&fn);
}
// runtime·goexit continuation on g0.
void
runtime·exitsyscall(void)
{
+ void (*fn)(G*);
+
g->m->locks++; // see comment in entersyscall
g->waitsince = 0;
g->m->locks--;
// Call the scheduler.
- runtime·mcall(exitsyscall0);
+ fn = exitsyscall0;
+ runtime·mcall(&fn);
// Scheduler returned, so we're allowed to run now.
// Delete the gcstack information that we left for
{
G *newg;
byte *stk;
+ void (*fn)(G*);
if(StackTop < sizeof(Stktop)) {
runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
// have to call stackalloc on scheduler stack.
newg->stacksize = stacksize;
g->param = newg;
- runtime·mcall(mstackalloc);
+ fn = mstackalloc;
+ runtime·mcall(&fn);
stk = g->param;
g->param = nil;
}
runtime·newproc(int32 siz, FuncVal* fn, ...)
{
byte *argp;
+ void (*mfn)(void);
if(thechar == '5')
argp = (byte*)(&fn+2); // skip caller's saved LR
g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
g->m->ptrarg[0] = argp;
g->m->ptrarg[1] = fn;
- runtime·onM(newproc_m);
+ mfn = newproc_m;
+ runtime·onM(&mfn);
g->m->locks--;
}
{
G *gp;
byte *stk;
+ void (*fn)(G*);
retry:
gp = p->gfree;
} else {
gp->stacksize = FixedStack;
g->param = gp;
- runtime·mcall(mstackalloc);
+ fn = mstackalloc;
+ runtime·mcall(&fn);
stk = g->param;
g->param = nil;
}
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
- mcall(&gosched_m)
+ mcall(gosched_m)
}
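// A minimal usage sketch (work and n are placeholders): yield periodically
// inside a long-running loop so other goroutines on the same P get a
// chance to run.
//
//	for i := 0; i < n; i++ {
//		work(i)
//		Gosched()
//	}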
func readgStatus(gp *g) uint32 {
gp.waitreason = reason
releasem(mp)
// can't do anything that might move the G between Ms here.
- mcall(&park_m)
+ mcall(park_m)
}
// Puts the current goroutine into a waiting state and unlocks the lock.
func goready(gp *g) {
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(gp)
- onM(&ready_m)
+ onM(ready_m)
releasem(mp)
}
func setGCPercent(in int32) (out int32) {
mp := acquirem()
mp.scalararg[0] = uintptr(int(in))
- onM(&setgcpercent_m)
+ onM(setgcpercent_m)
out = int32(int(mp.scalararg[0]))
releasem(mp)
return out
func setMaxThreads(in int) (out int) {
mp := acquirem()
mp.scalararg[0] = uintptr(in)
- onM(&setmaxthreads_m)
+ onM(setmaxthreads_m)
out = int(mp.scalararg[0])
releasem(mp)
return out
uintptr runtime·getcallersp(void*);
int32 runtime·mcount(void);
int32 runtime·gcount(void);
-void runtime·mcall(void(*)(G*));
-void runtime·onM(void(*)(void));
+void runtime·mcall(void(**)(G*));
+void runtime·onM(void(**)(void));
uint32 runtime·fastrand1(void);
void runtime·rewindmorestack(Gobuf*);
int32 runtime·timediv(int64, int32, int32*);
func signal_recv() (m uint32) {
for {
mp := acquirem()
- onM(&signal_recv_m)
+ onM(signal_recv_m)
ok := mp.scalararg[0] != 0
m = uint32(mp.scalararg[1])
releasem(mp)
func signal_enable(s uint32) {
mp := acquirem()
mp.scalararg[0] = uintptr(s)
- onM(&signal_enable_m)
+ onM(signal_enable_m)
releasem(mp)
}
func signal_disable(s uint32) {
mp := acquirem()
mp.scalararg[0] = uintptr(s)
- onM(&signal_disable_m)
+ onM(signal_disable_m)
releasem(mp)
}
-var (
- signal_recv_m,
- signal_enable_m,
- signal_disable_m mFunction
-)
+func signal_recv_m()
+func signal_enable_m()
+func signal_disable_m()
func releasem(mp *m)
func gomcache() *mcache
-// An mFunction represents a C function that runs on the M stack. It
-// can be called from Go using mcall or onM. Through the magic of
-// linking, an mFunction variable and the corresponding C code entry
-// point live at the same address.
-type mFunction byte
-
// in asm_*.s
-func mcall(fn *mFunction)
-func onM(fn *mFunction)
-
-// C functions that run on the M stack. Call these like
-// mcall(&mcacheRefill_m)
-// Arguments should be passed in m->scalararg[x] and
-// m->ptrarg[x]. Return values can be passed in those
-// same slots.
-var (
- mcacheRefill_m,
- largeAlloc_m,
- gc_m,
- scavenge_m,
- setFinalizer_m,
- removeFinalizer_m,
- markallocated_m,
- unrollgcprog_m,
- unrollgcproginplace_m,
- gosched_m,
- setgcpercent_m,
- setmaxthreads_m,
- ready_m,
- park_m mFunction
-)
+func mcall(fn func(*g))
+func onM(fn func())
+
+// C functions that run on the M stack.
+// Call using mcall.
+// These functions need to be written to arrange explicitly
+// for the goroutine to continue execution.
+func gosched_m(*g)
+func park_m(*g)
+
+// More C functions that run on the M stack.
+// Call using onM.
+// Arguments should be passed in m->scalararg[x] and m->ptrarg[x].
+// Return values can be passed in those same slots.
+// These functions return to the goroutine when they return.
+func mcacheRefill_m()
+func largeAlloc_m()
+func gc_m()
+func scavenge_m()
+func setFinalizer_m()
+func removeFinalizer_m()
+func markallocated_m()
+func unrollgcprog_m()
+func unrollgcproginplace_m()
+func setgcpercent_m()
+func setmaxthreads_m()
+func ready_m()
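// Illustrative call pattern for the onM helpers above (a sketch; n and out
// are placeholders): pack arguments into m.scalararg/m.ptrarg, switch with
// onM, and read results back from the same slots, mirroring setGCPercent
// earlier in this change.
//
//	mp := acquirem()
//	mp.scalararg[0] = uintptr(n)
//	onM(setgcpercent_m)
//	out := int32(mp.scalararg[0])
//	releasem(mp)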
// memclr clears n bytes starting at ptr.
// in memclr_*.s