Cypherpunks repositories - gostls13.git/commitdiff
runtime: make onM and mcall take Go func values
author     Russ Cox <rsc@golang.org>
           Wed, 3 Sep 2014 15:35:22 +0000 (11:35 -0400)
committer  Russ Cox <rsc@golang.org>
           Wed, 3 Sep 2014 15:35:22 +0000 (11:35 -0400)
This gives them correct types in Go and also makes it
possible to use them to run Go code on an m stack.

LGTM=iant
R=golang-codereviews, dave, iant
CC=dvyukov, golang-codereviews, khr, r
https://golang.org/cl/137970044
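
The whole CL rests on the gc representation of a Go func value: the value is a pointer to a block whose first word is the code pointer (any closed-over variables follow it). That is why each assembly stub below now copies the incoming func value into the closure-context register (DX on 386/amd64/amd64p32, R7 on ARM) and loads the call target from its first word, and why the C callers switch from passing a bare function pointer to passing the address of a local holding one: a pointer to a word containing a code address has exactly the layout the Go side expects. A minimal standalone sketch of that layout, assuming only the gc toolchain (names and output are illustrative, not part of this CL):

	package main

	import (
		"fmt"
		"unsafe"
	)

	func hello() { fmt.Println("hello") }

	func main() {
		f := hello
		// The variable f holds a pointer to a funcval; the first word
		// of that funcval is hello's code pointer.
		fv := *(*unsafe.Pointer)(unsafe.Pointer(&f))
		code := *(*uintptr)(fv)
		fmt.Printf("func value at %p, code pointer %#x\n", fv, code)
	}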

18 files changed:
src/pkg/runtime/asm_386.s
src/pkg/runtime/asm_amd64.s
src/pkg/runtime/asm_amd64p32.s
src/pkg/runtime/asm_arm.s
src/pkg/runtime/export_test.go
src/pkg/runtime/heapdump.c
src/pkg/runtime/malloc.go
src/pkg/runtime/mcache.c
src/pkg/runtime/mgc0.c
src/pkg/runtime/mgc0.go
src/pkg/runtime/mheap.c
src/pkg/runtime/panic.c
src/pkg/runtime/proc.c
src/pkg/runtime/proc.go
src/pkg/runtime/rdebug.go
src/pkg/runtime/runtime.h
src/pkg/runtime/sigqueue.go
src/pkg/runtime/stubs.go

index e99c114ad759fdacc9bda9fb7089dda3f066c1a1..07158ef0fdbd01319110f167c4baa31149b3e037 100644 (file)
@@ -162,7 +162,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $0-4
        MOVL    gobuf_pc(BX), BX
        JMP     BX
 
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
 // Switch to m->g0's stack, call fn(g).
 // Fn must never return.  It should gogo(&g->sched)
 // to keep running g.
@@ -188,6 +188,8 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
        MOVL    SI, g(CX)       // g = m->g0
        MOVL    (g_sched+gobuf_sp)(SI), SP      // sp = m->g0->sched.sp
        PUSHL   AX
+       MOVL    DI, DX
+       MOVL    0(DI), DI
        CALL    DI
        POPL    AX
        MOVL    $runtime·badmcall2(SB), AX
@@ -202,7 +204,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
 TEXT runtime·switchtoM(SB), NOSPLIT, $0-4
        RET
 
-// void onM(void (*fn)())
+// func onM(fn func())
 // calls fn() on the M stack.
 // switches to the M stack if not already on it, and
 // switches back when fn() returns.
@@ -227,6 +229,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 
        // call target function
        ARGSIZE(0)
+       MOVL    DI, DX
+       MOVL    0(DI), DI
        CALL    DI
 
        // switch back to g
@@ -241,6 +245,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 
 onm:
        // already on m stack, just call directly
+       MOVL    DI, DX
+       MOVL    0(DI), DI
        CALL    DI
        RET
 
index 0933fa92c77f3aa5834349a54a956e67d2b6e69e..1d98fc26526ec26b0c63e532e1cdef3ab530d3cc 100644 (file)
@@ -153,7 +153,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $0-8
        MOVQ    gobuf_pc(BX), BX
        JMP     BX
 
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
 // Switch to m->g0's stack, call fn(g).
 // Fn must never return.  It should gogo(&g->sched)
 // to keep running g.
@@ -180,6 +180,8 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8
        MOVQ    (g_sched+gobuf_sp)(SI), SP      // sp = m->g0->sched.sp
        PUSHQ   AX
        ARGSIZE(8)
+       MOVQ    DI, DX
+       MOVQ    0(DI), DI
        CALL    DI
        POPQ    AX
        MOVQ    $runtime·badmcall2(SB), AX
@@ -194,7 +196,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8
 TEXT runtime·switchtoM(SB), NOSPLIT, $0-8
        RET
 
-// void onM(void (*fn)())
+// func onM(fn func())
 // calls fn() on the M stack.
 // switches to the M stack if not already on it, and
 // switches back when fn() returns.
@@ -220,6 +222,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-8
 
        // call target function
        ARGSIZE(0)
+       MOVQ    DI, DX
+       MOVQ    0(DI), DI
        CALL    DI
 
        // switch back to g
@@ -234,6 +238,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-8
 
 onm:
        // already on m stack, just call directly
+       MOVQ    DI, DX
+       MOVQ    0(DI), DI
        CALL    DI
        RET
 
index 4a391033d1cb4505da2ca0b9a6fd8c7174c1422e..20069a6c7ecc1305de343f65379a200b44497d29 100644 (file)
@@ -131,7 +131,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $0-4
        MOVL    gobuf_pc(BX), BX
        JMP     BX
 
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
 // Switch to m->g0's stack, call fn(g).
 // Fn must never return.  It should gogo(&g->sched)
 // to keep running g.
@@ -158,6 +158,8 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
        MOVL    (g_sched+gobuf_sp)(SI), SP      // sp = m->g0->sched.sp
        PUSHQ   AX
        ARGSIZE(8)
+       MOVL    DI, DX
+       MOVL    0(DI), DI
        CALL    DI
        POPQ    AX
        MOVL    $runtime·badmcall2(SB), AX
@@ -172,7 +174,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
 TEXT runtime·switchtoM(SB), NOSPLIT, $0-4
        RET
 
-// void onM(void (*fn)())
+// func onM(fn func())
 // calls fn() on the M stack.
 // switches to the M stack if not already on it, and
 // switches back when fn() returns.
@@ -198,6 +200,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 
        // call target function
        ARGSIZE(0)
+       MOVL    DI, DX
+       MOVL    0(DI), DI
        CALL    DI
 
        // switch back to g
@@ -212,6 +216,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 
 onm:
        // already on m stack, just call directly
+       MOVL    DI, DX
+       MOVL    0(DI), DI
        CALL    DI
        RET
 
index 6954bb7c05e7812caac6d4b94d7093a521e036d6..6e12cf60f356ebf36bfa93827bc51d07734758a2 100644 (file)
@@ -147,7 +147,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $-4-4
        MOVW    gobuf_pc(R1), R11
        B       (R11)
 
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
 // Switch to m->g0's stack, call fn(g).
 // Fn must never return.  It should gogo(&g->sched)
 // to keep running g.
@@ -173,6 +173,8 @@ TEXT runtime·mcall(SB), NOSPLIT, $-4-4
        MOVW    (g_sched+gobuf_sp)(g), SP
        SUB     $8, SP
        MOVW    R1, 4(SP)
+       MOVW    R0, R7
+       MOVW    0(R0), R0
        BL      (R0)
        B       runtime·badmcall2(SB)
        RET
@@ -187,7 +189,7 @@ TEXT runtime·switchtoM(SB), NOSPLIT, $0-4
        BL      (R0) // clobber lr to ensure push {lr} is kept
        RET
 
-// void onM(void (*fn)())
+// func onM(fn func())
 // calls fn() on the M stack.
 // switches to the M stack if not already on it, and
 // switches back when fn() returns.
@@ -213,6 +215,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 
        // call target function
        ARGSIZE(0)
+       MOVW    R0, R7
+       MOVW    0(R0), R0
        BL      (R0)
 
        // switch back to g
@@ -224,6 +228,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
        RET
 
 onm:
+       MOVW    R0, R7
+       MOVW    0(R0), R0
        BL      (R0)
        RET
 
index cce9afbef993d9e26f96d49211772094410e1a4f..35a4130ee160c138590f483a4c5e40fc0d38bfc2 100644 (file)
@@ -31,23 +31,21 @@ type LFNode struct {
        Pushcnt uintptr
 }
 
-var (
-       lfstackpush_m,
-       lfstackpop_m mFunction
-)
+func lfstackpush_m()
+func lfstackpop_m()
 
 func LFStackPush(head *uint64, node *LFNode) {
        mp := acquirem()
        mp.ptrarg[0] = unsafe.Pointer(head)
        mp.ptrarg[1] = unsafe.Pointer(node)
-       onM(&lfstackpush_m)
+       onM(lfstackpush_m)
        releasem(mp)
 }
 
 func LFStackPop(head *uint64) *LFNode {
        mp := acquirem()
        mp.ptrarg[0] = unsafe.Pointer(head)
-       onM(&lfstackpop_m)
+       onM(lfstackpop_m)
        node := (*LFNode)(unsafe.Pointer(mp.ptrarg[0]))
        mp.ptrarg[0] = nil
        releasem(mp)
@@ -65,17 +63,15 @@ type ParFor struct {
        wait    bool
 }
 
-var (
-       newparfor_m,
-       parforsetup_m,
-       parfordo_m,
-       parforiters_m mFunction
-)
+func newparfor_m()
+func parforsetup_m()
+func parfordo_m()
+func parforiters_m()
 
 func NewParFor(nthrmax uint32) *ParFor {
        mp := acquirem()
        mp.scalararg[0] = uintptr(nthrmax)
-       onM(&newparfor_m)
+       onM(newparfor_m)
        desc := (*ParFor)(mp.ptrarg[0])
        mp.ptrarg[0] = nil
        releasem(mp)
@@ -93,14 +89,14 @@ func ParForSetup(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*
        if wait {
                mp.scalararg[2] = 1
        }
-       onM(&parforsetup_m)
+       onM(parforsetup_m)
        releasem(mp)
 }
 
 func ParForDo(desc *ParFor) {
        mp := acquirem()
        mp.ptrarg[0] = unsafe.Pointer(desc)
-       onM(&parfordo_m)
+       onM(parfordo_m)
        releasem(mp)
 }
 
@@ -108,7 +104,7 @@ func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
        mp := acquirem()
        mp.ptrarg[0] = unsafe.Pointer(desc)
        mp.scalararg[0] = uintptr(tid)
-       onM(&parforiters_m)
+       onM(parforiters_m)
        begin := uint32(mp.scalararg[0])
        end := uint32(mp.scalararg[1])
        releasem(mp)
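
The export hooks above show the onM convention the rest of the CL converts to: arguments cross to the M stack in m->ptrarg[x]/m->scalararg[x], results come back in the same slots, and the M-stack function is now passed as a plain Go func value rather than the address of an mFunction. A hedged sketch of the pattern (doSomething and doSomething_m are hypothetical names, not part of the runtime); it leans on the runtime-internal helpers used in this file and is not a standalone program:

	// doSomething_m is implemented in C and runs on the M stack.
	func doSomething_m()

	func doSomething(p unsafe.Pointer, n uintptr) uintptr {
		mp := acquirem()
		mp.ptrarg[0] = p        // pointer arguments in m->ptrarg[x]
		mp.scalararg[0] = n     // scalar arguments in m->scalararg[x]
		onM(doSomething_m)      // pass the func value itself, not its address
		out := mp.scalararg[0]  // results come back in the same slots
		mp.ptrarg[0] = nil
		releasem(mp)
		return out
	}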
index 63ffe68066fa5d3137f9f068c51332c833086cbd..83c2be2ac79cb784b3d2764b4188b1b4ff3bfb43 100644 (file)
@@ -746,6 +746,8 @@ mdump(G *gp)
 void
 runtime∕debug·WriteHeapDump(uintptr fd)
 {
+       void (*fn)(G*);
+
        // Stop the world.
        runtime·semacquire(&runtime·worldsema, false);
        g->m->gcing = 1;
@@ -762,7 +764,8 @@ runtime∕debug·WriteHeapDump(uintptr fd)
        // Call dump routine on M stack.
        runtime·casgstatus(g, Grunning, Gwaiting);
        g->waitreason = runtime·gostringnocopy((byte*)"dumping heap");
-       runtime·mcall(mdump);
+       fn = mdump;
+       runtime·mcall(&fn);
 
        // Reset dump file.
        dumpfd = 0;
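
The C side cannot build a real func value, so the converted callers fake one: store the function pointer in a local ("fn = mdump") and pass the local's address ("runtime·mcall(&fn)"), which hands mcall a pointer to a word holding a code address, exactly the layout the assembly above dereferences. A deliberately unsafe Go demonstration of the same trick, assuming the gc layout and intended only as an illustration (it may break under future toolchains or -d=checkptr; all names here are illustrative):

	package main

	import (
		"fmt"
		"unsafe"
	)

	func target() { fmt.Println("reached target through a hand-built func value") }

	func main() {
		f := target
		code := **(**uintptr)(unsafe.Pointer(&f)) // target's code pointer

		// The C callers' trick: a local word holding the code pointer...
		word := code
		p := &word
		// ...whose address has the layout of a func value.
		fn := *(*func())(unsafe.Pointer(&p))
		fn() // dispatches to target, as the new MOV/CALL sequences do
	}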
index 84587a36d6bd4ae3b5ae3d2fa749b64ac40ebaca..dbe37c81086c8f04df6e3ea6cee234bb745efe8b 100644 (file)
@@ -144,7 +144,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
                        if v == nil {
                                mp := acquirem()
                                mp.scalararg[0] = tinySizeClass
-                               onM(&mcacheRefill_m)
+                               onM(mcacheRefill_m)
                                releasem(mp)
                                s = c.alloc[tinySizeClass]
                                v = s.freelist
@@ -175,7 +175,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
                        if v == nil {
                                mp := acquirem()
                                mp.scalararg[0] = uintptr(sizeclass)
-                               onM(&mcacheRefill_m)
+                               onM(mcacheRefill_m)
                                releasem(mp)
                                s = c.alloc[sizeclass]
                                v = s.freelist
@@ -196,7 +196,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
                mp := acquirem()
                mp.scalararg[0] = uintptr(size)
                mp.scalararg[1] = uintptr(flags)
-               onM(&largeAlloc_m)
+               onM(largeAlloc_m)
                s = (*mspan)(mp.ptrarg[0])
                mp.ptrarg[0] = nil
                releasem(mp)
@@ -246,7 +246,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
                                mp.ptrarg[1] = unsafe.Pointer(typ)
                                mp.scalararg[0] = uintptr(size)
                                mp.scalararg[1] = uintptr(size0)
-                               onM(&unrollgcproginplace_m)
+                               onM(unrollgcproginplace_m)
                                releasem(mp)
                                goto marked
                        }
@@ -255,7 +255,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
                        if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
                                mp := acquirem()
                                mp.ptrarg[0] = unsafe.Pointer(typ)
-                               onM(&unrollgcprog_m)
+                               onM(unrollgcprog_m)
                                releasem(mp)
                        }
                        ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
@@ -459,7 +459,7 @@ func gogc(force int32) {
                } else {
                        mp.scalararg[2] = 0
                }
-               onM(&gc_m)
+               onM(gc_m)
        }
 
        // all done
@@ -571,7 +571,7 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
                // switch to M stack and remove finalizer
                mp := acquirem()
                mp.ptrarg[0] = e.data
-               onM(&removeFinalizer_m)
+               onM(removeFinalizer_m)
                releasem(mp)
                return
        }
@@ -624,7 +624,7 @@ okarg:
        mp.scalararg[0] = nret
        mp.ptrarg[2] = unsafe.Pointer(fint)
        mp.ptrarg[3] = unsafe.Pointer(ot)
-       onM(&setFinalizer_m)
+       onM(setFinalizer_m)
        if mp.scalararg[0] != 1 {
                gothrow("runtime.SetFinalizer: finalizer already set")
        }
index e17bd2144f76e8afac170cabcd44148556cae8fd..8e98890e8e3f0e50b6ae72c7129461900078aaa1 100644 (file)
@@ -65,8 +65,11 @@ freemcache_m(G *gp)
 void
 runtime·freemcache(MCache *c)
 {
+       void (*fn)(G*);
+
        g->m->ptrarg[0] = c;
-       runtime·mcall(freemcache_m);
+       fn = freemcache_m;
+       runtime·mcall(&fn);
 }
 
 // Gets a span that has a free object in it and assigns it
index 3e22acc5241ee9f41dc29b462ebcfe0b094103e0..2ae23e8bf0e1b28981438532dfe970b17f96e278 100644 (file)
@@ -1141,6 +1141,7 @@ runtime·updatememstats(GCStats *stats)
        int32 i;
        uint64 smallfree;
        uint64 *src, *dst;
+       void (*fn)(G*);
 
        if(stats)
                runtime·memclr((byte*)stats, sizeof(*stats));
@@ -1177,8 +1178,10 @@ runtime·updatememstats(GCStats *stats)
        // Flush MCache's to MCentral.
        if(g == g->m->g0)
                flushallmcaches();
-       else
-               runtime·mcall(flushallmcaches_m);
+       else {
+               fn = flushallmcaches_m;
+               runtime·mcall(&fn);
+       }
 
        // Aggregate local stats.
        cachestats();
index 93af63e63e937905315cae682df56f14f5612e84..2d9d76a4749b012736acae5ca8bb16bfe898f593 100644 (file)
@@ -37,7 +37,7 @@ func gc_unixnanotime(now *int64) {
 
 func freeOSMemory() {
        gogc(2) // force GC and do eager sweep
-       onM(&scavenge_m)
+       onM(scavenge_m)
 }
 
 var poolcleanup func()
index 93f33f21cfbc30dc14ee4cabba3c64e416288581..0050e96556a9c4ae98267c71aaff9512d10cc1c9 100644 (file)
@@ -229,6 +229,7 @@ MSpan*
 runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
 {
        MSpan *s;
+       void (*fn)(G*);
 
        // Don't do any operations that lock the heap on the G stack.
        // It might trigger stack growth, and the stack growth code needs
@@ -240,7 +241,8 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool
                g->m->scalararg[0] = npage;
                g->m->scalararg[1] = sizeclass;
                g->m->scalararg[2] = large;
-               runtime·mcall(mheap_alloc_m);
+               fn = mheap_alloc_m;
+               runtime·mcall(&fn);
                s = g->m->ptrarg[0];
                g->m->ptrarg[0] = nil;
        }
@@ -488,13 +490,16 @@ mheap_free_m(G *gp)
 void
 runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 {
+       void (*fn)(G*);
+
        if(g == g->m->g0) {
                mheap_free(h, s, acct);
        } else {
                g->m->ptrarg[0] = h;
                g->m->ptrarg[1] = s;
                g->m->scalararg[0] = acct;
-               runtime·mcall(mheap_free_m);
+               fn = mheap_free_m;
+               runtime·mcall(&fn);
        }
 }
 
index ecf411133763eb1b00c4e8e1f859553b073ddfca..4b6829e011025cb41568ef75637125819fd76e74 100644 (file)
@@ -214,6 +214,7 @@ runtime·panic(Eface e)
        Defer *d, dabort;
        Panic p;
        uintptr pc, argp;
+       void (*fn)(G*);
 
        runtime·memclr((byte*)&p, sizeof p);
        p.arg = e;
@@ -266,7 +267,8 @@ runtime·panic(Eface e)
                        // Pass information about recovering frame to recovery.
                        g->sigcode0 = (uintptr)argp;
                        g->sigcode1 = (uintptr)pc;
-                       runtime·mcall(recovery);
+                       fn = recovery;
+                       runtime·mcall(&fn);
                        runtime·throw("recovery failed"); // mcall should not return
                }
        }
index b85baca14d223b6c57f17440f68393c28d2687e0..bc15d822cbc6dc56386b0162dcfa387699f33654 100644 (file)
@@ -1439,10 +1439,13 @@ dropg(void)
 void
 runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
 {
+       void (*fn)(G*);
+
        g->m->waitlock = lock;
        g->m->waitunlockf = unlockf;
        g->waitreason = reason;
-       runtime·mcall(runtime·park_m);
+       fn = runtime·park_m;
+       runtime·mcall(&fn);
 }
 
 bool
@@ -1487,7 +1490,10 @@ runtime·park_m(G *gp)
 void
 runtime·gosched(void)
 {
-       runtime·mcall(runtime·gosched_m);
+       void (*fn)(G*);
+       
+       fn = runtime·gosched_m;
+       runtime·mcall(&fn);
 }
 
 // runtime·gosched continuation on g0.
@@ -1518,9 +1524,12 @@ runtime·gosched_m(G *gp)
 void
 runtime·goexit(void)
 {
+       void (*fn)(G*);
+
        if(raceenabled)
                runtime·racegoend();
-       runtime·mcall(goexit0);
+       fn = goexit0;
+       runtime·mcall(&fn);
 }
 
 // runtime·goexit continuation on g0.
@@ -1689,6 +1698,8 @@ runtime·entersyscallblock_m(void)
 void
 runtime·exitsyscall(void)
 {
+       void (*fn)(G*);
+
        g->m->locks++;  // see comment in entersyscall
 
        g->waitsince = 0;
@@ -1716,7 +1727,8 @@ runtime·exitsyscall(void)
        g->m->locks--;
 
        // Call the scheduler.
-       runtime·mcall(exitsyscall0);
+       fn = exitsyscall0;
+       runtime·mcall(&fn);
 
        // Scheduler returned, so we're allowed to run now.
        // Delete the gcstack information that we left for
@@ -1858,6 +1870,7 @@ runtime·malg(int32 stacksize)
 {
        G *newg;
        byte *stk;
+       void (*fn)(G*);
 
        if(StackTop < sizeof(Stktop)) {
                runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
@@ -1874,7 +1887,8 @@ runtime·malg(int32 stacksize)
                        // have to call stackalloc on scheduler stack.
                        newg->stacksize = stacksize;
                        g->param = newg;
-                       runtime·mcall(mstackalloc);
+                       fn = mstackalloc;
+                       runtime·mcall(&fn);
                        stk = g->param;
                        g->param = nil;
                }
@@ -1915,6 +1929,7 @@ void
 runtime·newproc(int32 siz, FuncVal* fn, ...)
 {
        byte *argp;
+       void (*mfn)(void);
 
        if(thechar == '5')
                argp = (byte*)(&fn+2);  // skip caller's saved LR
@@ -1926,7 +1941,8 @@ runtime·newproc(int32 siz, FuncVal* fn, ...)
        g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
        g->m->ptrarg[0] = argp;
        g->m->ptrarg[1] = fn;
-       runtime·onM(newproc_m);
+       mfn = newproc_m;
+       runtime·onM(&mfn);
        g->m->locks--;
 }
 
@@ -2090,6 +2106,7 @@ gfget(P *p)
 {
        G *gp;
        byte *stk;
+       void (*fn)(G*);
 
 retry:
        gp = p->gfree;
@@ -2117,7 +2134,8 @@ retry:
                        } else {
                                gp->stacksize = FixedStack;
                                g->param = gp;
-                               runtime·mcall(mstackalloc);
+                               fn = mstackalloc;
+                               runtime·mcall(&fn);
                                stk = g->param;
                                g->param = nil;
                        }
index a36b931b88a6f922454bb34f533e056f3b150f5f..f060640a2626cb8fe3abea3ea19adf1052f97460 100644 (file)
@@ -55,7 +55,7 @@ func forcegchelper() {
 // Gosched yields the processor, allowing other goroutines to run.  It does not
 // suspend the current goroutine, so execution resumes automatically.
 func Gosched() {
-       mcall(&gosched_m)
+       mcall(gosched_m)
 }
 
 func readgStatus(gp *g) uint32 {
@@ -77,7 +77,7 @@ func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
        gp.waitreason = reason
        releasem(mp)
        // can't do anything that might move the G between Ms here.
-       mcall(&park_m)
+       mcall(park_m)
 }
 
 // Puts the current goroutine into a waiting state and unlocks the lock.
@@ -89,7 +89,7 @@ func goparkunlock(lock *mutex, reason string) {
 func goready(gp *g) {
        mp := acquirem()
        mp.ptrarg[0] = unsafe.Pointer(gp)
-       onM(&ready_m)
+       onM(ready_m)
        releasem(mp)
 }
 
index eef0f281fd3cab2f8e709bdf5685a694fc8d4f7a..e5e691122c63a15a2033083da3b57611e4f66052 100644 (file)
@@ -13,7 +13,7 @@ func setMaxStack(in int) (out int) {
 func setGCPercent(in int32) (out int32) {
        mp := acquirem()
        mp.scalararg[0] = uintptr(int(in))
-       onM(&setgcpercent_m)
+       onM(setgcpercent_m)
        out = int32(int(mp.scalararg[0]))
        releasem(mp)
        return out
@@ -30,7 +30,7 @@ func setPanicOnFault(new bool) (old bool) {
 func setMaxThreads(in int) (out int) {
        mp := acquirem()
        mp.scalararg[0] = uintptr(in)
-       onM(&setmaxthreads_m)
+       onM(setmaxthreads_m)
        out = int(mp.scalararg[0])
        releasem(mp)
        return out
index 3cc6f9a81e41f05693ea0fa3905840b9b924a90e..d67d7a0076de14e4c28baae69d211a635255a0fb 100644 (file)
@@ -809,8 +809,8 @@ void        runtime·runpanic(Panic*);
 uintptr        runtime·getcallersp(void*);
 int32  runtime·mcount(void);
 int32  runtime·gcount(void);
-void   runtime·mcall(void(*)(G*));
-void   runtime·onM(void(*)(void));
+void   runtime·mcall(void(**)(G*));
+void   runtime·onM(void(**)(void));
 uint32 runtime·fastrand1(void);
 void   runtime·rewindmorestack(Gobuf*);
 int32  runtime·timediv(int64, int32, int32*);
index c51ede02654dfd914de1f8ece5c8620152c546a9..4643559705f8b114e855bb1ae661e442218638ef 100644 (file)
@@ -9,7 +9,7 @@ package runtime
 func signal_recv() (m uint32) {
        for {
                mp := acquirem()
-               onM(&signal_recv_m)
+               onM(signal_recv_m)
                ok := mp.scalararg[0] != 0
                m = uint32(mp.scalararg[1])
                releasem(mp)
@@ -24,19 +24,17 @@ func signal_recv() (m uint32) {
 func signal_enable(s uint32) {
        mp := acquirem()
        mp.scalararg[0] = uintptr(s)
-       onM(&signal_enable_m)
+       onM(signal_enable_m)
        releasem(mp)
 }
 
 func signal_disable(s uint32) {
        mp := acquirem()
        mp.scalararg[0] = uintptr(s)
-       onM(&signal_disable_m)
+       onM(signal_disable_m)
        releasem(mp)
 }
 
-var (
-       signal_recv_m,
-       signal_enable_m,
-       signal_disable_m mFunction
-)
+func signal_recv_m()
+func signal_enable_m()
+func signal_disable_m()
index 86dc47f4ab822fe4b95b6e9fc6a94d6c41169e23..b002da98b423f1fa60f90d5001dd57ca694ad66f 100644 (file)
@@ -57,37 +57,34 @@ func acquirem() *m
 func releasem(mp *m)
 func gomcache() *mcache
 
-// An mFunction represents a C function that runs on the M stack.  It
-// can be called from Go using mcall or onM.  Through the magic of
-// linking, an mFunction variable and the corresponding C code entry
-// point live at the same address.
-type mFunction byte
-
 // in asm_*.s
-func mcall(fn *mFunction)
-func onM(fn *mFunction)
-
-// C functions that run on the M stack.  Call these like
-//   mcall(&mcacheRefill_m)
-// Arguments should be passed in m->scalararg[x] and
-// m->ptrarg[x].  Return values can be passed in those
-// same slots.
-var (
-       mcacheRefill_m,
-       largeAlloc_m,
-       gc_m,
-       scavenge_m,
-       setFinalizer_m,
-       removeFinalizer_m,
-       markallocated_m,
-       unrollgcprog_m,
-       unrollgcproginplace_m,
-       gosched_m,
-       setgcpercent_m,
-       setmaxthreads_m,
-       ready_m,
-       park_m mFunction
-)
+func mcall(func(*g))
+func onM(fn func())
+
+// C functions that run on the M stack.
+// Call using mcall.
+// These functions need to be written to arrange explicitly
+// for the goroutine to continue execution.
+func gosched_m(*g)
+func park_m(*g)
+
+// More C functions that run on the M stack.
+// Call using onM.
+// Arguments should be passed in m->scalararg[x] and m->ptrarg[x].
+// Return values can be passed in those same slots.
+// These functions return to the goroutine when they return.
+func mcacheRefill_m()
+func largeAlloc_m()
+func gc_m()
+func scavenge_m()
+func setFinalizer_m()
+func removeFinalizer_m()
+func markallocated_m()
+func unrollgcprog_m()
+func unrollgcproginplace_m()
+func setgcpercent_m()
+func setmaxthreads_m()
+func ready_m()
 
 // memclr clears n bytes starting at ptr.
 // in memclr_*.s
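
The new declarations in stubs.go split the C helpers by contract: mcall-style functions receive the calling goroutine's *g on g0 and must arrange for it to run again themselves (they never return; cf. badmcall2 in the assembly), while onM-style functions simply return to the caller. A hedged sketch of the mcall contract, written in Go for readability even though the _m functions at this point are still C, with hypothetical names (stop, stop_m, queueForLater):

	// stop_m runs on g0; gp is the goroutine that called mcall.
	func stop_m(gp *g) {
		// Hand gp to whoever will make it runnable again later
		// (queueForLater stands in for that bookkeeping), then
		// enter the scheduler. Returning would hit badmcall2.
		queueForLater(gp)
		schedule() // picks another goroutine; does not return
	}

	func stop() {
		mcall(stop_m) // resumes here only after gp is rescheduled via gogo(&g->sched)
	}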