int dotaddable(Node*, Node*);
void sudoclean(void);
int sudoaddable(int, Node*, Addr*, int*);
-void afunclit(Addr*);
+void afunclit(Addr*, Node*);
void datagostring(Strlit*, Addr*);
void split64(Node*, Node*, Node*);
void splitclean(void);
/*
* generate:
* call f
+ * proc=-1 normal call but no return
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
+ * proc=3 normal call to C pointer (not Go func value)
*/
void
ginscall(Node *f, int proc)
{
Prog *p;
- Node n1, r, con;
+ Node n1, r, r1, con;
switch(proc) {
default:
case 0: // normal call
case -1: // normal call but no return
- p = gins(ABL, N, f);
- afunclit(&p->to);
- if(proc == -1 || noreturn(p))
- gins(AUNDEF, N, N);
+ if(f->op == ONAME && f->class == PFUNC) {
+ p = gins(ABL, N, f);
+ afunclit(&p->to, f);
+ if(proc == -1 || noreturn(p))
+ gins(AUNDEF, N, N);
+ break;
+ }
+ nodreg(&r, types[tptr], 0);
+ nodreg(&r1, types[tptr], 1);
+ gmove(f, &r);
+ r.op = OINDREG;
+ gmove(&r, &r1);
+ r1.op = OINDREG;
+ gins(ABL, N, &r1);
+ break;
+
+ case 3: // normal call of c function pointer
+ gins(ABL, N, f);
break;
case 1: // call in new proc (go)
int r;
Node *i, *f;
Node tmpi, nodo, nodr, nodsp;
+ Prog *p;
i = n->left;
if(i->op != ODOTINTER)
cgen(&nodo, &nodr); // REG = 0(REG) -- i.tab
nodo.xoffset = n->left->xoffset + 3*widthptr + 8;
- cgen(&nodo, &nodr); // REG = 20+offset(REG) -- i.tab->fun[f]
+
+ if(proc == 0) {
+ // plain call: use direct c function pointer - more efficient
+ cgen(&nodo, &nodr); // REG = 20+offset(REG) -- i.tab->fun[f]
+ nodr.op = OINDREG;
+ proc = 3;
+ } else {
+ // go/defer. generate go func value.
+ p = gins(AMOVW, &nodo, &nodr);
+ p->from.type = D_CONST; // REG = &(20+offset(REG)) -- i.tab->fun[f]
+ }
// BOTCH nodr.type = fntype;
nodr.type = n->left->type;
* also fix up direct register references to be D_OREG.
*/
void
-afunclit(Addr *a)
+afunclit(Addr *a, Node *n)
{
+ // n is the node for the call target. Symbol naming (case PFUNC)
+ // substitutes the funcsym wrapper s·f for the function symbol, so a
+ // direct BL must restore the function's own code symbol from n.
if(a->type == D_CONST && a->name == D_EXTERN || a->type == D_REG) {
a->type = D_OREG;
+ if(n->op == ONAME)
+ a->sym = n->sym;
}
}
case PFUNC:
a->name = D_EXTERN;
a->type = D_CONST;
+ a->sym = funcsym(a->sym);
break;
}
break;
int isfat(Type*);
void sudoclean(void);
int sudoaddable(int, Node*, Addr*);
-void afunclit(Addr*);
+void afunclit(Addr*, Node*);
void nodfconst(Node*, Type*, Mpflt*);
void gtrack(Sym*);
/*
* generate:
* call f
+ * proc=-1 normal call but no return
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
+ * proc=3 normal call to C pointer (not Go func value)
*/
void
ginscall(Node *f, int proc)
case 0: // normal call
case -1: // normal call but no return
- p = gins(ACALL, N, f);
- afunclit(&p->to);
- if(proc == -1 || noreturn(p))
- gins(AUNDEF, N, N);
+ if(f->op == ONAME && f->class == PFUNC) {
+ p = gins(ACALL, N, f);
+ afunclit(&p->to, f);
+ if(proc == -1 || noreturn(p))
+ gins(AUNDEF, N, N);
+ break;
+ }
+ nodreg(&reg, types[tptr], D_AX);
+ nodreg(&r1, types[tptr], D_BX);
+ gmove(f, &reg);
+ reg.op = OINDREG;
+ gmove(&reg, &r1);
+ gins(ACALL, N, &r1);
+ break;
+
+ case 3: // normal call of c function pointer
+ gins(ACALL, N, f);
break;
case 1: // call in new proc (go)
fatal("cgen_callinter: badwidth");
nodo.op = OINDREG;
nodo.xoffset = n->left->xoffset + 3*widthptr + 8;
- cgen(&nodo, &nodr); // REG = 32+offset(REG) -- i.tab->fun[f]
+ if(proc == 0) {
+ // plain call: use direct c function pointer - more efficient
+ cgen(&nodo, &nodr); // REG = 32+offset(REG) -- i.tab->fun[f]
+ proc = 3;
+ } else {
+ // go/defer. generate go func value.
+ gins(ALEAQ, &nodo, &nodr); // REG = &(32+offset(REG)) -- i.tab->fun[f]
+ }
// BOTCH nodr.type = fntype;
nodr.type = n->left->type;
* call afunclit to fix up the argument.
*/
void
-afunclit(Addr *a)
+afunclit(Addr *a, Node *n)
{
+ // Symbol naming (case PFUNC) substitutes the funcsym wrapper s·f;
+ // for a direct CALL restore the code symbol from the call node n.
if(a->type == D_ADDR && a->index == D_EXTERN) {
a->type = D_EXTERN;
a->index = D_NONE;
+ a->sym = n->sym;
}
}
a->index = D_EXTERN;
a->type = D_ADDR;
a->width = widthptr;
+ a->sym = funcsym(a->sym);
break;
}
break;
void sudoclean(void);
int sudoaddable(int, Node*, Addr*);
int dotaddable(Node*, Node*);
-void afunclit(Addr*);
+void afunclit(Addr*, Node*);
void split64(Node*, Node*, Node*);
void splitclean(void);
void nswap(Node*, Node*);
/*
* generate:
* call f
+ * proc=-1 normal call but no return
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
+ * proc=3 normal call to C pointer (not Go func value)
*/
void
ginscall(Node *f, int proc)
{
Prog *p;
- Node reg, con;
+ Node reg, r1, con;
switch(proc) {
default:
case 0: // normal call
case -1: // normal call but no return
- p = gins(ACALL, N, f);
- afunclit(&p->to);
- if(proc == -1 || noreturn(p))
- gins(AUNDEF, N, N);
+ if(f->op == ONAME && f->class == PFUNC) {
+ p = gins(ACALL, N, f);
+ afunclit(&p->to, f);
+ if(proc == -1 || noreturn(p))
+ gins(AUNDEF, N, N);
+ break;
+ }
+ nodreg(&reg, types[tptr], D_AX);
+ nodreg(&r1, types[tptr], D_BX);
+ gmove(f, &reg);
+ reg.op = OINDREG;
+ gmove(&reg, &r1);
+ gins(ACALL, N, &r1);
+ break;
+
+ case 3: // normal call of c function pointer
+ gins(ACALL, N, f);
break;
case 1: // call in new proc (go)
fatal("cgen_callinter: badwidth");
nodo.op = OINDREG;
nodo.xoffset = n->left->xoffset + 3*widthptr + 8;
- cgen(&nodo, &nodr); // REG = 20+offset(REG) -- i.tab->fun[f]
+
+ if(proc == 0) {
+ // plain call: use direct c function pointer - more efficient
+ cgen(&nodo, &nodr); // REG = 20+offset(REG) -- i.tab->fun[f]
+ proc = 3;
+ } else {
+ // go/defer. generate go func value.
+ gins(ALEAL, &nodo, &nodr); // REG = &(20+offset(REG)) -- i.tab->fun[f]
+ }
// BOTCH nodr.type = fntype;
nodr.type = n->left->type;
* call afunclit to fix up the argument.
*/
void
-afunclit(Addr *a)
+afunclit(Addr *a, Node *n)
{
+ // Symbol naming (case PFUNC) substitutes the funcsym wrapper s·f;
+ // for a direct CALL restore the code symbol from the call node n.
if(a->type == D_ADDR && a->index == D_EXTERN) {
a->type = D_EXTERN;
a->index = D_NONE;
+ a->sym = n->sym;
}
}
case PFUNC:
a->index = D_EXTERN;
a->type = D_ADDR;
+ a->sym = funcsym(a->sym);
break;
}
break;
funcdepth = 0;
dclcontext = PEXTERN;
}
+
+// funcsym returns s·f, the symbol for the func value of function s:
+// a single pointer-sized data word pointing at s's code. Created
+// lazily on first request; remembered on the global funcsyms list so
+// the data word can be emitted later (dsymptr/ggloblsym loop).
+Sym*
+funcsym(Sym *s)
+{
+ char *p;
+ Sym *s1;
+
+ p = smprint("%s·f", s->name);
+ s1 = pkglookup(p, s->pkg);
+ free(p);
+ if(s1->def == N) {
+ s1->def = newname(s1);
+ s1->def->shortname = newname(s);
+ funcsyms = list(funcsyms, s1->def);
+ }
+ return s1;
+}
+
\ No newline at end of file
switch(n->op) {
default:
- fatal("gen: unknown op %N", n);
+ fatal("gen: unknown op %+hN", n);
break;
case OCASE:
EXTERN NodeList* closures;
EXTERN NodeList* exportlist;
EXTERN NodeList* importlist; // imported functions and methods with inlinable bodies
+EXTERN NodeList* funcsyms;
EXTERN int dclcontext; // PEXTERN/PAUTO
EXTERN int incannedimport;
EXTERN int statuniqgen; // name generator for static temps
Node* typedcl1(Node *n, Node *t, int local);
Node* typenod(Type *t);
NodeList* variter(NodeList *vl, Node *t, NodeList *el);
+Sym* funcsym(Sym*);
/*
* esc.c
ggloblnod(n, n->type->width);
}
+
+ for(l=funcsyms; l; l=l->next) {
+ n = l->n;
+ dsymptr(n->sym, 0, n->sym->def->shortname->sym, 0);
+ ggloblsym(n->sym, widthptr, 1, 1);
+ }
}
void
ptxt = gins(ATEXT, isblank(curfn->nname) ? N : curfn->nname, &nod1);
if(fn->dupok)
ptxt->TEXTFLAG = DUPOK;
- afunclit(&ptxt->from);
+ afunclit(&ptxt->from, curfn->nname);
ginit();
switch(n->op) {
default:
dump("walk", n);
- fatal("walkexpr: switch 1 unknown op %N", n);
+ fatal("walkexpr: switch 1 unknown op %+hN", n);
break;
case OTYPE:
usefield(n);
walkexpr(&n->left, init);
goto ret;
-
case OEFACE:
walkexpr(&n->left, init);
// makeFuncImpl is the closure value implementing the function
// returned by MakeFunc.
type makeFuncImpl struct {
+ codeptr unsafe.Pointer
+
// References visible to the garbage collector.
// The code array below contains the same references
// embedded in the machine code.
typ: t,
fn: fn,
}
+ impl.codeptr = unsafe.Pointer(&impl.code[0])
tptr := unsafe.Pointer(t)
fptr := *(*unsafe.Pointer)(unsafe.Pointer(&fn))
tmp := makeFuncStub
- stub := *(*unsafe.Pointer)(unsafe.Pointer(&tmp))
+ stub := **(**unsafe.Pointer)(unsafe.Pointer(&tmp))
// Create code. Copy template and fill in pointer values.
switch runtime.GOARCH {
cacheflush(&impl.code[0], &impl.code[len(impl.code)-1])
}
- return Value{t, unsafe.Pointer(&impl.code[0]), flag(Func) << flagKindShift}
+ return Value{t, unsafe.Pointer(impl), flag(Func) << flagKindShift}
}
func cacheflush(start, end *byte)
}
mt := p.typ
m.Type = mt
- fn := p.tfn
+ fn := unsafe.Pointer(&p.tfn)
m.Func = Value{mt, fn, fl}
m.Index = i
return
if iface.itab == nil {
panic(method + " of method on nil interface value")
}
- fn = iface.itab.fun[i]
+ fn = unsafe.Pointer(&iface.itab.fun[i])
rcvr = iface.word
} else {
ut := v.typ.uncommon()
if m.pkgPath != nil {
panic(method + " of unexported method")
}
- fn = m.ifn
+ fn = unsafe.Pointer(&m.ifn)
t = m.mtyp
rcvr = v.iword()
}
// code using reflect cannot obtain unsafe.Pointers
// without importing the unsafe package explicitly.
// It panics if v's Kind is not Chan, Func, Map, Ptr, Slice, or UnsafePointer.
+//
+// If v's Kind is Func, the returned pointer is an underlying
+// code pointer, but not necessarily enough to identify a
+// single function uniquely. The only guarantee is that the
+// result is zero if and only if v is a nil func Value.
func (v Value) Pointer() uintptr {
k := v.kind()
switch k {
- case Chan, Func, Map, Ptr, UnsafePointer:
- if k == Func && v.flag&flagMethod != 0 {
+ case Chan, Map, Ptr, UnsafePointer:
+ p := v.val
+ if v.flag&flagIndir != 0 {
+ p = *(*unsafe.Pointer)(p)
+ }
+ return uintptr(p)
+ case Func:
+ if v.flag&flagMethod != 0 {
panic("reflect.Value.Pointer of method Value")
}
p := v.val
if v.flag&flagIndir != 0 {
p = *(*unsafe.Pointer)(p)
}
+ // Non-nil func value points at data block.
+ // First word of data block is actual code.
+ if p != nil {
+ p = *(*unsafe.Pointer)(p)
+ }
return uintptr(p)
+
case Slice:
return (*SliceHeader)(v.val).Data
}
CALL runtime·schedinit(SB)
// create a new goroutine to start program
- PUSHL $runtime·main(SB) // entry
+ PUSHL $runtime·main·f(SB) // entry
PUSHL $0 // arg size
CALL runtime·newproc(SB)
POPL AX
INT $3
RET
+DATA runtime·main·f+0(SB)/4,$runtime·main(SB)
+GLOBL runtime·main·f(SB),8,$4
+
TEXT runtime·breakpoint(SB),7,$0
INT $3
RET
JMP AX
POPL BX // not reached
+// void gogocallfn(Gobuf*, FuncVal*)
+// restore state from Gobuf but then call fn.
+// (call fn, returning to state in Gobuf)
+TEXT runtime·gogocallfn(SB), 7, $0
+ MOVL 8(SP), AX // fn (FuncVal*)
+ MOVL 4(SP), BX // gobuf
+ MOVL gobuf_g(BX), DX
+ get_tls(CX)
+ MOVL DX, g(CX) // switch to gobuf's g
+ MOVL 0(DX), CX // make sure g != nil
+ MOVL gobuf_sp(BX), SP // restore SP
+ MOVL gobuf_pc(BX), BX
+ PUSHL BX // gobuf's PC becomes fn's return address
+ MOVL 0(AX), BX // first word of FuncVal is the code pointer
+ JMP BX
+ POPL BX // not reached
+
// void mcall(void (*fn)(G*))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
MOVL 8(SP), BX // caller sp
LEAL -4(BX), SP // caller sp after CALL
SUBL $5, (SP) // return to CALL again
- JMP AX // but first run the deferred function
+ MOVL 0(AX), BX
+ JMP BX // but first run the deferred function
// Dummy function to use in saved gobuf.PC,
// to match SP pointing at a return address.
CALL runtime·schedinit(SB)
// create a new goroutine to start program
- PUSHQ $runtime·main(SB) // entry
+ PUSHQ $runtime·main·f(SB) // entry
PUSHQ $0 // arg size
CALL runtime·newproc(SB)
POPQ AX
MOVL $0xf1, 0xf1 // crash
RET
+DATA runtime·main·f+0(SB)/8,$runtime·main(SB)
+GLOBL runtime·main·f(SB),8,$8
+
TEXT runtime·breakpoint(SB),7,$0
BYTE $0xcc
RET
JMP AX
POPQ BX // not reached
+// void gogocallfn(Gobuf*, FuncVal*)
+// restore state from Gobuf but then call fn.
+// (call fn, returning to state in Gobuf)
+TEXT runtime·gogocallfn(SB), 7, $0
+ MOVQ 16(SP), AX // fn (FuncVal*)
+ MOVQ 8(SP), BX // gobuf
+ MOVQ gobuf_g(BX), DX
+ get_tls(CX)
+ MOVQ DX, g(CX) // switch to gobuf's g
+ MOVQ 0(DX), CX // make sure g != nil
+ MOVQ gobuf_sp(BX), SP // restore SP
+ MOVQ gobuf_pc(BX), BX
+ PUSHQ BX // gobuf's PC becomes fn's return address
+ MOVQ 0(AX), BX // first word of FuncVal is the code pointer
+ JMP BX
+ POPQ BX // not reached
+
// void mcall(void (*fn)(G*))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
MOVQ 16(SP), BX // caller sp
LEAQ -8(BX), SP // caller sp after CALL
SUBQ $5, (SP) // return to CALL again
- JMP AX // but first run the deferred function
+ MOVQ 0(AX), BX
+ JMP BX // but first run the deferred function
// Dummy function to use in saved gobuf.PC,
// to match SP pointing at a return address.
BL runtime·schedinit(SB)
// create a new goroutine to start program
- MOVW $runtime·main(SB), R0
+ MOVW $runtime·main·f(SB), R0
MOVW.W R0, -4(R13)
MOVW $8, R0
MOVW.W R0, -4(R13)
MOVW $1000, R1
MOVW R0, (R1) // fail hard
+DATA runtime·main·f+0(SB)/4,$runtime·main(SB)
+GLOBL runtime·main·f(SB),8,$4
+
TEXT runtime·breakpoint(SB),7,$0
// gdb won't skip this breakpoint instruction automatically,
// so you must manually "set $pc+=4" to skip it and continue.
MOVW gobuf_pc(R3), LR
MOVW R1, PC
+// void gogocallfn(Gobuf*, FuncVal*)
+// restore state from Gobuf but then call fn.
+// (call fn, returning to state in Gobuf)
+// using frame size $-4 means do not save LR on stack.
+TEXT runtime·gogocallfn(SB), 7, $-4
+ MOVW 0(FP), R3 // gobuf
+ MOVW 4(FP), R1 // fn (FuncVal*)
+ MOVW 8(FP), R2 // fp offset
+ MOVW gobuf_g(R3), g
+ MOVW 0(g), R0 // make sure g != nil
+ MOVW cgo_save_gm(SB), R0
+ CMP $0, R0 // if in Cgo, we have to save g and m
+ BL.NE (R0) // this call will clobber R0
+ MOVW gobuf_sp(R3), SP // restore SP
+ MOVW gobuf_pc(R3), LR // gobuf's PC becomes fn's return address
+ MOVW R1, R0 // NOTE(review): R0 = FuncVal*; presumably the closure-context register — confirm
+ MOVW 0(R1), PC // jump to code pointer (first word of FuncVal)
+
// void mcall(void (*fn)(G*))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
MOVW fn+0(FP), R0
MOVW argp+4(FP), SP
MOVW $-4(SP), SP // SP is 4 below argp, due to saved LR
- B (R0)
+ MOVW 0(R0), R1
+ B (R1)
// Dummy function to use in saved gobuf.PC,
// to match SP pointing at a return address.
// Call from Go to C.
+static FuncVal unlockOSThread = { runtime·unlockOSThread };
+
void
runtime·cgocall(void (*fn)(void*), void *arg)
{
* cgo callback. Add entry to defer stack in case of panic.
*/
runtime·lockOSThread();
- d.fn = (byte*)runtime·unlockOSThread;
+ d.fn = &unlockOSThread;
d.siz = 0;
d.link = g->defer;
d.argp = (void*)-1; // unused because unlockm never recovers
m->cgomal = nil;
}
- if(g->defer != &d || d.fn != (byte*)runtime·unlockOSThread)
+ if(g->defer != &d || d.fn != &unlockOSThread)
runtime·throw("runtime: bad defer entry in cgocallback");
g->defer = d.link;
runtime·unlockOSThread();
// Call from C back to Go.
+static FuncVal unwindmf = {unwindm};
+
void
runtime·cgocallbackg(void (*fn)(void), void *arg, uintptr argsize)
{
Defer d;
+ FuncVal fv;
+ fv.fn = fn;
if(m->racecall) {
- reflect·call((byte*)fn, arg, argsize);
+ reflect·call(&fv, arg, argsize);
return;
}
}
// Add entry to defer stack in case of panic.
- d.fn = (byte*)unwindm;
+ d.fn = &unwindmf;
d.siz = 0;
d.link = g->defer;
d.argp = (void*)-1; // unused because unwindm never recovers
runtime·raceacquire(&cgosync);
// Invoke callback.
- reflect·call((byte*)fn, arg, argsize);
+ reflect·call(&fv, arg, argsize);
if(raceenabled)
runtime·racereleasemerge(&cgosync);
// Pop defer.
// Do not unwind m->g0->sched.sp.
// Our caller, cgocallback, will do that.
- if(g->defer != &d || d.fn != (byte*)unwindm)
+ if(g->defer != &d || d.fn != &unwindmf)
runtime·throw("runtime: bad defer entry in cgocallback");
g->defer = d.link;
if(siz < 0 || siz%4 != 0)
runtime·throw("bad closure size");
+ fn = *(byte**)fn;
ret = (byte**)((byte*)&arg0 + siz);
if(siz > 100) {
if(n%4)
n += 4 - n%4;
- p = runtime·mal(n);
+ p = runtime·mal(4+n);
*ret = p;
+ *(byte**)p = p+4;
+ p += 4;
q = p + n - siz;
if(siz > 0) {
if(siz < 0 || siz%8 != 0)
runtime·throw("bad closure size");
+ fn = *(byte**)fn;
ret = (byte**)((byte*)&arg0 + siz);
if(siz > 100) {
if(n%8)
n += 8 - n%8;
- p = runtime·mal(n);
+ p = runtime·mal(8+n);
*ret = p;
+ *(byte**)p = (p+8);
+ p += 8;
q = p + n - siz;
+
if(siz > 0) {
runtime·memmove(q, (byte*)&arg0, siz);
if(siz < 0 || siz%4 != 0)
runtime·throw("bad closure size");
+ fn = *(byte**)fn;
ret = (byte**)((byte*)&arg0 + siz);
if(siz > 100) {
// store args aligned after code, so gc can find them.
n += siz;
- p = runtime·mal(n);
+ p = runtime·mal(4+n);
*ret = p;
+ *(byte**)p = p+4;
+ p += 4;
q = p + n - siz;
pc = (uint32*)p;
void runtime·helpgc(int32 nproc);
void runtime·gchelper(void);
-bool runtime·getfinalizer(void *p, bool del, void (**fn)(void*), uintptr *nret);
+bool runtime·getfinalizer(void *p, bool del, FuncVal **fn, uintptr *nret);
void runtime·walkfintab(void (*fn)(void*));
enum
typedef struct Fin Fin;
struct Fin
{
- void (*fn)(void*);
+ FuncVal *fn; // finalizer as a func value (was a raw code pointer)
uintptr nret; // bytes of fn's return values; frame is sizeof(uintptr)+nret
};
} fintab[TABSZ];
static void
-addfintab(Fintab *t, void *k, void (*fn)(void*), uintptr nret)
+addfintab(Fintab *t, void *k, FuncVal *fn, uintptr nret)
{
int32 i, j;
}
bool
-runtime·addfinalizer(void *p, void (*f)(void*), uintptr nret)
+runtime·addfinalizer(void *p, FuncVal *f, uintptr nret)
{
Fintab *tab;
byte *base;
// get finalizer; if del, delete finalizer.
// caller is responsible for updating RefHasFinalizer (special) bit.
bool
-runtime·getfinalizer(void *p, bool del, void (**fn)(void*), uintptr *nret)
+runtime·getfinalizer(void *p, bool del, FuncVal **fn, uintptr *nret)
{
Fintab *tab;
bool res;
typedef struct Finalizer Finalizer;
struct Finalizer
{
- void (*fn)(void*);
+ FuncVal *fn; // finalizer as a func value (was a raw code pointer)
void *arg; // object being finalized; copied into the call frame
uintptr nret; // bytes of fn's return values; frame is sizeof(uintptr)+nret
};
static bool
handlespecial(byte *p, uintptr size)
{
- void (*fn)(void*);
+ FuncVal *fn;
uintptr nret;
FinBlock *block;
Finalizer *f;
{
byte *p;
struct gc_args a, *ap;
+ FuncVal gcv;
// The atomic operations are not atomic if the uint64s
// are not aligned on uint64 boundaries. This has been
a.force = force;
ap = &a;
m->moreframesize_minalloc = StackBig;
- reflect·call((byte*)gc, (byte*)&ap, sizeof(ap));
+ gcv.fn = (void*)gc;
+ reflect·call(&gcv, (byte*)&ap, sizeof(ap));
if(gctrace > 1 && !force) {
a.force = 1;
}
}
+static FuncVal runfinqv = {runfinq};
+
static void
gc(struct gc_args *args)
{
m->locks++; // disable gc during the mallocs in newproc
// kick off or wake up goroutine to run queued finalizers
if(fing == nil)
- fing = runtime·newproc1((byte*)runfinq, nil, 0, 0, runtime·gc);
+ fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc);
else if(fingwait) {
fingwait = 0;
runtime·ready(fing);
framecap = framesz;
}
*(void**)frame = f->arg;
- reflect·call((byte*)f->fn, frame, sizeof(uintptr) + f->nret);
+ reflect·call(f->fn, frame, sizeof(uintptr) + f->nret);
f->fn = nil;
f->arg = nil;
}
return sumreleased;
}
+static FuncVal forcegchelperv = {(void(*)(void))forcegchelper};
+
// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
// GC blocks other goroutines via the runtime·worldsema.
runtime·noteclear(&note);
notep = &note;
- runtime·newproc1((byte*)forcegchelper, (byte*)&notep, sizeof(notep), 0, runtime·MHeap_Scavenger);
+ runtime·newproc1(&forcegchelperv, (byte*)&notep, sizeof(notep), 0, runtime·MHeap_Scavenger);
runtime·entersyscallblock();
runtime·notesleep(&note);
runtime·exitsyscall();
// functions that split the stack.
#pragma textflag 7
uintptr
-runtime·deferproc(int32 siz, byte* fn, ...)
+runtime·deferproc(int32 siz, FuncVal *fn, ...)
{
Defer *d;
runtime·deferreturn(uintptr arg0)
{
Defer *d;
- byte *argp, *fn;
+ byte *argp;
+ FuncVal *fn;
d = g->defer;
if(d == nil)
void
runtime·parforsetup2(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void *body)
{
- runtime·parforsetup(desc, nthr, n, ctx, wait, (void(*)(ParFor*, uint32))body);
+ runtime·parforsetup(desc, nthr, n, ctx, wait, *(void(**)(ParFor*, uint32))body);
}
void
extern void main·init(void);
extern void main·main(void);
+static FuncVal scavenger = {runtime·MHeap_Scavenger};
+
// The main goroutine.
void
runtime·main(void)
// From now on, newgoroutines may use non-main threads.
setmcpumax(runtime·gomaxprocs);
runtime·sched.init = true;
- scvg = runtime·newproc1((byte*)runtime·MHeap_Scavenger, nil, 0, 0, runtime·main);
+ scvg = runtime·newproc1(&scavenger, nil, 0, 0, runtime·main);
scvg->issystem = true;
// The deadlock detection has false negatives.
// Let scvg start up, to eliminate the false negative
runtime·resetcpuprofiler(hz);
if(gp->sched.pc == (byte*)runtime·goexit) { // kickoff
- runtime·gogocall(&gp->sched, (void(*)(void))gp->entry);
+ runtime·gogocallfn(&gp->sched, gp->fnstart);
}
runtime·gogo(&gp->sched, 0);
}
// functions that split the stack.
#pragma textflag 7
void
-runtime·newproc(int32 siz, byte* fn, ...)
+runtime·newproc(int32 siz, FuncVal* fn, ...)
{
byte *argp;
// address of the go statement that created this. The new g is put
// on the queue of g's waiting to run.
G*
-runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
+runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
byte *sp;
G *newg;
newg->sched.sp = (uintptr)sp;
newg->sched.pc = (byte*)runtime·goexit;
newg->sched.g = newg;
- newg->entry = fn;
+ newg->fnstart = fn;
newg->gopc = (uintptr)callerpc;
if(raceenabled)
newg->racectx = racectx;
typedef struct Slice Slice;
typedef struct Stktop Stktop;
typedef struct String String;
+typedef struct FuncVal FuncVal;
typedef struct SigTab SigTab;
typedef struct MCache MCache;
typedef struct FixAlloc FixAlloc;
typedef struct SEH SEH;
typedef struct Timers Timers;
typedef struct Timer Timer;
-typedef struct GCStats GCStats;
-typedef struct LFNode LFNode;
-typedef struct ParFor ParFor;
-typedef struct ParForThread ParForThread;
-typedef struct CgoMal CgoMal;
+typedef struct GCStats GCStats;
+typedef struct LFNode LFNode;
+typedef struct ParFor ParFor;
+typedef struct ParForThread ParForThread;
+typedef struct CgoMal CgoMal;
/*
* Per-CPU declaration.
byte* str;
intgo len;
};
+// FuncVal is the runtime representation of a Go func value:
+// a pointer to a block whose first word is the code pointer.
+// Callers (gogocallfn, jmpdefer, closure stubs) load fn and jump.
+struct FuncVal
+{
+ void (*fn)(void);
+ // variable-size, fn-specific data here
+};
struct Iface
{
Itab* tab;
uintptr gcsp; // if status==Gsyscall, gcsp = sched.sp to use during gc
uintptr gcguard; // if status==Gsyscall, gcguard = stackguard to use during gc
uintptr stack0;
- byte* entry; // initial function
+ FuncVal* fnstart; // initial function
G* alllink; // on allg
void* param; // passed parameter on wakeup
int16 status;
// a well-behaved function and not block.
int64 when;
int64 period;
- void (*f)(int64, Eface);
+ FuncVal *fv;
Eface arg;
};
bool free; // if special, free when done
byte* argp; // where args were copied from
byte* pc;
- byte* fn;
+ FuncVal* fn;
Defer* link;
void* args[1]; // padded to actual size
};
void runtime·gogo(Gobuf*, uintptr);
void runtime·gogocall(Gobuf*, void(*)(void));
+void runtime·gogocallfn(Gobuf*, FuncVal*);
void runtime·gosave(Gobuf*);
void runtime·lessstack(void);
void runtime·goargs(void);
uint64 runtime·atomicload64(uint64 volatile*);
void* runtime·atomicloadp(void* volatile*);
void runtime·atomicstorep(void* volatile*, void*);
-void runtime·jmpdefer(byte*, void*);
+void runtime·jmpdefer(FuncVal*, void*);
void runtime·exit1(int32);
void runtime·ready(G*);
byte* runtime·getenv(int8*);
uintptr runtime·efacehash(Eface, uintptr);
void* runtime·malloc(uintptr size);
void runtime·free(void *v);
-bool runtime·addfinalizer(void*, void(*fn)(void*), uintptr);
+bool runtime·addfinalizer(void*, FuncVal *fn, uintptr);
void runtime·runpanic(Panic*);
void* runtime·getcallersp(void*);
int32 runtime·mcount(void);
void runtime·entersyscall(void);
void runtime·entersyscallblock(void);
void runtime·exitsyscall(void);
-G* runtime·newproc1(byte*, byte*, int32, int32, void*);
+G* runtime·newproc1(FuncVal*, byte*, int32, int32, void*);
bool runtime·sigsend(int32 sig);
int32 runtime·callers(int32, uintptr*, int32);
int32 runtime·gentraceback(byte*, byte*, byte*, G*, int32, uintptr*, int32);
void runtime·printhex(uint64);
void runtime·printslice(Slice);
void runtime·printcomplex(Complex128);
-void reflect·call(byte*, byte*, uint32);
+void reflect·call(FuncVal*, byte*, uint32);
void runtime·panic(Eface);
void runtime·panicindex(void);
void runtime·panicslice(void);
label.sp = (uintptr)sp;
label.pc = (byte*)runtime·lessstack;
label.g = m->curg;
- runtime·gogocall(&label, m->morepc);
+ if(reflectcall)
+ runtime·gogocallfn(&label, (FuncVal*)m->morepc);
+ else
+ runtime·gogocall(&label, m->morepc);
*(int32*)345 = 123; // never return
}
runtime·ready(e.data);
}
+static FuncVal readyv = {(void(*)(void))ready};
+
// Put the current goroutine to sleep for ns nanoseconds.
void
runtime·tsleep(int64 ns, int8 *reason)
t.when = runtime·nanotime() + ns;
t.period = 0;
- t.f = ready;
+ t.fv = &readyv;
t.arg.data = g;
runtime·lock(&timers);
addtimer(&t);
runtime·park(runtime·unlock, &timers, reason);
}
+static FuncVal timerprocv = {timerproc};
+
// Add a timer to the heap and start or kick the timer proc
// if the new timer is earlier than any of the others.
static void
}
}
if(timers.timerproc == nil) {
- timers.timerproc = runtime·newproc1((byte*)timerproc, nil, 0, 0, addtimer);
+ timers.timerproc = runtime·newproc1(&timerprocv, nil, 0, 0, addtimer);
timers.timerproc->issystem = true;
}
}
siftdown(0);
t->i = -1; // mark as removed
}
- f = t->f;
+ f = (void*)t->fv->fn;
arg = t->arg;
runtime·unlock(&timers);
if(raceenabled)
waspanic = false;
// If the PC is goexit, the goroutine hasn't started yet.
- if(pc == (uintptr)runtime·goexit && gp->entry != 0) {
- pc = (uintptr)gp->entry;
+ if(pc == (uintptr)runtime·goexit && gp->fnstart != nil) {
+ pc = (uintptr)gp->fnstart->fn;
lr = (uintptr)runtime·goexit;
}
waspanic = false;
// If the PC is goexit, the goroutine hasn't started yet.
- if(pc0 == gp->sched.pc && sp == (byte*)gp->sched.sp && pc0 == (byte*)runtime·goexit && gp->entry != 0) {
+ if(pc0 == gp->sched.pc && sp == (byte*)gp->sched.sp && pc0 == (byte*)runtime·goexit && gp->fnstart != nil) {
fp = sp;
lr = pc;
- pc = (uintptr)gp->entry;
+ pc = (uintptr)gp->fnstart->fn;
}
// If the PC is zero, it's likely a nil function call.
i int32
when int64
period int64
- f func(int64, interface{})
+ f func(int64, interface{}) // NOTE: must not be closure
arg interface{}
}