fmt.Fprintf(fc, "void\n")
fmt.Fprintf(fc, "·%s(struct{uint8 x[%d];}p)\n", n.Mangle, argSize)
fmt.Fprintf(fc, "{\n")
- fmt.Fprintf(fc, "\tcgocall(_cgo%s, &p);\n", n.Mangle)
+ fmt.Fprintf(fc, "\truntime·cgocall(_cgo%s, &p);\n", n.Mangle)
if n.AddError {
// gcc leaves errno in first word of interface at end of p.
// check whether it is zero; if so, turn interface into nil.
fmt.Fprintf(fc, "\nvoid\n")
fmt.Fprintf(fc, "_cgoexp_%s(void *a, int32 n)\n", exp.ExpName)
fmt.Fprintf(fc, "{\n")
- fmt.Fprintf(fc, "\tcgocallback(·%s, a, n);\n", goname)
+ fmt.Fprintf(fc, "\truntime·cgocallback(·%s, a, n);\n", goname)
fmt.Fprintf(fc, "}\n")
// Calling a function with a receiver from C requires
void
·_Cfunc_GoString(int8 *p, String s)
{
- s = gostring((byte*)p);
+ s = runtime·gostring((byte*)p);
FLUSH(&s);
}
void
·_Cfunc_GoStringN(int8 *p, int32 l, String s)
{
- s = gostringn((byte*)p, l);
+ s = runtime·gostringn((byte*)p, l);
FLUSH(&s);
}
void
·_Cfunc_CString(String s, int8 *p)
{
- p = cmalloc(s.len+1);
- mcpy((byte*)p, s.str, s.len);
+ p = runtime·cmalloc(s.len+1);
+ runtime·mcpy((byte*)p, s.str, s.len);
p[s.len] = 0;
FLUSH(&p);
}
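These three thunks are what back cgo's C.GoString, C.GoStringN and C.CString conversions. As a rough sketch (not part of this patch), a cgo program that exercises them might look like the following; the C preamble and the explicit free are the caller's responsibility:

    package main

    /*
    #include <stdlib.h>
    #include <string.h>
    */
    import "C"

    import (
            "fmt"
            "unsafe"
    )

    func main() {
            p := C.CString("hello")         // allocated with C malloc via the cmalloc helper above
            defer C.free(unsafe.Pointer(p)) // the caller must free it
            s := C.GoString(p)              // copied back into a Go string by the gostring helper
            fmt.Println(s, C.strlen(p))
    }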
char *runtimeimport =
"package runtime\n"
- "func \"\".mal (? int32) *any\n"
+ "func \"\".new (? int32) *any\n"
"func \"\".panicindex ()\n"
"func \"\".panicslice ()\n"
"func \"\".throwreturn ()\n"
"func \"\".printslice (? any)\n"
"func \"\".printnl ()\n"
"func \"\".printsp ()\n"
- "func \"\".printf ()\n"
+ "func \"\".goprintf ()\n"
"func \"\".concatstring ()\n"
"func \"\".append ()\n"
"func \"\".appendslice (typ *uint8, x any, y []any) any\n"
// emitted by compiler, not referred to by go programs
-func mal(int32) *any
+func new(int32) *any
func panicindex()
func panicslice()
func throwreturn()
func printslice(any)
func printnl()
func printsp()
-func printf()
+func goprintf()
// filled in by compiler: int n, string, string, ...
func concatstring()
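The renamed entries above are functions the compiler itself emits calls to; ordinary Go code never names them. For orientation only, a small program (not part of the change) that ends up calling several of them, assuming the usual lowering of string concatenation, append, and println:

    package main

    func main() {
            a, b := "runtime", "prefix"
            s := a + "·" + b             // non-constant string + lowers to concatstring
            xs := append([]int{1}, 2, 3) // lowers to the runtime append helpers
            println(s, len(xs))          // println lowers to the print*/printnl builtins
    }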
if(defer) {
if(op == OPRINTN)
fmtprint(&fmt, "\n");
- on = syslook("printf", 1);
+ on = syslook("goprintf", 1);
on->type = functype(nil, intypes, nil);
args->n = nod(OLITERAL, N, N);
args->n->val.ctype = CTSTR;
Node *fn;
dowidth(t);
- fn = syslook("mal", 1);
+ fn = syslook("new", 1);
argtype(fn, t);
return mkcall1(fn, ptrto(t), nil, nodintconst(t->width));
}
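callnew above is where new(T) turns into a call of the (renamed) allocator builtin. A trivial, illustrative piece of source that reaches it:

    package main

    import "fmt"

    func main() {
            p := new(int) // the compiler lowers new(T) to a call of the allocator looked up above
            *p = 42
            fmt.Println(*p)
    }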
JMP ok
// set up %gs
- CALL ldt0setup(SB)
+ CALL runtime·ldt0setup(SB)
// store through it, to make sure it works
- CMPL isplan9(SB), $1
+ CMPL runtime·isplan9(SB), $1
JEQ ok
get_tls(BX)
MOVL $0x123, g(BX)
- MOVL tls0(SB), AX
+ MOVL runtime·tls0(SB), AX
CMPL AX, $0x123
JEQ ok
MOVL AX, 0 // abort
ok:
// set up m and g "registers"
get_tls(BX)
- LEAL g0(SB), CX
+ LEAL runtime·g0(SB), CX
MOVL CX, g(BX)
- LEAL m0(SB), AX
+ LEAL runtime·m0(SB), AX
MOVL AX, m(BX)
// save m->g0 = g0
LEAL (-8192+104)(SP), AX // TODO: 104?
MOVL AX, g_stackguard(CX)
MOVL SP, g_stackbase(CX)
- CALL emptyfunc(SB) // fault if stack check is wrong
+ CALL runtime·emptyfunc(SB) // fault if stack check is wrong
// convention is D is always cleared
CLD
- CALL check(SB)
+ CALL runtime·check(SB)
// saved argc, argv
MOVL 120(SP), AX
MOVL AX, 0(SP)
MOVL 124(SP), AX
MOVL AX, 4(SP)
- CALL args(SB)
- CALL osinit(SB)
- CALL schedinit(SB)
+ CALL runtime·args(SB)
+ CALL runtime·osinit(SB)
+ CALL runtime·schedinit(SB)
// create a new goroutine to start program
- PUSHL $mainstart(SB) // entry
+ PUSHL $runtime·mainstart(SB) // entry
PUSHL $0 // arg size
- CALL ·newproc(SB)
+ CALL runtime·newproc(SB)
POPL AX
POPL AX
// start this M
- CALL mstart(SB)
+ CALL runtime·mstart(SB)
INT $3
RET
-TEXT mainstart(SB),7,$0
+TEXT runtime·mainstart(SB),7,$0
CALL main·init(SB)
- CALL initdone(SB)
+ CALL runtime·initdone(SB)
CALL main·main(SB)
PUSHL $0
- CALL exit(SB)
+ CALL runtime·exit(SB)
POPL AX
INT $3
RET
-TEXT breakpoint(SB),7,$0
+TEXT runtime·breakpoint(SB),7,$0
INT $3
RET
// uintptr gosave(Gobuf*)
// save state in Gobuf; setjmp
-TEXT gosave(SB), 7, $0
+TEXT runtime·gosave(SB), 7, $0
MOVL 4(SP), AX // gobuf
LEAL 4(SP), BX // caller's SP
MOVL BX, gobuf_sp(AX)
// void gogo(Gobuf*, uintptr)
// restore state from Gobuf; longjmp
-TEXT gogo(SB), 7, $0
+TEXT runtime·gogo(SB), 7, $0
MOVL 8(SP), AX // return 2nd arg
MOVL 4(SP), BX // gobuf
MOVL gobuf_g(BX), DX
// void gogocall(Gobuf*, void (*fn)(void))
// restore state from Gobuf but then call fn.
// (call fn, returning to state in Gobuf)
-TEXT gogocall(SB), 7, $0
+TEXT runtime·gogocall(SB), 7, $0
MOVL 8(SP), AX // fn
MOVL 4(SP), BX // gobuf
MOVL gobuf_g(BX), DX
*/
// Called during function prolog when more stack is needed.
-TEXT ·morestack(SB),7,$0
+TEXT runtime·morestack(SB),7,$0
// Cannot grow scheduler stack (m->g0).
get_tls(CX)
MOVL m(CX), BX
MOVL m_g0(BX), BP
MOVL BP, g(CX)
MOVL (m_sched+gobuf_sp)(BX), SP
- CALL newstack(SB)
+ CALL runtime·newstack(SB)
MOVL $0, 0x1003 // crash if newstack returns
RET
get_tls(CX)
MOVL BP, g(CX)
MOVL (m_sched+gobuf_sp)(BX), SP
- CALL newstack(SB)
+ CALL runtime·newstack(SB)
MOVL $0, 0x1103 // crash if newstack returns
RET
// Return point when leaving stack.
-TEXT ·lessstack(SB), 7, $0
+TEXT runtime·lessstack(SB), 7, $0
// Save return value in m->cret
get_tls(CX)
MOVL m(CX), BX
MOVL m_g0(BX), DX
MOVL DX, g(CX)
MOVL (m_sched+gobuf_sp)(BX), SP
- CALL oldstack(SB)
+ CALL runtime·oldstack(SB)
MOVL $0, 0x1004 // crash if oldstack returns
RET
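morestack and lessstack are the two halves of the segmented-stack machinery: the function prolog jumps to morestack when the stack guard is exceeded, and lessstack is the return point that unwinds back to the previous segment. Any sufficiently deep recursion exercises the pair; a minimal sketch, illustrative only:

    package main

    import "fmt"

    func depth(n int) int {
            if n == 0 {
                    return 0
            }
            return 1 + depth(n-1) // each prolog checks the stack guard; overflow enters morestack
    }

    func main() {
            fmt.Println(depth(100000))
    }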
// return 1;
// }else
// return 0;
-TEXT cas(SB), 7, $0
+TEXT runtime·cas(SB), 7, $0
MOVL 4(SP), BX
MOVL 8(SP), AX
MOVL 12(SP), CX
// return 1;
// }else
// return 0;
-TEXT casp(SB), 7, $0
+TEXT runtime·casp(SB), 7, $0
MOVL 4(SP), BX
MOVL 8(SP), AX
MOVL 12(SP), CX
// 1. pop the caller
// 2. sub 5 bytes from the caller's return
// 3. jmp to the argument
-TEXT jmpdefer(SB), 7, $0
+TEXT runtime·jmpdefer(SB), 7, $0
MOVL 4(SP), AX // fn
MOVL 8(SP), BX // caller sp
LEAL -4(BX), SP // caller sp after CALL
SUBL $5, (SP) // return to CALL again
JMP AX // but first run the deferred function
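jmpdefer is called from deferreturn: it rewinds the return address by the size of a CALL instruction so that, after the deferred function runs, deferreturn is re-entered to pop the next deferred call. In source terms this is simply (illustrative, not part of the patch):

    package main

    import "fmt"

    func main() {
            defer fmt.Println("deferred") // run on the way out via deferreturn, which uses jmpdefer
            fmt.Println("body")
    }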
-TEXT ·memclr(SB),7,$0
+TEXT runtime·memclr(SB),7,$0
MOVL 4(SP), DI // arg 1 addr
MOVL 8(SP), CX // arg 2 count
ADDL $3, CX
STOSL
RET
-TEXT ·getcallerpc+0(SB),7,$0
+TEXT runtime·getcallerpc(SB),7,$0
MOVL x+0(FP),AX // addr of first arg
MOVL -4(AX),AX // get calling pc
RET
-TEXT ·setcallerpc+0(SB),7,$0
+TEXT runtime·setcallerpc(SB),7,$0
MOVL x+0(FP),AX // addr of first arg
MOVL x+4(FP), BX
MOVL BX, -4(AX) // set calling pc
RET
-TEXT getcallersp(SB), 7, $0
+TEXT runtime·getcallersp(SB), 7, $0
MOVL sp+0(FP), AX
RET
-TEXT ldt0setup(SB),7,$16
+TEXT runtime·ldt0setup(SB),7,$16
// set up ldt 7 to point at tls0
// ldt 1 would be fine on Linux, but on OS X, 7 is as low as we can go.
// the entry number is just a hint. setldt will set up GS with what it used.
MOVL $7, 0(SP)
- LEAL tls0(SB), AX
+ LEAL runtime·tls0(SB), AX
MOVL AX, 4(SP)
MOVL $32, 8(SP) // sizeof(tls array)
- CALL setldt(SB)
+ CALL runtime·setldt(SB)
RET
-TEXT emptyfunc(SB),0,$0
+TEXT runtime·emptyfunc(SB),0,$0
RET
-TEXT abort(SB),7,$0
+TEXT runtime·abort(SB),7,$0
INT $0x3
// runcgo(void(*fn)(void*), void *arg)
// Call fn(arg) on the scheduler stack,
// aligned appropriately for the gcc ABI.
-TEXT runcgo(SB),7,$16
+TEXT runtime·runcgo(SB),7,$16
MOVL fn+0(FP), AX
MOVL arg+4(FP), BX
MOVL SP, CX
// runcgocallback(G *g1, void* sp, void (*fn)(void))
// Switch to g1 and sp, call fn, switch back. fn's arguments are on
// the new stack.
-TEXT runcgocallback(SB),7,$32
+TEXT runtime·runcgocallback(SB),7,$32
MOVL g1+0(FP), DX
MOVL sp+4(FP), AX
MOVL fn+8(FP), BX
RET
// check that SP is in range [g->stackbase, g->stackguard)
-TEXT stackcheck(SB), 7, $0
+TEXT runtime·stackcheck(SB), 7, $0
get_tls(CX)
MOVL g(CX), AX
CMPL g_stackbase(AX), SP
INT $3
RET
-GLOBL m0(SB), $1024
-GLOBL g0(SB), $1024
-GLOBL tls0(SB), $32
-GLOBL initcgo(SB), $4
+GLOBL runtime·tls0(SB), $32
// fn func(arg0, arg1, arg2 *ptr, callerpc uintptr, xxx) yyy,
// arg0, arg1, arg2 *ptr) (func(xxx) yyy)
void
-·closure(int32 siz, byte *fn, byte *arg0)
+runtime·closure(int32 siz, byte *fn, byte *arg0)
{
byte *p, *q, **ret;
int32 i, n;
int32 pcrel;
- if(goos != nil && strcmp((uint8*)goos, (uint8*)"nacl") == 0)
- throw("no closures in native client yet");
+ if(runtime·goos != nil && runtime·strcmp((uint8*)runtime·goos, (uint8*)"nacl") == 0)
+ runtime·throw("no closures in native client yet");
if(siz < 0 || siz%4 != 0)
- throw("bad closure size");
+ runtime·throw("bad closure size");
ret = (byte**)((byte*)&arg0 + siz);
if(siz > 100) {
// TODO(rsc): implement stack growth preamble?
- throw("closure too big");
+ runtime·throw("closure too big");
}
// compute size of new fn.
if(n%4)
n += 4 - n%4;
- p = mal(n);
+ p = runtime·mal(n);
*ret = p;
q = p + n - siz;
if(siz > 0) {
- mcpy(q, (byte*)&arg0, siz);
+ runtime·mcpy(q, (byte*)&arg0, siz);
// SUBL $siz, SP
*p++ = 0x81;
*p++ = 0xc3;
if(p > q)
- throw("bad math in sys.closure");
+ runtime·throw("bad math in sys.closure");
}
-
-
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
- TEXT memmove(SB), 7, $0
-
+TEXT runtime·memmove(SB), 7, $0
MOVL to+0(FP), DI
MOVL fr+4(FP), SI
MOVL n+8(FP), BX
* C runtime for 64-bit divide.
*/
-TEXT _mul64by32(SB), 7, $0
+TEXT _mul64by32(SB), 7, $0
MOVL r+0(FP), CX
MOVL a+4(FP), AX
MULL b+12(FP)
MOVL BX, 4(CX)
RET
-TEXT _div64by32(SB), 7, $0
+TEXT _div64by32(SB), 7, $0
MOVL r+12(FP), CX
MOVL a+0(FP), AX
MOVL a+4(FP), DX
};
};
-void abort(void);
+void runtime·abort(void);
void
_d2v(Vlong *y, double d)
}
void
-·uint64div(Vlong n, Vlong d, Vlong q)
+runtime·uint64div(Vlong n, Vlong d, Vlong q)
{
_divvu(&q, n, d);
}
}
void
-·uint64mod(Vlong n, Vlong d, Vlong q)
+runtime·uint64mod(Vlong n, Vlong d, Vlong q)
{
_modvu(&q, n, d);
}
}
void
-·int64div(Vlong n, Vlong d, Vlong q)
+runtime·int64div(Vlong n, Vlong d, Vlong q)
{
_divv(&q, n, d);
}
}
void
-·int64mod(Vlong n, Vlong d, Vlong q)
+runtime·int64mod(Vlong n, Vlong d, Vlong q)
{
_modv(&q, n, d);
}
u.hi = 0;
switch(type) {
default:
- abort();
+ runtime·abort();
break;
case 1: /* schar */
#include "amd64/asm.h"
-TEXT _rt0_amd64(SB),7,$-8
+TEXT _rt0_amd64(SB),7,$-8
// copy arguments forward on an even stack
MOVQ 0(DI), AX // argc
LEAQ 8(DI), BX // argv
JMP ok
needtls:
- LEAQ tls0(SB), DI
- CALL settls(SB)
+ LEAQ runtime·tls0(SB), DI
+ CALL runtime·settls(SB)
// store through it, to make sure it works
get_tls(BX)
MOVQ $0x123, g(BX)
- MOVQ tls0(SB), AX
+ MOVQ runtime·tls0(SB), AX
CMPQ AX, $0x123
JEQ 2(PC)
MOVL AX, 0 // abort
ok:
// set the per-goroutine and per-mach "registers"
get_tls(BX)
- LEAQ g0(SB), CX
+ LEAQ runtime·g0(SB), CX
MOVQ CX, g(BX)
- LEAQ m0(SB), AX
+ LEAQ runtime·m0(SB), AX
MOVQ AX, m(BX)
// save m->g0 = g0
MOVQ SP, g_stackbase(CX)
CLD // convention is D is always left cleared
- CALL check(SB)
+ CALL runtime·check(SB)
MOVL 16(SP), AX // copy argc
MOVL AX, 0(SP)
MOVQ 24(SP), AX // copy argv
MOVQ AX, 8(SP)
- CALL args(SB)
- CALL osinit(SB)
- CALL schedinit(SB)
+ CALL runtime·args(SB)
+ CALL runtime·osinit(SB)
+ CALL runtime·schedinit(SB)
// create a new goroutine to start program
- PUSHQ $mainstart(SB) // entry
+ PUSHQ $runtime·mainstart(SB) // entry
PUSHQ $0 // arg size
- CALL ·newproc(SB)
+ CALL runtime·newproc(SB)
POPQ AX
POPQ AX
// start this M
- CALL mstart(SB)
+ CALL runtime·mstart(SB)
- CALL notok(SB) // never returns
+ CALL runtime·notok(SB) // never returns
RET
-TEXT mainstart(SB),7,$0
+TEXT runtime·mainstart(SB),7,$0
CALL main·init(SB)
- CALL initdone(SB)
+ CALL runtime·initdone(SB)
CALL main·main(SB)
PUSHQ $0
- CALL exit(SB)
+ CALL runtime·exit(SB)
POPQ AX
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT breakpoint(SB),7,$0
+TEXT runtime·breakpoint(SB),7,$0
BYTE $0xcc
RET
// uintptr gosave(Gobuf*)
// save state in Gobuf; setjmp
-TEXT gosave(SB), 7, $0
+TEXT runtime·gosave(SB), 7, $0
MOVQ 8(SP), AX // gobuf
LEAQ 8(SP), BX // caller's SP
MOVQ BX, gobuf_sp(AX)
// void gogo(Gobuf*, uintptr)
// restore state from Gobuf; longjmp
-TEXT gogo(SB), 7, $0
+TEXT runtime·gogo(SB), 7, $0
MOVQ 16(SP), AX // return 2nd arg
MOVQ 8(SP), BX // gobuf
MOVQ gobuf_g(BX), DX
// void gogocall(Gobuf*, void (*fn)(void))
// restore state from Gobuf but then call fn.
// (call fn, returning to state in Gobuf)
-TEXT gogocall(SB), 7, $0
+TEXT runtime·gogocall(SB), 7, $0
MOVQ 16(SP), AX // fn
MOVQ 8(SP), BX // gobuf
MOVQ gobuf_g(BX), DX
// Called during function prolog when more stack is needed.
// Caller has already done get_tls(CX); MOVQ m(CX), BX.
-TEXT morestack(SB),7,$0
+TEXT runtime·morestack(SB),7,$0
// Cannot grow scheduler stack (m->g0).
MOVQ m_g0(BX), SI
CMPQ g(CX), SI
MOVQ m_g0(BX), BP
MOVQ BP, g(CX)
MOVQ (m_sched+gobuf_sp)(BX), SP
- CALL newstack(SB)
+ CALL runtime·newstack(SB)
MOVQ $0, 0x1003 // crash if newstack returns
RET
get_tls(CX)
MOVQ BP, g(CX)
MOVQ (m_sched+gobuf_sp)(BX), SP
- CALL newstack(SB)
+ CALL runtime·newstack(SB)
MOVQ $0, 0x1103 // crash if newstack returns
RET
// Return point when leaving stack.
-TEXT ·lessstack(SB), 7, $0
+TEXT runtime·lessstack(SB), 7, $0
// Save return value in m->cret
get_tls(CX)
MOVQ m(CX), BX
MOVQ m_g0(BX), DX
MOVQ DX, g(CX)
MOVQ (m_sched+gobuf_sp)(BX), SP
- CALL oldstack(SB)
+ CALL runtime·oldstack(SB)
MOVQ $0, 0x1004 // crash if oldstack returns
RET
// morestack trampolines
-TEXT ·morestack00+0(SB),7,$0
+TEXT runtime·morestack00(SB),7,$0
get_tls(CX)
MOVQ m(CX), BX
MOVQ $0, AX
MOVQ AX, m_moreframe(BX)
- MOVQ $morestack+0(SB), AX
+ MOVQ $runtime·morestack(SB), AX
JMP AX
-TEXT ·morestack01+0(SB),7,$0
+TEXT runtime·morestack01(SB),7,$0
get_tls(CX)
MOVQ m(CX), BX
SHLQ $32, AX
MOVQ AX, m_moreframe(BX)
- MOVQ $morestack+0(SB), AX
+ MOVQ $runtime·morestack(SB), AX
JMP AX
-TEXT ·morestack10+0(SB),7,$0
+TEXT runtime·morestack10(SB),7,$0
get_tls(CX)
MOVQ m(CX), BX
MOVLQZX AX, AX
MOVQ AX, m_moreframe(BX)
- MOVQ $morestack+0(SB), AX
+ MOVQ $runtime·morestack(SB), AX
JMP AX
-TEXT ·morestack11+0(SB),7,$0
+TEXT runtime·morestack11(SB),7,$0
get_tls(CX)
MOVQ m(CX), BX
MOVQ AX, m_moreframe(BX)
- MOVQ $morestack+0(SB), AX
+ MOVQ $runtime·morestack(SB), AX
JMP AX
// subcases of morestack01
// with const of 8,16,...48
-TEXT ·morestack8(SB),7,$0
+TEXT runtime·morestack8(SB),7,$0
PUSHQ $1
- MOVQ $·morestackx(SB), AX
+ MOVQ $morestack<>(SB), AX
JMP AX
-TEXT ·morestack16(SB),7,$0
+TEXT runtime·morestack16(SB),7,$0
PUSHQ $2
- MOVQ $·morestackx(SB), AX
+ MOVQ $morestack<>(SB), AX
JMP AX
-TEXT ·morestack24(SB),7,$0
+TEXT runtime·morestack24(SB),7,$0
PUSHQ $3
- MOVQ $·morestackx(SB), AX
+ MOVQ $morestack<>(SB), AX
JMP AX
-TEXT ·morestack32(SB),7,$0
+TEXT runtime·morestack32(SB),7,$0
PUSHQ $4
- MOVQ $·morestackx(SB), AX
+ MOVQ $morestack<>(SB), AX
JMP AX
-TEXT ·morestack40(SB),7,$0
+TEXT runtime·morestack40(SB),7,$0
PUSHQ $5
- MOVQ $·morestackx(SB), AX
+ MOVQ $morestack<>(SB), AX
JMP AX
-TEXT ·morestack48(SB),7,$0
+TEXT runtime·morestack48(SB),7,$0
PUSHQ $6
- MOVQ $·morestackx(SB), AX
+ MOVQ $morestack<>(SB), AX
JMP AX
-TEXT ·morestackx(SB),7,$0
+TEXT morestack<>(SB),7,$0
get_tls(CX)
MOVQ m(CX), BX
POPQ AX
SHLQ $35, AX
MOVQ AX, m_moreframe(BX)
- MOVQ $morestack(SB), AX
+ MOVQ $runtime·morestack(SB), AX
JMP AX
// bool cas(int32 *val, int32 old, int32 new)
// return 1;
// } else
// return 0;
-TEXT cas(SB), 7, $0
+TEXT runtime·cas(SB), 7, $0
MOVQ 8(SP), BX
MOVL 16(SP), AX
MOVL 20(SP), CX
// 1. pop the caller
// 2. sub 5 bytes from the caller's return
// 3. jmp to the argument
-TEXT jmpdefer(SB), 7, $0
+TEXT runtime·jmpdefer(SB), 7, $0
MOVQ 8(SP), AX // fn
MOVQ 16(SP), BX // caller sp
LEAQ -8(BX), SP // caller sp after CALL
// runcgo(void(*fn)(void*), void *arg)
// Call fn(arg) on the scheduler stack,
// aligned appropriately for the gcc ABI.
-TEXT runcgo(SB),7,$32
+TEXT runtime·runcgo(SB),7,$32
MOVQ fn+0(FP), R12
MOVQ arg+8(FP), R13
MOVQ SP, CX
// runcgocallback(G *g1, void* sp, void (*fn)(void))
// Switch to g1 and sp, call fn, switch back. fn's arguments are on
// the new stack.
-TEXT runcgocallback(SB),7,$48
+TEXT runtime·runcgocallback(SB),7,$48
MOVQ g1+0(FP), DX
MOVQ sp+8(FP), AX
MOVQ fp+16(FP), BX
RET
// check that SP is in range [g->stackbase, g->stackguard)
-TEXT stackcheck(SB), 7, $0
+TEXT runtime·stackcheck(SB), 7, $0
get_tls(CX)
MOVQ g(CX), AX
CMPQ g_stackbase(AX), SP
INT $3
RET
-TEXT ·memclr(SB),7,$0
+TEXT runtime·memclr(SB),7,$0
MOVQ 8(SP), DI // arg 1 addr
MOVL 16(SP), CX // arg 2 count
ADDL $7, CX
STOSQ
RET
-TEXT ·getcallerpc+0(SB),7,$0
+TEXT runtime·getcallerpc(SB),7,$0
MOVQ x+0(FP),AX // addr of first arg
MOVQ -8(AX),AX // get calling pc
RET
-TEXT ·setcallerpc+0(SB),7,$0
+TEXT runtime·setcallerpc(SB),7,$0
MOVQ x+0(FP),AX // addr of first arg
MOVQ x+8(FP), BX
MOVQ BX, -8(AX) // set calling pc
RET
-TEXT getcallersp(SB),7,$0
+TEXT runtime·getcallersp(SB),7,$0
MOVQ sp+0(FP), AX
RET
-GLOBL initcgo(SB), $8
-GLOBL tls0(SB), $64
+GLOBL runtime·tls0(SB), $64
// fn func(arg0, arg1, arg2 *ptr, callerpc uintptr, xxx) yyy,
// arg0, arg1, arg2 *ptr) (func(xxx) yyy)
void
-·closure(int32 siz, byte *fn, byte *arg0)
+runtime·closure(int32 siz, byte *fn, byte *arg0)
{
byte *p, *q, **ret;
int32 i, n;
int64 pcrel;
if(siz < 0 || siz%8 != 0)
- throw("bad closure size");
+ runtime·throw("bad closure size");
ret = (byte**)((byte*)&arg0 + siz);
if(siz > 100) {
// TODO(rsc): implement stack growth preamble?
- throw("closure too big");
+ runtime·throw("closure too big");
}
// compute size of new fn.
if(n%8)
n += 8 - n%8;
- p = mal(n);
+ p = runtime·mal(n);
*ret = p;
q = p + n - siz;
if(siz > 0) {
- mcpy(q, (byte*)&arg0, siz);
+ runtime·mcpy(q, (byte*)&arg0, siz);
// SUBQ $siz, SP
*p++ = 0x48;
*p++ = 0xc3;
if(p > q)
- throw("bad math in sys.closure");
+ runtime·throw("bad math in sys.closure");
}
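At this point in the runtime's history the compilers materialize a closure by calling this helper, which copies the captured words next to a small generated stub. A hedged example of Go source that would take this path (the helper itself is never called directly):

    package main

    import "fmt"

    func adder(n int) func(int) int {
            // the compiler emits a call to the closure helper above, which copies
            // the captured n next to a small stub of generated machine code
            return func(x int) int { return x + n }
    }

    func main() {
            add3 := adder(3)
            fmt.Println(add3(4)) // 7
    }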
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
- TEXT memmove(SB), 7, $0
+TEXT runtime·memmove(SB), 7, $0
MOVQ to+0(FP), DI
MOVQ fr+8(FP), SI
#include "malloc.h"
static uintptr isclosureentry(uintptr);
-void ·deferproc(void);
-void ·newproc(void);
+void runtime·deferproc(void);
+void runtime·newproc(void);
// This code is also used for the 386 tracebacks.
// Use uintptr for an appropriate word-sized integer.
}
nascent = 0;
- if(pc0 == g->sched.pc && sp == g->sched.sp && pc0 == (byte*)goexit) {
+ if(pc0 == g->sched.pc && sp == g->sched.sp && pc0 == (byte*)runtime·goexit) {
// Hasn't started yet. g->sched is set up for goexit
// but goroutine will start at g->entry.
nascent = 1;
n = 0;
stk = (Stktop*)g->stackbase;
for(iter = 0; iter < 100 && n < m; iter++) { // iter avoids looping forever
- if(pc == (uintptr)·lessstack) {
+ if(pc == (uintptr)runtime·lessstack) {
// Hit top of stack segment. Unwind to next segment.
pc = (uintptr)stk->gobuf.pc;
sp = stk->gobuf.sp;
continue;
}
- if(pc <= 0x1000 || (f = findfunc(pc)) == nil) {
+ if(pc <= 0x1000 || (f = runtime·findfunc(pc)) == nil) {
// Dangerous, but worthwhile: see if this is a closure:
// ADDQ $wwxxyyzz, SP; RET
// [48] 81 c4 zz yy xx ww c3
// The 0x48 byte is only on amd64.
p = (byte*)pc;
// We check p < p+8 to avoid wrapping and faulting if we lose track.
- if(mheap.min < p && p < p+8 && p+8 < mheap.max && // pointer in allocated memory
+ if(runtime·mheap.min < p && p < p+8 && p+8 < runtime·mheap.max && // pointer in allocated memory
(sizeof(uintptr) != 8 || *p++ == 0x48) && // skip 0x48 byte on amd64
p[0] == 0x81 && p[1] == 0xc4 && p[6] == 0xc3) {
sp += *(uint32*)(p+2);
// Print during crash.
// main+0xf /home/rsc/go/src/runtime/x.go:23
// main(0x1, 0x2, 0x3)
- printf("%S", f->name);
+ runtime·printf("%S", f->name);
if(pc > f->entry)
- printf("+%p", (uintptr)(pc - f->entry));
+ runtime·printf("+%p", (uintptr)(pc - f->entry));
tracepc = pc; // back up to CALL instruction for funcline.
if(n > 0 && pc > f->entry)
tracepc--;
- printf(" %S:%d\n", f->src, funcline(f, tracepc));
- printf("\t%S(", f->name);
+ runtime·printf(" %S:%d\n", f->src, runtime·funcline(f, tracepc));
+ runtime·printf("\t%S(", f->name);
fp = (uintptr*)sp;
if(f->frame < sizeof(uintptr))
fp++;
fp += f->frame/sizeof(uintptr);
for(i = 0; i < f->args; i++) {
if(i != 0)
- prints(", ");
- ·printhex(fp[i]);
+ runtime·prints(", ");
+ runtime·printhex(fp[i]);
if(i >= 4) {
- prints(", ...");
+ runtime·prints(", ...");
break;
}
}
- prints(")\n");
+ runtime·prints(")\n");
n++;
}
else
sp += f->frame;
pc = *((uintptr*)sp - 1);
- if(f->entry == (uintptr)·deferproc || f->entry == (uintptr)·newproc)
+ if(f->entry == (uintptr)runtime·deferproc || f->entry == (uintptr)runtime·newproc)
sp += 2*sizeof(uintptr);
}
return n;
}
void
-traceback(byte *pc0, byte *sp, byte*, G *g)
+runtime·traceback(byte *pc0, byte *sp, byte*, G *g)
{
gentraceback(pc0, sp, g, 0, nil, 100);
}
int32
-callers(int32 skip, uintptr *pcbuf, int32 m)
+runtime·callers(int32 skip, uintptr *pcbuf, int32 m)
{
byte *pc, *sp;
// our caller's pc, sp.
sp = (byte*)&skip;
- pc = ·getcallerpc(&skip);
+ pc = runtime·getcallerpc(&skip);
return gentraceback(pc, sp, g, skip, pcbuf, m);
}
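gentraceback is shared by the crash traceback printer and by the exported callers entry point; the latter is what the public runtime.Callers / runtime.Caller API sits on. A small illustrative use:

    package main

    import (
            "fmt"
            "runtime"
    )

    func main() {
            pcs := make([]uintptr, 8)
            n := runtime.Callers(0, pcs) // backed by the callers/gentraceback pair above
            fmt.Println("captured", n, "frames")

            _, file, line, ok := runtime.Caller(0)
            fmt.Println(file, line, ok)
    }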
int32 i, siz;
p = (byte*)pc;
- if(p < mheap.min || p+32 > mheap.max)
+ if(p < runtime·mheap.min || p+32 > runtime·mheap.max)
return 0;
// SUBQ $siz, SP
// set up m and g registers
// g is R10, m is R9
- MOVW $g0(SB), g
- MOVW $m0(SB), m
+ MOVW $runtime·g0(SB), g
+ MOVW $runtime·m0(SB), m
// save m->g0 = g0
MOVW g, m_g0(m)
MOVW $(-8192+104)(R13), R0
MOVW R0, g_stackguard(g) // (w 104b guard)
MOVW R13, g_stackbase(g)
- BL emptyfunc(SB) // fault if stack check is wrong
+ BL runtime·emptyfunc(SB) // fault if stack check is wrong
- BL check(SB)
+ BL runtime·check(SB)
// saved argc, argv
MOVW 120(R13), R0
MOVW R0, 4(R13)
MOVW 124(R13), R1
MOVW R1, 8(R13)
- BL args(SB)
- BL osinit(SB)
- BL schedinit(SB)
+ BL runtime·args(SB)
+ BL runtime·osinit(SB)
+ BL runtime·schedinit(SB)
// create a new goroutine to start program
- MOVW $mainstart(SB), R0
+ MOVW $runtime·mainstart(SB), R0
MOVW.W R0, -4(R13)
MOVW $8, R0
MOVW.W R0, -4(R13)
MOVW $0, R0
MOVW.W R0, -4(R13) // push $0 as guard
- BL ·newproc(SB)
+ BL runtime·newproc(SB)
MOVW $12(R13), R13 // pop args and LR
// start this M
- BL mstart(SB)
+ BL runtime·mstart(SB)
MOVW $1234, R0
MOVW $1000, R1
MOVW R0, (R1) // fail hard
- B _dep_dummy(SB) // Never reached
+ B runtime·_dep_dummy(SB) // Never reached
-TEXT mainstart(SB),7,$4
+TEXT runtime·mainstart(SB),7,$4
BL main·init(SB)
- BL initdone(SB)
+ BL runtime·initdone(SB)
EOR R0, R0
MOVW R0, 0(R13)
BL main·main(SB)
MOVW $0, R0
MOVW R0, 4(SP)
- BL exit(SB)
+ BL runtime·exit(SB)
MOVW $1234, R0
MOVW $1001, R1
MOVW R0, (R1) // fail hard
// TODO(kaib): remove these once I actually understand how the linker removes symbols
// pull in dummy dependencies
-TEXT _dep_dummy(SB),7,$0
+TEXT runtime·_dep_dummy(SB),7,$0
BL _div(SB)
BL _divu(SB)
BL _mod(SB)
BL _modu(SB)
BL _sfloat(SB)
-TEXT breakpoint(SB),7,$0
+TEXT runtime·breakpoint(SB),7,$0
// no breakpoint yet; let program exit
RET
// uintptr gosave(Gobuf*)
// save state in Gobuf; setjmp
-TEXT gosave(SB), 7, $-4
+TEXT runtime·gosave(SB), 7, $-4
MOVW 0(FP), R0 // gobuf
MOVW SP, gobuf_sp(R0)
MOVW LR, gobuf_pc(R0)
// void gogo(Gobuf*, uintptr)
// restore state from Gobuf; longjmp
-TEXT gogo(SB), 7, $-4
+TEXT runtime·gogo(SB), 7, $-4
MOVW 0(FP), R1 // gobuf
MOVW 4(FP), R0 // return 2nd arg
MOVW gobuf_g(R1), g
// restore state from Gobuf but then call fn.
// (call fn, returning to state in Gobuf)
// using frame size $-4 means do not save LR on stack.
-TEXT gogocall(SB), 7, $-4
+TEXT runtime·gogocall(SB), 7, $-4
MOVW 0(FP), R0 // gobuf
MOVW 4(FP), R1 // fn
MOVW 8(FP), R2 // fp offset
// NB. we do not save R0 because we've forced 5c to pass all arguments
// on the stack.
// using frame size $-4 means do not save LR on stack.
-TEXT ·morestack(SB),7,$-4
+TEXT runtime·morestack(SB),7,$-4
// Cannot grow scheduler stack (m->g0).
MOVW m_g0(m), R4
CMP g, R4
- BL.EQ abort(SB)
+ BL.EQ runtime·abort(SB)
// Save in m.
MOVW R1, m_moreframe(m)
// Call newstack on m's scheduling stack.
MOVW m_g0(m), g
MOVW (m_sched+gobuf_sp)(m), SP
- B newstack(SB)
+ B runtime·newstack(SB)
// Called from reflection library. Mimics morestack,
// reuses stack growth code to create a frame
// Call newstack on m's scheduling stack.
MOVW m_g0(m), g
MOVW (m_sched+gobuf_sp)(m), SP
- B newstack(SB)
+ B runtime·newstack(SB)
// Return point when leaving stack.
// using frame size $-4 means do not save LR on stack.
-TEXT ·lessstack(SB), 7, $-4
+TEXT runtime·lessstack(SB), 7, $-4
// Save return value in m->cret
MOVW R0, m_cret(m)
// Call oldstack on m's scheduling stack.
MOVW m_g0(m), g
MOVW (m_sched+gobuf_sp)(m), SP
- B oldstack(SB)
+ B runtime·oldstack(SB)
// void jmpdefer(fn, sp);
// called from deferreturn.
// 1. grab stored LR for caller
// 2. sub 4 bytes to get back to BL deferreturn
// 3. B to fn
-TEXT jmpdefer(SB), 7, $0
+TEXT runtime·jmpdefer(SB), 7, $0
MOVW 0(SP), LR
MOVW $-4(LR), LR // BL deferreturn
MOVW 4(SP), R0 // fn
MOVW 8(SP), SP
B (R0)
-TEXT ·memclr(SB),7,$20
+TEXT runtime·memclr(SB),7,$20
MOVW 0(FP), R0
MOVW $0, R1 // c = 0
MOVW R1, -16(SP)
MOVW R1, -12(SP)
MOVW m, -8(SP) // Save m and g
MOVW g, -4(SP)
- BL memset(SB)
+ BL runtime·memset(SB)
MOVW -8(SP), m // Restore m and g, memset clobbers them
MOVW -4(SP), g
RET
-TEXT ·getcallerpc+0(SB),7,$-4
+TEXT runtime·getcallerpc(SB),7,$-4
MOVW 0(SP), R0
RET
-TEXT ·setcallerpc+0(SB),7,$-4
+TEXT runtime·setcallerpc(SB),7,$-4
MOVW x+4(FP), R0
MOVW R0, 0(SP)
RET
-TEXT getcallersp(SB),7,$-4
+TEXT runtime·getcallersp(SB),7,$-4
MOVW 0(FP), R0
MOVW $-4(R0), R0
RET
// Just call fn(arg), but first align the stack
// appropriately for the gcc ABI.
// TODO(kaib): figure out the arm-gcc ABI
-TEXT runcgo(SB),7,$16
- BL abort(SB)
+TEXT runtime·runcgo(SB),7,$16
+ BL runtime·abort(SB)
// MOVL fn+0(FP), AX
// MOVL arg+4(FP), BX
// MOVL SP, CX
// MOVL 4(SP), SP
// RET
-TEXT emptyfunc(SB),0,$0
+TEXT runtime·emptyfunc(SB),0,$0
RET
-TEXT abort(SB),7,$-4
+TEXT runtime·abort(SB),7,$-4
MOVW $0, R0
MOVW (R0), R1
-TEXT runcgocallback(SB),7,$0
+TEXT runtime·runcgocallback(SB),7,$0
MOVW $0, R0
MOVW (R0), R1
// }else
// return 0;
-TEXT cas(SB),7,$0
+TEXT runtime·cas(SB),7,$0
MOVW 0(FP), R0 // *val
MOVW 4(FP), R1 // old
MOVW 8(FP), R2 // new
MOVW $1, R3
- MOVW $cas_mutex(SB), R4
+ MOVW $runtime·cas_mutex(SB), R4
l:
SWPW (R4), R3 // acquire mutex
CMP $0, R3
MOVW $0, R0
RET
-DATA cas_mutex(SB)/4, $0
-GLOBL cas_mutex(SB), $4
+DATA runtime·cas_mutex(SB)/4, $0
+GLOBL runtime·cas_mutex(SB), $4
// }else
// return 0;
-TEXT cas(SB),7,$0
+TEXT cas(SB),7,$0
MOVW 0(FP), R1 // *val
MOVW 4(FP), R2 // old
MOVW 8(FP), R3 // new
#pragma textflag 7
void
-·closure(int32 siz, byte *fn, byte *arg0)
+runtime·closure(int32 siz, byte *fn, byte *arg0)
{
byte *p, *q, **ret;
uint32 *pc;
int32 n;
if(siz < 0 || siz%4 != 0)
- throw("bad closure size");
+ runtime·throw("bad closure size");
ret = (byte**)((byte*)&arg0 + siz);
if(siz > 100) {
// TODO(kaib): implement stack growth preamble?
- throw("closure too big");
+ runtime·throw("closure too big");
}
// size of new fn.
// store args aligned after code, so gc can find them.
n += siz;
- p = mal(n);
+ p = runtime·mal(n);
*ret = p;
q = p + n - siz;
*pc++ = 0xe52de000 | (siz + 4);
if(siz > 0) {
- mcpy(q, (byte*)&arg0, siz);
+ runtime·mcpy(q, (byte*)&arg0, siz);
// MOVW $vars(PC), R0
*pc = 0xe28f0000 | (int32)(q - (byte*)pc - 8);
p = (byte*)pc;
if(p > q)
- throw("bad math in sys.closure");
+ runtime·throw("bad math in sys.closure");
- cacheflush(*ret, q+siz);
+ runtime·cacheflush(*ret, q+siz);
}
TMP1 = 4
// TODO(kaib): This can be done with the existing registers if LR is re-used. Same for memset.
-TEXT memmove(SB), 7, $8
+TEXT runtime·memmove(SB), 7, $8
// save g and m
MOVW R9, 4(R13)
MOVW R10, 8(R13)
// TODO(kaib): memset clobbers R9 and R10 (m and g). This makes the
// registers unpredictable if (when) memset SIGSEGV's. Fix it by
// moving the R4-R11 register bank.
-TEXT memset(SB), $0
+TEXT runtime·memset(SB), $0
MOVW R0, R(TO)
MOVW data+4(FP), R(4)
MOVW n+8(FP), R(N)
fabort(void)
{
if (1) {
- printf("Unsupported floating point instruction\n");
- abort();
+ runtime·printf("Unsupported floating point instruction\n");
+ runtime·abort();
}
}
case 0x80:
return 1;
default:
- fabort();
+ runtime·fabort();
}
return 0;
}
{
uint32 i;
for (i = 0; i < 8; i++) {
- printf("\tf%d:\t%X\n", i, m->freg[i]);
+ runtime·printf("\tf%d:\t%X\n", i, m->freg[i]);
}
}
{
uint32 x;
- ·f64to32c(d, &x);
+ runtime·f64to32c(d, &x);
return x;
}
{
uint64 x;
- ·f32to64c(s, &x);
+ runtime·f32to64c(s, &x);
return x;
}
lhs = i>>16 & 7;
rhs = i & 15;
- prec = precision(i);
+ prec = runtime·precision(i);
// if (prec != 1)
// goto undef;
if (unary) {
switch (opcode) {
case 0: // mvf
- fd = frhs(rhs);
+ fd = runtime·frhs(rhs);
if(prec == 0)
- fd = s2d(d2s(fd));
+ fd = runtime·s2d(d2s(fd));
m->freg[dest] = fd;
goto ret;
default:
}
} else {
l = m->freg[lhs];
- r = frhs(rhs);
+ r = runtime·frhs(rhs);
switch (opcode) {
default:
goto undef;
case 0:
- ·fadd64c(l, r, &m->freg[dest]);
+ runtime·fadd64c(l, r, &m->freg[dest]);
break;
case 1:
- ·fmul64c(l, r, &m->freg[dest]);
+ runtime·fmul64c(l, r, &m->freg[dest]);
break;
case 2:
- ·fsub64c(l, r, &m->freg[dest]);
+ runtime·fsub64c(l, r, &m->freg[dest]);
break;
case 4:
- ·fdiv64c(l, r, &m->freg[dest]);
+ runtime·fdiv64c(l, r, &m->freg[dest]);
break;
}
goto ret;
ret:
if (trace || doabort) {
- printf(" %p %x\t%s%s\tf%d, ", pc, *pc, opnames[opcode | unary<<4],
+ runtime·printf(" %p %x\t%s%s\tf%d, ", pc, *pc, opnames[opcode | unary<<4],
fpprec[prec], dest);
if (!unary)
- printf("f%d, ", lhs);
+ runtime·printf("f%d, ", lhs);
if (rhs & 0x8)
- printf("#%s\n", fpconst[rhs&0x7]);
+ runtime·printf("#%s\n", fpconst[rhs&0x7]);
else
- printf("f%d\n", rhs&0x7);
- fprint();
+ runtime·printf("f%d\n", rhs&0x7);
+ runtime·fprint();
}
if (doabort)
- fabort();
+ runtime·fabort();
}
#define CPSR 14
rhs = i & 0xf;
l = m->freg[lhs];
- r = frhs(rhs);
- ·fcmp64c(l, r, &cmp, &nan);
+ r = runtime·frhs(rhs);
+ runtime·fcmp64c(l, r, &cmp, &nan);
if (nan)
flags = FLAGS_C | FLAGS_V;
else if (cmp == 0)
flags = FLAGS_C;
if (trace) {
- printf(" %p %x\tcmf\tf%d, ", pc, *pc, lhs);
+ runtime·printf(" %p %x\tcmf\tf%d, ", pc, *pc, lhs);
if (rhs & 0x8)
- printf("#%s\n", fpconst[rhs&0x7]);
+ runtime·printf("#%s\n", fpconst[rhs&0x7]);
else
- printf("f%d\n", rhs&0x7);
+ runtime·printf("f%d\n", rhs&0x7);
}
regs[CPSR] = regs[CPSR] & 0x0fffffff | flags;
}
if (tlen)
m->freg[freg] = *((uint64*)addr);
else
- m->freg[freg] = s2d(*((uint32*)addr));
+ m->freg[freg] = runtime·s2d(*((uint32*)addr));
else
if (tlen)
*((uint64*)addr) = m->freg[freg];
else
- *((uint32*)addr) = d2s(m->freg[freg]);
+ *((uint32*)addr) = runtime·d2s(m->freg[freg]);
goto ret;
undef:
ret:
if (trace || doabort) {
if (isload)
- printf(" %p %x\tldf", pc, *pc);
+ runtime·printf(" %p %x\tldf", pc, *pc);
else
- printf(" %p %x\tstf", pc, *pc);
- printf("%s\t\tf%d, %s%d(r%d)", fpprec[tlen], freg, ud ? "" : "-", offset, reg);
- printf("\t\t// %p", regs[reg] + (ud ? offset : -offset));
+ runtime·printf(" %p %x\tstf", pc, *pc);
+ runtime·printf("%s\t\tf%d, %s%d(r%d)", fpprec[tlen], freg, ud ? "" : "-", offset, reg);
+ runtime·printf("\t\t// %p", regs[reg] + (ud ? offset : -offset));
if (coproc != 1 || p != 1 || wb != 0)
- printf(" coproc: %d pre: %d wb %d", coproc, p, wb);
- printf("\n");
- fprint();
+ runtime·printf(" coproc: %d pre: %d wb %d", coproc, p, wb);
+ runtime·printf("\n");
+ runtime·fprint();
}
if (doabort)
- fabort();
+ runtime·fabort();
}
static void
toarm = i>>20 & 0x1;
freg = i>>16 & 0x7;
reg = i>>12 & 0xf;
- prec = precision(i);
+ prec = runtime·precision(i);
if (toarm) { // fix
f0 = m->freg[freg];
- ·f64tointc(f0, &val, &ok);
+ runtime·f64tointc(f0, &val, &ok);
if (!ok || (int32)val != val)
val = 0;
regs[reg] = val;
} else { // flt
- ·fintto64c((int32)regs[reg], &f0);
+ runtime·fintto64c((int32)regs[reg], &f0);
m->freg[freg] = f0;
}
goto ret;
ret:
if (trace || doabort) {
if (toarm)
- printf(" %p %x\tfix%s\t\tr%d, f%d\n", pc, *pc, fpprec[prec], reg, freg);
+ runtime·printf(" %p %x\tfix%s\t\tr%d, f%d\n", pc, *pc, fpprec[prec], reg, freg);
else
- printf(" %p %x\tflt%s\t\tf%d, r%d\n", pc, *pc, fpprec[prec], freg, reg);
- fprint();
+ runtime·printf(" %p %x\tflt%s\t\tf%d, r%d\n", pc, *pc, fpprec[prec], freg, reg);
+ runtime·fprint();
}
if (doabort)
- fabort();
+ runtime·fabort();
}
// returns number of words that the fp instruction is occupying, 0 if next instruction isn't float.
c = i >> 25 & 7;
switch(c) {
case 6: // 110
- loadstore(pc, regs);
+ runtime·loadstore(pc, regs);
return 1;
case 7: // 111
if (i>>24 & 1)
if (i>>4 & 1) { //data transfer
if ((i&0x00f0ff00) == 0x0090f100) {
- compare(pc, regs);
+ runtime·compare(pc, regs);
} else if ((i&0x00e00f10) == 0x00000110) {
- fltfix(pc, regs);
+ runtime·fltfix(pc, regs);
} else {
- printf(" %p %x\t// case 7 fail\n", pc, i);
- fabort();
+ runtime·printf(" %p %x\t// case 7 fail\n", pc, i);
+ runtime·fabort();
}
} else {
- dataprocess(pc);
+ runtime·dataprocess(pc);
}
return 1;
}
#pragma textflag 7
uint32*
-_sfloat2(uint32 *lr, uint32 r0)
+runtime·_sfloat2(uint32 *lr, uint32 r0)
{
uint32 skip;
- while(skip = stepflt(lr, &r0))
+ while(skip = runtime·stepflt(lr, &r0))
lr += skip;
return lr;
}
lr = (uintptr)lr0;
// If the PC is goexit, it hasn't started yet.
- if(pc == (uintptr)goexit) {
+ if(pc == (uintptr)runtime·goexit) {
pc = (uintptr)g->entry;
- lr = (uintptr)goexit;
+ lr = (uintptr)runtime·goexit;
}
// If the PC is zero, it's likely a nil function call.
n = 0;
stk = (Stktop*)g->stackbase;
for(iter = 0; iter < 100 && n < m; iter++) { // iter avoids looping forever
- if(pc == (uintptr)·lessstack) {
+ if(pc == (uintptr)runtime·lessstack) {
// Hit top of stack segment. Unwind to next segment.
pc = (uintptr)stk->gobuf.pc;
sp = stk->gobuf.sp;
stk = (Stktop*)stk->stackbase;
continue;
}
- if(pc <= 0x1000 || (f = findfunc(pc-4)) == nil) {
+ if(pc <= 0x1000 || (f = runtime·findfunc(pc-4)) == nil) {
// TODO: Check for closure.
break;
}
// Print during crash.
// main+0xf /home/rsc/go/src/runtime/x.go:23
// main(0x1, 0x2, 0x3)
- printf("%S", f->name);
+ runtime·printf("%S", f->name);
if(pc > f->entry)
- printf("+%p", (uintptr)(pc - f->entry));
+ runtime·printf("+%p", (uintptr)(pc - f->entry));
tracepc = pc; // back up to CALL instruction for funcline.
if(n > 0 && pc > f->entry)
tracepc -= sizeof(uintptr);
- printf(" %S:%d\n", f->src, funcline(f, tracepc));
- printf("\t%S(", f->name);
+ runtime·printf(" %S:%d\n", f->src, runtime·funcline(f, tracepc));
+ runtime·printf("\t%S(", f->name);
for(i = 0; i < f->args; i++) {
if(i != 0)
- prints(", ");
- ·printhex(((uintptr*)sp)[1+i]);
+ runtime·prints(", ");
+ runtime·printhex(((uintptr*)sp)[1+i]);
if(i >= 4) {
- prints(", ...");
+ runtime·prints(", ...");
break;
}
}
- prints(")\n");
+ runtime·prints(")\n");
n++;
}
}
void
-traceback(byte *pc0, byte *sp, byte *lr, G *g)
+runtime·traceback(byte *pc0, byte *sp, byte *lr, G *g)
{
gentraceback(pc0, sp, lr, g, 0, nil, 100);
}
// func caller(n int) (pc uintptr, file string, line int, ok bool)
int32
-callers(int32 skip, uintptr *pcbuf, int32 m)
+runtime·callers(int32 skip, uintptr *pcbuf, int32 m)
{
byte *pc, *sp;
- sp = getcallersp(&skip);
- pc = ·getcallerpc(&skip);
+ sp = runtime·getcallersp(&skip);
+ pc = runtime·getcallerpc(&skip);
return gentraceback(pc, sp, 0, g, skip, pcbuf, m);
}
/* replaced use of R10 by R11 because the former can be the data segment base register */
-TEXT _mulv(SB), $0
+TEXT _mulv(SB), $0
MOVW 0(FP), R0
MOVW 4(FP), R2 /* l0 */
MOVW 8(FP), R11 /* h0 */
CC = 3
TMP = 11
-TEXT save<>(SB), 7, $0
+TEXT save<>(SB), 7, $0
MOVW R(Q), 0(FP)
MOVW R(N), 4(FP)
MOVW R(D), 8(FP)
/* MOVW -1(R(D)), R(TMP) /* divide by zero fault */
s1: RET
-TEXT rest<>(SB), 7, $0
+TEXT rest<>(SB), 7, $0
MOVW 0(FP), R(Q)
MOVW 4(FP), R(N)
MOVW 8(FP), R(D)
ADD $20, R13
B (R14)
-TEXT div<>(SB), 7, $0
+TEXT div<>(SB), 7, $0
MOVW $32, R(CC)
/*
* skip zeros 8-at-a-time
BNE loop
RET
-TEXT _div(SB), 7, $16
+TEXT _div(SB), 7, $16
BL save<>(SB)
CMP $0, R(Q)
BGE d1
RSB $0, R(Q), R(TMP)
B out
-TEXT _mod(SB), 7, $16
+TEXT _mod(SB), 7, $16
BL save<>(SB)
CMP $0, R(D)
RSB.LT $0, R(D), R(D)
MOVW R(N), R(TMP)
B out
-TEXT _divu(SB), 7, $16
+TEXT _divu(SB), 7, $16
BL save<>(SB)
BL div<>(SB)
MOVW R(Q), R(TMP)
B out
-TEXT _modu(SB), 7, $16
+TEXT _modu(SB), 7, $16
BL save<>(SB)
BL div<>(SB)
MOVW R(N), R(TMP)
// trampoline for _sfloat2. passes LR as arg0 and
// saves registers R0-R13 and CPSR on the stack. R0-R12 and CPSR flags can
// be changed by _sfloat2.
-TEXT _sfloat(SB), 7, $64 // 4 arg + 14*4 saved regs + cpsr
+TEXT _sfloat(SB), 7, $64 // 4 arg + 14*4 saved regs + cpsr
MOVW R14, 4(R13)
MOVW R0, 8(R13)
MOVW $12(R13), R0
// THE SOFTWARE.
// declared here to avoid include of runtime.h
-void panicstring(char*);
+void runtime·panicstring(char*);
typedef unsigned long ulong;
typedef unsigned int uint;
#define SIGN(n) (1UL<<(n-1))
void
-panicdivide(void)
+runtime·panicdivide(void)
{
- panicstring("integer divide by zero");
+ runtime·panicstring("integer divide by zero");
}
typedef struct Vlong Vlong;
};
};
-void abort(void);
+void runtime·abort(void);
void
_addv(Vlong *r, Vlong a, Vlong b)
}
void
-·float64toint64(double d, Vlong y)
+runtime·float64toint64(double d, Vlong y)
{
_d2v(&y, d);
}
}
void
-·int64tofloat64(Vlong y, double d)
+runtime·int64tofloat64(Vlong y, double d)
{
d = _v2d(y);
}
* get a divide by zero
*/
if(denlo==0 && denhi==0) {
- panicdivide();
+ runtime·panicdivide();
}
/*
}
void
-·uint64div(Vlong n, Vlong d, Vlong q)
+runtime·uint64div(Vlong n, Vlong d, Vlong q)
{
_divvu(&q, n, d);
}
}
void
-·uint64mod(Vlong n, Vlong d, Vlong q)
+runtime·uint64mod(Vlong n, Vlong d, Vlong q)
{
_modvu(&q, n, d);
}
}
void
-·int64div(Vlong n, Vlong d, Vlong q)
+runtime·int64div(Vlong n, Vlong d, Vlong q)
{
_divv(&q, n, d);
}
}
void
-·int64mod(Vlong n, Vlong d, Vlong q)
+runtime·int64mod(Vlong n, Vlong d, Vlong q)
{
_modv(&q, n, d);
}
u = *ret;
switch(type) {
default:
- abort();
+ runtime·abort();
break;
case 1: /* schar */
void *initcgo; /* filled in by dynamic linker when Cgo is available */
int64 ncgocall;
-void ·entersyscall(void);
-void ·exitsyscall(void);
+void runtime·entersyscall(void);
+void runtime·exitsyscall(void);
void
-cgocall(void (*fn)(void*), void *arg)
+runtime·cgocall(void (*fn)(void*), void *arg)
{
G *oldlock;
if(initcgo == nil)
- throw("cgocall unavailable");
+ runtime·throw("cgocall unavailable");
ncgocall++;
* M to run goroutines while we are in the
* foreign code.
*/
- ·entersyscall();
- runcgo(fn, arg);
- ·exitsyscall();
+ runtime·entersyscall();
+ runtime·runcgo(fn, arg);
+ runtime·exitsyscall();
m->lockedg = oldlock;
if(oldlock == nil)
// arguments back where they came from, and finally returns to the old
// stack.
void
-cgocallback(void (*fn)(void), void *arg, int32 argsize)
+runtime·cgocallback(void (*fn)(void), void *arg, int32 argsize)
{
Gobuf oldsched, oldg1sched;
G *g1;
void *sp;
if(g != m->g0)
- throw("bad g in cgocallback");
+ runtime·throw("bad g in cgocallback");
g1 = m->curg;
oldsched = m->sched;
oldg1sched = g1->sched;
- startcgocallback(g1);
+ runtime·startcgocallback(g1);
sp = g1->sched.sp - argsize;
if(sp < g1->stackguard)
- throw("g stack overflow in cgocallback");
- mcpy(sp, arg, argsize);
+ runtime·throw("g stack overflow in cgocallback");
+ runtime·mcpy(sp, arg, argsize);
- runcgocallback(g1, sp, fn);
+ runtime·runcgocallback(g1, sp, fn);
- mcpy(arg, sp, argsize);
+ runtime·mcpy(arg, sp, argsize);
- endcgocallback(g1);
+ runtime·endcgocallback(g1);
m->sched = oldsched;
g1->sched = oldg1sched;
}
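cgocall is the Go-to-C direction and cgocallback the C-to-Go direction; entersyscall/exitsyscall bracket the foreign call so the scheduler can keep running other goroutines. Every cgo call site funnels through here. A minimal sketch of one, assuming nothing beyond a static helper defined in the preamble:

    package main

    /*
    static int add(int a, int b) { return a + b; }
    */
    import "C"

    import "fmt"

    func main() {
            // every call into C goes through runtime·cgocall above, bracketed
            // by entersyscall/exitsyscall so other goroutines keep running
            fmt.Println(int(C.add(40, 2)))
    }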
void
-·Cgocalls(int64 ret)
+runtime·Cgocalls(int64 ret)
{
ret = ncgocall;
FLUSH(&ret);
void (*_cgo_free)(void*);
void*
-cmalloc(uintptr n)
+runtime·cmalloc(uintptr n)
{
struct a {
uint64 n;
a.n = n;
a.ret = nil;
- cgocall(_cgo_malloc, &a);
+ runtime·cgocall(_cgo_malloc, &a);
return a.ret;
}
void
-cfree(void *p)
+runtime·cfree(void *p)
{
- cgocall(_cgo_free, p);
+ runtime·cgocall(_cgo_free, p);
}
* Cgo interface.
*/
-void cgocall(void (*fn)(void*), void*);
-void cgocallback(void (*fn)(void), void*, int32);
-void *cmalloc(uintptr);
-void cfree(void*);
+void runtime·cgocall(void (*fn)(void*), void*);
+void runtime·cgocallback(void (*fn)(void), void*, int32);
+void *runtime·cmalloc(uintptr);
+void runtime·cfree(void*);
static void destroychan(Hchan*);
Hchan*
-makechan(Type *elem, int64 hint)
+runtime·makechan_c(Type *elem, int64 hint)
{
Hchan *c;
int32 i;
if(hint < 0 || (int32)hint != hint || hint > ((uintptr)-1) / elem->size)
- panicstring("makechan: size out of range");
+ runtime·panicstring("makechan: size out of range");
- if(elem->alg >= nelem(algarray)) {
- printf("chan(alg=%d)\n", elem->alg);
- throw("runtime.makechan: unsupported elem type");
+ if(elem->alg >= nelem(runtime·algarray)) {
+ runtime·printf("chan(alg=%d)\n", elem->alg);
+ runtime·throw("runtime.makechan: unsupported elem type");
}
- c = mal(sizeof(*c));
- addfinalizer(c, destroychan, 0);
+ c = runtime·mal(sizeof(*c));
+ runtime·addfinalizer(c, destroychan, 0);
c->elemsize = elem->size;
- c->elemalg = &algarray[elem->alg];
+ c->elemalg = &runtime·algarray[elem->alg];
c->elemalign = elem->align;
if(hint > 0) {
b = nil;
e = nil;
for(i=0; i<hint; i++) {
- d = mal(sizeof(*d) + c->elemsize - sizeof(d->elem));
+ d = runtime·mal(sizeof(*d) + c->elemsize - sizeof(d->elem));
if(e == nil)
e = d;
d->link = b;
}
if(debug)
- printf("makechan: chan=%p; elemsize=%D; elemalg=%d; elemalign=%d; dataqsiz=%d\n",
+ runtime·printf("makechan: chan=%p; elemsize=%D; elemalg=%d; elemalign=%d; dataqsiz=%d\n",
c, (int64)elem->size, elem->alg, elem->align, c->dataqsiz);
return c;
static void
destroychan(Hchan *c)
{
- destroylock(&c->Lock);
+ runtime·destroylock(&c->Lock);
}
// makechan(elem *Type, hint int64) (hchan *chan any);
void
-·makechan(Type *elem, int64 hint, Hchan *ret)
+runtime·makechan(Type *elem, int64 hint, Hchan *ret)
{
- ret = makechan(elem, hint);
+ ret = runtime·makechan_c(elem, hint);
FLUSH(&ret);
}
c->closed += Eincr;
if(c->closed & Emax) {
// Note that channel locks may still be held at this point.
- throw("too many operations on a closed channel");
+ runtime·throw("too many operations on a closed channel");
}
}
* the operation; we'll see that it's now closed.
*/
void
-chansend(Hchan *c, byte *ep, bool *pres)
+runtime·chansend(Hchan *c, byte *ep, bool *pres)
{
SudoG *sg;
G* gp;
if(c == nil)
- panicstring("send to nil channel");
+ runtime·panicstring("send to nil channel");
- if(gcwaiting)
- gosched();
+ if(runtime·gcwaiting)
+ runtime·gosched();
if(debug) {
- printf("chansend: chan=%p; elem=", c);
+ runtime·printf("chansend: chan=%p; elem=", c);
c->elemalg->print(c->elemsize, ep);
- prints("\n");
+ runtime·prints("\n");
}
- lock(c);
+ runtime·lock(c);
loop:
if(c->closed & Wclosed)
goto closed;
gp = sg->g;
gp->param = sg;
- unlock(c);
- ready(gp);
+ runtime·unlock(c);
+ runtime·ready(gp);
if(pres != nil)
*pres = true;
}
if(pres != nil) {
- unlock(c);
+ runtime·unlock(c);
*pres = false;
return;
}
g->param = nil;
g->status = Gwaiting;
enqueue(&c->sendq, sg);
- unlock(c);
- gosched();
+ runtime·unlock(c);
+ runtime·gosched();
- lock(c);
+ runtime·lock(c);
sg = g->param;
if(sg == nil)
goto loop;
freesg(c, sg);
- unlock(c);
+ runtime·unlock(c);
return;
asynch:
if(c->qcount >= c->dataqsiz) {
if(pres != nil) {
- unlock(c);
+ runtime·unlock(c);
*pres = false;
return;
}
sg = allocsg(c);
g->status = Gwaiting;
enqueue(&c->sendq, sg);
- unlock(c);
- gosched();
+ runtime·unlock(c);
+ runtime·gosched();
- lock(c);
+ runtime·lock(c);
goto asynch;
}
if(ep != nil)
if(sg != nil) {
gp = sg->g;
freesg(c, sg);
- unlock(c);
- ready(gp);
+ runtime·unlock(c);
+ runtime·ready(gp);
} else
- unlock(c);
+ runtime·unlock(c);
if(pres != nil)
*pres = true;
return;
incerr(c);
if(pres != nil)
*pres = true;
- unlock(c);
+ runtime·unlock(c);
}
void
-chanrecv(Hchan* c, byte *ep, bool* pres)
+runtime·chanrecv(Hchan* c, byte *ep, bool* pres)
{
SudoG *sg;
G *gp;
if(c == nil)
- panicstring("receive from nil channel");
+ runtime·panicstring("receive from nil channel");
- if(gcwaiting)
- gosched();
+ if(runtime·gcwaiting)
+ runtime·gosched();
if(debug)
- printf("chanrecv: chan=%p\n", c);
+ runtime·printf("chanrecv: chan=%p\n", c);
- lock(c);
+ runtime·lock(c);
loop:
if(c->dataqsiz > 0)
goto asynch;
gp = sg->g;
gp->param = sg;
- unlock(c);
- ready(gp);
+ runtime·unlock(c);
+ runtime·ready(gp);
if(pres != nil)
*pres = true;
}
if(pres != nil) {
- unlock(c);
+ runtime·unlock(c);
c->elemalg->copy(c->elemsize, ep, nil);
*pres = false;
return;
g->param = nil;
g->status = Gwaiting;
enqueue(&c->recvq, sg);
- unlock(c);
- gosched();
+ runtime·unlock(c);
+ runtime·gosched();
- lock(c);
+ runtime·lock(c);
sg = g->param;
if(sg == nil)
goto loop;
c->elemalg->copy(c->elemsize, ep, sg->elem);
c->elemalg->copy(c->elemsize, sg->elem, nil);
freesg(c, sg);
- unlock(c);
+ runtime·unlock(c);
return;
asynch:
goto closed;
if(pres != nil) {
- unlock(c);
+ runtime·unlock(c);
c->elemalg->copy(c->elemsize, ep, nil);
*pres = false;
return;
sg = allocsg(c);
g->status = Gwaiting;
enqueue(&c->recvq, sg);
- unlock(c);
- gosched();
+ runtime·unlock(c);
+ runtime·gosched();
- lock(c);
+ runtime·lock(c);
goto asynch;
}
c->elemalg->copy(c->elemsize, ep, c->recvdataq->elem);
if(sg != nil) {
gp = sg->g;
freesg(c, sg);
- unlock(c);
- ready(gp);
+ runtime·unlock(c);
+ runtime·ready(gp);
if(pres != nil)
*pres = true;
return;
}
- unlock(c);
+ runtime·unlock(c);
if(pres != nil)
*pres = true;
return;
incerr(c);
if(pres != nil)
*pres = true;
- unlock(c);
+ runtime·unlock(c);
}
// chansend1(hchan *chan any, elem any);
#pragma textflag 7
void
-·chansend1(Hchan* c, ...)
+runtime·chansend1(Hchan* c, ...)
{
int32 o;
byte *ae;
if(c == nil)
- panicstring("send to nil channel");
+ runtime·panicstring("send to nil channel");
- o = rnd(sizeof(c), c->elemalign);
+ o = runtime·rnd(sizeof(c), c->elemalign);
ae = (byte*)&c + o;
- chansend(c, ae, nil);
+ runtime·chansend(c, ae, nil);
}
// chansend2(hchan *chan any, elem any) (pres bool);
#pragma textflag 7
void
-·chansend2(Hchan* c, ...)
+runtime·chansend2(Hchan* c, ...)
{
int32 o;
byte *ae, *ap;
if(c == nil)
- panicstring("send to nil channel");
+ runtime·panicstring("send to nil channel");
- o = rnd(sizeof(c), c->elemalign);
+ o = runtime·rnd(sizeof(c), c->elemalign);
ae = (byte*)&c + o;
- o = rnd(o+c->elemsize, Structrnd);
+ o = runtime·rnd(o+c->elemsize, Structrnd);
ap = (byte*)&c + o;
- chansend(c, ae, ap);
+ runtime·chansend(c, ae, ap);
}
// chanrecv1(hchan *chan any) (elem any);
#pragma textflag 7
void
-·chanrecv1(Hchan* c, ...)
+runtime·chanrecv1(Hchan* c, ...)
{
int32 o;
byte *ae;
- o = rnd(sizeof(c), Structrnd);
+ o = runtime·rnd(sizeof(c), Structrnd);
ae = (byte*)&c + o;
- chanrecv(c, ae, nil);
+ runtime·chanrecv(c, ae, nil);
}
// chanrecv2(hchan *chan any) (elem any, pres bool);
#pragma textflag 7
void
-·chanrecv2(Hchan* c, ...)
+runtime·chanrecv2(Hchan* c, ...)
{
int32 o;
byte *ae, *ap;
- o = rnd(sizeof(c), Structrnd);
+ o = runtime·rnd(sizeof(c), Structrnd);
ae = (byte*)&c + o;
- o = rnd(o+c->elemsize, 1);
+ o = runtime·rnd(o+c->elemsize, 1);
ap = (byte*)&c + o;
- chanrecv(c, ae, ap);
+ runtime·chanrecv(c, ae, ap);
}
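chansend1/chansend2 and chanrecv1/chanrecv2 are the compiler-facing wrappers around chansend and chanrecv: the *1 forms are the plain statements, and the *2 forms carry an extra boolean result. For orientation, the Go statements they correspond to (illustrative only):

    package main

    import "fmt"

    func main() {
            c := make(chan int, 1) // makechan
            c <- 42                // the plain send statement (chansend1)
            v, ok := <-c           // the two-result receive form (chanrecv2)
            fmt.Println(v, ok)
    }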
// newselect(size uint32) (sel *byte);
#pragma textflag 7
void
-·newselect(int32 size, ...)
+runtime·newselect(int32 size, ...)
{
int32 n, o;
Select **selp;
Select *sel;
- o = rnd(sizeof(size), Structrnd);
+ o = runtime·rnd(sizeof(size), Structrnd);
selp = (Select**)((byte*)&size + o);
n = 0;
if(size > 1)
n = size-1;
- sel = mal(sizeof(*sel) + n*sizeof(sel->scase[0]));
+ sel = runtime·mal(sizeof(*sel) + n*sizeof(sel->scase[0]));
sel->tcase = size;
sel->ncase = 0;
*selp = sel;
if(debug)
- printf("newselect s=%p size=%d\n", sel, size);
+ runtime·printf("newselect s=%p size=%d\n", sel, size);
}
// selectsend(sel *byte, hchan *chan any, elem any) (selected bool);
#pragma textflag 7
void
-·selectsend(Select *sel, Hchan *c, ...)
+runtime·selectsend(Select *sel, Hchan *c, ...)
{
int32 i, eo;
Scase *cas;
i = sel->ncase;
if(i >= sel->tcase)
- throw("selectsend: too many cases");
+ runtime·throw("selectsend: too many cases");
sel->ncase = i+1;
- cas = mal(sizeof *cas + c->elemsize - sizeof(cas->u.elem));
+ cas = runtime·mal(sizeof *cas + c->elemsize - sizeof(cas->u.elem));
sel->scase[i] = cas;
- cas->pc = ·getcallerpc(&sel);
+ cas->pc = runtime·getcallerpc(&sel);
cas->chan = c;
- eo = rnd(sizeof(sel), sizeof(c));
- eo = rnd(eo+sizeof(c), c->elemsize);
- cas->so = rnd(eo+c->elemsize, Structrnd);
+ eo = runtime·rnd(sizeof(sel), sizeof(c));
+ eo = runtime·rnd(eo+sizeof(c), c->elemsize);
+ cas->so = runtime·rnd(eo+c->elemsize, Structrnd);
cas->send = 1;
ae = (byte*)&sel + eo;
c->elemalg->copy(c->elemsize, cas->u.elem, ae);
if(debug)
- printf("selectsend s=%p pc=%p chan=%p so=%d send=%d\n",
+ runtime·printf("selectsend s=%p pc=%p chan=%p so=%d send=%d\n",
sel, cas->pc, cas->chan, cas->so, cas->send);
}
// selectrecv(sel *byte, hchan *chan any, elem *any) (selected bool);
#pragma textflag 7
void
-·selectrecv(Select *sel, Hchan *c, ...)
+runtime·selectrecv(Select *sel, Hchan *c, ...)
{
int32 i, eo;
Scase *cas;
i = sel->ncase;
if(i >= sel->tcase)
- throw("selectrecv: too many cases");
+ runtime·throw("selectrecv: too many cases");
sel->ncase = i+1;
- cas = mal(sizeof *cas);
+ cas = runtime·mal(sizeof *cas);
sel->scase[i] = cas;
- cas->pc = ·getcallerpc(&sel);
+ cas->pc = runtime·getcallerpc(&sel);
cas->chan = c;
- eo = rnd(sizeof(sel), sizeof(c));
- eo = rnd(eo+sizeof(c), sizeof(byte*));
- cas->so = rnd(eo+sizeof(byte*), Structrnd);
+ eo = runtime·rnd(sizeof(sel), sizeof(c));
+ eo = runtime·rnd(eo+sizeof(c), sizeof(byte*));
+ cas->so = runtime·rnd(eo+sizeof(byte*), Structrnd);
cas->send = 0;
cas->u.elemp = *(byte**)((byte*)&sel + eo);
if(debug)
- printf("selectrecv s=%p pc=%p chan=%p so=%d send=%d\n",
+ runtime·printf("selectrecv s=%p pc=%p chan=%p so=%d send=%d\n",
sel, cas->pc, cas->chan, cas->so, cas->send);
}
// selectdefault(sel *byte) (selected bool);
#pragma textflag 7
void
-·selectdefault(Select *sel, ...)
+runtime·selectdefault(Select *sel, ...)
{
int32 i;
Scase *cas;
i = sel->ncase;
if(i >= sel->tcase)
- throw("selectdefault: too many cases");
+ runtime·throw("selectdefault: too many cases");
sel->ncase = i+1;
- cas = mal(sizeof *cas);
+ cas = runtime·mal(sizeof *cas);
sel->scase[i] = cas;
- cas->pc = ·getcallerpc(&sel);
+ cas->pc = runtime·getcallerpc(&sel);
cas->chan = nil;
- cas->so = rnd(sizeof(sel), Structrnd);
+ cas->so = runtime·rnd(sizeof(sel), Structrnd);
cas->send = 2;
cas->u.elemp = nil;
if(debug)
- printf("selectdefault s=%p pc=%p so=%d send=%d\n",
+ runtime·printf("selectdefault s=%p pc=%p so=%d send=%d\n",
sel, cas->pc, cas->so, cas->send);
}
uint32 i;
for(i=0; i<sel->ncase; i++)
- free(sel->scase[i]);
- free(sel);
+ runtime·free(sel->scase[i]);
+ runtime·free(sel);
}
static void
for(i=0; i<sel->ncase; i++) {
if(sel->scase[i]->chan != c) {
c = sel->scase[i]->chan;
- lock(c);
+ runtime·lock(c);
}
}
}
for(i=sel->ncase; i>0; i--) {
if(sel->scase[i-1]->chan && sel->scase[i-1]->chan != c) {
c = sel->scase[i-1]->chan;
- unlock(c);
+ runtime·unlock(c);
}
}
}
// that's less than StackGuard-StackSmall, so okay.
#pragma textflag 7
void
-·selectgo(Select *sel)
+runtime·selectgo(Select *sel)
{
uint32 p, o, i, j;
Scase *cas, *dfl;
G *gp;
byte *as;
- if(gcwaiting)
- gosched();
+ if(runtime·gcwaiting)
+ runtime·gosched();
if(debug)
- printf("select: sel=%p\n", sel);
+ runtime·printf("select: sel=%p\n", sel);
if(sel->ncase < 2) {
if(sel->ncase < 1) {
g->status = Gwaiting; // forever
- gosched();
+ runtime·gosched();
}
// TODO: make special case of one.
}
if(gcd(p, sel->ncase) == 1)
break;
if(i > 1000)
- throw("select: failed to select prime");
+ runtime·throw("select: failed to select prime");
}
// select an initial offset
case 0: // recv
if(c->dataqsiz > 0) {
if(c->qcount > 0)
- throw("select: pass 2 async recv");
+ runtime·throw("select: pass 2 async recv");
} else {
if(dequeue(&c->sendq, c))
- throw("select: pass 2 sync recv");
+ runtime·throw("select: pass 2 sync recv");
}
enqueue(&c->recvq, sg);
break;
case 1: // send
if(c->dataqsiz > 0) {
if(c->qcount < c->dataqsiz)
- throw("select: pass 2 async send");
+ runtime·throw("select: pass 2 async send");
} else {
if(dequeue(&c->recvq, c))
- throw("select: pass 2 sync send");
+ runtime·throw("select: pass 2 sync send");
c->elemalg->copy(c->elemsize, sg->elem, cas->u.elem);
}
enqueue(&c->sendq, sg);
g->param = nil;
g->status = Gwaiting;
selunlock(sel);
- gosched();
+ runtime·gosched();
sellock(sel);
sg = g->param;
}
if(debug)
- printf("wait-return: sel=%p c=%p cas=%p send=%d o=%d\n",
+ runtime·printf("wait-return: sel=%p c=%p cas=%p send=%d o=%d\n",
sel, c, cas, cas->send, o);
if(!cas->send) {
if(sg != nil) {
gp = sg->g;
freesg(c, sg);
- ready(gp);
+ runtime·ready(gp);
}
goto retc;
if(sg != nil) {
gp = sg->g;
freesg(c, sg);
- ready(gp);
+ runtime·ready(gp);
}
goto retc;
syncrecv:
// can receive from sleeping sender (sg)
if(debug)
- printf("syncrecv: sel=%p c=%p o=%d\n", sel, c, o);
+ runtime·printf("syncrecv: sel=%p c=%p o=%d\n", sel, c, o);
if(cas->u.elemp != nil)
c->elemalg->copy(c->elemsize, cas->u.elemp, sg->elem);
c->elemalg->copy(c->elemsize, sg->elem, nil);
gp = sg->g;
gp->param = sg;
- ready(gp);
+ runtime·ready(gp);
goto retc;
rclose:
syncsend:
// can send to sleeping receiver (sg)
if(debug)
- printf("syncsend: sel=%p c=%p o=%d\n", sel, c, o);
+ runtime·printf("syncsend: sel=%p c=%p o=%d\n", sel, c, o);
if(c->closed & Wclosed)
goto sclose;
c->elemalg->copy(c->elemsize, sg->elem, cas->u.elem);
gp = sg->g;
gp->param = sg;
- ready(gp);
+ runtime·ready(gp);
goto retc;
sclose:
selunlock(sel);
// return to pc corresponding to chosen case
- ·setcallerpc(&sel, cas->pc);
+ runtime·setcallerpc(&sel, cas->pc);
as = (byte*)&sel + cas->so;
freesel(sel);
*as = true;
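selectgo is the final step: the compiler first calls newselect, then one selectsend/selectrecv/selectdefault per case to register the cases, and finally selectgo, which returns by jumping (via setcallerpc) to the PC recorded for the chosen case. The source shape it implements, as a plain illustration:

    package main

    import "fmt"

    func main() {
            a := make(chan int, 1)
            b := make(chan string, 1)
            a <- 1

            // the compiler rewrites this into newselect plus one selectsend/
            // selectrecv/selectdefault registration per case, then one selectgo call
            select {
            case v := <-a:
                    fmt.Println("recv", v)
            case b <- "hi":
                    fmt.Println("send")
            default:
                    fmt.Println("nothing ready")
            }
    }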
// closechan(sel *byte);
void
-·closechan(Hchan *c)
+runtime·closechan(Hchan *c)
{
SudoG *sg;
G* gp;
- if(gcwaiting)
- gosched();
+ if(runtime·gcwaiting)
+ runtime·gosched();
- lock(c);
+ runtime·lock(c);
incerr(c);
c->closed |= Wclosed;
gp = sg->g;
gp->param = nil;
freesg(c, sg);
- ready(gp);
+ runtime·ready(gp);
}
// release all writers
gp = sg->g;
gp->param = nil;
freesg(c, sg);
- ready(gp);
+ runtime·ready(gp);
}
- unlock(c);
+ runtime·unlock(c);
}
void
-chanclose(Hchan *c)
+runtime·chanclose(Hchan *c)
{
- ·closechan(c);
+ runtime·closechan(c);
}
bool
-chanclosed(Hchan *c)
+runtime·chanclosed(Hchan *c)
{
return (c->closed & Rclosed) != 0;
}
int32
-chanlen(Hchan *c)
+runtime·chanlen(Hchan *c)
{
return c->qcount;
}
int32
-chancap(Hchan *c)
+runtime·chancap(Hchan *c)
{
return c->dataqsiz;
}
// closedchan(sel *byte) bool;
void
-·closedchan(Hchan *c, bool closed)
+runtime·closedchan(Hchan *c, bool closed)
{
- closed = chanclosed(c);
+ closed = runtime·chanclosed(c);
FLUSH(&closed);
}
q->first = sgp->link;
// if sgp is stale, ignore it
- if(!cas(&sgp->g->selgen, sgp->selgen, sgp->selgen + 1)) {
+ if(!runtime·cas(&sgp->g->selgen, sgp->selgen, sgp->selgen + 1)) {
//prints("INVALID PSEUDOG POINTER\n");
freesg(c, sgp);
goto loop;
if(sg != nil) {
c->free = sg->link;
} else
- sg = mal(sizeof(*sg) + c->elemsize - sizeof(sg->elem));
+ sg = runtime·mal(sizeof(*sg) + c->elemsize - sizeof(sg->elem));
sg->selgen = g->selgen;
sg->g = g;
sg->offset = 0;
{
if(sg != nil) {
if(sg->isfree)
- throw("chan.freesg: already free");
+ runtime·throw("chan.freesg: already free");
sg->isfree = 1;
sg->link = c->free;
c->free = sg;
typedef struct Complex128 Complex128;
void
-·complex128div(Complex128 n, Complex128 d, Complex128 q)
+runtime·complex128div(Complex128 n, Complex128 d, Complex128 q)
{
int32 ninf, dinf, nnan, dnan;
float64 a, b, ratio, denom;
// Special cases as in C99.
- ninf = isInf(n.real, 0) || isInf(n.imag, 0);
- dinf = isInf(d.real, 0) || isInf(d.imag, 0);
+ ninf = runtime·isInf(n.real, 0) || runtime·isInf(n.imag, 0);
+ dinf = runtime·isInf(d.real, 0) || runtime·isInf(d.imag, 0);
- nnan = !ninf && (isNaN(n.real) || isNaN(n.imag));
- dnan = !dinf && (isNaN(d.real) || isNaN(d.imag));
+ nnan = !ninf && (runtime·isNaN(n.real) || runtime·isNaN(n.imag));
+ dnan = !dinf && (runtime·isNaN(d.real) || runtime·isNaN(d.imag));
if(nnan || dnan) {
- q.real = NaN();
- q.imag = NaN();
+ q.real = runtime·NaN();
+ q.imag = runtime·NaN();
} else if(ninf && !dinf && !dnan) {
- q.real = Inf(0);
- q.imag = Inf(0);
+ q.real = runtime·Inf(0);
+ q.imag = runtime·Inf(0);
} else if(!ninf && !nnan && dinf) {
q.real = 0;
q.imag = 0;
} else if(d.real == 0 && d.imag == 0) {
if(n.real == 0 && n.imag == 0) {
- q.real = NaN();
- q.imag = NaN();
+ q.real = runtime·NaN();
+ q.imag = runtime·NaN();
} else {
- q.real = Inf(0);
- q.imag = Inf(0);
+ q.real = runtime·Inf(0);
+ q.imag = runtime·Inf(0);
}
} else {
// Standard complex arithmetic, factored to avoid unnecessary overflow.
// Darwin and Linux use the same linkage to main
-TEXT _rt0_386_darwin(SB),7,$0
+TEXT _rt0_386_darwin(SB),7,$0
JMP _rt0_386(SB)
#include "signals.h"
void
-dumpregs(Regs *r)
+runtime·dumpregs(Regs *r)
{
- printf("eax %x\n", r->eax);
- printf("ebx %x\n", r->ebx);
- printf("ecx %x\n", r->ecx);
- printf("edx %x\n", r->edx);
- printf("edi %x\n", r->edi);
- printf("esi %x\n", r->esi);
- printf("ebp %x\n", r->ebp);
- printf("esp %x\n", r->esp);
- printf("eip %x\n", r->eip);
- printf("eflags %x\n", r->eflags);
- printf("cs %x\n", r->cs);
- printf("fs %x\n", r->fs);
- printf("gs %x\n", r->gs);
+ runtime·printf("eax %x\n", r->eax);
+ runtime·printf("ebx %x\n", r->ebx);
+ runtime·printf("ecx %x\n", r->ecx);
+ runtime·printf("edx %x\n", r->edx);
+ runtime·printf("edi %x\n", r->edi);
+ runtime·printf("esi %x\n", r->esi);
+ runtime·printf("ebp %x\n", r->ebp);
+ runtime·printf("esp %x\n", r->esp);
+ runtime·printf("eip %x\n", r->eip);
+ runtime·printf("eflags %x\n", r->eflags);
+ runtime·printf("cs %x\n", r->cs);
+ runtime·printf("fs %x\n", r->fs);
+ runtime·printf("gs %x\n", r->gs);
}
String
-signame(int32 sig)
+runtime·signame(int32 sig)
{
if(sig < 0 || sig >= NSIG)
- return emptystring;
- return gostringnocopy((byte*)sigtab[sig].name);
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
}
void
-sighandler(int32 sig, Siginfo *info, void *context)
+runtime·sighandler(int32 sig, Siginfo *info, void *context)
{
Ucontext *uc;
Mcontext *mc;
Regs *r;
uintptr *sp;
- void (*fn)(void);
G *gp;
byte *pc;
mc = uc->uc_mcontext;
r = &mc->ss;
- if((gp = m->curg) != nil && (sigtab[sig].flags & SigPanic)) {
+ if((gp = m->curg) != nil && (runtime·sigtab[sig].flags & SigPanic)) {
// Work around Leopard bug that doesn't set FPE_INTDIV.
// Look at instruction to see if it is a divide.
// Not necessary in Snow Leopard (si_code will be != 0).
gp->sigcode0 = info->si_code;
gp->sigcode1 = (uintptr)info->si_addr;
- // Only push sigpanic if r->eip != 0.
+ // Only push runtime·sigpanic if r->eip != 0.
// If r->eip == 0, probably panicked because of a
// call to a nil func. Not pushing that onto sp will
- // make the trace look like a call to sigpanic instead.
- // (Otherwise the trace will end at sigpanic and we
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
// won't get to see who faulted.)
if(r->eip != 0) {
sp = (uintptr*)r->esp;
*--sp = r->eip;
r->esp = (uintptr)sp;
}
- r->eip = (uintptr)sigpanic;
+ r->eip = (uintptr)runtime·sigpanic;
return;
}
- if(sigtab[sig].flags & SigQueue) {
- if(sigsend(sig) || (sigtab[sig].flags & SigIgnore))
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
return;
- exit(2); // SIGINT, SIGTERM, etc
+ runtime·exit(2); // SIGINT, SIGTERM, etc
}
- if(panicking) // traceback already printed
- exit(2);
- panicking = 1;
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
if(sig < 0 || sig >= NSIG){
- printf("Signal %d\n", sig);
+ runtime·printf("Signal %d\n", sig);
}else{
- printf("%s\n", sigtab[sig].name);
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
}
- printf("pc: %x\n", r->eip);
- printf("\n");
+ runtime·printf("pc: %x\n", r->eip);
+ runtime·printf("\n");
- if(gotraceback()){
- traceback((void*)r->eip, (void*)r->esp, 0, m->curg);
- tracebackothers(m->curg);
- dumpregs(r);
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->eip, (void*)r->esp, 0, m->curg);
+ runtime·tracebackothers(m->curg);
+ runtime·dumpregs(r);
}
- breakpoint();
- exit(2);
+ runtime·breakpoint();
+ runtime·exit(2);
}
void
-sigignore(int32, Siginfo*, void*)
+runtime·sigignore(int32, Siginfo*, void*)
{
}
void
-signalstack(byte *p, int32 n)
+runtime·signalstack(byte *p, int32 n)
{
StackT st;
st.ss_sp = p;
st.ss_size = n;
st.ss_flags = 0;
- sigaltstack(&st, nil);
+ runtime·sigaltstack(&st, nil);
}
void
-initsig(int32 queue)
+runtime·initsig(int32 queue)
{
int32 i;
static Sigaction sa;
- siginit();
+ runtime·siginit();
sa.sa_flags |= SA_SIGINFO|SA_ONSTACK;
sa.sa_mask = 0xFFFFFFFFU;
- sa.sa_tramp = sigtramp; // sigtramp's job is to call into real handler
+ sa.sa_tramp = runtime·sigtramp; // runtime·sigtramp's job is to call into real handler
for(i = 0; i<NSIG; i++) {
- if(sigtab[i].flags) {
- if((sigtab[i].flags & SigQueue) != queue)
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
continue;
- if(sigtab[i].flags & (SigCatch | SigQueue)) {
- sa.__sigaction_u.__sa_sigaction = sighandler;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue)) {
+ sa.__sigaction_u.__sa_sigaction = runtime·sighandler;
} else {
- sa.__sigaction_u.__sa_sigaction = sigignore;
+ sa.__sigaction_u.__sa_sigaction = runtime·sigignore;
}
- if(sigtab[i].flags & SigRestart)
+ if(runtime·sigtab[i].flags & SigRestart)
sa.sa_flags |= SA_RESTART;
else
sa.sa_flags &= ~SA_RESTART;
- sigaction(i, &sa, nil);
+ runtime·sigaction(i, &sa, nil);
}
}
}
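Reading the loop above: the queue argument is matched against each entry's SigQueue bit, so initsig can be called once per group of signals. A compact way to see it (an observation about the code as shown, not a change to it):

	// initsig(0)        installs handlers for sigtab entries without SigQueue
	// initsig(SigQueue) installs handlers for sigtab entries with SigQueue
	// In both passes, SigCatch/SigQueue entries get runtime·sighandler, the
	// rest get runtime·sigignore, and SigRestart toggles SA_RESTART per signal.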
#include "386/asm.h"
-TEXT notok(SB),7,$0
+TEXT runtime·notok(SB),7,$0
MOVL $0xf1, 0xf1
RET
// Exit the entire program (like C exit)
-TEXT exit(SB),7,$0
+TEXT runtime·exit(SB),7,$0
MOVL $1, AX
INT $0x80
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// Exit this OS thread (like pthread_exit, which eventually
// calls __bsdthread_terminate).
-TEXT exit1(SB),7,$0
+TEXT runtime·exit1(SB),7,$0
MOVL $361, AX
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT write(SB),7,$0
+TEXT runtime·write(SB),7,$0
MOVL $4, AX
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT ·mmap(SB),7,$0
+TEXT runtime·mmap(SB),7,$0
MOVL $197, AX
INT $0x80
RET
-TEXT ·munmap(SB),7,$0
+TEXT runtime·munmap(SB),7,$0
MOVL $73, AX
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// void gettime(int64 *sec, int32 *usec)
-TEXT gettime(SB), 7, $32
+TEXT runtime·gettime(SB), 7, $32
LEAL 12(SP), AX // must be non-nil, unused
MOVL AX, 4(SP)
MOVL $0, 8(SP) // time zone pointer
MOVL DX, (DI)
RET
-TEXT sigaction(SB),7,$0
+TEXT runtime·sigaction(SB),7,$0
MOVL $46, AX
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// Sigtramp's job is to call the actual signal handler.
// 12(FP) signal number
// 16(FP) siginfo
// 20(FP) context
-TEXT sigtramp(SB),7,$40
+TEXT runtime·sigtramp(SB),7,$40
get_tls(CX)
// save g
MOVL BX, 8(SP)
MOVL $184, AX // sigreturn(ucontext, infostyle)
INT $0x80
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT sigaltstack(SB),7,$0
+TEXT runtime·sigaltstack(SB),7,$0
MOVL $53, AX
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// void bsdthread_create(void *stk, M *m, G *g, void (*fn)(void))
// System call args are: func arg stack pthread flags.
-TEXT bsdthread_create(SB),7,$32
+TEXT runtime·bsdthread_create(SB),7,$32
MOVL $360, AX
// 0(SP) is where the caller PC would be; kernel skips it
MOVL func+12(FP), BX
// DI = stack top
// SI = flags (= 0x1000000)
// SP = stack - C_32_STK_ALIGN
-TEXT bsdthread_start(SB),7,$0
+TEXT runtime·bsdthread_start(SB),7,$0
// set up ldt 7+id to point at m->tls.
// m->tls is at m+40. newosproc left
// the m->id in tls[0].
PUSHL $32 // sizeof tls
PUSHL BP // &tls
PUSHL DI // tls #
- CALL setldt(SB)
+ CALL runtime·setldt(SB)
POPL AX
POPL AX
POPL AX
MOVL AX, g(BP)
MOVL DX, m(BP)
MOVL BX, m_procid(DX) // m->procid = thread port (for debuggers)
- CALL stackcheck(SB) // smashes AX
+ CALL runtime·stackcheck(SB) // smashes AX
CALL CX // fn()
- CALL exit1(SB)
+ CALL runtime·exit1(SB)
RET
// void bsdthread_register(void)
// registers callbacks for threadstart (see bsdthread_create above)
// and for wqthread and pthsize (not used). returns 0 on success.
-TEXT bsdthread_register(SB),7,$40
+TEXT runtime·bsdthread_register(SB),7,$40
MOVL $366, AX
// 0(SP) is where kernel expects caller PC; ignored
- MOVL $bsdthread_start(SB), 4(SP) // threadstart
+ MOVL $runtime·bsdthread_start(SB), 4(SP) // threadstart
MOVL $0, 8(SP) // wqthread, not used by us
MOVL $0, 12(SP) // pthsize, not used by us
MOVL $0, 16(SP) // dummy_value [sic]
MOVL $0, 24(SP) // dispatchqueue_offset
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// Invoke Mach system call.
// in the high 16 bits that seems to be the
// argument count in bytes but is not always.
// INT $0x80 works fine for those.
-TEXT sysenter(SB),7,$0
+TEXT runtime·sysenter(SB),7,$0
POPL DX
MOVL SP, CX
BYTE $0x0F; BYTE $0x34; // SYSENTER
// returns to DX with SP set to CX
-TEXT mach_msg_trap(SB),7,$0
+TEXT runtime·mach_msg_trap(SB),7,$0
MOVL $-31, AX
- CALL sysenter(SB)
+ CALL runtime·sysenter(SB)
RET
-TEXT mach_reply_port(SB),7,$0
+TEXT runtime·mach_reply_port(SB),7,$0
MOVL $-26, AX
- CALL sysenter(SB)
+ CALL runtime·sysenter(SB)
RET
-TEXT mach_task_self(SB),7,$0
+TEXT runtime·mach_task_self(SB),7,$0
MOVL $-28, AX
- CALL sysenter(SB)
+ CALL runtime·sysenter(SB)
RET
// Mach provides trap versions of the semaphore ops,
// instead of requiring the use of RPC.
// uint32 mach_semaphore_wait(uint32)
-TEXT mach_semaphore_wait(SB),7,$0
+TEXT runtime·mach_semaphore_wait(SB),7,$0
MOVL $-36, AX
- CALL sysenter(SB)
+ CALL runtime·sysenter(SB)
RET
// uint32 mach_semaphore_timedwait(uint32, uint32, uint32)
-TEXT mach_semaphore_timedwait(SB),7,$0
+TEXT runtime·mach_semaphore_timedwait(SB),7,$0
MOVL $-38, AX
- CALL sysenter(SB)
+ CALL runtime·sysenter(SB)
RET
// uint32 mach_semaphore_signal(uint32)
-TEXT mach_semaphore_signal(SB),7,$0
+TEXT runtime·mach_semaphore_signal(SB),7,$0
MOVL $-33, AX
- CALL sysenter(SB)
+ CALL runtime·sysenter(SB)
RET
// uint32 mach_semaphore_signal_all(uint32)
-TEXT mach_semaphore_signal_all(SB),7,$0
+TEXT runtime·mach_semaphore_signal_all(SB),7,$0
MOVL $-34, AX
- CALL sysenter(SB)
+ CALL runtime·sysenter(SB)
RET
// setldt(int entry, int address, int limit)
// entry and limit are ignored.
-TEXT setldt(SB),7,$32
+TEXT runtime·setldt(SB),7,$32
MOVL address+4(FP), BX // aka base
/*
// Darwin and Linux use the same linkage to main
-TEXT _rt0_amd64_darwin(SB),7,$-8
+TEXT _rt0_amd64_darwin(SB),7,$-8
MOVQ $_rt0_amd64(SB), AX
MOVQ SP, DI
JMP AX
#include "signals.h"
void
-dumpregs(Regs *r)
+runtime·dumpregs(Regs *r)
{
- printf("rax %X\n", r->rax);
- printf("rbx %X\n", r->rbx);
- printf("rcx %X\n", r->rcx);
- printf("rdx %X\n", r->rdx);
- printf("rdi %X\n", r->rdi);
- printf("rsi %X\n", r->rsi);
- printf("rbp %X\n", r->rbp);
- printf("rsp %X\n", r->rsp);
- printf("r8 %X\n", r->r8 );
- printf("r9 %X\n", r->r9 );
- printf("r10 %X\n", r->r10);
- printf("r11 %X\n", r->r11);
- printf("r12 %X\n", r->r12);
- printf("r13 %X\n", r->r13);
- printf("r14 %X\n", r->r14);
- printf("r15 %X\n", r->r15);
- printf("rip %X\n", r->rip);
- printf("rflags %X\n", r->rflags);
- printf("cs %X\n", r->cs);
- printf("fs %X\n", r->fs);
- printf("gs %X\n", r->gs);
+ runtime·printf("rax %X\n", r->rax);
+ runtime·printf("rbx %X\n", r->rbx);
+ runtime·printf("rcx %X\n", r->rcx);
+ runtime·printf("rdx %X\n", r->rdx);
+ runtime·printf("rdi %X\n", r->rdi);
+ runtime·printf("rsi %X\n", r->rsi);
+ runtime·printf("rbp %X\n", r->rbp);
+ runtime·printf("rsp %X\n", r->rsp);
+ runtime·printf("r8 %X\n", r->r8 );
+ runtime·printf("r9 %X\n", r->r9 );
+ runtime·printf("r10 %X\n", r->r10);
+ runtime·printf("r11 %X\n", r->r11);
+ runtime·printf("r12 %X\n", r->r12);
+ runtime·printf("r13 %X\n", r->r13);
+ runtime·printf("r14 %X\n", r->r14);
+ runtime·printf("r15 %X\n", r->r15);
+ runtime·printf("rip %X\n", r->rip);
+ runtime·printf("rflags %X\n", r->rflags);
+ runtime·printf("cs %X\n", r->cs);
+ runtime·printf("fs %X\n", r->fs);
+ runtime·printf("gs %X\n", r->gs);
}
String
-signame(int32 sig)
+runtime·signame(int32 sig)
{
if(sig < 0 || sig >= NSIG)
- return emptystring;
- return gostringnocopy((byte*)sigtab[sig].name);
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
}
void
-sighandler(int32 sig, Siginfo *info, void *context)
+runtime·sighandler(int32 sig, Siginfo *info, void *context)
{
Ucontext *uc;
Mcontext *mc;
mc = uc->uc_mcontext;
r = &mc->ss;
- if((gp = m->curg) != nil && (sigtab[sig].flags & SigPanic)) {
+ if((gp = m->curg) != nil && (runtime·sigtab[sig].flags & SigPanic)) {
// Work around Leopard bug that doesn't set FPE_INTDIV.
// Look at instruction to see if it is a divide.
// Not necessary in Snow Leopard (si_code will be != 0).
gp->sigcode0 = info->si_code;
gp->sigcode1 = (uintptr)info->si_addr;
- // Only push sigpanic if r->rip != 0.
+ // Only push runtime·sigpanic if r->rip != 0.
// If r->rip == 0, probably panicked because of a
// call to a nil func. Not pushing that onto sp will
- // make the trace look like a call to sigpanic instead.
- // (Otherwise the trace will end at sigpanic and we
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
// won't get to see who faulted.)
if(r->rip != 0) {
sp = (uintptr*)r->rsp;
*--sp = r->rip;
r->rsp = (uintptr)sp;
}
- r->rip = (uintptr)sigpanic;
+ r->rip = (uintptr)runtime·sigpanic;
return;
}
- if(sigtab[sig].flags & SigQueue) {
- if(sigsend(sig) || (sigtab[sig].flags & SigIgnore))
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
return;
- exit(2); // SIGINT, SIGTERM, etc
+ runtime·exit(2); // SIGINT, SIGTERM, etc
}
- if(panicking) // traceback already printed
- exit(2);
- panicking = 1;
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
if(sig < 0 || sig >= NSIG){
- printf("Signal %d\n", sig);
+ runtime·printf("Signal %d\n", sig);
}else{
- printf("%s\n", sigtab[sig].name);
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
}
- printf("pc: %X\n", r->rip);
- printf("\n");
+ runtime·printf("pc: %X\n", r->rip);
+ runtime·printf("\n");
- if(gotraceback()){
- traceback((void*)r->rip, (void*)r->rsp, 0, g);
- tracebackothers(g);
- dumpregs(r);
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->rip, (void*)r->rsp, 0, g);
+ runtime·tracebackothers(g);
+ runtime·dumpregs(r);
}
- breakpoint();
- exit(2);
+ runtime·breakpoint();
+ runtime·exit(2);
}
void
-sigignore(int32, Siginfo*, void*)
+runtime·sigignore(int32, Siginfo*, void*)
{
}
void
-signalstack(byte *p, int32 n)
+runtime·signalstack(byte *p, int32 n)
{
StackT st;
st.ss_sp = p;
st.ss_size = n;
st.ss_flags = 0;
- sigaltstack(&st, nil);
+ runtime·sigaltstack(&st, nil);
}
void
-initsig(int32 queue)
+runtime·initsig(int32 queue)
{
int32 i;
static Sigaction sa;
- siginit();
+ runtime·siginit();
sa.sa_flags |= SA_SIGINFO|SA_ONSTACK;
sa.sa_mask = 0xFFFFFFFFU;
- sa.sa_tramp = sigtramp; // sigtramp's job is to call into real handler
+ sa.sa_tramp = runtime·sigtramp; // runtime·sigtramp's job is to call into real handler
for(i = 0; i<NSIG; i++) {
- if(sigtab[i].flags) {
- if((sigtab[i].flags & SigQueue) != queue)
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
continue;
- if(sigtab[i].flags & (SigCatch | SigQueue)) {
- sa.__sigaction_u.__sa_sigaction = sighandler;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue)) {
+ sa.__sigaction_u.__sa_sigaction = runtime·sighandler;
} else {
- sa.__sigaction_u.__sa_sigaction = sigignore;
+ sa.__sigaction_u.__sa_sigaction = runtime·sigignore;
}
- if(sigtab[i].flags & SigRestart)
+ if(runtime·sigtab[i].flags & SigRestart)
sa.sa_flags |= SA_RESTART;
else
sa.sa_flags &= ~SA_RESTART;
- sigaction(i, &sa, nil);
+ runtime·sigaction(i, &sa, nil);
}
}
}
#include "amd64/asm.h"
// Exit the entire program (like C exit)
-TEXT exit(SB),7,$0
+TEXT runtime·exit(SB),7,$0
MOVL 8(SP), DI // arg 1 exit status
MOVL $(0x2000000+1), AX // syscall entry
SYSCALL
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// Exit this OS thread (like pthread_exit, which eventually
// calls __bsdthread_terminate).
-TEXT exit1(SB),7,$0
+TEXT runtime·exit1(SB),7,$0
MOVL 8(SP), DI // arg 1 exit status
MOVL $(0x2000000+361), AX // syscall entry
SYSCALL
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT write(SB),7,$0
+TEXT runtime·write(SB),7,$0
MOVL 8(SP), DI // arg 1 fd
MOVQ 16(SP), SI // arg 2 buf
MOVL 24(SP), DX // arg 3 count
MOVL $(0x2000000+4), AX // syscall entry
SYSCALL
JCC 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// void gettime(int64 *sec, int32 *usec)
-TEXT gettime(SB), 7, $32
+TEXT runtime·gettime(SB), 7, $32
MOVQ SP, DI // must be non-nil, unused
MOVQ $0, SI
MOVQ $(0x2000000+116), AX
MOVL DX, (DI)
RET
-TEXT sigaction(SB),7,$0
+TEXT runtime·sigaction(SB),7,$0
MOVL 8(SP), DI // arg 1 sig
MOVQ 16(SP), SI // arg 2 act
MOVQ 24(SP), DX // arg 3 oact
MOVL $(0x2000000+46), AX // syscall entry
SYSCALL
JCC 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT sigtramp(SB),7,$64
+TEXT runtime·sigtramp(SB),7,$64
get_tls(BX)
// save g
SYSCALL
INT $3 // not reached
-TEXT ·mmap(SB),7,$0
+TEXT runtime·mmap(SB),7,$0
MOVQ 8(SP), DI // arg 1 addr
MOVQ 16(SP), SI // arg 2 len
MOVL 24(SP), DX // arg 3 prot
SYSCALL
RET
-TEXT ·munmap(SB),7,$0
+TEXT runtime·munmap(SB),7,$0
MOVQ 8(SP), DI // arg 1 addr
MOVQ 16(SP), SI // arg 2 len
MOVL $(0x2000000+73), AX // syscall entry
SYSCALL
JCC 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT notok(SB),7,$0
+TEXT runtime·notok(SB),7,$0
MOVL $0xf1, BP
MOVQ BP, (BP)
RET
-TEXT sigaltstack(SB),7,$0
+TEXT runtime·sigaltstack(SB),7,$0
MOVQ new+8(SP), DI
MOVQ old+16(SP), SI
MOVQ $(0x2000000+53), AX
SYSCALL
JCC 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// void bsdthread_create(void *stk, M *m, G *g, void (*fn)(void))
-TEXT bsdthread_create(SB),7,$0
+TEXT runtime·bsdthread_create(SB),7,$0
// Set up arguments to bsdthread_create system call.
// The ones in quotes pass through to the thread callback
// uninterpreted, so we can put whatever we want there.
// R8 = stack
// R9 = flags (= 0)
// SP = stack - C_64_REDZONE_LEN (= stack - 128)
-TEXT bsdthread_start(SB),7,$0
+TEXT runtime·bsdthread_start(SB),7,$0
MOVQ R8, SP // empirically, SP is very wrong but R8 is right
PUSHQ DX
// set up thread local storage pointing at m->tls.
LEAQ m_tls(CX), DI
- CALL settls(SB)
+ CALL runtime·settls(SB)
POPQ SI
POPQ CX
MOVQ SI, m_procid(CX) // thread port is m->procid
MOVQ m_g0(CX), AX
MOVQ AX, g(BX)
- CALL stackcheck(SB) // smashes AX, CX
+ CALL runtime·stackcheck(SB) // smashes AX, CX
CALL DX // fn
- CALL exit1(SB)
+ CALL runtime·exit1(SB)
RET
// void bsdthread_register(void)
// registers callbacks for threadstart (see bsdthread_create above)
// and for wqthread and pthsize (not used). returns 0 on success.
-TEXT bsdthread_register(SB),7,$0
- MOVQ $bsdthread_start(SB), DI // threadstart
+TEXT runtime·bsdthread_register(SB),7,$0
+ MOVQ $runtime·bsdthread_start(SB), DI // threadstart
MOVQ $0, SI // wqthread, not used by us
MOVQ $0, DX // pthsize, not used by us
MOVQ $0, R10 // dummy_value [sic]
MOVQ $(0x2000000+366), AX // bsdthread_register
SYSCALL
JCC 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// Mach system calls use 0x1000000 instead of the BSD's 0x2000000.
// uint32 mach_msg_trap(void*, uint32, uint32, uint32, uint32, uint32, uint32)
-TEXT mach_msg_trap(SB),7,$0
+TEXT runtime·mach_msg_trap(SB),7,$0
MOVQ 8(SP), DI
MOVL 16(SP), SI
MOVL 20(SP), DX
POPQ R11
RET
-TEXT mach_task_self(SB),7,$0
+TEXT runtime·mach_task_self(SB),7,$0
MOVL $(0x1000000+28), AX // task_self_trap
SYSCALL
RET
-TEXT mach_thread_self(SB),7,$0
+TEXT runtime·mach_thread_self(SB),7,$0
MOVL $(0x1000000+27), AX // thread_self_trap
SYSCALL
RET
-TEXT mach_reply_port(SB),7,$0
+TEXT runtime·mach_reply_port(SB),7,$0
MOVL $(0x1000000+26), AX // mach_reply_port
SYSCALL
RET
// instead of requiring the use of RPC.
// uint32 mach_semaphore_wait(uint32)
-TEXT mach_semaphore_wait(SB),7,$0
+TEXT runtime·mach_semaphore_wait(SB),7,$0
MOVL 8(SP), DI
MOVL $(0x1000000+36), AX // semaphore_wait_trap
SYSCALL
RET
// uint32 mach_semaphore_timedwait(uint32, uint32, uint32)
-TEXT mach_semaphore_timedwait(SB),7,$0
+TEXT runtime·mach_semaphore_timedwait(SB),7,$0
MOVL 8(SP), DI
MOVL 12(SP), SI
MOVL 16(SP), DX
RET
// uint32 mach_semaphore_signal(uint32)
-TEXT mach_semaphore_signal(SB),7,$0
+TEXT runtime·mach_semaphore_signal(SB),7,$0
MOVL 8(SP), DI
MOVL $(0x1000000+33), AX // semaphore_signal_trap
SYSCALL
RET
// uint32 mach_semaphore_signal_all(uint32)
-TEXT mach_semaphore_signal_all(SB),7,$0
+TEXT runtime·mach_semaphore_signal_all(SB),7,$0
MOVL 8(SP), DI
MOVL $(0x1000000+34), AX // semaphore_signal_all_trap
SYSCALL
RET
// set tls base to DI
-TEXT settls(SB),7,$32
+TEXT runtime·settls(SB),7,$32
/*
* Same as in ../386/sys.s:/ugliness, different constant.
* See ../../../../libcgo/darwin_amd64.c for the derivation
#include "malloc.h"
void*
-SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n)
{
void *v;
mstats.sys += n;
- v = runtime_mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
if(v < (void*)4096) {
- printf("mmap: errno=%p\n", v);
- throw("mmap");
+ runtime·printf("mmap: errno=%p\n", v);
+ runtime·throw("mmap");
}
return v;
}
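A note on the check above: the mmap stubs earlier in this patch issue the system call without testing the carry flag, so a failed mmap does not come back as MAP_FAILED but as a small positive errno value reinterpreted as a pointer. Hence the comparison against 4096 and the errno=%p print, roughly:

	// v = runtime·mmap(...);   // on failure v is a small errno such as ENOMEM
	// if(v < (void*)4096)      // catches any such "pointer" and aborts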
void
-SysUnused(void *v, uintptr n)
+runtime·SysUnused(void *v, uintptr n)
{
USED(v);
USED(n);
}
void
-SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n)
{
mstats.sys -= n;
- runtime_munmap(v, n);
+ runtime·munmap(v, n);
}
void
-SysMemInit(void)
+runtime·SysMemInit(void)
{
// Code generators assume that references to addresses
// on the first page will fault. Map the page explicitly with
// allocating that page as the virtual address space fills.
// Ignore any error, since other systems might be smart
// enough to never allow anything there.
-// runtime_mmap(nil, 4096, PROT_NONE, MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0);
+// mmap(nil, 4096, PROT_NONE, MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0);
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-int32 bsdthread_create(void*, M*, G*, void(*)(void));
-void bsdthread_register(void);
-int32 mach_msg_trap(MachHeader*, int32, uint32, uint32, uint32, uint32, uint32);
-uint32 mach_reply_port(void);
-void mach_semacquire(uint32);
-uint32 mach_semcreate(void);
-void mach_semdestroy(uint32);
-void mach_semrelease(uint32);
-void mach_semreset(uint32);
-uint32 mach_task_self(void);
-uint32 mach_task_self(void);
-uint32 mach_thread_self(void);
-uint32 mach_thread_self(void);
+int32 runtime·bsdthread_create(void*, M*, G*, void(*)(void));
+void runtime·bsdthread_register(void);
+int32 runtime·mach_msg_trap(MachHeader*, int32, uint32, uint32, uint32, uint32, uint32);
+uint32 runtime·mach_reply_port(void);
+void runtime·mach_semacquire(uint32);
+uint32 runtime·mach_semcreate(void);
+void runtime·mach_semdestroy(uint32);
+void runtime·mach_semrelease(uint32);
+void runtime·mach_semreset(uint32);
+uint32 runtime·mach_task_self(void);
+uint32 runtime·mach_task_self(void);
+uint32 runtime·mach_thread_self(void);
+uint32 runtime·mach_thread_self(void);
struct Sigaction;
-void sigaction(uintptr, struct Sigaction*, struct Sigaction*);
+void runtime·sigaction(uintptr, struct Sigaction*, struct Sigaction*);
struct StackT;
-void sigaltstack(struct StackT*, struct StackT*);
-void sigtramp(void);
-void sigpanic(void);
+void runtime·sigaltstack(struct StackT*, struct StackT*);
+void runtime·sigtramp(void);
+void runtime·sigpanic(void);
#define Q SigQueue
#define P SigPanic
-SigTab sigtab[] = {
+SigTab runtime·sigtab[] = {
/* 0 */ 0, "SIGNONE: no trap",
/* 1 */ Q+R, "SIGHUP: terminal line hangup",
/* 2 */ Q+R, "SIGINT: interrupt",
#include "defs.h"
#include "os.h"
-extern SigTab sigtab[];
+extern SigTab runtime·sigtab[];
static void
unimplemented(int8 *name)
{
- prints(name);
- prints(" not implemented\n");
+ runtime·prints(name);
+ runtime·prints(" not implemented\n");
*(int32*)1231 = 1231;
}
if(*psema != 0) // already have one
return;
- sema = mach_semcreate();
- if(!cas(psema, 0, sema)){
+ sema = runtime·mach_semcreate();
+ if(!runtime·cas(psema, 0, sema)){
// Someone else filled it in. Use theirs.
- mach_semdestroy(sema);
+ runtime·mach_semdestroy(sema);
return;
}
}
// in Plan 9's user-level locks.
void
-lock(Lock *l)
+runtime·lock(Lock *l)
{
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
m->locks++;
- if(xadd(&l->key, 1) > 1) { // someone else has it; wait
+ if(runtime·xadd(&l->key, 1) > 1) { // someone else has it; wait
// Allocate semaphore if needed.
if(l->sema == 0)
initsema(&l->sema);
- mach_semacquire(l->sema);
+ runtime·mach_semacquire(l->sema);
}
}
void
-unlock(Lock *l)
+runtime·unlock(Lock *l)
{
m->locks--;
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
- if(xadd(&l->key, -1) > 0) { // someone else is waiting
+ if(runtime·xadd(&l->key, -1) > 0) { // someone else is waiting
// Allocate semaphore if needed.
if(l->sema == 0)
initsema(&l->sema);
- mach_semrelease(l->sema);
+ runtime·mach_semrelease(l->sema);
}
}
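A short trace makes the key counting in lock/unlock above concrete (illustrative only; two threads M1 and M2 contending for the same Lock):

	// M1 lock:   xadd(&l->key, 1)  -> 1   not > 1, M1 owns the lock, no semaphore used
	// M2 lock:   xadd(&l->key, 1)  -> 2   > 1, so M2 blocks in mach_semacquire(l->sema)
	// M1 unlock: xadd(&l->key, -1) -> 1   > 0, a waiter exists, mach_semrelease wakes M2
	// M2 unlock: xadd(&l->key, -1) -> 0   no waiters left, nothing to signal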
void
-destroylock(Lock *l)
+runtime·destroylock(Lock *l)
{
if(l->sema != 0) {
- mach_semdestroy(l->sema);
+ runtime·mach_semdestroy(l->sema);
l->sema = 0;
}
}
// but when it's time to block, fall back on the kernel semaphore k.
// This is the same algorithm used in Plan 9.
void
-usemacquire(Usema *s)
+runtime·usemacquire(Usema *s)
{
- if((int32)xadd(&s->u, -1) < 0) {
+ if((int32)runtime·xadd(&s->u, -1) < 0) {
if(s->k == 0)
initsema(&s->k);
- mach_semacquire(s->k);
+ runtime·mach_semacquire(s->k);
}
}
void
-usemrelease(Usema *s)
+runtime·usemrelease(Usema *s)
{
- if((int32)xadd(&s->u, 1) <= 0) {
+ if((int32)runtime·xadd(&s->u, 1) <= 0) {
if(s->k == 0)
initsema(&s->k);
- mach_semrelease(s->k);
+ runtime·mach_semrelease(s->k);
}
}
// Event notifications.
void
-noteclear(Note *n)
+runtime·noteclear(Note *n)
{
n->wakeup = 0;
}
void
-notesleep(Note *n)
+runtime·notesleep(Note *n)
{
while(!n->wakeup)
- usemacquire(&n->sema);
+ runtime·usemacquire(&n->sema);
}
void
-notewakeup(Note *n)
+runtime·notewakeup(Note *n)
{
n->wakeup = 1;
- usemrelease(&n->sema);
+ runtime·usemrelease(&n->sema);
}
// BSD interface for threading.
void
-osinit(void)
+runtime·osinit(void)
{
// Register our thread-creation callback (see {amd64,386}/sys.s)
// but only if we're not using cgo. If we are using cgo we need
// to let the C pthread library install its own thread-creation callback.
extern void (*libcgo_thread_start)(void*);
if(libcgo_thread_start == nil)
- bsdthread_register();
+ runtime·bsdthread_register();
}
void
-newosproc(M *m, G *g, void *stk, void (*fn)(void))
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
{
m->tls[0] = m->id; // so 386 asm can find it
if(0){
- printf("newosproc stk=%p m=%p g=%p fn=%p id=%d/%d ostk=%p\n",
+ runtime·printf("newosproc stk=%p m=%p g=%p fn=%p id=%d/%d ostk=%p\n",
stk, m, g, fn, m->id, m->tls[0], &m);
}
- if(bsdthread_create(stk, m, g, fn) < 0)
- throw("cannot create new OS thread");
+ if(runtime·bsdthread_create(stk, m, g, fn) < 0)
+ runtime·throw("cannot create new OS thread");
}
// Called to initialize a new m (including the bootstrap m).
void
-minit(void)
+runtime·minit(void)
{
// Initialize signal handling.
- m->gsignal = malg(32*1024); // OS X wants >=8K, Linux >=2K
- signalstack(m->gsignal->stackguard, 32*1024);
+ m->gsignal = runtime·malg(32*1024); // OS X wants >=8K, Linux >=2K
+ runtime·signalstack(m->gsignal->stackguard, 32*1024);
}
// Mach IPC, to get at semaphores
static void
macherror(int32 r, int8 *fn)
{
- printf("mach error %s: %d\n", fn, r);
- throw("mach error");
+ runtime·printf("mach error %s: %d\n", fn, r);
+ runtime·throw("mach error");
}
enum
uint32 notify)
{
// TODO: Loop on interrupt.
- return mach_msg_trap(h, op, send_size, rcv_size, rcv_name, timeout, notify);
+ return runtime·mach_msg_trap(h, op, send_size, rcv_size, rcv_name, timeout, notify);
}
// Mach RPC (MIG)
CodeMsg *c;
if((port = m->machport) == 0){
- port = mach_reply_port();
+ port = runtime·mach_reply_port();
m->machport = port;
}
if(DebugMach){
p = (uint32*)h;
- prints("send:\t");
+ runtime·prints("send:\t");
for(i=0; i<h->msgh_size/sizeof(p[0]); i++){
- prints(" ");
- ·printpointer((void*)p[i]);
+ runtime·prints(" ");
+ runtime·printpointer((void*)p[i]);
if(i%8 == 7)
- prints("\n\t");
+ runtime·prints("\n\t");
}
if(i%8)
- prints("\n");
+ runtime·prints("\n");
}
ret = mach_msg(h, MACH_SEND_MSG|MACH_RCV_MSG,
h->msgh_size, maxsize, port, 0, 0);
if(ret != 0){
if(DebugMach){
- prints("mach_msg error ");
- ·printint(ret);
- prints("\n");
+ runtime·prints("mach_msg error ");
+ runtime·printint(ret);
+ runtime·prints("\n");
}
return ret;
}
if(DebugMach){
p = (uint32*)h;
- prints("recv:\t");
+ runtime·prints("recv:\t");
for(i=0; i<h->msgh_size/sizeof(p[0]); i++){
- prints(" ");
- ·printpointer((void*)p[i]);
+ runtime·prints(" ");
+ runtime·printpointer((void*)p[i]);
if(i%8 == 7)
- prints("\n\t");
+ runtime·prints("\n\t");
}
if(i%8)
- prints("\n");
+ runtime·prints("\n");
}
if(h->msgh_id != id+Reply){
if(DebugMach){
- prints("mach_msg reply id mismatch ");
- ·printint(h->msgh_id);
- prints(" != ");
- ·printint(id+Reply);
- prints("\n");
+ runtime·prints("mach_msg reply id mismatch ");
+ runtime·printint(h->msgh_id);
+ runtime·prints(" != ");
+ runtime·printint(id+Reply);
+ runtime·prints("\n");
}
return -303; // MIG_REPLY_MISMATCH
}
if(h->msgh_size == sizeof(CodeMsg)
&& !(h->msgh_bits & MACH_MSGH_BITS_COMPLEX)){
if(DebugMach){
- prints("mig result ");
- ·printint(c->code);
- prints("\n");
+ runtime·prints("mig result ");
+ runtime·printint(c->code);
+ runtime·prints("\n");
}
return c->code;
}
if(h->msgh_size != rxsize){
if(DebugMach){
- prints("mach_msg reply size mismatch ");
- ·printint(h->msgh_size);
- prints(" != ");
- ·printint(rxsize);
- prints("\n");
+ runtime·prints("mach_msg reply size mismatch ");
+ runtime·printint(h->msgh_size);
+ runtime·prints(" != ");
+ runtime·printint(rxsize);
+ runtime·prints("\n");
}
return -307; // MIG_ARRAY_TOO_LARGE
}
#pragma pack off
uint32
-mach_semcreate(void)
+runtime·mach_semcreate(void)
{
union {
Tmach_semcreateMsg tx;
m.tx.h.msgh_bits = 0;
m.tx.h.msgh_size = sizeof(m.tx);
- m.tx.h.msgh_remote_port = mach_task_self();
+ m.tx.h.msgh_remote_port = runtime·mach_task_self();
m.tx.h.msgh_id = Tmach_semcreate;
m.tx.ndr = zerondr;
}
void
-mach_semdestroy(uint32 sem)
+runtime·mach_semdestroy(uint32 sem)
{
union {
Tmach_semdestroyMsg tx;
m.tx.h.msgh_bits = MACH_MSGH_BITS_COMPLEX;
m.tx.h.msgh_size = sizeof(m.tx);
- m.tx.h.msgh_remote_port = mach_task_self();
+ m.tx.h.msgh_remote_port = runtime·mach_task_self();
m.tx.h.msgh_id = Tmach_semdestroy;
m.tx.body.msgh_descriptor_count = 1;
m.tx.semaphore.name = sem;
}
// The other calls have simple system call traps in sys.s
-int32 mach_semaphore_wait(uint32 sema);
-int32 mach_semaphore_timedwait(uint32 sema, uint32 sec, uint32 nsec);
-int32 mach_semaphore_signal(uint32 sema);
-int32 mach_semaphore_signal_all(uint32 sema);
+int32 runtime·mach_semaphore_wait(uint32 sema);
+int32 runtime·mach_semaphore_timedwait(uint32 sema, uint32 sec, uint32 nsec);
+int32 runtime·mach_semaphore_signal(uint32 sema);
+int32 runtime·mach_semaphore_signal_all(uint32 sema);
void
-mach_semacquire(uint32 sem)
+runtime·mach_semacquire(uint32 sem)
{
int32 r;
- while((r = mach_semaphore_wait(sem)) != 0) {
+ while((r = runtime·mach_semaphore_wait(sem)) != 0) {
if(r == KERN_ABORTED) // interrupted
continue;
macherror(r, "semaphore_wait");
}
void
-mach_semrelease(uint32 sem)
+runtime·mach_semrelease(uint32 sem)
{
int32 r;
- while((r = mach_semaphore_signal(sem)) != 0) {
+ while((r = runtime·mach_semaphore_signal(sem)) != 0) {
if(r == KERN_ABORTED) // interrupted
continue;
macherror(r, "semaphore_signal");
}
void
-sigpanic(void)
+runtime·sigpanic(void)
{
switch(g->sig) {
case SIGBUS:
if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000)
- panicstring("invalid memory address or nil pointer dereference");
- printf("unexpected fault address %p\n", g->sigcode1);
- throw("fault");
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
case SIGSEGV:
if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000)
- panicstring("invalid memory address or nil pointer dereference");
- printf("unexpected fault address %p\n", g->sigcode1);
- throw("fault");
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
case SIGFPE:
switch(g->sigcode0) {
case FPE_INTDIV:
- panicstring("integer divide by zero");
+ runtime·panicstring("integer divide by zero");
case FPE_INTOVF:
- panicstring("integer overflow");
+ runtime·panicstring("integer overflow");
}
- panicstring("floating point error");
+ runtime·panicstring("floating point error");
}
- panicstring(sigtab[g->sig].name);
+ runtime·panicstring(runtime·sigtab[g->sig].name);
}
static uint64 uvneginf = 0xFFF0000000000000ULL;
uint32
-float32tobits(float32 f)
+runtime·float32tobits(float32 f)
{
// The obvious cast-and-pointer code is technically
// not valid, and gcc miscompiles it. Use a union instead.
}
uint64
-float64tobits(float64 f)
+runtime·float64tobits(float64 f)
{
// The obvious cast-and-pointer code is technically
// not valid, and gcc miscompiles it. Use a union instead.
}
float64
-float64frombits(uint64 i)
+runtime·float64frombits(uint64 i)
{
// The obvious cast-and-pointer code is technically
// not valid, and gcc miscompiles it. Use a union instead.
}
float32
-float32frombits(uint32 i)
+runtime·float32frombits(uint32 i)
{
// The obvious cast-and-pointer code is technically
// not valid, and gcc miscompiles it. Use a union instead.
}
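The union trick the comments above allude to looks like the following in plain C. This is an illustrative sketch only: the function name is invented and the elided runtime bodies may differ in detail.

uint64
float64tobits_sketch(float64 f)
{
	// Writing one member of the union and reading the other
	// reinterprets the bytes without the aliasing pitfalls of the
	// cast-and-dereference version that gcc miscompiles.
	union {
		float64 f;
		uint64 i;
	} u;

	u.f = f;
	return u.i;
}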
bool
-isInf(float64 f, int32 sign)
+runtime·isInf(float64 f, int32 sign)
{
uint64 x;
- x = float64tobits(f);
+ x = runtime·float64tobits(f);
if(sign == 0)
return x == uvinf || x == uvneginf;
if(sign > 0)
}
float64
-NaN(void)
+runtime·NaN(void)
{
- return float64frombits(uvnan);
+ return runtime·float64frombits(uvnan);
}
bool
-isNaN(float64 f)
+runtime·isNaN(float64 f)
{
uint64 x;
- x = float64tobits(f);
- return ((uint32)(x>>52) & 0x7FF) == 0x7FF && !isInf(f, 0);
+ x = runtime·float64tobits(f);
+ return ((uint32)(x>>52) & 0x7FF) == 0x7FF && !runtime·isInf(f, 0);
}
float64
-Inf(int32 sign)
+runtime·Inf(int32 sign)
{
if(sign >= 0)
- return float64frombits(uvinf);
+ return runtime·float64frombits(uvinf);
else
- return float64frombits(uvneginf);
+ return runtime·float64frombits(uvneginf);
}
enum
};
float64
-frexp(float64 d, int32 *ep)
+runtime·frexp(float64 d, int32 *ep)
{
uint64 x;
*ep = 0;
return 0;
}
- x = float64tobits(d);
+ x = runtime·float64tobits(d);
*ep = (int32)((x >> SHIFT) & MASK) - BIAS;
x &= ~((uint64)MASK << SHIFT);
x |= (uint64)BIAS << SHIFT;
- return float64frombits(x);
+ return runtime·float64frombits(x);
}
float64
-ldexp(float64 d, int32 e)
+runtime·ldexp(float64 d, int32 e)
{
uint64 x;
if(d == 0)
return 0;
- x = float64tobits(d);
+ x = runtime·float64tobits(d);
e += (int32)(x >> SHIFT) & MASK;
if(e <= 0)
return 0; /* underflow */
if(e >= MASK){ /* overflow */
if(d < 0)
- return Inf(-1);
- return Inf(1);
+ return runtime·Inf(-1);
+ return runtime·Inf(1);
}
x &= ~((uint64)MASK << SHIFT);
x |= (uint64)e << SHIFT;
- return float64frombits(x);
+ return runtime·float64frombits(x);
}
float64
-modf(float64 d, float64 *ip)
+runtime·modf(float64 d, float64 *ip)
{
float64 dd;
uint64 x;
if(d < 1) {
if(d < 0) {
- d = modf(-d, ip);
+ d = runtime·modf(-d, ip);
*ip = -*ip;
return -d;
}
return d;
}
- x = float64tobits(d);
+ x = runtime·float64tobits(d);
e = (int32)((x >> SHIFT) & MASK) - BIAS;
/*
*/
if(e <= 64-11)
x &= ~(((uint64)1 << (64LL-11LL-e))-1);
- dd = float64frombits(x);
+ dd = runtime·float64frombits(x);
*ip = dd;
return d - dd;
}
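A few concrete values pin down what frexp, ldexp and modf above compute (standard C library semantics, which these routines follow):

	// frexp(8.0, &e)  -> 0.5,  e == 4     (8.0  == 0.5  * 2^4, mantissa in [0.5, 1))
	// frexp(0.75, &e) -> 0.75, e == 0     (0.75 == 0.75 * 2^0)
	// ldexp(0.5, 4)   -> 8.0              (the inverse: d * 2^e)
	// modf(3.25, &ip) -> 0.25, ip == 3.0  (fractional part returned, integer part stored)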
// Darwin and Linux use the same linkage to main
-TEXT _rt0_386_freebsd(SB),7,$0
+TEXT _rt0_386_freebsd(SB),7,$0
JMP _rt0_386(SB)
#include "signals.h"
#include "os.h"
-extern void sigtramp(void);
+extern void runtime·sigtramp(void);
typedef struct sigaction {
union {
} Sigaction;
void
-dumpregs(Mcontext *r)
+runtime·dumpregs(Mcontext *r)
{
- printf("eax %x\n", r->mc_eax);
- printf("ebx %x\n", r->mc_ebx);
- printf("ecx %x\n", r->mc_ecx);
- printf("edx %x\n", r->mc_edx);
- printf("edi %x\n", r->mc_edi);
- printf("esi %x\n", r->mc_esi);
- printf("ebp %x\n", r->mc_ebp);
- printf("esp %x\n", r->mc_esp);
- printf("eip %x\n", r->mc_eip);
- printf("eflags %x\n", r->mc_eflags);
- printf("cs %x\n", r->mc_cs);
- printf("fs %x\n", r->mc_fs);
- printf("gs %x\n", r->mc_gs);
+ runtime·printf("eax %x\n", r->mc_eax);
+ runtime·printf("ebx %x\n", r->mc_ebx);
+ runtime·printf("ecx %x\n", r->mc_ecx);
+ runtime·printf("edx %x\n", r->mc_edx);
+ runtime·printf("edi %x\n", r->mc_edi);
+ runtime·printf("esi %x\n", r->mc_esi);
+ runtime·printf("ebp %x\n", r->mc_ebp);
+ runtime·printf("esp %x\n", r->mc_esp);
+ runtime·printf("eip %x\n", r->mc_eip);
+ runtime·printf("eflags %x\n", r->mc_eflags);
+ runtime·printf("cs %x\n", r->mc_cs);
+ runtime·printf("fs %x\n", r->mc_fs);
+ runtime·printf("gs %x\n", r->mc_gs);
}
String
-signame(int32 sig)
+runtime·signame(int32 sig)
{
if(sig < 0 || sig >= NSIG)
- return emptystring;
- return gostringnocopy((byte*)sigtab[sig].name);
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
}
void
-sighandler(int32 sig, Siginfo* info, void* context)
+runtime·sighandler(int32 sig, Siginfo* info, void* context)
{
Ucontext *uc;
Mcontext *r;
uc = context;
r = &uc->uc_mcontext;
- if((gp = m->curg) != nil && (sigtab[sig].flags & SigPanic)) {
+ if((gp = m->curg) != nil && (runtime·sigtab[sig].flags & SigPanic)) {
// Make it look like a call to the signal func.
// Have to pass arguments out of band since
// augmenting the stack frame would break
gp->sigcode0 = info->si_code;
gp->sigcode1 = (uintptr)info->si_addr;
- // Only push sigpanic if r->mc_eip != 0.
+ // Only push runtime·sigpanic if r->mc_eip != 0.
// If r->mc_eip == 0, probably panicked because of a
// call to a nil func. Not pushing that onto sp will
- // make the trace look like a call to sigpanic instead.
- // (Otherwise the trace will end at sigpanic and we
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
// won't get to see who faulted.)
if(r->mc_eip != 0) {
sp = (uintptr*)r->mc_esp;
*--sp = r->mc_eip;
r->mc_esp = (uintptr)sp;
}
- r->mc_eip = (uintptr)sigpanic;
+ r->mc_eip = (uintptr)runtime·sigpanic;
return;
}
- if(sigtab[sig].flags & SigQueue) {
- if(sigsend(sig) || (sigtab[sig].flags & SigIgnore))
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
return;
- exit(2); // SIGINT, SIGTERM, etc
+ runtime·exit(2); // SIGINT, SIGTERM, etc
}
- if(panicking) // traceback already printed
- exit(2);
- panicking = 1;
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
if(sig < 0 || sig >= NSIG)
- printf("Signal %d\n", sig);
+ runtime·printf("Signal %d\n", sig);
else
- printf("%s\n", sigtab[sig].name);
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
- printf("PC=%X\n", r->mc_eip);
- printf("\n");
+ runtime·printf("PC=%X\n", r->mc_eip);
+ runtime·printf("\n");
- if(gotraceback()){
- traceback((void*)r->mc_eip, (void*)r->mc_esp, 0, m->curg);
- tracebackothers(m->curg);
- dumpregs(r);
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->mc_eip, (void*)r->mc_esp, 0, m->curg);
+ runtime·tracebackothers(m->curg);
+ runtime·dumpregs(r);
}
- breakpoint();
- exit(2);
+ runtime·breakpoint();
+ runtime·exit(2);
}
void
-sigignore(void)
+runtime·sigignore(void)
{
}
void
-signalstack(byte *p, int32 n)
+runtime·signalstack(byte *p, int32 n)
{
Sigaltstack st;
st.ss_sp = (int8*)p;
st.ss_size = n;
st.ss_flags = 0;
- sigaltstack(&st, nil);
+ runtime·sigaltstack(&st, nil);
}
void
-initsig(int32 queue)
+runtime·initsig(int32 queue)
{
static Sigaction sa;
- siginit();
+ runtime·siginit();
int32 i;
sa.sa_flags |= SA_ONSTACK | SA_SIGINFO;
sa.sa_mask = ~0x0ull;
for(i = 0; i < NSIG; i++) {
- if(sigtab[i].flags) {
- if((sigtab[i].flags & SigQueue) != queue)
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
continue;
- if(sigtab[i].flags & (SigCatch | SigQueue))
- sa.__sigaction_u.__sa_sigaction = (void*) sigtramp;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ sa.__sigaction_u.__sa_sigaction = (void*) runtime·sigtramp;
else
- sa.__sigaction_u.__sa_sigaction = (void*) sigignore;
+ sa.__sigaction_u.__sa_sigaction = (void*) runtime·sigignore;
- if(sigtab[i].flags & SigRestart)
+ if(runtime·sigtab[i].flags & SigRestart)
sa.sa_flags |= SA_RESTART;
else
sa.sa_flags &= ~SA_RESTART;
- sigaction(i, &sa, nil);
+ runtime·sigaction(i, &sa, nil);
}
}
}
#include "386/asm.h"
-TEXT sys_umtx_op(SB),7,$-4
+TEXT runtime·sys_umtx_op(SB),7,$-4
MOVL $454, AX
INT $0x80
RET
-TEXT thr_new(SB),7,$-4
+TEXT runtime·thr_new(SB),7,$-4
MOVL $455, AX
INT $0x80
RET
-TEXT thr_start(SB),7,$0
+TEXT runtime·thr_start(SB),7,$0
MOVL mm+0(FP), AX
MOVL m_g0(AX), BX
LEAL m_tls(AX), BP
PUSHL $32
PUSHL BP
PUSHL DI
- CALL setldt(SB)
+ CALL runtime·setldt(SB)
POPL AX
POPL AX
POPL AX
MOVL BX, g(CX)
MOVL AX, m(CX)
- CALL stackcheck(SB) // smashes AX
- CALL mstart(SB)
+ CALL runtime·stackcheck(SB) // smashes AX
+ CALL runtime·mstart(SB)
MOVL 0, AX // crash (not reached)
// Exit the entire program (like C exit)
-TEXT exit(SB),7,$-4
+TEXT runtime·exit(SB),7,$-4
MOVL $1, AX
INT $0x80
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT exit1(SB),7,$-4
+TEXT runtime·exit1(SB),7,$-4
MOVL $431, AX
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT write(SB),7,$-4
+TEXT runtime·write(SB),7,$-4
MOVL $4, AX
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT notok(SB),7,$0
+TEXT runtime·notok(SB),7,$0
MOVL $0xf1, 0xf1
RET
-TEXT ·mmap(SB),7,$32
+TEXT runtime·mmap(SB),7,$32
LEAL arg0+0(FP), SI
LEAL 4(SP), DI
CLD
INT $0x80
RET
-TEXT ·munmap(SB),7,$-4
+TEXT runtime·munmap(SB),7,$-4
MOVL $73, AX
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT gettime(SB), 7, $32
+TEXT runtime·gettime(SB), 7, $32
MOVL $116, AX
LEAL 12(SP), BX
MOVL BX, 4(SP)
MOVL BX, (DI)
RET
-TEXT sigaction(SB),7,$-4
+TEXT runtime·sigaction(SB),7,$-4
MOVL $416, AX
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT sigtramp(SB),7,$40
+TEXT runtime·sigtramp(SB),7,$40
// g = m->gsignal
get_tls(DX)
MOVL m(DX), BP
MOVL AX, 0(SP)
MOVL BX, 4(SP)
MOVL CX, 8(SP)
- CALL sighandler(SB)
+ CALL runtime·sighandler(SB)
// g = m->curg
get_tls(DX)
MOVL AX, 4(SP)
MOVL $417, AX // sigreturn(ucontext)
INT $0x80
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT sigaltstack(SB),7,$0
+TEXT runtime·sigaltstack(SB),7,$0
MOVL $53, AX
INT $0x80
JAE 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
/*
*/
// setldt(int entry, int address, int limit)
-TEXT setldt(SB),7,$32
+TEXT runtime·setldt(SB),7,$32
MOVL address+4(FP), BX // aka base
// see comment in linux/386/sys.s; freebsd is similar
ADDL $0x8, BX
MOVL $0xffffffff, 0(SP) // auto-allocate entry and return in AX
MOVL AX, 4(SP)
MOVL $1, 8(SP)
- CALL i386_set_ldt(SB)
+ CALL runtime·i386_set_ldt(SB)
// compute segment selector - (entry*8+7)
SHLL $3, AX
MOVW AX, GS
RET
-TEXT i386_set_ldt(SB),7,$16
+TEXT runtime·i386_set_ldt(SB),7,$16
LEAL args+0(FP), AX // 0(FP) == 4(SP) before SP got moved
MOVL $0, 0(SP) // syscall gap
MOVL $1, 4(SP)
INT $3
RET
-GLOBL tlsoffset(SB),$4
+GLOBL runtime·tlsoffset(SB),$4
// Darwin and Linux use the same linkage to main
-TEXT _rt0_amd64_freebsd(SB),7,$-8
+TEXT _rt0_amd64_freebsd(SB),7,$-8
MOVQ $_rt0_amd64(SB), DX
JMP DX
#include "signals.h"
#include "os.h"
-extern void sigtramp(void);
+extern void runtime·sigtramp(void);
typedef struct sigaction {
union {
} Sigaction;
void
-dumpregs(Mcontext *r)
+runtime·dumpregs(Mcontext *r)
{
- printf("rax %X\n", r->mc_rax);
- printf("rbx %X\n", r->mc_rbx);
- printf("rcx %X\n", r->mc_rcx);
- printf("rdx %X\n", r->mc_rdx);
- printf("rdi %X\n", r->mc_rdi);
- printf("rsi %X\n", r->mc_rsi);
- printf("rbp %X\n", r->mc_rbp);
- printf("rsp %X\n", r->mc_rsp);
- printf("r8 %X\n", r->mc_r8 );
- printf("r9 %X\n", r->mc_r9 );
- printf("r10 %X\n", r->mc_r10);
- printf("r11 %X\n", r->mc_r11);
- printf("r12 %X\n", r->mc_r12);
- printf("r13 %X\n", r->mc_r13);
- printf("r14 %X\n", r->mc_r14);
- printf("r15 %X\n", r->mc_r15);
- printf("rip %X\n", r->mc_rip);
- printf("rflags %X\n", r->mc_flags);
- printf("cs %X\n", r->mc_cs);
- printf("fs %X\n", r->mc_fs);
- printf("gs %X\n", r->mc_gs);
+ runtime·printf("rax %X\n", r->mc_rax);
+ runtime·printf("rbx %X\n", r->mc_rbx);
+ runtime·printf("rcx %X\n", r->mc_rcx);
+ runtime·printf("rdx %X\n", r->mc_rdx);
+ runtime·printf("rdi %X\n", r->mc_rdi);
+ runtime·printf("rsi %X\n", r->mc_rsi);
+ runtime·printf("rbp %X\n", r->mc_rbp);
+ runtime·printf("rsp %X\n", r->mc_rsp);
+ runtime·printf("r8 %X\n", r->mc_r8 );
+ runtime·printf("r9 %X\n", r->mc_r9 );
+ runtime·printf("r10 %X\n", r->mc_r10);
+ runtime·printf("r11 %X\n", r->mc_r11);
+ runtime·printf("r12 %X\n", r->mc_r12);
+ runtime·printf("r13 %X\n", r->mc_r13);
+ runtime·printf("r14 %X\n", r->mc_r14);
+ runtime·printf("r15 %X\n", r->mc_r15);
+ runtime·printf("rip %X\n", r->mc_rip);
+ runtime·printf("rflags %X\n", r->mc_flags);
+ runtime·printf("cs %X\n", r->mc_cs);
+ runtime·printf("fs %X\n", r->mc_fs);
+ runtime·printf("gs %X\n", r->mc_gs);
}
String
-signame(int32 sig)
+runtime·signame(int32 sig)
{
if(sig < 0 || sig >= NSIG)
- return emptystring;
- return gostringnocopy((byte*)sigtab[sig].name);
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
}
void
-sighandler(int32 sig, Siginfo* info, void* context)
+runtime·sighandler(int32 sig, Siginfo* info, void* context)
{
Ucontext *uc;
Mcontext *r;
uc = context;
r = &uc->uc_mcontext;
- if((gp = m->curg) != nil && (sigtab[sig].flags & SigPanic)) {
+ if((gp = m->curg) != nil && (runtime·sigtab[sig].flags & SigPanic)) {
// Make it look like a call to the signal func.
// Have to pass arguments out of band since
// augmenting the stack frame would break
gp->sigcode0 = info->si_code;
gp->sigcode1 = (uintptr)info->si_addr;
- // Only push sigpanic if r->mc_rip != 0.
+ // Only push runtime·sigpanic if r->mc_rip != 0.
// If r->mc_rip == 0, probably panicked because of a
// call to a nil func. Not pushing that onto sp will
- // make the trace look like a call to sigpanic instead.
- // (Otherwise the trace will end at sigpanic and we
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
// won't get to see who faulted.)
if(r->mc_rip != 0) {
sp = (uintptr*)r->mc_rsp;
*--sp = r->mc_rip;
r->mc_rsp = (uintptr)sp;
}
- r->mc_rip = (uintptr)sigpanic;
+ r->mc_rip = (uintptr)runtime·sigpanic;
return;
}
- if(sigtab[sig].flags & SigQueue) {
- if(sigsend(sig) || (sigtab[sig].flags & SigIgnore))
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
return;
- exit(2); // SIGINT, SIGTERM, etc
+ runtime·exit(2); // SIGINT, SIGTERM, etc
}
- if(panicking) // traceback already printed
- exit(2);
- panicking = 1;
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
if(sig < 0 || sig >= NSIG)
- printf("Signal %d\n", sig);
+ runtime·printf("Signal %d\n", sig);
else
- printf("%s\n", sigtab[sig].name);
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
- printf("PC=%X\n", r->mc_rip);
- printf("\n");
+ runtime·printf("PC=%X\n", r->mc_rip);
+ runtime·printf("\n");
- if(gotraceback()){
- traceback((void*)r->mc_rip, (void*)r->mc_rsp, 0, g);
- tracebackothers(g);
- dumpregs(r);
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->mc_rip, (void*)r->mc_rsp, 0, g);
+ runtime·tracebackothers(g);
+ runtime·dumpregs(r);
}
- breakpoint();
- exit(2);
+ runtime·breakpoint();
+ runtime·exit(2);
}
void
-sigignore(void)
+runtime·sigignore(void)
{
}
void
-signalstack(byte *p, int32 n)
+runtime·signalstack(byte *p, int32 n)
{
Sigaltstack st;
st.ss_sp = (int8*)p;
st.ss_size = n;
st.ss_flags = 0;
- sigaltstack(&st, nil);
+ runtime·sigaltstack(&st, nil);
}
void
-initsig(int32 queue)
+runtime·initsig(int32 queue)
{
static Sigaction sa;
- siginit();
+ runtime·siginit();
int32 i;
sa.sa_flags |= SA_ONSTACK | SA_SIGINFO;
sa.sa_mask = ~0x0ull;
for(i = 0; i < NSIG; i++) {
- if(sigtab[i].flags) {
- if((sigtab[i].flags & SigQueue) != queue)
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
continue;
- if(sigtab[i].flags & (SigCatch | SigQueue))
- sa.__sigaction_u.__sa_sigaction = (void*) sigtramp;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ sa.__sigaction_u.__sa_sigaction = (void*) runtime·sigtramp;
else
- sa.__sigaction_u.__sa_sigaction = (void*) sigignore;
+ sa.__sigaction_u.__sa_sigaction = (void*) runtime·sigignore;
- if(sigtab[i].flags & SigRestart)
+ if(runtime·sigtab[i].flags & SigRestart)
sa.sa_flags |= SA_RESTART;
else
sa.sa_flags &= ~SA_RESTART;
- sigaction(i, &sa, nil);
+ runtime·sigaction(i, &sa, nil);
}
}
}
#include "amd64/asm.h"
-TEXT sys_umtx_op(SB),7,$0
+TEXT runtime·sys_umtx_op(SB),7,$0
MOVQ 8(SP), DI
MOVL 16(SP), SI
MOVL 20(SP), DX
SYSCALL
RET
-TEXT thr_new(SB),7,$0
+TEXT runtime·thr_new(SB),7,$0
MOVQ 8(SP), DI
MOVQ 16(SP), SI
MOVL $455, AX
SYSCALL
RET
-TEXT thr_start(SB),7,$0
+TEXT runtime·thr_start(SB),7,$0
MOVQ DI, R13 // m
// set up FS to point at m->tls
LEAQ m_tls(R13), DI
- CALL settls(SB) // smashes DI
+ CALL runtime·settls(SB) // smashes DI
// set up m, g
get_tls(CX)
MOVQ m_g0(R13), DI
MOVQ DI, g(CX)
- CALL stackcheck(SB)
- CALL mstart(SB)
+ CALL runtime·stackcheck(SB)
+ CALL runtime·mstart(SB)
MOVQ 0, AX // crash (not reached)
// Exit the entire program (like C exit)
-TEXT exit(SB),7,$-8
+TEXT runtime·exit(SB),7,$-8
MOVL 8(SP), DI // arg 1 exit status
MOVL $1, AX
SYSCALL
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT exit1(SB),7,$-8
+TEXT runtime·exit1(SB),7,$-8
MOVQ 8(SP), DI // arg 1 exit status
MOVL $431, AX
SYSCALL
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT write(SB),7,$-8
+TEXT runtime·write(SB),7,$-8
MOVL 8(SP), DI // arg 1 fd
MOVQ 16(SP), SI // arg 2 buf
MOVL 24(SP), DX // arg 3 count
MOVL $4, AX
SYSCALL
JCC 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT gettime(SB), 7, $32
+TEXT runtime·gettime(SB), 7, $32
MOVL $116, AX
LEAQ 8(SP), DI
MOVQ $0, SI
MOVL BX, (DI)
RET
-TEXT sigaction(SB),7,$-8
+TEXT runtime·sigaction(SB),7,$-8
MOVL 8(SP), DI // arg 1 sig
MOVQ 16(SP), SI // arg 2 act
MOVQ 24(SP), DX // arg 3 oact
MOVL $416, AX
SYSCALL
JCC 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT sigtramp(SB),7,$24-16
+TEXT runtime·sigtramp(SB),7,$24-16
get_tls(CX)
MOVQ m(CX), AX
MOVQ m_gsignal(AX), AX
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ DX, 16(SP)
- CALL sighandler(SB)
+ CALL runtime·sighandler(SB)
RET
-TEXT ·mmap(SB),7,$0
+TEXT runtime·mmap(SB),7,$0
MOVQ 8(SP), DI // arg 1 addr
MOVQ 16(SP), SI // arg 2 len
MOVL 24(SP), DX // arg 3 prot
SYSCALL
RET
-TEXT ·munmap(SB),7,$0
+TEXT runtime·munmap(SB),7,$0
MOVQ 8(SP), DI // arg 1 addr
MOVQ 16(SP), SI // arg 2 len
MOVL $73, AX
SYSCALL
JCC 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT notok(SB),7,$-8
+TEXT runtime·notok(SB),7,$-8
MOVL $0xf1, BP
MOVQ BP, (BP)
RET
-TEXT sigaltstack(SB),7,$-8
+TEXT runtime·sigaltstack(SB),7,$-8
MOVQ new+8(SP), DI
MOVQ old+16(SP), SI
MOVQ $53, AX
SYSCALL
JCC 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// set tls base to DI
-TEXT settls(SB),7,$8
+TEXT runtime·settls(SB),7,$8
ADDQ $16, DI // adjust for ELF: wants to use -16(FS) and -8(FS) for g and m
MOVQ DI, 0(SP)
MOVQ SP, SI
MOVQ $165, AX // sysarch
SYSCALL
JCC 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
#include "malloc.h"
void*
-SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n)
{
void *v;
mstats.sys += n;
- v = runtime_mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
if(v < (void*)4096) {
- printf("mmap: errno=%p\n", v);
- throw("mmap");
+ runtime·printf("mmap: errno=%p\n", v);
+ runtime·throw("mmap");
}
return v;
}
void
-SysUnused(void *v, uintptr n)
+runtime·SysUnused(void *v, uintptr n)
{
USED(v);
USED(n);
}
void
-SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n)
{
mstats.sys -= n;
- runtime_munmap(v, n);
+ runtime·munmap(v, n);
}
void
-SysMemInit(void)
+runtime·SysMemInit(void)
{
// Code generators assume that references to addresses
// on the first page will fault. Map the page explicitly with
// allocating that page as the virtual address space fills.
// Ignore any error, since other systems might be smart
// enough to never allow anything there.
- runtime_mmap(nil, 4096, PROT_NONE, MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0);
+ runtime·mmap(nil, 4096, PROT_NONE, MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0);
}
-int32 thr_new(ThrParam*, int32);
-void sigpanic(void);
-void sigaltstack(Sigaltstack*, Sigaltstack*);
+int32 runtime·thr_new(ThrParam*, int32);
+void runtime·sigpanic(void);
+void runtime·sigaltstack(Sigaltstack*, Sigaltstack*);
struct sigaction;
-void sigaction(int32, struct sigaction*, struct sigaction*);
+void runtime·sigaction(int32, struct sigaction*, struct sigaction*);
#define Q SigQueue
#define P SigPanic
-SigTab sigtab[] = {
+SigTab runtime·sigtab[] = {
/* 0 */ 0, "SIGNONE: no trap",
/* 1 */ Q+R, "SIGHUP: terminal line hangup",
/* 2 */ Q+R, "SIGINT: interrupt",
#include "defs.h"
#include "os.h"
-extern SigTab sigtab[];
-extern int32 sys_umtx_op(uint32*, int32, uint32, void*, void*);
+extern SigTab runtime·sigtab[];
+extern int32 runtime·sys_umtx_op(uint32*, int32, uint32, void*, void*);
// FreeBSD's umtx_op syscall is effectively the same as Linux's futex, and
// thus the code is largely similar. See linux/thread.c for comments.
{
int32 ret;
- ret = sys_umtx_op(addr, UMTX_OP_WAIT, val, nil, nil);
+ ret = runtime·sys_umtx_op(addr, UMTX_OP_WAIT, val, nil, nil);
if(ret >= 0 || ret == -EINTR)
return;
- printf("umtx_wait addr=%p val=%d ret=%d\n", addr, val, ret);
+ runtime·printf("umtx_wait addr=%p val=%d ret=%d\n", addr, val, ret);
*(int32*)0x1005 = 0x1005;
}
{
int32 ret;
- ret = sys_umtx_op(addr, UMTX_OP_WAKE, 1, nil, nil);
+ ret = runtime·sys_umtx_op(addr, UMTX_OP_WAKE, 1, nil, nil);
if(ret >= 0)
return;
- printf("umtx_wake addr=%p ret=%d\n", addr, ret);
+ runtime·printf("umtx_wake addr=%p ret=%d\n", addr, ret);
*(int32*)0x1006 = 0x1006;
}
again:
v = l->key;
if((v&1) == 0){
- if(cas(&l->key, v, v|1))
+ if(runtime·cas(&l->key, v, v|1))
return;
goto again;
}
- if(!cas(&l->key, v, v+2))
+ if(!runtime·cas(&l->key, v, v+2))
goto again;
umtx_wait(&l->key, v+2);
for(;;){
v = l->key;
if(v < 2)
- throw("bad lock key");
- if(cas(&l->key, v, v-2))
+ runtime·throw("bad lock key");
+ if(runtime·cas(&l->key, v, v-2))
break;
}
again:
v = l->key;
if((v&1) == 0)
- throw("unlock of unlocked lock");
- if(!cas(&l->key, v, v&~1))
+ runtime·throw("unlock of unlocked lock");
+ if(!runtime·cas(&l->key, v, v&~1))
goto again;
if(v&~1)
}
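The key encoding used by the umtx-based lock above is easy to miss, so spelling it out. This is a reading of the cas operations as shown; the wake call inside the final if is elided from this hunk, but presumably goes through the umtx_wake wrapper above.

	// l->key bit 0     : set while the lock is held
	// l->key bits 1..  : waiter count, in units of 2 (each sleeper adds 2 before umtx_wait)
	//
	// acquire: bit 0 clear -> cas key v -> v|1 and return
	//          bit 0 set   -> cas key v -> v+2, umtx_wait(&l->key, v+2),
	//                         then remove the 2 again and retry from the top
	// release: cas key v -> v&~1; if waiter bits remain, wake one sleeper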
void
-lock(Lock *l)
+runtime·lock(Lock *l)
{
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
m->locks++;
umtx_lock(l);
}
void
-unlock(Lock *l)
+runtime·unlock(Lock *l)
{
m->locks--;
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
umtx_unlock(l);
}
void
-destroylock(Lock*)
+runtime·destroylock(Lock*)
{
}
// Event notifications.
void
-noteclear(Note *n)
+runtime·noteclear(Note *n)
{
n->lock.key = 0;
umtx_lock(&n->lock);
}
void
-notesleep(Note *n)
+runtime·notesleep(Note *n)
{
umtx_lock(&n->lock);
umtx_unlock(&n->lock);
}
void
-notewakeup(Note *n)
+runtime·notewakeup(Note *n)
{
umtx_unlock(&n->lock);
}
-void thr_start(void*);
+void runtime·thr_start(void*);
void
-newosproc(M *m, G *g, void *stk, void (*fn)(void))
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
{
ThrParam param;
USED(g); // thr_start assumes g == m->g0
if(0){
- printf("newosproc stk=%p m=%p g=%p fn=%p id=%d/%d ostk=%p\n",
+ runtime·printf("newosproc stk=%p m=%p g=%p fn=%p id=%d/%d ostk=%p\n",
stk, m, g, fn, m->id, m->tls[0], &m);
}
- runtime_memclr((byte*)&param, sizeof param);
+ runtime·memclr((byte*)&param, sizeof param);
- param.start_func = thr_start;
+ param.start_func = runtime·thr_start;
param.arg = m;
param.stack_base = (int8*)g->stackbase;
param.stack_size = (byte*)stk - (byte*)g->stackbase;
m->tls[0] = m->id; // so 386 asm can find it
- thr_new(&param, sizeof param);
+ runtime·thr_new(&param, sizeof param);
}
void
-osinit(void)
+runtime·osinit(void)
{
}
// Called to initialize a new m (including the bootstrap m).
void
-minit(void)
+runtime·minit(void)
{
// Initialize signal handling
- m->gsignal = malg(32*1024);
- signalstack(m->gsignal->stackguard, 32*1024);
+ m->gsignal = runtime·malg(32*1024);
+ runtime·signalstack(m->gsignal->stackguard, 32*1024);
}
void
-sigpanic(void)
+runtime·sigpanic(void)
{
switch(g->sig) {
case SIGBUS:
if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000)
- panicstring("invalid memory address or nil pointer dereference");
- printf("unexpected fault address %p\n", g->sigcode1);
- throw("fault");
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
case SIGSEGV:
if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000)
- panicstring("invalid memory address or nil pointer dereference");
- printf("unexpected fault address %p\n", g->sigcode1);
- throw("fault");
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
case SIGFPE:
switch(g->sigcode0) {
case FPE_INTDIV:
- panicstring("integer divide by zero");
+ runtime·panicstring("integer divide by zero");
case FPE_INTOVF:
- panicstring("integer overflow");
+ runtime·panicstring("integer overflow");
}
- panicstring("floating point error");
+ runtime·panicstring("floating point error");
}
- panicstring(sigtab[g->sig].name);
+ runtime·panicstring(runtime·sigtab[g->sig].name);
}
if(datasize < sizeof (void *))
datasize = sizeof (void *);
- datasize = rnd(datasize, sizeof (void *));
+ datasize = runtime·rnd(datasize, sizeof (void *));
init_sizes (hint, &init_power, &max_power);
h->datasize = datasize;
h->max_power = max_power;
free (old_st);
}
-int32
+static int32
hash_lookup (struct hash *h, void *data, void **pres)
{
int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
return (0);
}
-int32
+static int32
hash_remove (struct hash *h, void *data, void *arg)
{
int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
}
}
-int32
+static int32
hash_insert (struct hash *h, void *data, void **pres)
{
int32 rc = hash_insert_internal (&h->st, 0, (*h->data_hash) (h->keysize, data), h, data, pres);
return (rc);
}
-uint32
+static uint32
hash_count (struct hash *h)
{
return (h->count);
sub->e = e;
}
-void *
+static void *
hash_next (struct hash_iter *it)
{
int32 elemsize = it->elemsize;
}
}
-void
+static void
hash_iter_init (struct hash *h, struct hash_iter *it)
{
it->elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
*used += lused;
}
-void
+static void
hash_destroy (struct hash *h)
{
int32 slots = 0;
}
}
-void
+static void
hash_visit (struct hash *h, void (*data_visit) (void *arg, int32 level, void *data), void *arg)
{
hash_visit_internal (h->st, 0, 0, data_visit, arg);
// makemap(key, val *Type, hint uint32) (hmap *map[any]any);
Hmap*
-makemap(Type *key, Type *val, int64 hint)
+runtime·makemap_c(Type *key, Type *val, int64 hint)
{
Hmap *h;
int32 keyalg, valalg, keysize, valsize, valsize_in_hash;
void (*data_del)(uint32, void*, void*);
if(hint < 0 || (int32)hint != hint)
- panicstring("makemap: size out of range");
+ runtime·panicstring("makemap: size out of range");
keyalg = key->alg;
valalg = val->alg;
keysize = key->size;
valsize = val->size;
- if(keyalg >= nelem(algarray) || algarray[keyalg].hash == nohash) {
- printf("map(keyalg=%d)\n", keyalg);
- throw("runtime.makemap: unsupported map key type");
+ if(keyalg >= nelem(runtime·algarray) || runtime·algarray[keyalg].hash == runtime·nohash) {
+ runtime·printf("map(keyalg=%d)\n", keyalg);
+ runtime·throw("runtime.makemap: unsupported map key type");
}
- if(valalg >= nelem(algarray)) {
- printf("map(valalg=%d)\n", valalg);
- throw("runtime.makemap: unsupported map value type");
+ if(valalg >= nelem(runtime·algarray)) {
+ runtime·printf("map(valalg=%d)\n", valalg);
+ runtime·throw("runtime.makemap: unsupported map value type");
}
- h = mal(sizeof(*h));
+ h = runtime·mal(sizeof(*h));
valsize_in_hash = valsize;
data_del = donothing;
// might remove in the future and just assume datavo == keysize.
h->datavo = keysize;
if(valsize_in_hash >= sizeof(void*))
- h->datavo = rnd(keysize, sizeof(void*));
+ h->datavo = runtime·rnd(keysize, sizeof(void*));
hash_init(h, h->datavo+valsize_in_hash,
- algarray[keyalg].hash,
- algarray[keyalg].equal,
+ runtime·algarray[keyalg].hash,
+ runtime·algarray[keyalg].equal,
data_del,
hint);
h->keysize = keysize;
h->valsize = valsize;
- h->keyalg = &algarray[keyalg];
- h->valalg = &algarray[valalg];
+ h->keyalg = &runtime·algarray[keyalg];
+ h->valalg = &runtime·algarray[valalg];
// these calculations are compiler dependent.
// figure out offsets of map call arguments.
// func() (key, val)
- h->ko0 = rnd(sizeof(h), Structrnd);
- h->vo0 = rnd(h->ko0+keysize, val->align);
+ h->ko0 = runtime·rnd(sizeof(h), Structrnd);
+ h->vo0 = runtime·rnd(h->ko0+keysize, val->align);
// func(key) (val[, pres])
- h->ko1 = rnd(sizeof(h), key->align);
- h->vo1 = rnd(h->ko1+keysize, Structrnd);
- h->po1 = rnd(h->vo1+valsize, 1);
+ h->ko1 = runtime·rnd(sizeof(h), key->align);
+ h->vo1 = runtime·rnd(h->ko1+keysize, Structrnd);
+ h->po1 = runtime·rnd(h->vo1+valsize, 1);
// func(key, val[, pres])
- h->ko2 = rnd(sizeof(h), key->align);
- h->vo2 = rnd(h->ko2+keysize, val->align);
- h->po2 = rnd(h->vo2+valsize, 1);
+ h->ko2 = runtime·rnd(sizeof(h), key->align);
+ h->vo2 = runtime·rnd(h->ko2+keysize, val->align);
+ h->po2 = runtime·rnd(h->vo2+valsize, 1);
if(debug) {
- printf("makemap: map=%p; keysize=%d; valsize=%d; keyalg=%d; valalg=%d; offsets=%d,%d; %d,%d,%d; %d,%d,%d\n",
+ runtime·printf("makemap: map=%p; keysize=%d; valsize=%d; keyalg=%d; valalg=%d; offsets=%d,%d; %d,%d,%d; %d,%d,%d\n",
h, keysize, valsize, keyalg, valalg, h->ko0, h->vo0, h->ko1, h->vo1, h->po1, h->ko2, h->vo2, h->po2);
}
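// A worked example of the offsets just computed (illustrative only; assumes a
// 386 target where sizeof(h) == 4 and Structrnd == 4, and a map[int32]int64,
// so keysize=4, key->align=4, valsize=8, val->align=4):
//	ko0 = ko1 = ko2 = rnd(4, 4)   = 4    // key sits just past the Hmap* word
//	vo0 = vo1 = vo2 = rnd(4+4, 4) = 8    // value follows the key
//	po1 = po2       = rnd(8+8, 1) = 16   // pres bool follows the value
// i.e. arguments and results are laid out in call order, each rounded up to
// its alignment.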
// makemap(key, val *Type, hint int64) (hmap *map[any]any);
void
-·makemap(Type *key, Type *val, int64 hint, Hmap *ret)
+runtime·makemap(Type *key, Type *val, int64 hint, Hmap *ret)
{
- ret = makemap(key, val, hint);
+ ret = runtime·makemap_c(key, val, hint);
FLUSH(&ret);
}
void
-mapaccess(Hmap *h, byte *ak, byte *av, bool *pres)
+runtime·mapaccess(Hmap *h, byte *ak, byte *av, bool *pres)
{
byte *res;
- if(gcwaiting)
- gosched();
+ if(runtime·gcwaiting)
+ runtime·gosched();
res = nil;
if(hash_lookup(h, ak, (void**)&res)) {
// mapaccess1(hmap *map[any]any, key any) (val any);
#pragma textflag 7
void
-·mapaccess1(Hmap *h, ...)
+runtime·mapaccess1(Hmap *h, ...)
{
byte *ak, *av;
bool pres;
ak = (byte*)&h + h->ko1;
av = (byte*)&h + h->vo1;
- mapaccess(h, ak, av, &pres);
+ runtime·mapaccess(h, ak, av, &pres);
if(debug) {
- prints("runtime.mapaccess1: map=");
- ·printpointer(h);
- prints("; key=");
+ runtime·prints("runtime.mapaccess1: map=");
+ runtime·printpointer(h);
+ runtime·prints("; key=");
h->keyalg->print(h->keysize, ak);
- prints("; val=");
+ runtime·prints("; val=");
h->valalg->print(h->valsize, av);
- prints("; pres=");
- ·printbool(pres);
- prints("\n");
+ runtime·prints("; pres=");
+ runtime·printbool(pres);
+ runtime·prints("\n");
}
}
// mapaccess2(hmap *map[any]any, key any) (val any, pres bool);
#pragma textflag 7
void
-·mapaccess2(Hmap *h, ...)
+runtime·mapaccess2(Hmap *h, ...)
{
byte *ak, *av, *ap;
av = (byte*)&h + h->vo1;
ap = (byte*)&h + h->po1;
- mapaccess(h, ak, av, ap);
+ runtime·mapaccess(h, ak, av, ap);
if(debug) {
- prints("runtime.mapaccess2: map=");
- ·printpointer(h);
- prints("; key=");
+ runtime·prints("runtime.mapaccess2: map=");
+ runtime·printpointer(h);
+ runtime·prints("; key=");
h->keyalg->print(h->keysize, ak);
- prints("; val=");
+ runtime·prints("; val=");
h->valalg->print(h->valsize, av);
- prints("; pres=");
- ·printbool(*ap);
- prints("\n");
+ runtime·prints("; pres=");
+ runtime·printbool(*ap);
+ runtime·prints("\n");
}
}
void
-mapassign(Hmap *h, byte *ak, byte *av)
+runtime·mapassign(Hmap *h, byte *ak, byte *av)
{
byte *res;
int32 hit;
- if(gcwaiting)
- gosched();
+ if(runtime·gcwaiting)
+ runtime·gosched();
res = nil;
if(av == nil) {
hit = hash_insert(h, ak, (void**)&res);
if(!hit && h->indirectval)
- *(void**)(res+h->datavo) = mal(h->valsize);
+ *(void**)(res+h->datavo) = runtime·mal(h->valsize);
h->keyalg->copy(h->keysize, res, ak);
h->valalg->copy(h->valsize, hash_indirect(h, res+h->datavo), av);
if(debug) {
- prints("mapassign: map=");
- ·printpointer(h);
- prints("; key=");
+ runtime·prints("mapassign: map=");
+ runtime·printpointer(h);
+ runtime·prints("; key=");
h->keyalg->print(h->keysize, ak);
- prints("; val=");
+ runtime·prints("; val=");
h->valalg->print(h->valsize, av);
- prints("; hit=");
- ·printint(hit);
- prints("; res=");
- ·printpointer(res);
- prints("\n");
+ runtime·prints("; hit=");
+ runtime·printint(hit);
+ runtime·prints("; res=");
+ runtime·printpointer(res);
+ runtime·prints("\n");
}
}
// mapassign1(hmap *map[any]any, key any, val any);
#pragma textflag 7
void
-·mapassign1(Hmap *h, ...)
+runtime·mapassign1(Hmap *h, ...)
{
byte *ak, *av;
ak = (byte*)&h + h->ko2;
av = (byte*)&h + h->vo2;
- mapassign(h, ak, av);
+ runtime·mapassign(h, ak, av);
}
// mapassign2(hmap *map[any]any, key any, val any, pres bool);
#pragma textflag 7
void
-·mapassign2(Hmap *h, ...)
+runtime·mapassign2(Hmap *h, ...)
{
byte *ak, *av, *ap;
if(*ap == false)
av = nil; // delete
- mapassign(h, ak, av);
+ runtime·mapassign(h, ak, av);
if(debug) {
- prints("mapassign2: map=");
- ·printpointer(h);
- prints("; key=");
+ runtime·prints("mapassign2: map=");
+ runtime·printpointer(h);
+ runtime·prints("; key=");
h->keyalg->print(h->keysize, ak);
- prints("\n");
+ runtime·prints("\n");
}
}
// mapiterinit(hmap *map[any]any, hiter *any);
void
-·mapiterinit(Hmap *h, struct hash_iter *it)
+runtime·mapiterinit(Hmap *h, struct hash_iter *it)
{
if(h == nil) {
it->data = nil;
hash_iter_init(h, it);
it->data = hash_next(it);
if(debug) {
- prints("runtime.mapiterinit: map=");
- ·printpointer(h);
- prints("; iter=");
- ·printpointer(it);
- prints("; data=");
- ·printpointer(it->data);
- prints("\n");
+ runtime·prints("runtime.mapiterinit: map=");
+ runtime·printpointer(h);
+ runtime·prints("; iter=");
+ runtime·printpointer(it);
+ runtime·prints("; data=");
+ runtime·printpointer(it->data);
+ runtime·prints("\n");
}
}
struct hash_iter*
-mapiterinit(Hmap *h)
+runtime·newmapiterinit(Hmap *h)
{
struct hash_iter *it;
- it = mal(sizeof *it);
- ·mapiterinit(h, it);
+ it = runtime·mal(sizeof *it);
+ runtime·mapiterinit(h, it);
return it;
}
// mapiternext(hiter *any);
void
-·mapiternext(struct hash_iter *it)
+runtime·mapiternext(struct hash_iter *it)
{
- if(gcwaiting)
- gosched();
+ if(runtime·gcwaiting)
+ runtime·gosched();
it->data = hash_next(it);
if(debug) {
- prints("runtime.mapiternext: iter=");
- ·printpointer(it);
- prints("; data=");
- ·printpointer(it->data);
- prints("\n");
+ runtime·prints("runtime.mapiternext: iter=");
+ runtime·printpointer(it);
+ runtime·prints("; data=");
+ runtime·printpointer(it->data);
+ runtime·prints("\n");
}
}
-void
-mapiternext(struct hash_iter *it)
-{
- ·mapiternext(it);
-}
-
// mapiter1(hiter *any) (key any);
#pragma textflag 7
void
-·mapiter1(struct hash_iter *it, ...)
+runtime·mapiter1(struct hash_iter *it, ...)
{
Hmap *h;
byte *ak, *res;
res = it->data;
if(res == nil)
- throw("runtime.mapiter1: key:val nil pointer");
+ runtime·throw("runtime.mapiter1: key:val nil pointer");
h->keyalg->copy(h->keysize, ak, res);
if(debug) {
- prints("mapiter2: iter=");
- ·printpointer(it);
- prints("; map=");
- ·printpointer(h);
- prints("\n");
+ runtime·prints("mapiter2: iter=");
+ runtime·printpointer(it);
+ runtime·prints("; map=");
+ runtime·printpointer(h);
+ runtime·prints("\n");
}
}
bool
-mapiterkey(struct hash_iter *it, void *ak)
+runtime·mapiterkey(struct hash_iter *it, void *ak)
{
Hmap *h;
byte *res;
// mapiter2(hiter *any) (key any, val any);
#pragma textflag 7
void
-·mapiter2(struct hash_iter *it, ...)
+runtime·mapiter2(struct hash_iter *it, ...)
{
Hmap *h;
byte *ak, *av, *res;
res = it->data;
if(res == nil)
- throw("runtime.mapiter2: key:val nil pointer");
+ runtime·throw("runtime.mapiter2: key:val nil pointer");
h->keyalg->copy(h->keysize, ak, res);
h->valalg->copy(h->valsize, av, hash_indirect(h, res+h->datavo));
if(debug) {
- prints("mapiter2: iter=");
- ·printpointer(it);
- prints("; map=");
- ·printpointer(h);
- prints("\n");
+ runtime·prints("mapiter2: iter=");
+ runtime·printpointer(it);
+ runtime·prints("; map=");
+ runtime·printpointer(h);
+ runtime·prints("\n");
}
}
}
*/
-#define malloc mal
+#define malloc runtime·mal
#define offsetof(s,m) (uint32)(&(((s*)0)->m))
-#define memset(a,b,c) ·memclr((byte*)(a), (uint32)(c))
-#define memcpy(a,b,c) mcpy((byte*)(a),(byte*)(b),(uint32)(c))
-#define assert(a) if(!(a)) throw("assert")
+#define memset(a,b,c) runtime·memclr((byte*)(a), (uint32)(c))
+#define memcpy(a,b,c) runtime·mcpy((byte*)(a),(byte*)(b),(uint32)(c))
+#define assert(a) if(!(a)) runtime·throw("assert")
+#define free(x) runtime·free(x)
+#define memmove(a,b,c) runtime·memmove(a, b, c)
struct hash; /* opaque */
struct hash_subtable; /* opaque */
/* Lookup *data in *h. If the data is found, return 1 and place a pointer to
the found element in *pres. Otherwise return 0 and place 0 in *pres. */
-int32 hash_lookup (struct hash *h, void *data, void **pres);
+// int32 hash_lookup (struct hash *h, void *data, void **pres);
/* Lookup *data in *h. If the data is found, execute (*data_del) (arg, p)
where p points to the data in the table, then remove it from *h and return
1. Otherwise return 0. */
-int32 hash_remove (struct hash *h, void *data, void *arg);
+// int32 hash_remove (struct hash *h, void *data, void *arg);
/* Lookup *data in *h. If the data is found, return 1, and place a pointer
to the found element in *pres. Otherwise, return 0, allocate a region
If using garbage collection, it is the caller's responsibility to
add references for **pres if HASH_ADDED is returned. */
-int32 hash_insert (struct hash *h, void *data, void **pres);
+// int32 hash_insert (struct hash *h, void *data, void **pres);
/* Return the number of elements in the table. */
-uint32 hash_count (struct hash *h);
+// uint32 hash_count (struct hash *h);
/* The following call is useful only if not using garbage collection on the
table.
If other memory pointed to by user data must be freed, the caller is
responsible for doing so by iterating over *h first; see
hash_iter_init()/hash_next(). */
-void hash_destroy (struct hash *h);
+// void hash_destroy (struct hash *h);
/*----- iteration -----*/
/* Initialize *it from *h. */
-void hash_iter_init (struct hash *h, struct hash_iter *it);
+// void hash_iter_init (struct hash *h, struct hash_iter *it);
/* Return the next used entry in the table with which *it was initialized. */
-void *hash_next (struct hash_iter *it);
+// void *hash_next (struct hash_iter *it);
/*---- test interface ----*/
/* Call (*data_visit) (arg, level, data) for every data entry in the table,
whether used or not. "level" is the subtable level, 0 means first level. */
/* TESTING ONLY: DO NOT USE THIS ROUTINE IN NORMAL CODE */
-void hash_visit (struct hash *h, void (*data_visit) (void *arg, int32 level, void *data), void *arg);
+// void hash_visit (struct hash *h, void (*data_visit) (void *arg, int32 level, void *data), void *arg);
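/* A minimal usage sketch of the interface above (illustrative only; these
   routines are now static to the map implementation, so this is roughly how
   hashmap.c itself drives them):

	struct hash *h = ...;
	void *res;

	hash_init(h, datasize, data_hash, data_eq, data_del, hint);
	if(hash_insert(h, key, &res))
		// already present: res points at the existing entry
	else
		// inserted: res points at fresh space for key's data
	if(hash_lookup(h, key, &res))
		// found: res points at the stored data
	hash_remove(h, key, arg);	// calls (*data_del)(arg, p) before removing
*/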
#include "type.h"
#include "malloc.h"
-static void
-printiface(Iface i)
+void
+runtime·printiface(Iface i)
{
- printf("(%p,%p)", i.tab, i.data);
+ runtime·printf("(%p,%p)", i.tab, i.data);
}
-static void
-printeface(Eface e)
+void
+runtime·printeface(Eface e)
{
- printf("(%p,%p)", e.type, e.data);
+ runtime·printf("(%p,%p)", e.type, e.data);
}
/*
Eface err;
if(inter->mhdr.len == 0)
- throw("internal error - misuse of itab");
+ runtime·throw("internal error - misuse of itab");
locked = 0;
// common case will be no lock contention.
for(locked=0; locked<2; locked++) {
if(locked)
- lock(&ifacelock);
+ runtime·lock(&ifacelock);
for(m=hash[h]; m!=nil; m=m->link) {
if(m->inter == inter && m->type == type) {
if(m->bad) {
}
}
if(locked)
- unlock(&ifacelock);
+ runtime·unlock(&ifacelock);
return m;
}
}
}
ni = inter->mhdr.len;
- m = malloc(sizeof(*m) + ni*sizeof m->fun[0]);
+ m = runtime·malloc(sizeof(*m) + ni*sizeof m->fun[0]);
m->inter = inter;
m->type = type;
if(!canfail) {
throw:
// didn't find method
- ·newTypeAssertionError(nil, type, inter,
+ runtime·newTypeAssertionError(nil, type, inter,
nil, type->string, inter->string,
iname, &err);
if(locked)
- unlock(&ifacelock);
- ·panic(err);
+ runtime·unlock(&ifacelock);
+ runtime·panic(err);
return nil; // not reached
}
m->bad = 1;
m->link = hash[h];
hash[h] = m;
if(locked)
- unlock(&ifacelock);
+ runtime·unlock(&ifacelock);
if(m->bad)
return nil;
return m;
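// The cache probe above follows a two-pass idiom: pass 0 walks the hash chain
// without taking ifacelock (the common, contention-free case); only when the
// itab is missing does pass 1 rescan under the lock and then build and insert
// the new entry while still holding it. A minimal sketch of the shape, using
// hypothetical helpers find/build/insert and a hypothetical cachelock:
//
//	for(locked=0; locked<2; locked++) {
//		if(locked)
//			runtime·lock(&cachelock);
//		if((e = find(key)) != nil) {
//			if(locked)
//				runtime·unlock(&cachelock);
//			return e;
//		}
//	}
//	e = build(key);			// pass 1 still holds cachelock here
//	insert(key, e);
//	runtime·unlock(&cachelock);
//	return e;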
alg = t->alg;
if(wid <= sizeof(*dst))
- algarray[alg].copy(wid, dst, src);
+ runtime·algarray[alg].copy(wid, dst, src);
else {
- p = mal(wid);
- algarray[alg].copy(wid, p, src);
+ p = runtime·mal(wid);
+ runtime·algarray[alg].copy(wid, p, src);
*dst = p;
}
}
alg = t->alg;
if(wid <= sizeof(*src))
- algarray[alg].copy(wid, dst, src);
+ runtime·algarray[alg].copy(wid, dst, src);
else
- algarray[alg].copy(wid, dst, *src);
+ runtime·algarray[alg].copy(wid, dst, *src);
}
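// In short, the interface data word holds the value itself whenever it fits in
// one pointer-sized slot, and otherwise points at a runtime·mal'd heap copy.
// For example (widths assumed for a 32-bit target, purely for illustration):
//	int32, *T        -> fit in the word, copied in directly by copyin
//	string, [3]int32 -> too wide, so the word points at a fresh copy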
// func convT2I(typ *byte, typ2 *byte, elem any) (ret any)
#pragma textflag 7
void
-·convT2I(Type *t, InterfaceType *inter, ...)
+runtime·convT2I(Type *t, InterfaceType *inter, ...)
{
byte *elem;
Iface *ret;
elem = (byte*)(&inter+1);
wid = t->size;
- ret = (Iface*)(elem + rnd(wid, Structrnd));
+ ret = (Iface*)(elem + runtime·rnd(wid, Structrnd));
ret->tab = itab(inter, t, 0);
copyin(t, elem, &ret->data);
}
// func convT2E(typ *byte, elem any) (ret any)
#pragma textflag 7
void
-·convT2E(Type *t, ...)
+runtime·convT2E(Type *t, ...)
{
byte *elem;
Eface *ret;
elem = (byte*)(&t+1);
wid = t->size;
- ret = (Eface*)(elem + rnd(wid, Structrnd));
+ ret = (Eface*)(elem + runtime·rnd(wid, Structrnd));
ret->type = t;
copyin(t, elem, &ret->data);
}
// func ifaceI2T(typ *byte, iface any) (ret any)
#pragma textflag 7
void
-·assertI2T(Type *t, Iface i, ...)
+runtime·assertI2T(Type *t, Iface i, ...)
{
Itab *tab;
byte *ret;
ret = (byte*)(&i+1);
tab = i.tab;
if(tab == nil) {
- ·newTypeAssertionError(nil, nil, t,
+ runtime·newTypeAssertionError(nil, nil, t,
nil, nil, t->string,
nil, &err);
- ·panic(err);
+ runtime·panic(err);
}
if(tab->type != t) {
- ·newTypeAssertionError(tab->inter, tab->type, t,
+ runtime·newTypeAssertionError(tab->inter, tab->type, t,
tab->inter->string, tab->type->string, t->string,
nil, &err);
- ·panic(err);
+ runtime·panic(err);
}
copyout(t, &i.data, ret);
}
// func ifaceI2T2(typ *byte, iface any) (ret any, ok bool)
#pragma textflag 7
void
-·assertI2T2(Type *t, Iface i, ...)
+runtime·assertI2T2(Type *t, Iface i, ...)
{
byte *ret;
bool *ok;
ret = (byte*)(&i+1);
wid = t->size;
- ok = (bool*)(ret+rnd(wid, 1));
+ ok = (bool*)(ret+runtime·rnd(wid, 1));
if(i.tab == nil || i.tab->type != t) {
*ok = false;
- ·memclr(ret, wid);
+ runtime·memclr(ret, wid);
return;
}
// func ifaceE2T(typ *byte, iface any) (ret any)
#pragma textflag 7
void
-·assertE2T(Type *t, Eface e, ...)
+runtime·assertE2T(Type *t, Eface e, ...)
{
byte *ret;
Eface err;
ret = (byte*)(&e+1);
if(e.type == nil) {
- ·newTypeAssertionError(nil, nil, t,
+ runtime·newTypeAssertionError(nil, nil, t,
nil, nil, t->string,
nil, &err);
- ·panic(err);
+ runtime·panic(err);
}
if(e.type != t) {
- ·newTypeAssertionError(nil, e.type, t,
+ runtime·newTypeAssertionError(nil, e.type, t,
nil, e.type->string, t->string,
nil, &err);
- ·panic(err);
+ runtime·panic(err);
}
copyout(t, &e.data, ret);
}
// func ifaceE2T2(sigt *byte, iface any) (ret any, ok bool);
#pragma textflag 7
void
-·assertE2T2(Type *t, Eface e, ...)
+runtime·assertE2T2(Type *t, Eface e, ...)
{
byte *ret;
bool *ok;
ret = (byte*)(&e+1);
wid = t->size;
- ok = (bool*)(ret+rnd(wid, 1));
+ ok = (bool*)(ret+runtime·rnd(wid, 1));
if(t != e.type) {
*ok = false;
- ·memclr(ret, wid);
+ runtime·memclr(ret, wid);
return;
}
// func convI2E(elem any) (ret any)
#pragma textflag 7
void
-·convI2E(Iface i, Eface ret)
+runtime·convI2E(Iface i, Eface ret)
{
Itab *tab;
// func ifaceI2E(typ *byte, iface any) (ret any)
#pragma textflag 7
void
-·assertI2E(InterfaceType* inter, Iface i, Eface ret)
+runtime·assertI2E(InterfaceType* inter, Iface i, Eface ret)
{
Itab *tab;
Eface err;
tab = i.tab;
if(tab == nil) {
// explicit conversions require non-nil interface value.
- ·newTypeAssertionError(nil, nil, inter,
+ runtime·newTypeAssertionError(nil, nil, inter,
nil, nil, inter->string,
nil, &err);
- ·panic(err);
+ runtime·panic(err);
}
ret.data = i.data;
ret.type = tab->type;
// func ifaceI2E2(typ *byte, iface any) (ret any, ok bool)
#pragma textflag 7
void
-·assertI2E2(InterfaceType* inter, Iface i, Eface ret, bool ok)
+runtime·assertI2E2(InterfaceType* inter, Iface i, Eface ret, bool ok)
{
Itab *tab;
// func convI2I(typ *byte, elem any) (ret any)
#pragma textflag 7
void
-·convI2I(InterfaceType* inter, Iface i, Iface ret)
+runtime·convI2I(InterfaceType* inter, Iface i, Iface ret)
{
Itab *tab;
}
void
-ifaceI2I(InterfaceType *inter, Iface i, Iface *ret)
+runtime·ifaceI2I(InterfaceType *inter, Iface i, Iface *ret)
{
Itab *tab;
Eface err;
tab = i.tab;
if(tab == nil) {
// explicit conversions require non-nil interface value.
- ·newTypeAssertionError(nil, nil, inter,
+ runtime·newTypeAssertionError(nil, nil, inter,
nil, nil, inter->string,
nil, &err);
- ·panic(err);
+ runtime·panic(err);
}
ret->data = i.data;
ret->tab = itab(inter, tab->type, 0);
// func ifaceI2I(sigi *byte, iface any) (ret any)
#pragma textflag 7
void
-·assertI2I(InterfaceType* inter, Iface i, Iface ret)
+runtime·assertI2I(InterfaceType* inter, Iface i, Iface ret)
{
- ifaceI2I(inter, i, &ret);
+ runtime·ifaceI2I(inter, i, &ret);
}
// func ifaceI2I2(sigi *byte, iface any) (ret any, ok bool)
#pragma textflag 7
void
-·assertI2I2(InterfaceType *inter, Iface i, Iface ret, bool ok)
+runtime·assertI2I2(InterfaceType *inter, Iface i, Iface ret, bool ok)
{
Itab *tab;
}
void
-ifaceE2I(InterfaceType *inter, Eface e, Iface *ret)
+runtime·ifaceE2I(InterfaceType *inter, Eface e, Iface *ret)
{
Type *t;
Eface err;
t = e.type;
if(t == nil) {
// explicit conversions require non-nil interface value.
- ·newTypeAssertionError(nil, nil, inter,
+ runtime·newTypeAssertionError(nil, nil, inter,
nil, nil, inter->string,
nil, &err);
- ·panic(err);
+ runtime·panic(err);
}
ret->data = e.data;
ret->tab = itab(inter, t, 0);
// func ifaceE2I(sigi *byte, iface any) (ret any)
#pragma textflag 7
void
-·assertE2I(InterfaceType* inter, Eface e, Iface ret)
+runtime·assertE2I(InterfaceType* inter, Eface e, Iface ret)
{
- ifaceE2I(inter, e, &ret);
+ runtime·ifaceE2I(inter, e, &ret);
}
// ifaceE2I2(sigi *byte, iface any) (ret any, ok bool)
#pragma textflag 7
void
-·assertE2I2(InterfaceType *inter, Eface e, Iface ret, bool ok)
+runtime·assertE2I2(InterfaceType *inter, Eface e, Iface ret, bool ok)
{
if(e.type == nil) {
ok = 0;
// func ifaceE2E(typ *byte, iface any) (ret any)
#pragma textflag 7
void
-·assertE2E(InterfaceType* inter, Eface e, Eface ret)
+runtime·assertE2E(InterfaceType* inter, Eface e, Eface ret)
{
Type *t;
Eface err;
t = e.type;
if(t == nil) {
// explicit conversions require non-nil interface value.
- ·newTypeAssertionError(nil, nil, inter,
+ runtime·newTypeAssertionError(nil, nil, inter,
nil, nil, inter->string,
nil, &err);
- ·panic(err);
+ runtime·panic(err);
}
ret = e;
FLUSH(&ret);
// func ifaceE2E2(iface any) (ret any, ok bool)
#pragma textflag 7
void
-·assertE2E2(InterfaceType* inter, Eface e, Eface ret, bool ok)
+runtime·assertE2E2(InterfaceType* inter, Eface e, Eface ret, bool ok)
{
USED(inter);
ret = e;
alg = t->alg;
wid = t->size;
- if(algarray[alg].hash == nohash) {
+ if(runtime·algarray[alg].hash == runtime·nohash) {
// calling nohash will panic too,
// but we can print a better error.
- ·newErrorString(catstring(gostringnocopy((byte*)"hash of unhashable type "), *t->string), &err);
- ·panic(err);
+ runtime·newErrorString(runtime·catstring(runtime·gostringnocopy((byte*)"hash of unhashable type "), *t->string), &err);
+ runtime·panic(err);
}
if(wid <= sizeof(data))
- return algarray[alg].hash(wid, &data);
- return algarray[alg].hash(wid, data);
+ return runtime·algarray[alg].hash(wid, &data);
+ return runtime·algarray[alg].hash(wid, data);
}
uintptr
-ifacehash(Iface a)
+runtime·ifacehash(Iface a)
{
if(a.tab == nil)
return 0;
}
uintptr
-efacehash(Eface a)
+runtime·efacehash(Eface a)
{
return ifacehash1(a.data, a.type);
}
alg = t->alg;
wid = t->size;
- if(algarray[alg].equal == noequal) {
+ if(runtime·algarray[alg].equal == runtime·noequal) {
// calling noequal will panic too,
// but we can print a better error.
- ·newErrorString(catstring(gostringnocopy((byte*)"comparing uncomparable type "), *t->string), &err);
- ·panic(err);
+ runtime·newErrorString(runtime·catstring(runtime·gostringnocopy((byte*)"comparing uncomparable type "), *t->string), &err);
+ runtime·panic(err);
}
if(wid <= sizeof(data1))
- return algarray[alg].equal(wid, &data1, &data2);
- return algarray[alg].equal(wid, data1, data2);
+ return runtime·algarray[alg].equal(wid, &data1, &data2);
+ return runtime·algarray[alg].equal(wid, data1, data2);
}
bool
-ifaceeq(Iface i1, Iface i2)
+runtime·ifaceeq_c(Iface i1, Iface i2)
{
if(i1.tab != i2.tab)
return false;
}
bool
-efaceeq(Eface e1, Eface e2)
+runtime·efaceeq_c(Eface e1, Eface e2)
{
if(e1.type != e2.type)
return false;
// ifaceeq(i1 any, i2 any) (ret bool);
void
-·ifaceeq(Iface i1, Iface i2, bool ret)
+runtime·ifaceeq(Iface i1, Iface i2, bool ret)
{
- ret = ifaceeq(i1, i2);
+ ret = runtime·ifaceeq_c(i1, i2);
FLUSH(&ret);
}
// efaceeq(i1 any, i2 any) (ret bool)
void
-·efaceeq(Eface e1, Eface e2, bool ret)
+runtime·efaceeq(Eface e1, Eface e2, bool ret)
{
- ret = efaceeq(e1, e2);
+ ret = runtime·efaceeq_c(e1, e2);
FLUSH(&ret);
}
// ifacethash(i1 any) (ret uint32);
void
-·ifacethash(Iface i1, uint32 ret)
+runtime·ifacethash(Iface i1, uint32 ret)
{
Itab *tab;
// efacethash(e1 any) (ret uint32)
void
-·efacethash(Eface e1, uint32 ret)
+runtime·efacethash(Eface e1, uint32 ret)
{
Type *t;
FLUSH(&ret);
}
-void
-·printiface(Iface i)
-{
- printiface(i);
-}
-
-void
-·printeface(Eface e)
-{
- printeface(e);
-}
-
void
unsafe·Typeof(Eface e, Eface ret)
{
if(e.type->size <= sizeof(uintptr)) {
// Copy data into x ...
x = 0;
- algarray[e.type->alg].copy(e.type->size, &x, &e.data);
+ runtime·algarray[e.type->alg].copy(e.type->size, &x, &e.data);
// but then build pointer to x so that Reflect
// always returns pointer to data.
- p = mal(sizeof(uintptr));
+ p = runtime·mal(sizeof(uintptr));
*p = x;
} else {
// Already a pointer, but still make a copy,
// to preserve value semantics for interface data.
- p = mal(e.type->size);
- algarray[e.type->alg].copy(e.type->size, p, e.data);
+ p = runtime·mal(e.type->size);
+ runtime·algarray[e.type->alg].copy(e.type->size, p, e.data);
}
retaddr = p;
}
// Interface holds either pointer to data
// or copy of original data.
if(e.type->size <= sizeof(uintptr))
- algarray[e.type->alg].copy(e.type->size, &e.data, addr);
+ runtime·algarray[e.type->alg].copy(e.type->size, &e.data, addr);
else {
// Easier: already a pointer to data.
// TODO(rsc): Should this make a copy?
t = (Type*)((Eface*)typ.data-1);
if(t->kind&KindNoPointers)
- ret = mallocgc(t->size, RefNoPointers, 1, 1);
+ ret = runtime·mallocgc(t->size, RefNoPointers, 1, 1);
else
- ret = mal(t->size);
+ ret = runtime·mal(t->size);
FLUSH(&ret);
}
size = n*t->size;
if(t->kind&KindNoPointers)
- ret = mallocgc(size, RefNoPointers, 1, 1);
+ ret = runtime·mallocgc(size, RefNoPointers, 1, 1);
else
- ret = mal(size);
+ ret = runtime·mal(size);
FLUSH(&ret);
}
// Darwin and Linux use the same linkage to main
-TEXT _rt0_386_linux(SB),7,$0
+TEXT _rt0_386_linux(SB),7,$0
JMP _rt0_386(SB)
#include "os.h"
void
-dumpregs(Sigcontext *r)
+runtime·dumpregs(Sigcontext *r)
{
- printf("eax %x\n", r->eax);
- printf("ebx %x\n", r->ebx);
- printf("ecx %x\n", r->ecx);
- printf("edx %x\n", r->edx);
- printf("edi %x\n", r->edi);
- printf("esi %x\n", r->esi);
- printf("ebp %x\n", r->ebp);
- printf("esp %x\n", r->esp);
- printf("eip %x\n", r->eip);
- printf("eflags %x\n", r->eflags);
- printf("cs %x\n", r->cs);
- printf("fs %x\n", r->fs);
- printf("gs %x\n", r->gs);
+ runtime·printf("eax %x\n", r->eax);
+ runtime·printf("ebx %x\n", r->ebx);
+ runtime·printf("ecx %x\n", r->ecx);
+ runtime·printf("edx %x\n", r->edx);
+ runtime·printf("edi %x\n", r->edi);
+ runtime·printf("esi %x\n", r->esi);
+ runtime·printf("ebp %x\n", r->ebp);
+ runtime·printf("esp %x\n", r->esp);
+ runtime·printf("eip %x\n", r->eip);
+ runtime·printf("eflags %x\n", r->eflags);
+ runtime·printf("cs %x\n", r->cs);
+ runtime·printf("fs %x\n", r->fs);
+ runtime·printf("gs %x\n", r->gs);
}
/*
* This assembler routine takes the args from registers, puts them on the stack,
* and calls sighandler().
*/
-extern void sigtramp(void);
-extern void sigignore(void); // just returns
-extern void sigreturn(void); // calls sigreturn
+extern void runtime·sigtramp(void);
+extern void runtime·sigignore(void); // just returns
+extern void runtime·sigreturn(void); // calls runtime·sigreturn
String
-signame(int32 sig)
+runtime·signame(int32 sig)
{
if(sig < 0 || sig >= NSIG)
- return emptystring;
- return gostringnocopy((byte*)sigtab[sig].name);
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
}
void
-sighandler(int32 sig, Siginfo* info, void* context)
+runtime·sighandler(int32 sig, Siginfo* info, void* context)
{
Ucontext *uc;
Sigcontext *r;
uc = context;
r = &uc->uc_mcontext;
- if((gp = m->curg) != nil && (sigtab[sig].flags & SigPanic)) {
+ if((gp = m->curg) != nil && (runtime·sigtab[sig].flags & SigPanic)) {
// Make it look like a call to the signal func.
// Have to pass arguments out of band since
// augmenting the stack frame would break
gp->sigcode0 = info->si_code;
gp->sigcode1 = ((uintptr*)info)[3];
- // Only push sigpanic if r->eip != 0.
+ // Only push runtime·sigpanic if r->eip != 0.
// If r->eip == 0, probably panicked because of a
// call to a nil func. Not pushing that onto sp will
- // make the trace look like a call to sigpanic instead.
- // (Otherwise the trace will end at sigpanic and we
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
// won't get to see who faulted.)
if(r->eip != 0) {
sp = (uintptr*)r->esp;
*--sp = r->eip;
r->esp = (uintptr)sp;
}
- r->eip = (uintptr)sigpanic;
+ r->eip = (uintptr)runtime·sigpanic;
return;
}
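// The net effect, roughly (386 names; the addresses are only illustrative):
//	before: eip = 0x8049f00 (faulting PC), esp -> [caller's frame ...]
//	after:  eip = runtime·sigpanic,        esp -> [0x8049f00, caller's frame ...]
// so the traceback shows runtime·sigpanic as though it had been called from
// the instruction that faulted.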
- if(sigtab[sig].flags & SigQueue) {
- if(sigsend(sig) || (sigtab[sig].flags & SigIgnore))
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
return;
- exit(2); // SIGINT, SIGTERM, etc
+ runtime·exit(2); // SIGINT, SIGTERM, etc
}
- if(panicking) // traceback already printed
- exit(2);
- panicking = 1;
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
if(sig < 0 || sig >= NSIG)
- printf("Signal %d\n", sig);
+ runtime·printf("Signal %d\n", sig);
else
- printf("%s\n", sigtab[sig].name);
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
- printf("PC=%X\n", r->eip);
- printf("\n");
+ runtime·printf("PC=%X\n", r->eip);
+ runtime·printf("\n");
- if(gotraceback()){
- traceback((void*)r->eip, (void*)r->esp, 0, m->curg);
- tracebackothers(m->curg);
- dumpregs(r);
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->eip, (void*)r->esp, 0, m->curg);
+ runtime·tracebackothers(m->curg);
+ runtime·dumpregs(r);
}
- breakpoint();
- exit(2);
+ runtime·breakpoint();
+ runtime·exit(2);
}
void
-signalstack(byte *p, int32 n)
+runtime·signalstack(byte *p, int32 n)
{
Sigaltstack st;
st.ss_sp = p;
st.ss_size = n;
st.ss_flags = 0;
- sigaltstack(&st, nil);
+ runtime·sigaltstack(&st, nil);
}
void
-initsig(int32 queue)
+runtime·initsig(int32 queue)
{
static Sigaction sa;
- siginit();
+ runtime·siginit();
int32 i;
sa.sa_flags = SA_ONSTACK | SA_SIGINFO | SA_RESTORER;
sa.sa_mask = 0xFFFFFFFFFFFFFFFFULL;
- sa.sa_restorer = (void*)sigreturn;
+ sa.sa_restorer = (void*)runtime·sigreturn;
for(i = 0; i<NSIG; i++) {
- if(sigtab[i].flags) {
- if((sigtab[i].flags & SigQueue) != queue)
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
continue;
- if(sigtab[i].flags & (SigCatch | SigQueue))
- sa.k_sa_handler = (void*)sigtramp;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ sa.k_sa_handler = (void*)runtime·sigtramp;
else
- sa.k_sa_handler = (void*)sigignore;
- if(sigtab[i].flags & SigRestart)
+ sa.k_sa_handler = (void*)runtime·sigignore;
+ if(runtime·sigtab[i].flags & SigRestart)
sa.sa_flags |= SA_RESTART;
else
sa.sa_flags &= ~SA_RESTART;
- rt_sigaction(i, &sa, nil, 8);
+ runtime·rt_sigaction(i, &sa, nil, 8);
}
}
}
#include "386/asm.h"
-TEXT exit(SB),7,$0
+TEXT runtime·exit(SB),7,$0
MOVL $252, AX // syscall number
MOVL 4(SP), BX
INT $0x80
INT $3 // not reached
RET
-TEXT exit1(SB),7,$0
+TEXT runtime·exit1(SB),7,$0
MOVL $1, AX // exit - exit the current os thread
MOVL 4(SP), BX
INT $0x80
INT $3 // not reached
RET
-TEXT write(SB),7,$0
+TEXT runtime·write(SB),7,$0
MOVL $4, AX // syscall - write
MOVL 4(SP), BX
MOVL 8(SP), CX
INT $0x80
RET
-TEXT gettime(SB), 7, $32
+TEXT runtime·gettime(SB), 7, $32
MOVL $78, AX // syscall - gettimeofday
LEAL 8(SP), BX
MOVL $0, CX
MOVL BX, (DI)
RET
-TEXT rt_sigaction(SB),7,$0
+TEXT runtime·rt_sigaction(SB),7,$0
MOVL $174, AX // syscall - rt_sigaction
MOVL 4(SP), BX
MOVL 8(SP), CX
INT $0x80
RET
-TEXT sigtramp(SB),7,$40
+TEXT runtime·sigtramp(SB),7,$40
get_tls(CX)
// save g
MOVL context+8(FP), BX
MOVL BX, 8(SP)
- CALL sighandler(SB)
+ CALL runtime·sighandler(SB)
// restore g
get_tls(CX)
RET
-TEXT sigignore(SB),7,$0
+TEXT runtime·sigignore(SB),7,$0
RET
-TEXT sigreturn(SB),7,$0
+TEXT runtime·sigreturn(SB),7,$0
MOVL $173, AX // rt_sigreturn
INT $0x80
INT $3 // not reached
RET
-TEXT ·mmap(SB),7,$0
+TEXT runtime·mmap(SB),7,$0
MOVL $192, AX // mmap2
MOVL 4(SP), BX
MOVL 8(SP), CX
INCL AX
RET
-TEXT ·munmap(SB),7,$0
+TEXT runtime·munmap(SB),7,$0
MOVL $91, AX // munmap
MOVL 4(SP), BX
MOVL 8(SP), CX
// int32 futex(int32 *uaddr, int32 op, int32 val,
// struct timespec *timeout, int32 *uaddr2, int32 val2);
-TEXT futex(SB),7,$0
+TEXT runtime·futex(SB),7,$0
MOVL $240, AX // futex
MOVL 4(SP), BX
MOVL 8(SP), CX
RET
// int32 clone(int32 flags, void *stack, M *m, G *g, void (*fn)(void));
-TEXT clone(SB),7,$0
+TEXT runtime·clone(SB),7,$0
MOVL $120, AX // clone
MOVL flags+4(SP), BX
MOVL stack+8(SP), CX
PUSHL $32 // sizeof tls
PUSHL BP // &tls
PUSHL DI // tls #
- CALL setldt(SB)
+ CALL runtime·setldt(SB)
POPL AX
POPL AX
POPL AX
MOVL DX, g(AX)
MOVL BX, m(AX)
- CALL stackcheck(SB) // smashes AX, CX
+ CALL runtime·stackcheck(SB) // smashes AX, CX
MOVL 0(DX), DX // paranoia; check they are not nil
MOVL 0(BX), BX
// more paranoia; check that stack splitting code works
PUSHAL
- CALL emptyfunc(SB)
+ CALL runtime·emptyfunc(SB)
POPAL
CALL SI // fn()
- CALL exit1(SB)
+ CALL runtime·exit1(SB)
MOVL $0x1234, 0x1005
RET
-TEXT sigaltstack(SB),7,$-8
+TEXT runtime·sigaltstack(SB),7,$-8
MOVL $186, AX // sigaltstack
MOVL new+4(SP), BX
MOVL old+8(SP), CX
#define USEABLE 0x40
// setldt(int entry, int address, int limit)
-TEXT setldt(SB),7,$32
+TEXT runtime·setldt(SB),7,$32
MOVL entry+0(FP), BX // entry
MOVL address+4(FP), CX // base address
// Darwin and Linux use the same linkage to main
-TEXT _rt0_amd64_linux(SB),7,$-8
+TEXT _rt0_amd64_linux(SB),7,$-8
MOVQ $_rt0_amd64(SB), AX
MOVQ SP, DI
JMP AX
#include "os.h"
void
-dumpregs(Sigcontext *r)
+runtime·dumpregs(Sigcontext *r)
{
- printf("rax %X\n", r->rax);
- printf("rbx %X\n", r->rbx);
- printf("rcx %X\n", r->rcx);
- printf("rdx %X\n", r->rdx);
- printf("rdi %X\n", r->rdi);
- printf("rsi %X\n", r->rsi);
- printf("rbp %X\n", r->rbp);
- printf("rsp %X\n", r->rsp);
- printf("r8 %X\n", r->r8 );
- printf("r9 %X\n", r->r9 );
- printf("r10 %X\n", r->r10);
- printf("r11 %X\n", r->r11);
- printf("r12 %X\n", r->r12);
- printf("r13 %X\n", r->r13);
- printf("r14 %X\n", r->r14);
- printf("r15 %X\n", r->r15);
- printf("rip %X\n", r->rip);
- printf("rflags %X\n", r->eflags);
- printf("cs %X\n", (uint64)r->cs);
- printf("fs %X\n", (uint64)r->fs);
- printf("gs %X\n", (uint64)r->gs);
+ runtime·printf("rax %X\n", r->rax);
+ runtime·printf("rbx %X\n", r->rbx);
+ runtime·printf("rcx %X\n", r->rcx);
+ runtime·printf("rdx %X\n", r->rdx);
+ runtime·printf("rdi %X\n", r->rdi);
+ runtime·printf("rsi %X\n", r->rsi);
+ runtime·printf("rbp %X\n", r->rbp);
+ runtime·printf("rsp %X\n", r->rsp);
+ runtime·printf("r8 %X\n", r->r8 );
+ runtime·printf("r9 %X\n", r->r9 );
+ runtime·printf("r10 %X\n", r->r10);
+ runtime·printf("r11 %X\n", r->r11);
+ runtime·printf("r12 %X\n", r->r12);
+ runtime·printf("r13 %X\n", r->r13);
+ runtime·printf("r14 %X\n", r->r14);
+ runtime·printf("r15 %X\n", r->r15);
+ runtime·printf("rip %X\n", r->rip);
+ runtime·printf("rflags %X\n", r->eflags);
+ runtime·printf("cs %X\n", (uint64)r->cs);
+ runtime·printf("fs %X\n", (uint64)r->fs);
+ runtime·printf("gs %X\n", (uint64)r->gs);
}
/*
* This assembler routine takes the args from registers, puts them on the stack,
* and calls sighandler().
*/
-extern void sigtramp(void);
-extern void sigignore(void); // just returns
-extern void sigreturn(void); // calls sigreturn
+extern void runtime·sigtramp(void);
+extern void runtime·sigignore(void); // just returns
+extern void runtime·sigreturn(void); // calls runtime·sigreturn
String
-signame(int32 sig)
+runtime·signame(int32 sig)
{
if(sig < 0 || sig >= NSIG)
- return emptystring;
- return gostringnocopy((byte*)sigtab[sig].name);
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
}
void
-sighandler(int32 sig, Siginfo* info, void* context)
+runtime·sighandler(int32 sig, Siginfo* info, void* context)
{
Ucontext *uc;
Mcontext *mc;
mc = &uc->uc_mcontext;
r = (Sigcontext*)mc; // same layout, more convenient names
- if((gp = m->curg) != nil && (sigtab[sig].flags & SigPanic)) {
+ if((gp = m->curg) != nil && (runtime·sigtab[sig].flags & SigPanic)) {
// Make it look like a call to the signal func.
// Have to pass arguments out of band since
// augmenting the stack frame would break
gp->sigcode0 = info->si_code;
gp->sigcode1 = ((uintptr*)info)[2];
- // Only push sigpanic if r->rip != 0.
+ // Only push runtime·sigpanic if r->rip != 0.
// If r->rip == 0, probably panicked because of a
// call to a nil func. Not pushing that onto sp will
- // make the trace look like a call to sigpanic instead.
- // (Otherwise the trace will end at sigpanic and we
+ // make the trace look like a call to runtime·sigpanic instead.
+ // (Otherwise the trace will end at runtime·sigpanic and we
// won't get to see who faulted.)
if(r->rip != 0) {
sp = (uintptr*)r->rsp;
*--sp = r->rip;
r->rsp = (uintptr)sp;
}
- r->rip = (uintptr)sigpanic;
+ r->rip = (uintptr)runtime·sigpanic;
return;
}
- if(sigtab[sig].flags & SigQueue) {
- if(sigsend(sig) || (sigtab[sig].flags & SigIgnore))
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
return;
- exit(2); // SIGINT, SIGTERM, etc
+ runtime·exit(2); // SIGINT, SIGTERM, etc
}
- if(panicking) // traceback already printed
- exit(2);
- panicking = 1;
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
if(sig < 0 || sig >= NSIG)
- printf("Signal %d\n", sig);
+ runtime·printf("Signal %d\n", sig);
else
- printf("%s\n", sigtab[sig].name);
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
- printf("PC=%X\n", r->rip);
- printf("\n");
+ runtime·printf("PC=%X\n", r->rip);
+ runtime·printf("\n");
- if(gotraceback()){
- traceback((void*)r->rip, (void*)r->rsp, 0, g);
- tracebackothers(g);
- dumpregs(r);
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->rip, (void*)r->rsp, 0, g);
+ runtime·tracebackothers(g);
+ runtime·dumpregs(r);
}
- breakpoint();
- exit(2);
+ runtime·breakpoint();
+ runtime·exit(2);
}
void
-signalstack(byte *p, int32 n)
+runtime·signalstack(byte *p, int32 n)
{
Sigaltstack st;
st.ss_sp = p;
st.ss_size = n;
st.ss_flags = 0;
- sigaltstack(&st, nil);
+ runtime·sigaltstack(&st, nil);
}
void
-initsig(int32 queue)
+runtime·initsig(int32 queue)
{
static Sigaction sa;
- siginit();
+ runtime·siginit();
int32 i;
sa.sa_flags = SA_ONSTACK | SA_SIGINFO | SA_RESTORER;
sa.sa_mask = 0xFFFFFFFFFFFFFFFFULL;
- sa.sa_restorer = (void*)sigreturn;
+ sa.sa_restorer = (void*)runtime·sigreturn;
for(i = 0; i<NSIG; i++) {
- if(sigtab[i].flags) {
- if((sigtab[i].flags & SigQueue) != queue)
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
continue;
- if(sigtab[i].flags & (SigCatch | SigQueue))
- sa.sa_handler = (void*)sigtramp;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ sa.sa_handler = (void*)runtime·sigtramp;
else
- sa.sa_handler = (void*)sigignore;
- if(sigtab[i].flags & SigRestart)
+ sa.sa_handler = (void*)runtime·sigignore;
+ if(runtime·sigtab[i].flags & SigRestart)
sa.sa_flags |= SA_RESTART;
else
sa.sa_flags &= ~SA_RESTART;
- rt_sigaction(i, &sa, nil, 8);
+ runtime·rt_sigaction(i, &sa, nil, 8);
}
}
}
#include "amd64/asm.h"
-TEXT exit(SB),7,$0-8
+TEXT runtime·exit(SB),7,$0-8
MOVL 8(SP), DI
MOVL $231, AX // exitgroup - force all os threads to exit
SYSCALL
RET
-TEXT exit1(SB),7,$0-8
+TEXT runtime·exit1(SB),7,$0-8
MOVL 8(SP), DI
MOVL $60, AX // exit - exit the current os thread
SYSCALL
RET
-TEXT open(SB),7,$0-16
+TEXT runtime·open(SB),7,$0-16
MOVQ 8(SP), DI
MOVL 16(SP), SI
MOVL 20(SP), DX
SYSCALL
RET
-TEXT write(SB),7,$0-24
+TEXT runtime·write(SB),7,$0-24
MOVL 8(SP), DI
MOVQ 16(SP), SI
MOVL 24(SP), DX
SYSCALL
RET
-TEXT gettime(SB), 7, $32
+TEXT runtime·gettime(SB), 7, $32
LEAQ 8(SP), DI
MOVQ $0, SI
MOVQ $0xffffffffff600000, AX
MOVL BX, (DI)
RET
-TEXT rt_sigaction(SB),7,$0-32
+TEXT runtime·rt_sigaction(SB),7,$0-32
MOVL 8(SP), DI
MOVQ 16(SP), SI
MOVQ 24(SP), DX
SYSCALL
RET
-TEXT sigtramp(SB),7,$64
+TEXT runtime·sigtramp(SB),7,$64
get_tls(BX)
// save g
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ DX, 16(SP)
- CALL sighandler(SB)
+ CALL runtime·sighandler(SB)
// restore g
get_tls(BX)
MOVQ BP, g(BX)
RET
-TEXT sigignore(SB),7,$0
+TEXT runtime·sigignore(SB),7,$0
RET
-TEXT sigreturn(SB),7,$0
+TEXT runtime·sigreturn(SB),7,$0
MOVL $15, AX // rt_sigreturn
SYSCALL
INT $3 // not reached
-TEXT ·mmap(SB),7,$0
+TEXT runtime·mmap(SB),7,$0
MOVQ 8(SP), DI
MOVQ $0, SI
MOVQ 16(SP), SI
INCQ AX
RET
-TEXT ·munmap(SB),7,$0
+TEXT runtime·munmap(SB),7,$0
MOVQ 8(SP), DI
MOVQ 16(SP), SI
MOVQ $11, AX // munmap
SYSCALL
CMPQ AX, $0xfffffffffffff001
JLS 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
-TEXT notok(SB),7,$0
+TEXT runtime·notok(SB),7,$0
MOVQ $0xf1, BP
MOVQ BP, (BP)
RET
// int64 futex(int32 *uaddr, int32 op, int32 val,
// struct timespec *timeout, int32 *uaddr2, int32 val2);
-TEXT futex(SB),7,$0
+TEXT runtime·futex(SB),7,$0
MOVQ 8(SP), DI
MOVL 16(SP), SI
MOVL 20(SP), DX
RET
// int64 clone(int32 flags, void *stack, M *m, G *g, void (*fn)(void));
-TEXT clone(SB),7,$0
+TEXT runtime·clone(SB),7,$0
MOVL flags+8(SP), DI
MOVQ stack+16(SP), SI
// Set FS to point at m->tls.
LEAQ m_tls(R8), DI
- CALL settls(SB)
+ CALL runtime·settls(SB)
// In child, set up new stack
get_tls(CX)
MOVQ R8, m(CX)
MOVQ R9, g(CX)
- CALL stackcheck(SB)
+ CALL runtime·stackcheck(SB)
// Call fn
CALL R12
SYSCALL
JMP -3(PC) // keep exiting
-TEXT sigaltstack(SB),7,$-8
+TEXT runtime·sigaltstack(SB),7,$-8
MOVQ new+8(SP), DI
MOVQ old+16(SP), SI
MOVQ $131, AX
SYSCALL
CMPQ AX, $0xfffffffffffff001
JLS 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// set tls base to DI
-TEXT settls(SB),7,$32
+TEXT runtime·settls(SB),7,$32
ADDQ $16, DI // ELF wants to use -16(FS), -8(FS)
MOVQ DI, SI
SYSCALL
CMPQ AX, $0xfffffffffffff001
JLS 2(PC)
- CALL notok(SB)
+ CALL runtime·notok(SB)
RET
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-TEXT _rt0_arm_linux(SB),7,$0
+TEXT _rt0_arm_linux(SB),7,$0
B _rt0_arm(SB)
#include "os.h"
void
-dumpregs(Sigcontext *r)
+runtime·dumpregs(Sigcontext *r)
{
- printf("trap %x\n", r->trap_no);
- printf("error %x\n", r->error_code);
- printf("oldmask %x\n", r->oldmask);
- printf("r0 %x\n", r->arm_r0);
- printf("r1 %x\n", r->arm_r1);
- printf("r2 %x\n", r->arm_r2);
- printf("r3 %x\n", r->arm_r3);
- printf("r4 %x\n", r->arm_r4);
- printf("r5 %x\n", r->arm_r5);
- printf("r6 %x\n", r->arm_r6);
- printf("r7 %x\n", r->arm_r7);
- printf("r8 %x\n", r->arm_r8);
- printf("r9 %x\n", r->arm_r9);
- printf("r10 %x\n", r->arm_r10);
- printf("fp %x\n", r->arm_fp);
- printf("ip %x\n", r->arm_ip);
- printf("sp %x\n", r->arm_sp);
- printf("lr %x\n", r->arm_lr);
- printf("pc %x\n", r->arm_pc);
- printf("cpsr %x\n", r->arm_cpsr);
- printf("fault %x\n", r->fault_address);
+ runtime·printf("trap %x\n", r->trap_no);
+ runtime·printf("error %x\n", r->error_code);
+ runtime·printf("oldmask %x\n", r->oldmask);
+ runtime·printf("r0 %x\n", r->arm_r0);
+ runtime·printf("r1 %x\n", r->arm_r1);
+ runtime·printf("r2 %x\n", r->arm_r2);
+ runtime·printf("r3 %x\n", r->arm_r3);
+ runtime·printf("r4 %x\n", r->arm_r4);
+ runtime·printf("r5 %x\n", r->arm_r5);
+ runtime·printf("r6 %x\n", r->arm_r6);
+ runtime·printf("r7 %x\n", r->arm_r7);
+ runtime·printf("r8 %x\n", r->arm_r8);
+ runtime·printf("r9 %x\n", r->arm_r9);
+ runtime·printf("r10 %x\n", r->arm_r10);
+ runtime·printf("fp %x\n", r->arm_fp);
+ runtime·printf("ip %x\n", r->arm_ip);
+ runtime·printf("sp %x\n", r->arm_sp);
+ runtime·printf("lr %x\n", r->arm_lr);
+ runtime·printf("pc %x\n", r->arm_pc);
+ runtime·printf("cpsr %x\n", r->arm_cpsr);
+ runtime·printf("fault %x\n", r->fault_address);
}
/*
* This assembler routine takes the args from registers, puts them on the stack,
* and calls sighandler().
*/
-extern void sigtramp(void);
-extern void sigignore(void); // just returns
-extern void sigreturn(void); // calls sigreturn
+extern void runtime·sigtramp(void);
+extern void runtime·sigignore(void); // just returns
+extern void runtime·sigreturn(void); // calls runtime·sigreturn
String
-signame(int32 sig)
+runtime·signame(int32 sig)
{
if(sig < 0 || sig >= NSIG)
- return emptystring;
- return gostringnocopy((byte*)sigtab[sig].name);
+ return runtime·emptystring;
+ return runtime·gostringnocopy((byte*)runtime·sigtab[sig].name);
}
void
-sighandler(int32 sig, Siginfo *info, void *context)
+runtime·sighandler(int32 sig, Siginfo *info, void *context)
{
Ucontext *uc;
Sigcontext *r;
uc = context;
r = &uc->uc_mcontext;
- if((gp = m->curg) != nil && (sigtab[sig].flags & SigPanic)) {
+ if((gp = m->curg) != nil && (runtime·sigtab[sig].flags & SigPanic)) {
// Make it look like a call to the signal func.
// Have to pass arguments out of band since
// augmenting the stack frame would break
// old link register is more useful in the stack trace.
if(r->arm_pc != 0)
r->arm_lr = r->arm_pc;
- r->arm_pc = (uintptr)sigpanic;
+ r->arm_pc = (uintptr)runtime·sigpanic;
return;
}
- if(sigtab[sig].flags & SigQueue) {
- if(sigsend(sig) || (sigtab[sig].flags & SigIgnore))
+ if(runtime·sigtab[sig].flags & SigQueue) {
+ if(runtime·sigsend(sig) || (runtime·sigtab[sig].flags & SigIgnore))
return;
- exit(2); // SIGINT, SIGTERM, etc
+ runtime·exit(2); // SIGINT, SIGTERM, etc
}
- if(panicking) // traceback already printed
- exit(2);
- panicking = 1;
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
if(sig < 0 || sig >= NSIG)
- printf("Signal %d\n", sig);
+ runtime·printf("Signal %d\n", sig);
else
- printf("%s\n", sigtab[sig].name);
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
- printf("PC=%x\n", r->arm_pc);
- printf("\n");
+ runtime·printf("PC=%x\n", r->arm_pc);
+ runtime·printf("\n");
- if(gotraceback()){
- traceback((void*)r->arm_pc, (void*)r->arm_sp, (void*)r->arm_lr, m->curg);
- tracebackothers(m->curg);
- printf("\n");
- dumpregs(r);
+ if(runtime·gotraceback()){
+ runtime·traceback((void*)r->arm_pc, (void*)r->arm_sp, (void*)r->arm_lr, m->curg);
+ runtime·tracebackothers(m->curg);
+ runtime·printf("\n");
+ runtime·dumpregs(r);
}
// breakpoint();
- exit(2);
+ runtime·exit(2);
}
void
-signalstack(byte *p, int32 n)
+runtime·signalstack(byte *p, int32 n)
{
Sigaltstack st;
st.ss_sp = p;
st.ss_size = n;
st.ss_flags = 0;
- sigaltstack(&st, nil);
+ runtime·sigaltstack(&st, nil);
}
void
-initsig(int32 queue)
+runtime·initsig(int32 queue)
{
static Sigaction sa;
- siginit();
+ runtime·siginit();
int32 i;
sa.sa_flags = SA_ONSTACK | SA_SIGINFO | SA_RESTORER;
sa.sa_mask.sig[0] = 0xFFFFFFFF;
sa.sa_mask.sig[1] = 0xFFFFFFFF;
- sa.sa_restorer = (void*)sigreturn;
+ sa.sa_restorer = (void*)runtime·sigreturn;
for(i = 0; i<NSIG; i++) {
- if(sigtab[i].flags) {
- if((sigtab[i].flags & SigQueue) != queue)
+ if(runtime·sigtab[i].flags) {
+ if((runtime·sigtab[i].flags & SigQueue) != queue)
continue;
- if(sigtab[i].flags & (SigCatch | SigQueue))
- sa.sa_handler = (void*)sigtramp;
+ if(runtime·sigtab[i].flags & (SigCatch | SigQueue))
+ sa.sa_handler = (void*)runtime·sigtramp;
else
- sa.sa_handler = (void*)sigignore;
- if(sigtab[i].flags & SigRestart)
+ sa.sa_handler = (void*)runtime·sigignore;
+ if(runtime·sigtab[i].flags & SigRestart)
sa.sa_flags |= SA_RESTART;
else
sa.sa_flags &= ~SA_RESTART;
- rt_sigaction(i, &sa, nil, 8);
+ runtime·rt_sigaction(i, &sa, nil, 8);
}
}
}
#define ARM_BASE (SYS_BASE + 0x0f0000)
#define SYS_ARM_cacheflush (ARM_BASE + 2)
-TEXT write(SB),7,$0
+TEXT runtime·write(SB),7,$0
MOVW 0(FP), R0
MOVW 4(FP), R1
MOVW 8(FP), R2
SWI $0
RET
-TEXT exit(SB),7,$-4
+TEXT runtime·exit(SB),7,$-4
MOVW 0(FP), R0
MOVW $SYS_exit_group, R7
SWI $0
MOVW $1002, R1
MOVW R0, (R1) // fail hard
-TEXT exit1(SB),7,$-4
+TEXT runtime·exit1(SB),7,$-4
MOVW 0(FP), R0
MOVW $SYS_exit, R7
SWI $0
MOVW $1003, R1
MOVW R0, (R1) // fail hard
-TEXT ·mmap(SB),7,$0
+TEXT runtime·mmap(SB),7,$0
MOVW 0(FP), R0
MOVW 4(FP), R1
MOVW 8(FP), R2
SWI $0
RET
-TEXT ·munmap(SB),7,$0
+TEXT runtime·munmap(SB),7,$0
MOVW 0(FP), R0
MOVW 4(FP), R1
MOVW $SYS_munmap, R7
SWI $0
RET
-TEXT gettime(SB),7,$32
+TEXT runtime·gettime(SB),7,$32
/* dummy version - return 0,0 */
MOVW $0, R1
MOVW 0(FP), R0
// int32 futex(int32 *uaddr, int32 op, int32 val,
// struct timespec *timeout, int32 *uaddr2, int32 val2);
-TEXT futex(SB),7,$0
+TEXT runtime·futex(SB),7,$0
MOVW 4(SP), R0
MOVW 8(SP), R1
MOVW 12(SP), R2
// int32 clone(int32 flags, void *stack, M *m, G *g, void (*fn)(void));
-TEXT clone(SB),7,$0
+TEXT runtime·clone(SB),7,$0
MOVW flags+0(FP), R0
MOVW stack+4(FP), R1
MOVW $0, R2 // parent tid ptr
MOVW $1234, R1
CMP R0, R1
BEQ 2(PC)
- BL abort(SB)
+ BL runtime·abort(SB)
MOVW 0(R13), m
MOVW 4(R13), g
MOVW 0(m), R0
MOVW 0(g), R0
- BL emptyfunc(SB) // fault if stack check is wrong
+ BL runtime·emptyfunc(SB) // fault if stack check is wrong
// Initialize m->procid to Linux tid
MOVW $SYS_gettid, R7
MOVW $0, R0
MOVW R0, 4(R13)
- BL exit1(SB)
+ BL runtime·exit1(SB)
// It shouldn't return
MOVW $1234, R0
MOVW R0, (R1)
-TEXT cacheflush(SB),7,$0
+TEXT runtime·cacheflush(SB),7,$0
MOVW 0(FP), R0
MOVW 4(FP), R1
MOVW $0, R2
SWI $0
RET
-TEXT sigaltstack(SB),7,$0
+TEXT runtime·sigaltstack(SB),7,$0
MOVW 0(FP), R0
MOVW 4(FP), R1
MOVW $SYS_sigaltstack, R7
SWI $0
RET
-TEXT sigignore(SB),7,$0
+TEXT runtime·sigignore(SB),7,$0
RET
-TEXT sigtramp(SB),7,$24
+TEXT runtime·sigtramp(SB),7,$24
MOVW m_gsignal(m), g
MOVW R0, 4(R13)
MOVW R1, 8(R13)
MOVW R2, 12(R13)
- BL sighandler(SB)
+ BL runtime·sighandler(SB)
RET
-TEXT rt_sigaction(SB),7,$0
+TEXT runtime·rt_sigaction(SB),7,$0
MOVW 0(FP), R0
MOVW 4(FP), R1
MOVW 8(FP), R2
SWI $0
RET
-TEXT sigreturn(SB),7,$0
+TEXT runtime·sigreturn(SB),7,$0
MOVW $SYS_rt_sigreturn, R7
SWI $0
RET
#include "malloc.h"
void*
-SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n)
{
void *p;
mstats.sys += n;
- p = runtime_mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ p = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p < (void*)4096) {
if(p == (void*)EACCES) {
- printf("mmap: access denied\n");
- printf("If you're running SELinux, enable execmem for this process.\n");
- exit(2);
+ runtime·printf("mmap: access denied\n");
+ runtime·printf("If you're running SELinux, enable execmem for this process.\n");
+ runtime·exit(2);
}
- printf("mmap: errno=%p\n", p);
- throw("mmap");
+ runtime·printf("mmap: errno=%p\n", p);
+ runtime·throw("mmap");
}
return p;
}
void
-SysUnused(void *v, uintptr n)
+runtime·SysUnused(void *v, uintptr n)
{
USED(v);
USED(n);
}
void
-SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n)
{
mstats.sys -= n;
- runtime_munmap(v, n);
+ runtime·munmap(v, n);
}
void
-SysMemInit(void)
+runtime·SysMemInit(void)
{
// Code generators assume that references to addresses
// on the first page will fault. Map the page explicitly with
// allocating that page as the virtual address space fills.
// Ignore any error, since other systems might be smart
// enough to never allow anything there.
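// (With this page mapped PROT_NONE, a nil pointer dereference reliably faults
// here and reaches runtime·sighandler as SIGSEGV with a fault address below
// 0x1000, which runtime·sigpanic then reports as the usual nil pointer panic;
// see the sigcode1 < 0x1000 checks above.)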
- runtime_mmap(nil, 4096, PROT_NONE, MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0);
+ runtime·mmap(nil, 4096, PROT_NONE, MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0);
}
// license that can be found in the LICENSE file.
// Linux-specific system calls
-int32 futex(uint32*, int32, uint32, Timespec*, uint32*, uint32);
-int32 clone(int32, void*, M*, G*, void(*)(void));
+int32 runtime·futex(uint32*, int32, uint32, Timespec*, uint32*, uint32);
+int32 runtime·clone(int32, void*, M*, G*, void(*)(void));
struct Sigaction;
-void rt_sigaction(uintptr, struct Sigaction*, void*, uintptr);
+void runtime·rt_sigaction(uintptr, struct Sigaction*, void*, uintptr);
-void sigaltstack(Sigaltstack*, Sigaltstack*);
-void sigpanic(void);
+void runtime·sigaltstack(Sigaltstack*, Sigaltstack*);
+void runtime·sigpanic(void);
#define Q SigQueue
#define P SigPanic
-SigTab sigtab[] = {
+SigTab runtime·sigtab[] = {
/* 0 */ 0, "SIGNONE: no trap",
/* 1 */ Q+R, "SIGHUP: terminal line hangup",
/* 2 */ Q+R, "SIGINT: interrupt",
#include "defs.h"
#include "os.h"
-extern SigTab sigtab[];
+extern SigTab runtime·sigtab[];
// Linux futex.
//
// as an errno. Libpthread ignores the return value
// here, and so can we: as it says a few lines up,
// spurious wakeups are allowed.
- futex(addr, FUTEX_WAIT, val, &longtime, nil, 0);
+ runtime·futex(addr, FUTEX_WAIT, val, &longtime, nil, 0);
}
// If any procs are sleeping on addr, wake up at least one.
{
int64 ret;
- ret = futex(addr, FUTEX_WAKE, 1, nil, nil, 0);
+ ret = runtime·futex(addr, FUTEX_WAKE, 1, nil, nil, 0);
if(ret >= 0)
return;
// EAGAIN or EINTR, but if it does, it would be
// safe to loop and call futex again.
- prints("futexwakeup addr=");
- ·printpointer(addr);
- prints(" returned ");
- ·printint(ret);
- prints("\n");
+ runtime·prints("futexwakeup addr=");
+ runtime·printpointer(addr);
+ runtime·prints(" returned ");
+ runtime·printint(ret);
+ runtime·prints("\n");
*(int32*)0x1006 = 0x1006;
}
// The uncontended case runs entirely in user space.
// When contention is detected, we defer to the kernel (futex).
//
-// A reminder: compare-and-swap cas(addr, old, new) does
+// A reminder: compare-and-swap runtime·cas(addr, old, new) does
// if(*addr == old) { *addr = new; return 1; }
// else return 0;
// but atomically.
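// The key below packs both pieces of state into one word (this is readable
// straight from the cas calls that follow; the sample values are just an
// illustration):
//	bit 0        locked bit (v|1 grabs the lock, v&~1 releases it)
//	bits 1..31   waiter count, in units of 2 (v+2 registers a waiter,
//	             v-2 removes one after wakeup)
// e.g. key == 0: unlocked; key == 1: held, no waiters;
//      key == 5: held, two threads parked in the kernel.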
again:
v = l->key;
if((v&1) == 0){
- if(cas(&l->key, v, v|1)){
+ if(runtime·cas(&l->key, v, v|1)){
// Lock wasn't held; we grabbed it.
return;
}
}
// Lock was held; try to add ourselves to the waiter count.
- if(!cas(&l->key, v, v+2))
+ if(!runtime·cas(&l->key, v, v+2))
goto again;
// We're accounted for, now sleep in the kernel.
for(;;){
v = l->key;
if(v < 2)
- throw("bad lock key");
- if(cas(&l->key, v, v-2))
+ runtime·throw("bad lock key");
+ if(runtime·cas(&l->key, v, v-2))
break;
}
again:
v = l->key;
if((v&1) == 0)
- throw("unlock of unlocked lock");
- if(!cas(&l->key, v, v&~1))
+ runtime·throw("unlock of unlocked lock");
+ if(!runtime·cas(&l->key, v, v&~1))
goto again;
// If there were waiters, wake one.
}
void
-lock(Lock *l)
+runtime·lock(Lock *l)
{
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
m->locks++;
futexlock(l);
}
void
-unlock(Lock *l)
+runtime·unlock(Lock *l)
{
m->locks--;
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
futexunlock(l);
}
void
-destroylock(Lock*)
+runtime·destroylock(Lock*)
{
}
// you unlock the lock.
void
-noteclear(Note *n)
+runtime·noteclear(Note *n)
{
n->lock.key = 0; // memset(n, 0, sizeof *n)
futexlock(&n->lock);
}
void
-notewakeup(Note *n)
+runtime·notewakeup(Note *n)
{
futexunlock(&n->lock);
}
void
-notesleep(Note *n)
+runtime·notesleep(Note *n)
{
futexlock(&n->lock);
futexunlock(&n->lock); // Let other sleepers find out too.
};
void
-newosproc(M *m, G *g, void *stk, void (*fn)(void))
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
{
int32 ret;
int32 flags;
m->tls[0] = m->id; // so 386 asm can find it
if(0){
- printf("newosproc stk=%p m=%p g=%p fn=%p clone=%p id=%d/%d ostk=%p\n",
- stk, m, g, fn, clone, m->id, m->tls[0], &m);
+ runtime·printf("newosproc stk=%p m=%p g=%p fn=%p clone=%p id=%d/%d ostk=%p\n",
+ stk, m, g, fn, runtime·clone, m->id, m->tls[0], &m);
}
- ret = clone(flags, stk, m, g, fn);
+ ret = runtime·clone(flags, stk, m, g, fn);
if(ret < 0)
*(int32*)123 = 123;
}
void
-osinit(void)
+runtime·osinit(void)
{
}
// Called to initialize a new m (including the bootstrap m).
void
-minit(void)
+runtime·minit(void)
{
// Initialize signal handling.
- m->gsignal = malg(32*1024); // OS X wants >=8K, Linux >=2K
- signalstack(m->gsignal->stackguard, 32*1024);
+ m->gsignal = runtime·malg(32*1024); // OS X wants >=8K, Linux >=2K
+ runtime·signalstack(m->gsignal->stackguard, 32*1024);
}
void
-sigpanic(void)
+runtime·sigpanic(void)
{
switch(g->sig) {
case SIGBUS:
if(g->sigcode0 == BUS_ADRERR && g->sigcode1 < 0x1000)
- panicstring("invalid memory address or nil pointer dereference");
- printf("unexpected fault address %p\n", g->sigcode1);
- throw("fault");
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
case SIGSEGV:
if((g->sigcode0 == 0 || g->sigcode0 == SEGV_MAPERR || g->sigcode0 == SEGV_ACCERR) && g->sigcode1 < 0x1000)
- panicstring("invalid memory address or nil pointer dereference");
- printf("unexpected fault address %p\n", g->sigcode1);
- throw("fault");
+ runtime·panicstring("invalid memory address or nil pointer dereference");
+ runtime·printf("unexpected fault address %p\n", g->sigcode1);
+ runtime·throw("fault");
case SIGFPE:
switch(g->sigcode0) {
case FPE_INTDIV:
- panicstring("integer divide by zero");
+ runtime·panicstring("integer divide by zero");
case FPE_INTOVF:
- panicstring("integer overflow");
+ runtime·panicstring("integer overflow");
}
- panicstring("floating point error");
+ runtime·panicstring("floating point error");
}
- panicstring(sigtab[g->sig].name);
+ runtime·panicstring(runtime·sigtab[g->sig].name);
}
#include "defs.h"
#include "type.h"
-MHeap mheap;
-MStats mstats;
+MHeap runtime·mheap;
+extern MStats mstats; // defined in extern.go
-extern volatile int32 ·MemProfileRate;
+extern volatile int32 runtime·MemProfileRate;
// Same algorithm from chan.c, but a different
// instance of the static uint32 x.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
void*
-mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
+runtime·mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
{
int32 sizeclass, rate;
MCache *c;
void *v;
uint32 *ref;
- if(gcwaiting && g != m->g0 && m->locks == 0)
- gosched();
+ if(runtime·gcwaiting && g != m->g0 && m->locks == 0)
+ runtime·gosched();
if(m->mallocing)
- throw("malloc/free - deadlock");
+ runtime·throw("malloc/free - deadlock");
m->mallocing = 1;
if(size == 0)
size = 1;
mstats.nmalloc++;
if(size <= MaxSmallSize) {
// Allocate from mcache free lists.
- sizeclass = SizeToClass(size);
- size = class_to_size[sizeclass];
+ sizeclass = runtime·SizeToClass(size);
+ size = runtime·class_to_size[sizeclass];
c = m->mcache;
- v = MCache_Alloc(c, sizeclass, size, zeroed);
+ v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
if(v == nil)
- throw("out of memory");
+ runtime·throw("out of memory");
mstats.alloc += size;
mstats.total_alloc += size;
mstats.by_size[sizeclass].nmalloc++;
- if(!mlookup(v, nil, nil, nil, &ref)) {
- printf("malloc %D; mlookup failed\n", (uint64)size);
- throw("malloc mlookup");
+ if(!runtime·mlookup(v, nil, nil, nil, &ref)) {
+ runtime·printf("malloc %D; runtime·mlookup failed\n", (uint64)size);
+ runtime·throw("malloc runtime·mlookup");
}
*ref = RefNone | refflag;
} else {
npages = size >> PageShift;
if((size & PageMask) != 0)
npages++;
- s = MHeap_Alloc(&mheap, npages, 0, 1);
+ s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1);
if(s == nil)
- throw("out of memory");
+ runtime·throw("out of memory");
size = npages<<PageShift;
mstats.alloc += size;
mstats.total_alloc += size;
m->mallocing = 0;
- if(!(refflag & RefNoProfiling) && (rate = ·MemProfileRate) > 0) {
+ if(!(refflag & RefNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
if(size >= rate)
goto profile;
if(m->mcache->next_sample > size)
m->mcache->next_sample = fastrand1() % (2*rate);
profile:
*ref |= RefProfiled;
- MProf_Malloc(v, size);
+ runtime·MProf_Malloc(v, size);
}
}
if(dogc && mstats.heap_alloc >= mstats.next_gc)
- gc(0);
+ runtime·gc(0);
return v;
}
void*
-malloc(uintptr size)
+runtime·malloc(uintptr size)
{
- return mallocgc(size, 0, 0, 1);
+ return runtime·mallocgc(size, 0, 0, 1);
}
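
runtime·malloc is mallocgc with every knob at its default: no ref flags, a GC check allowed by the caller that holds no locks, and a zeroed block. For reference, a hedged sketch of how other call sites in this change drive those knobs; the sizes and variable below are illustrative only.

static void
example_mallocgc(void)
{
	byte *p;

	// Collector should not scan this block for pointers; may trigger GC.
	p = runtime·mallocgc(4096, RefNoPointers, 1, 1);
	runtime·free(p);

	// Internal bookkeeping that must not appear in heap profiles,
	// and that the caller will initialize itself (zeroed=0).
	p = runtime·mallocgc(4096, RefNoProfiling, 0, 0);
	runtime·free(p);
}
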
// Free the object whose base pointer is v.
void
-free(void *v)
+runtime·free(void *v)
{
int32 sizeclass, size;
MSpan *s;
return;
if(m->mallocing)
- throw("malloc/free - deadlock");
+ runtime·throw("malloc/free - deadlock");
m->mallocing = 1;
- if(!mlookup(v, nil, nil, &s, &ref)) {
- printf("free %p: not an allocated block\n", v);
- throw("free mlookup");
+ if(!runtime·mlookup(v, nil, nil, &s, &ref)) {
+ runtime·printf("free %p: not an allocated block\n", v);
+ runtime·throw("free runtime·mlookup");
}
prof = *ref & RefProfiled;
*ref = RefFree;
if(sizeclass == 0) {
// Large object.
if(prof)
- MProf_Free(v, s->npages<<PageShift);
+ runtime·MProf_Free(v, s->npages<<PageShift);
mstats.alloc -= s->npages<<PageShift;
- runtime_memclr(v, s->npages<<PageShift);
- MHeap_Free(&mheap, s, 1);
+ runtime·memclr(v, s->npages<<PageShift);
+ runtime·MHeap_Free(&runtime·mheap, s, 1);
} else {
// Small object.
c = m->mcache;
- size = class_to_size[sizeclass];
+ size = runtime·class_to_size[sizeclass];
if(size > sizeof(uintptr))
((uintptr*)v)[1] = 1; // mark as "needs to be zeroed"
if(prof)
- MProf_Free(v, size);
+ runtime·MProf_Free(v, size);
mstats.alloc -= size;
mstats.by_size[sizeclass].nfree++;
- MCache_Free(c, v, sizeclass, size);
+ runtime·MCache_Free(c, v, sizeclass, size);
}
m->mallocing = 0;
}
int32
-mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
+runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
{
uintptr n, nobj, i;
byte *p;
MSpan *s;
mstats.nlookup++;
- s = MHeap_LookupMaybe(&mheap, (uintptr)v>>PageShift);
+ s = runtime·MHeap_LookupMaybe(&runtime·mheap, (uintptr)v>>PageShift);
if(sp)
*sp = s;
if(s == nil) {
return 0;
}
- n = class_to_size[s->sizeclass];
+ n = runtime·class_to_size[s->sizeclass];
i = ((byte*)v - p)/n;
if(base)
*base = p + i*n;
if(0) {
nobj = (s->npages << PageShift) / (n + RefcountOverhead);
if((byte*)s->gcref < p || (byte*)(s->gcref+nobj) > p+(s->npages<<PageShift)) {
- printf("odd span state=%d span=%p base=%p sizeclass=%d n=%D size=%D npages=%D\n",
+ runtime·printf("odd span state=%d span=%p base=%p sizeclass=%d n=%D size=%D npages=%D\n",
s->state, s, p, s->sizeclass, (uint64)nobj, (uint64)n, (uint64)s->npages);
- printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%D nobj=%D size=%D end=%p end=%p\n",
+ runtime·printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%D nobj=%D size=%D end=%p end=%p\n",
s->sizeclass, v, p, s->gcref, (uint64)s->npages<<PageShift,
(uint64)nobj, (uint64)n, s->gcref + nobj, p+(s->npages<<PageShift));
- throw("bad gcref");
+ runtime·throw("bad gcref");
}
}
if(ref)
}
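
The interior-pointer math in mlookup is easiest to see with numbers. All values below are assumptions for illustration (PageShift, the span base, and the size class are not fixed by this excerpt):

//	span base   p = 0x400000		// s->start << PageShift, assuming 4 kB pages
//	object size n = 48			// runtime·class_to_size[s->sizeclass]
//	pointer     v = 0x4000a0		// interior pointer into the 4th object
//	i    = ((byte*)v - p)/n = 0xa0/48 = 3
//	base = p + i*n = 0x400000 + 144 = 0x400090
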
MCache*
-allocmcache(void)
+runtime·allocmcache(void)
{
MCache *c;
- lock(&mheap);
- c = FixAlloc_Alloc(&mheap.cachealloc);
- mstats.mcache_inuse = mheap.cachealloc.inuse;
- mstats.mcache_sys = mheap.cachealloc.sys;
- unlock(&mheap);
+ runtime·lock(&runtime·mheap);
+ c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
+ mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
+ mstats.mcache_sys = runtime·mheap.cachealloc.sys;
+ runtime·unlock(&runtime·mheap);
return c;
}
void
-mallocinit(void)
+runtime·mallocinit(void)
{
- SysMemInit();
- InitSizes();
- MHeap_Init(&mheap, SysAlloc);
- m->mcache = allocmcache();
+ runtime·SysMemInit();
+ runtime·InitSizes();
+ runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc);
+ m->mcache = runtime·allocmcache();
// See if it works.
- free(malloc(1));
+ runtime·free(runtime·malloc(1));
}
// Runtime stubs.
void*
-mal(uintptr n)
+runtime·mal(uintptr n)
{
- return mallocgc(n, 0, 1, 1);
+ return runtime·mallocgc(n, 0, 1, 1);
}
-func mal(n uint32) (ret *uint8) {
- ret = mal(n);
+func new(n uint32) (ret *uint8) {
+ ret = runtime·mal(n);
}
// Stack allocator uses malloc/free most of the time,
// but during malloc or GC it falls back on a fixed-size free-list
// allocator, assuming that inside malloc all the stack
// frames are small, so that all the stack allocations
// will be a single size, the minimum (right now, 5k).
-struct {
+static struct {
Lock;
FixAlloc;
} stacks;
void*
-stackalloc(uint32 n)
+runtime·stackalloc(uint32 n)
{
void *v;
uint32 *ref;
if(m->mallocing || m->gcing) {
- lock(&stacks);
+ runtime·lock(&stacks);
if(stacks.size == 0)
- FixAlloc_Init(&stacks, n, SysAlloc, nil, nil);
+ runtime·FixAlloc_Init(&stacks, n, runtime·SysAlloc, nil, nil);
if(stacks.size != n) {
- printf("stackalloc: in malloc, size=%D want %d", (uint64)stacks.size, n);
- throw("stackalloc");
+ runtime·printf("stackalloc: in malloc, size=%D want %d", (uint64)stacks.size, n);
+ runtime·throw("stackalloc");
}
- v = FixAlloc_Alloc(&stacks);
+ v = runtime·FixAlloc_Alloc(&stacks);
mstats.stacks_inuse = stacks.inuse;
mstats.stacks_sys = stacks.sys;
- unlock(&stacks);
+ runtime·unlock(&stacks);
return v;
}
- v = mallocgc(n, RefNoProfiling, 0, 0);
- if(!mlookup(v, nil, nil, nil, &ref))
- throw("stackalloc mlookup");
+ v = runtime·mallocgc(n, RefNoProfiling, 0, 0);
+ if(!runtime·mlookup(v, nil, nil, nil, &ref))
+ runtime·throw("stackalloc runtime·mlookup");
*ref = RefStack;
return v;
}
void
-stackfree(void *v)
+runtime·stackfree(void *v)
{
if(m->mallocing || m->gcing) {
- lock(&stacks);
- FixAlloc_Free(&stacks, v);
+ runtime·lock(&stacks);
+ runtime·FixAlloc_Free(&stacks, v);
mstats.stacks_inuse = stacks.inuse;
mstats.stacks_sys = stacks.sys;
- unlock(&stacks);
+ runtime·unlock(&stacks);
return;
}
- free(v);
+ runtime·free(v);
}
func Alloc(n uintptr) (p *byte) {
- p = malloc(n);
+ p = runtime·malloc(n);
}
func Free(p *byte) {
- free(p);
+ runtime·free(p);
}
func Lookup(p *byte) (base *byte, size uintptr) {
- mlookup(p, &base, &size, nil, nil);
+ runtime·mlookup(p, &base, &size, nil, nil);
}
func GC() {
- gc(1);
+ runtime·gc(1);
}
func SetFinalizer(obj Eface, finalizer Eface) {
Type *t;
if(obj.type == nil) {
- printf("runtime.SetFinalizer: first argument is nil interface\n");
+ runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
throw:
- throw("runtime.SetFinalizer");
+ runtime·throw("runtime.SetFinalizer");
}
if(obj.type->kind != KindPtr) {
- printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
+ runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
goto throw;
}
- if(!mlookup(obj.data, &base, &size, nil, nil) || obj.data != base) {
- printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
+ if(!runtime·mlookup(obj.data, &base, &size, nil, nil) || obj.data != base) {
+ runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
goto throw;
}
nret = 0;
if(finalizer.type != nil) {
if(finalizer.type->kind != KindFunc) {
badfunc:
- printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
+ runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
goto throw;
}
ft = (FuncType*)finalizer.type;
}
nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1);
- if(getfinalizer(obj.data, 0)) {
- printf("runtime.SetFinalizer: finalizer already set");
+ if(runtime·getfinalizer(obj.data, 0)) {
+ runtime·printf("runtime.SetFinalizer: finalizer already set");
goto throw;
}
}
- addfinalizer(obj.data, finalizer.data, nret);
+ runtime·addfinalizer(obj.data, finalizer.data, nret);
}
// an out-of-memory error has been detected midway through
// an allocation. It is okay if SysFree is a no-op.
-void* SysAlloc(uintptr nbytes);
-void SysFree(void *v, uintptr nbytes);
-void SysUnused(void *v, uintptr nbytes);
-void SysMemInit(void);
+void* runtime·SysAlloc(uintptr nbytes);
+void runtime·SysFree(void *v, uintptr nbytes);
+void runtime·SysUnused(void *v, uintptr nbytes);
+void runtime·SysMemInit(void);
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
// MCache and MSpan structures (the cachealloc and spanalloc fields of MHeap).
uintptr sys; // bytes obtained from system
};
-void FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
-void* FixAlloc_Alloc(FixAlloc *f);
-void FixAlloc_Free(FixAlloc *f, void *p);
+void runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
+void* runtime·FixAlloc_Alloc(FixAlloc *f);
+void runtime·FixAlloc_Free(FixAlloc *f, void *p);
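
A hedged usage sketch of the FixAlloc interface, mirroring the cachealloc and stacks patterns elsewhere in this change; the static instance and function below are invented for the example.

static FixAlloc example_fix;

static void
example_fixalloc(void)
{
	MCache *c;

	if(example_fix.size == 0)	// one-time setup, as stackalloc does for its FixAlloc
		runtime·FixAlloc_Init(&example_fix, sizeof(MCache), runtime·SysAlloc, nil, nil);
	c = runtime·FixAlloc_Alloc(&example_fix);	// pop from the free list or carve from a chunk
	// ... use c ...
	runtime·FixAlloc_Free(&example_fix, c);		// push it back on the free list
}
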
// Statistics.
} by_size[NumSizeClasses];
};
-#define mstats ·MemStats /* name shared with Go */
+#define mstats runtime·MemStats /* name shared with Go */
extern MStats mstats;
// taking a bunch of objects out of the central lists
// and putting them in the thread free list.
-int32 SizeToClass(int32);
-extern int32 class_to_size[NumSizeClasses];
-extern int32 class_to_allocnpages[NumSizeClasses];
-extern int32 class_to_transfercount[NumSizeClasses];
-extern void InitSizes(void);
+int32 runtime·SizeToClass(int32);
+extern int32 runtime·class_to_size[NumSizeClasses];
+extern int32 runtime·class_to_allocnpages[NumSizeClasses];
+extern int32 runtime·class_to_transfercount[NumSizeClasses];
+extern void runtime·InitSizes(void);
// Per-thread (in Go, per-M) cache for small objects.
int32 next_sample; // trigger heap sample after allocating this many bytes
};
-void* MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
-void MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
-void MCache_ReleaseAll(MCache *c);
+void* runtime·MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
+void runtime·MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
+void runtime·MCache_ReleaseAll(MCache *c);
// An MSpan is a run of pages.
enum
};
};
-void MSpan_Init(MSpan *span, PageID start, uintptr npages);
+void runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages);
// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists. We use empty MSpan structures as list heads.
-void MSpanList_Init(MSpan *list);
-bool MSpanList_IsEmpty(MSpan *list);
-void MSpanList_Insert(MSpan *list, MSpan *span);
-void MSpanList_Remove(MSpan *span); // from whatever list it is in
+void runtime·MSpanList_Init(MSpan *list);
+bool runtime·MSpanList_IsEmpty(MSpan *list);
+void runtime·MSpanList_Insert(MSpan *list, MSpan *span);
+void runtime·MSpanList_Remove(MSpan *span); // from whatever list it is in
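
The list head is itself an MSpan used as a dummy node of a circular doubly-linked list, so emptiness is simply head.next == &head. A small sketch with invented names, assuming only the four functions declared above:

static MSpan example_head, example_span;	// zero-initialized, so next/prev start out nil

static void
example_spanlist(void)
{
	runtime·MSpanList_Init(&example_head);			// empty: head.next == &example_head
	runtime·MSpanList_Insert(&example_head, &example_span);	// head <-> span <-> head
	runtime·MSpanList_Remove(&example_span);		// back to the empty state
}
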
// Central list of free objects of a given size.
int32 nfree;
};
-void MCentral_Init(MCentral *c, int32 sizeclass);
-int32 MCentral_AllocList(MCentral *c, int32 n, MLink **first);
-void MCentral_FreeList(MCentral *c, int32 n, MLink *first);
+void runtime·MCentral_Init(MCentral *c, int32 sizeclass);
+int32 runtime·MCentral_AllocList(MCentral *c, int32 n, MLink **first);
+void runtime·MCentral_FreeList(MCentral *c, int32 n, MLink *first);
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
FixAlloc spanalloc; // allocator for Span*
FixAlloc cachealloc; // allocator for MCache*
};
-extern MHeap mheap;
+extern MHeap runtime·mheap;
-void MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
-MSpan* MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
-void MHeap_Free(MHeap *h, MSpan *s, int32 acct);
-MSpan* MHeap_Lookup(MHeap *h, PageID p);
-MSpan* MHeap_LookupMaybe(MHeap *h, PageID p);
-void MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
+void runtime·MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
+MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
+void runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
+MSpan* runtime·MHeap_Lookup(MHeap *h, PageID p);
+MSpan* runtime·MHeap_LookupMaybe(MHeap *h, PageID p);
+void runtime·MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
-void* mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
-int32 mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
-void gc(int32 force);
+void* runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
+int32 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
+void runtime·gc(int32 force);
-void* SysAlloc(uintptr);
-void SysUnused(void*, uintptr);
-void SysFree(void*, uintptr);
+void* runtime·SysAlloc(uintptr);
+void runtime·SysUnused(void*, uintptr);
+void runtime·SysFree(void*, uintptr);
enum
{
RefFlags = 0xFFFF0000U,
};
-void MProf_Malloc(void*, uintptr);
-void MProf_Free(void*, uintptr);
+void runtime·MProf_Malloc(void*, uintptr);
+void runtime·MProf_Free(void*, uintptr);
// Malloc profiling settings.
// Must match definition in extern.go.
MProf_Sample = 1,
MProf_All = 2,
};
-extern int32 malloc_profile;
+extern int32 runtime·malloc_profile;
typedef struct Finalizer Finalizer;
struct Finalizer
int32 nret;
};
-Finalizer* getfinalizer(void*, bool);
+Finalizer* runtime·getfinalizer(void*, bool);
#include "malloc.h"
void*
-MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed)
+runtime·MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed)
{
MCacheList *l;
MLink *first, *v;
l = &c->list[sizeclass];
if(l->list == nil) {
// Replenish using central lists.
- n = MCentral_AllocList(&mheap.central[sizeclass],
- class_to_transfercount[sizeclass], &first);
+ n = runtime·MCentral_AllocList(&runtime·mheap.central[sizeclass],
+ runtime·class_to_transfercount[sizeclass], &first);
l->list = first;
l->nlist = n;
c->size += n*size;
if(zeroed) {
// block is zeroed iff second word is zero ...
if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0)
- runtime_memclr((byte*)v, size);
+ runtime·memclr((byte*)v, size);
else {
// ... except for the link pointer
// that we used above; zero that.
l->nlist -= n;
if(l->nlist < l->nlistmin)
l->nlistmin = l->nlist;
- c->size -= n*class_to_size[sizeclass];
+ c->size -= n*runtime·class_to_size[sizeclass];
// Return them to central free list.
- MCentral_FreeList(&mheap.central[sizeclass], n, first);
+ runtime·MCentral_FreeList(&runtime·mheap.central[sizeclass], n, first);
}
void
-MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
+runtime·MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
{
int32 i, n;
MCacheList *l;
if(l->nlist >= MaxMCacheListLen) {
// Release a chunk back.
- ReleaseN(c, l, class_to_transfercount[sizeclass], sizeclass);
+ ReleaseN(c, l, runtime·class_to_transfercount[sizeclass], sizeclass);
}
if(c->size >= MaxMCacheSize) {
}
void
-MCache_ReleaseAll(MCache *c)
+runtime·MCache_ReleaseAll(MCache *c)
{
int32 i;
MCacheList *l;
// Initialize a single central free list.
void
-MCentral_Init(MCentral *c, int32 sizeclass)
+runtime·MCentral_Init(MCentral *c, int32 sizeclass)
{
c->sizeclass = sizeclass;
- MSpanList_Init(&c->nonempty);
- MSpanList_Init(&c->empty);
+ runtime·MSpanList_Init(&c->nonempty);
+ runtime·MSpanList_Init(&c->empty);
}
// Allocate up to n objects from the central free list.
// The objects are linked together by their first words.
// On return, *pstart points at the first object and *pend at the last.
int32
-MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
+runtime·MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
{
MLink *first, *last, *v;
int32 i;
- lock(c);
+ runtime·lock(c);
// Replenish central list if empty.
- if(MSpanList_IsEmpty(&c->nonempty)) {
+ if(runtime·MSpanList_IsEmpty(&c->nonempty)) {
if(!MCentral_Grow(c)) {
- unlock(c);
+ runtime·unlock(c);
*pfirst = nil;
return 0;
}
last->next = nil;
c->nfree -= i;
- unlock(c);
+ runtime·unlock(c);
*pfirst = first;
return i;
}
MSpan *s;
MLink *v;
- if(MSpanList_IsEmpty(&c->nonempty))
+ if(runtime·MSpanList_IsEmpty(&c->nonempty))
return nil;
s = c->nonempty.next;
s->ref++;
v = s->freelist;
s->freelist = v->next;
if(s->freelist == nil) {
- MSpanList_Remove(s);
- MSpanList_Insert(&c->empty, s);
+ runtime·MSpanList_Remove(s);
+ runtime·MSpanList_Insert(&c->empty, s);
}
return v;
}
// The objects are linked together by their first words.
// On return, *pstart points at the first object and *pend at the last.
void
-MCentral_FreeList(MCentral *c, int32 n, MLink *start)
+runtime·MCentral_FreeList(MCentral *c, int32 n, MLink *start)
{
MLink *v, *next;
// the transfer cache optimization in the TODO above.
USED(n);
- lock(c);
+ runtime·lock(c);
for(v=start; v; v=next) {
next = v->next;
MCentral_Free(c, v);
}
- unlock(c);
+ runtime·unlock(c);
}
// Helper: free one object back into the central free list.
// Find span for v.
page = (uintptr)v >> PageShift;
- s = MHeap_Lookup(&mheap, page);
+ s = runtime·MHeap_Lookup(&runtime·mheap, page);
if(s == nil || s->ref == 0)
- throw("invalid free");
+ runtime·throw("invalid free");
// Move to nonempty if necessary.
if(s->freelist == nil) {
- MSpanList_Remove(s);
- MSpanList_Insert(&c->nonempty, s);
+ runtime·MSpanList_Remove(s);
+ runtime·MSpanList_Insert(&c->nonempty, s);
}
// Add v back to s's free list.
// If s is completely freed, return it to the heap.
if(--s->ref == 0) {
- size = class_to_size[c->sizeclass];
- MSpanList_Remove(s);
+ size = runtime·class_to_size[c->sizeclass];
+ runtime·MSpanList_Remove(s);
// The second word of each freed block indicates
// whether it needs to be zeroed. The first word
// is the link pointer and must always be cleared.
for(p=s->freelist; p; p=next) {
next = p->next;
if(size > sizeof(uintptr) && ((uintptr*)p)[1] != 0)
- runtime_memclr((byte*)p, size);
+ runtime·memclr((byte*)p, size);
else
p->next = nil;
}
s->freelist = nil;
c->nfree -= (s->npages << PageShift) / size;
- unlock(c);
- MHeap_Free(&mheap, s, 0);
- lock(c);
+ runtime·unlock(c);
+ runtime·MHeap_Free(&runtime·mheap, s, 0);
+ runtime·lock(c);
}
}
void
-MGetSizeClassInfo(int32 sizeclass, int32 *sizep, int32 *npagesp, int32 *nobj)
+runtime·MGetSizeClassInfo(int32 sizeclass, int32 *sizep, int32 *npagesp, int32 *nobj)
{
int32 size;
int32 npages;
- npages = class_to_allocnpages[sizeclass];
- size = class_to_size[sizeclass];
+ npages = runtime·class_to_allocnpages[sizeclass];
+ size = runtime·class_to_size[sizeclass];
*npagesp = npages;
*sizep = size;
*nobj = (npages << PageShift) / (size + RefcountOverhead);
byte *p;
MSpan *s;
- unlock(c);
- MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
- s = MHeap_Alloc(&mheap, npages, c->sizeclass, 0);
+ runtime·unlock(c);
+ runtime·MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
+ s = runtime·MHeap_Alloc(&runtime·mheap, npages, c->sizeclass, 0);
if(s == nil) {
// TODO(rsc): Log out of memory
- lock(c);
+ runtime·lock(c);
return false;
}
}
*tailp = nil;
- lock(c);
+ runtime·lock(c);
c->nfree += n;
- MSpanList_Insert(&c->nonempty, s);
+ runtime·MSpanList_Insert(&c->nonempty, s);
return true;
}
#include "runtime.h"
#include "malloc.h"
-Lock finlock;
+static Lock finlock;
// Finalizer hash table. Direct hash, linear scan, at most 3/4 full.
// Table size is power of 3 so that hash can be key % max.
}
// cannot happen - table is known to be non-full
- throw("finalizer table inconsistent");
+ runtime·throw("finalizer table inconsistent");
ret:
t->key[i] = k;
}
// cannot happen - table is known to be non-full
- throw("finalizer table inconsistent");
+ runtime·throw("finalizer table inconsistent");
return nil;
}
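
A hedged sketch of the probe sequence the comments above describe: hash a pointer directly with key % max (max is a power of 3, presumably because heap pointers are well aligned and a power-of-two modulus would discard exactly those bits), then scan linearly; since the table is kept at most 3/4 full, the scan always reaches an answer. The function below is illustrative only and infers the Fintab field names from their uses in this change.

static Finalizer*
example_lookfintab(Fintab *t, void *k)
{
	int32 i, j;

	i = (uintptr)k % (uintptr)t->max;
	for(j=0; j<t->max; j++) {
		if(t->key[i] == nil)		// never-used slot: k is not present
			return nil;
		if(t->key[i] == k)
			return t->val[i];
		// Deleted entries hold (void*)-1, which matches neither case,
		// so the scan continues past them.
		if(++i == t->max)
			i = 0;
	}
	return nil;	// cannot happen - table is at most 3/4 full
}
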
// add finalizer; caller is responsible for making sure not already in table
void
-addfinalizer(void *p, void (*f)(void*), int32 nret)
+runtime·addfinalizer(void *p, void (*f)(void*), int32 nret)
{
Fintab newtab;
int32 i;
e = nil;
if(f != nil) {
- e = mal(sizeof *e);
+ e = runtime·mal(sizeof *e);
e->fn = f;
e->nret = nret;
}
- lock(&finlock);
- if(!mlookup(p, &base, nil, nil, &ref) || p != base) {
- unlock(&finlock);
- throw("addfinalizer on invalid pointer");
+ runtime·lock(&finlock);
+ if(!runtime·mlookup(p, &base, nil, nil, &ref) || p != base) {
+ runtime·unlock(&finlock);
+ runtime·throw("addfinalizer on invalid pointer");
}
if(f == nil) {
if(*ref & RefHasFinalizer) {
lookfintab(&fintab, p, 1);
*ref &= ~RefHasFinalizer;
}
- unlock(&finlock);
+ runtime·unlock(&finlock);
return;
}
if(*ref & RefHasFinalizer) {
- unlock(&finlock);
- throw("double finalizer");
+ runtime·unlock(&finlock);
+ runtime·throw("double finalizer");
}
*ref |= RefHasFinalizer;
// keep table at most 3/4 full:
// allocate new table and rehash.
- runtime_memclr((byte*)&newtab, sizeof newtab);
+ runtime·memclr((byte*)&newtab, sizeof newtab);
newtab.max = fintab.max;
if(newtab.max == 0)
newtab.max = 3*3*3;
newtab.max *= 3;
}
- newtab.key = mallocgc(newtab.max*sizeof newtab.key[0], RefNoPointers, 0, 1);
- newtab.val = mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1);
+ newtab.key = runtime·mallocgc(newtab.max*sizeof newtab.key[0], RefNoPointers, 0, 1);
+ newtab.val = runtime·mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1);
for(i=0; i<fintab.max; i++) {
void *k;
if(k != nil && k != (void*)-1)
addfintab(&newtab, k, fintab.val[i]);
}
- free(fintab.key);
- free(fintab.val);
+ runtime·free(fintab.key);
+ runtime·free(fintab.val);
fintab = newtab;
}
addfintab(&fintab, p, e);
- unlock(&finlock);
+ runtime·unlock(&finlock);
}
// get finalizer; if del, delete finalizer.
// caller is responsible for updating RefHasFinalizer bit.
Finalizer*
-getfinalizer(void *p, bool del)
+runtime·getfinalizer(void *p, bool del)
{
Finalizer *f;
- lock(&finlock);
+ runtime·lock(&finlock);
f = lookfintab(&fintab, p, del);
- unlock(&finlock);
+ runtime·unlock(&finlock);
return f;
}
void
-walkfintab(void (*fn)(void*))
+runtime·walkfintab(void (*fn)(void*))
{
void **key;
void **ekey;
- lock(&finlock);
+ runtime·lock(&finlock);
key = fintab.key;
ekey = key + fintab.max;
for(; key < ekey; key++)
if(*key != nil && *key != ((void*)-1))
fn(*key);
- unlock(&finlock);
+ runtime·unlock(&finlock);
}
// Initialize f to allocate objects of the given size,
// using the allocator to obtain chunks of memory.
void
-FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg)
+runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg)
{
f->size = size;
f->alloc = alloc;
}
void*
-FixAlloc_Alloc(FixAlloc *f)
+runtime·FixAlloc_Alloc(FixAlloc *f)
{
void *v;
f->sys += FixAllocChunk;
f->chunk = f->alloc(FixAllocChunk);
if(f->chunk == nil)
- throw("out of memory (FixAlloc)");
+ runtime·throw("out of memory (FixAlloc)");
f->nchunk = FixAllocChunk;
}
v = f->chunk;
}
void
-FixAlloc_Free(FixAlloc *f, void *p)
+runtime·FixAlloc_Free(FixAlloc *f, void *p)
{
f->inuse -= f->size;
*(void**)p = f->list;
n = w->size;
if(Debug > 1)
- printf("scanblock %p %D\n", b, n);
+ runtime·printf("scanblock %p %D\n", b, n);
off = (uint32)(uintptr)b & (PtrSize-1);
if(off) {
b += PtrSize - off;
obj = vp[i];
if(obj == nil)
continue;
- if(mheap.closure_min != nil && mheap.closure_min <= (byte*)obj && (byte*)obj < mheap.closure_max) {
+ if(runtime·mheap.closure_min != nil && runtime·mheap.closure_min <= (byte*)obj && (byte*)obj < runtime·mheap.closure_max) {
if((((uintptr)obj) & 63) != 0)
continue;
continue;
obj = *pp;
}
- if(mheap.min <= (byte*)obj && (byte*)obj < mheap.max) {
- if(mlookup(obj, &obj, &size, nil, &refp)) {
+ if(runtime·mheap.min <= (byte*)obj && (byte*)obj < runtime·mheap.max) {
+ if(runtime·mlookup(obj, &obj, &size, nil, &refp)) {
ref = *refp;
switch(ref & ~RefFlags) {
case RefNone:
if(Debug > 1)
- printf("found at %p: ", &vp[i]);
+ runtime·printf("found at %p: ", &vp[i]);
*refp = RefSome | (ref & RefFlags);
if(!(ref & RefNoPointers)) {
if(w >= ebl)
- throw("scanblock: garbage collection stack overflow");
+ runtime·throw("scanblock: garbage collection stack overflow");
w->obj = obj;
w->size = size;
w++;
else
sp = gp->sched.sp;
if(Debug > 1)
- printf("scanstack %d %p\n", gp->goid, sp);
+ runtime·printf("scanstack %d %p\n", gp->goid, sp);
stk = (Stktop*)gp->stackbase;
while(stk) {
scanblock(sp, (byte*)stk - sp);
size = 0;
refp = nil;
- if(!mlookup(v, &v, &size, nil, &refp) || !(*refp & RefHasFinalizer))
- throw("mark - finalizer inconsistency");
+ if(!runtime·mlookup(v, &v, &size, nil, &refp) || !(*refp & RefHasFinalizer))
+ runtime·throw("mark - finalizer inconsistency");
// do not mark the finalizer block itself. just mark the things it points at.
scanblock(v, size);
nobj = mstats.heap_objects;
if(nobj > ebl - bl || nobj < (ebl-bl)/4) {
if(bl != nil)
- SysFree(bl, (byte*)ebl - (byte*)bl);
+ runtime·SysFree(bl, (byte*)ebl - (byte*)bl);
// While we're allocating a new object stack,
// add 20% headroom and also round up to a multiple of the page size.
blsize = nobj * sizeof *bl;
blsize = (blsize + 4095) & ~4095;
nobj = blsize / sizeof *bl;
- bl = SysAlloc(blsize);
+ bl = runtime·SysAlloc(blsize);
ebl = bl + nobj;
}
// mark data+bss.
- // skip mheap itself, which has no interesting pointers
+ // skip runtime·mheap itself, which has no interesting pointers
// and is mostly zeroed and would not otherwise be paged in.
- scanblock(data, (byte*)&mheap - data);
- scanblock((byte*)(&mheap+1), end - (byte*)(&mheap+1));
+ scanblock(data, (byte*)&runtime·mheap - data);
+ scanblock((byte*)(&runtime·mheap+1), end - (byte*)(&runtime·mheap+1));
// mark stacks
- for(gp=allg; gp!=nil; gp=gp->alllink) {
+ for(gp=runtime·allg; gp!=nil; gp=gp->alllink) {
switch(gp->status){
default:
- printf("unexpected G.status %d\n", gp->status);
- throw("mark - bad status");
+ runtime·printf("unexpected G.status %d\n", gp->status);
+ runtime·throw("mark - bad status");
case Gdead:
break;
case Grunning:
case Grecovery:
if(gp != g)
- throw("mark - world not stopped");
+ runtime·throw("mark - world not stopped");
scanstack(gp);
break;
case Grunnable:
}
// mark things pointed at by objects with finalizers
- walkfintab(markfin);
+ runtime·walkfintab(markfin);
}
// free RefNone, free & queue finalizers for RefNone|RefHasFinalizer, reset RefSome
case RefNone:
// Free large object.
mstats.alloc -= s->npages<<PageShift;
- runtime_memclr(p, s->npages<<PageShift);
+ runtime·memclr(p, s->npages<<PageShift);
if(ref & RefProfiled)
- MProf_Free(p, s->npages<<PageShift);
+ runtime·MProf_Free(p, s->npages<<PageShift);
s->gcref0 = RefFree;
- MHeap_Free(&mheap, s, 1);
+ runtime·MHeap_Free(&runtime·mheap, s, 1);
break;
case RefNone|RefHasFinalizer:
- f = getfinalizer(p, 1);
+ f = runtime·getfinalizer(p, 1);
if(f == nil)
- throw("finalizer inconsistency");
+ runtime·throw("finalizer inconsistency");
f->arg = p;
f->next = finq;
finq = f;
}
// Chunk full of small blocks.
- MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
+ runtime·MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
gcrefp = s->gcref;
gcrefep = s->gcref + n;
for(; gcrefp < gcrefep; gcrefp++, p += size) {
case RefNone:
// Free small object.
if(ref & RefProfiled)
- MProf_Free(p, size);
+ runtime·MProf_Free(p, size);
*gcrefp = RefFree;
c = m->mcache;
if(size > sizeof(uintptr))
((uintptr*)p)[1] = 1; // mark as "needs to be zeroed"
mstats.alloc -= size;
mstats.by_size[s->sizeclass].nfree++;
- MCache_Free(c, p, s->sizeclass, size);
+ runtime·MCache_Free(c, p, s->sizeclass, size);
break;
case RefNone|RefHasFinalizer:
- f = getfinalizer(p, 1);
+ f = runtime·getfinalizer(p, 1);
if(f == nil)
- throw("finalizer inconsistency");
+ runtime·throw("finalizer inconsistency");
f->arg = p;
f->next = finq;
finq = f;
{
MSpan *s;
- for(s = mheap.allspans; s != nil; s = s->allnext)
+ for(s = runtime·mheap.allspans; s != nil; s = s->allnext)
if(s->state == MSpanInUse)
sweepspan(s);
}
{
M *m;
- for(m=allm; m; m=m->alllink)
- MCache_ReleaseAll(m->mcache);
+ for(m=runtime·allm; m; m=m->alllink)
+ runtime·MCache_ReleaseAll(m->mcache);
}
static void
M *m;
MCache *c;
- for(m=allm; m; m=m->alllink) {
+ for(m=runtime·allm; m; m=m->alllink) {
c = m->mcache;
mstats.heap_alloc += c->local_alloc;
c->local_alloc = 0;
}
void
-gc(int32 force)
+runtime·gc(int32 force)
{
int64 t0, t1;
byte *p;
// problems, don't bother trying to run gc
// while holding a lock. The next mallocgc
// without a lock will do the gc instead.
- if(!mstats.enablegc || m->locks > 0 || panicking)
+ if(!mstats.enablegc || m->locks > 0 || runtime·panicking)
return;
if(gcpercent == -2) { // first time through
- p = getenv("GOGC");
+ p = runtime·getenv("GOGC");
if(p == nil || p[0] == '\0')
gcpercent = 100;
- else if(strcmp(p, (byte*)"off") == 0)
+ else if(runtime·strcmp(p, (byte*)"off") == 0)
gcpercent = -1;
else
- gcpercent = atoi(p);
+ gcpercent = runtime·atoi(p);
}
if(gcpercent < 0)
return;
- semacquire(&gcsema);
- t0 = nanotime();
+ runtime·semacquire(&gcsema);
+ t0 = runtime·nanotime();
m->gcing = 1;
- stoptheworld();
- if(mheap.Lock.key != 0)
- throw("mheap locked during gc");
+ runtime·stoptheworld();
+ if(runtime·mheap.Lock.key != 0)
+ runtime·throw("runtime·mheap locked during gc");
if(force || mstats.heap_alloc >= mstats.next_gc) {
cachestats();
mark();
if(fp != nil) {
// kick off or wake up goroutine to run queued finalizers
if(fing == nil)
- fing = newproc1((byte*)runfinq, nil, 0, 0);
+ fing = runtime·newproc1((byte*)runfinq, nil, 0, 0);
else if(fingwait) {
fingwait = 0;
- ready(fing);
+ runtime·ready(fing);
}
}
m->locks--;
- t1 = nanotime();
+ t1 = runtime·nanotime();
mstats.numgc++;
mstats.pause_ns += t1 - t0;
if(mstats.debuggc)
- printf("pause %D\n", t1-t0);
- semrelease(&gcsema);
- starttheworld();
+ runtime·printf("pause %D\n", t1-t0);
+ runtime·semrelease(&gcsema);
+ runtime·starttheworld();
// give the queued finalizers, if any, a chance to run
if(fp != nil)
- gosched();
+ runtime·gosched();
}
static void
if(f == nil) {
fingwait = 1;
g->status = Gwaiting;
- gosched();
+ runtime·gosched();
continue;
}
for(; f; f=next) {
next = f->next;
- frame = mal(sizeof(uintptr) + f->nret);
+ frame = runtime·mal(sizeof(uintptr) + f->nret);
*(void**)frame = f->arg;
reflect·call((byte*)f->fn, frame, sizeof(uintptr) + f->nret);
- free(frame);
+ runtime·free(frame);
f->fn = nil;
f->arg = nil;
f->next = nil;
- free(f);
+ runtime·free(f);
}
- gc(1); // trigger another gc to clean up the finalized objects, if possible
+ runtime·gc(1); // trigger another gc to clean up the finalized objects, if possible
}
}
// Initialize the heap; fetch memory using alloc.
void
-MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
+runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
{
uint32 i;
- FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
- FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
- MHeapMap_Init(&h->map, alloc);
+ runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
+ runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
+ runtime·MHeapMap_Init(&h->map, alloc);
// h->mapcache needs no init
for(i=0; i<nelem(h->free); i++)
- MSpanList_Init(&h->free[i]);
- MSpanList_Init(&h->large);
+ runtime·MSpanList_Init(&h->free[i]);
+ runtime·MSpanList_Init(&h->large);
for(i=0; i<nelem(h->central); i++)
- MCentral_Init(&h->central[i], i);
+ runtime·MCentral_Init(&h->central[i], i);
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
-MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
+runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
{
MSpan *s;
- lock(h);
+ runtime·lock(h);
mstats.heap_alloc += m->mcache->local_alloc;
m->mcache->local_alloc = 0;
mstats.heap_objects += m->mcache->local_objects;
mstats.heap_alloc += npage<<PageShift;
}
}
- unlock(h);
+ runtime·unlock(h);
return s;
}
// Try in fixed-size lists up to max.
for(n=npage; n < nelem(h->free); n++) {
- if(!MSpanList_IsEmpty(&h->free[n])) {
+ if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
s = h->free[n].next;
goto HaveSpan;
}
HaveSpan:
// Mark span in use.
if(s->state != MSpanFree)
- throw("MHeap_AllocLocked - MSpan not free");
+ runtime·throw("MHeap_AllocLocked - MSpan not free");
if(s->npages < npage)
- throw("MHeap_AllocLocked - bad npages");
- MSpanList_Remove(s);
+ runtime·throw("MHeap_AllocLocked - bad npages");
+ runtime·MSpanList_Remove(s);
s->state = MSpanInUse;
if(s->npages > npage) {
// Trim extra and put it back in the heap.
- t = FixAlloc_Alloc(&h->spanalloc);
+ t = runtime·FixAlloc_Alloc(&h->spanalloc);
mstats.mspan_inuse = h->spanalloc.inuse;
mstats.mspan_sys = h->spanalloc.sys;
- MSpan_Init(t, s->start + npage, s->npages - npage);
+ runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
s->npages = npage;
- MHeapMap_Set(&h->map, t->start - 1, s);
- MHeapMap_Set(&h->map, t->start, t);
- MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
+ runtime·MHeapMap_Set(&h->map, t->start - 1, s);
+ runtime·MHeapMap_Set(&h->map, t->start, t);
+ runtime·MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
t->state = MSpanInUse;
MHeap_FreeLocked(h, t);
}
// able to map interior pointer to containing span.
s->sizeclass = sizeclass;
for(n=0; n<npage; n++)
- MHeapMap_Set(&h->map, s->start+n, s);
+ runtime·MHeapMap_Set(&h->map, s->start+n, s);
return s;
}
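
The boundary bookkeeping when a big free span is split is easier to follow with concrete numbers; all values below are assumptions for illustration.

// Asking for npage=2 out of a free span s with s->start=100, s->npages=5:
//	t (the trimmed tail):	t->start = 102, t->npages = 3
//	s (the allocation):	s->npages = 2
//	map[101] = s		// new last page of s
//	map[102] = t		// first page of t
//	map[104] = t		// last page of t
// t is marked in use and immediately freed back, so it can coalesce
// with any neighboring free span.
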
if(ask < HeapAllocChunk)
ask = HeapAllocChunk;
- v = SysAlloc(ask);
+ v = runtime·SysAlloc(ask);
if(v == nil) {
if(ask > (npage<<PageShift)) {
ask = npage<<PageShift;
- v = SysAlloc(ask);
+ v = runtime·SysAlloc(ask);
}
if(v == nil)
return false;
// NOTE(rsc): In tcmalloc, if we've accumulated enough
// system allocations, the heap map gets entirely allocated
// in 32-bit mode. (In 64-bit mode that's not practical.)
- if(!MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
- SysFree(v, ask);
+ if(!runtime·MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
+ runtime·SysFree(v, ask);
return false;
}
// Create a fake "in use" span and free it, so that the
// right coalescing happens.
- s = FixAlloc_Alloc(&h->spanalloc);
+ s = runtime·FixAlloc_Alloc(&h->spanalloc);
mstats.mspan_inuse = h->spanalloc.inuse;
mstats.mspan_sys = h->spanalloc.sys;
- MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
- MHeapMap_Set(&h->map, s->start, s);
- MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+ runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
+ runtime·MHeapMap_Set(&h->map, s->start, s);
+ runtime·MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
s->state = MSpanInUse;
MHeap_FreeLocked(h, s);
return true;
// Page number is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
-MHeap_Lookup(MHeap *h, PageID p)
+runtime·MHeap_Lookup(MHeap *h, PageID p)
{
- return MHeapMap_Get(&h->map, p);
+ return runtime·MHeapMap_Get(&h->map, p);
}
// Look up the span at the given page number.
// other garbage in their middles, so we have to
// check for that.
MSpan*
-MHeap_LookupMaybe(MHeap *h, PageID p)
+runtime·MHeap_LookupMaybe(MHeap *h, PageID p)
{
MSpan *s;
- s = MHeapMap_GetMaybe(&h->map, p);
+ s = runtime·MHeapMap_GetMaybe(&h->map, p);
if(s == nil || p < s->start || p - s->start >= s->npages)
return nil;
if(s->state != MSpanInUse)
// Free the span back into the heap.
void
-MHeap_Free(MHeap *h, MSpan *s, int32 acct)
+runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
- lock(h);
+ runtime·lock(h);
mstats.heap_alloc += m->mcache->local_alloc;
m->mcache->local_alloc = 0;
mstats.heap_objects += m->mcache->local_objects;
mstats.heap_objects--;
}
MHeap_FreeLocked(h, s);
- unlock(h);
+ runtime·unlock(h);
}
static void
MSpan *t;
if(s->state != MSpanInUse || s->ref != 0) {
- printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
- throw("MHeap_FreeLocked - invalid free");
+ runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
+ runtime·throw("MHeap_FreeLocked - invalid free");
}
s->state = MSpanFree;
- MSpanList_Remove(s);
+ runtime·MSpanList_Remove(s);
// Coalesce with earlier, later spans.
- if((t = MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
+ if((t = runtime·MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
s->start = t->start;
s->npages += t->npages;
- MHeapMap_Set(&h->map, s->start, s);
- MSpanList_Remove(t);
+ runtime·MHeapMap_Set(&h->map, s->start, s);
+ runtime·MSpanList_Remove(t);
t->state = MSpanDead;
- FixAlloc_Free(&h->spanalloc, t);
+ runtime·FixAlloc_Free(&h->spanalloc, t);
mstats.mspan_inuse = h->spanalloc.inuse;
mstats.mspan_sys = h->spanalloc.sys;
}
- if((t = MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
+ if((t = runtime·MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
s->npages += t->npages;
- MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
- MSpanList_Remove(t);
+ runtime·MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+ runtime·MSpanList_Remove(t);
t->state = MSpanDead;
- FixAlloc_Free(&h->spanalloc, t);
+ runtime·FixAlloc_Free(&h->spanalloc, t);
mstats.mspan_inuse = h->spanalloc.inuse;
mstats.mspan_sys = h->spanalloc.sys;
}
// Insert s into appropriate list.
if(s->npages < nelem(h->free))
- MSpanList_Insert(&h->free[s->npages], s);
+ runtime·MSpanList_Insert(&h->free[s->npages], s);
else
- MSpanList_Insert(&h->large, s);
+ runtime·MSpanList_Insert(&h->large, s);
// TODO(rsc): IncrementalScavenge() to return memory to OS.
}
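
Those same boundary entries are what make coalescing in MHeap_FreeLocked cheap: the page just before and the page just after the span being freed are looked up directly. A worked example with assumed page numbers (illustrative only):

// Freeing s (start=102, npages=3) when t (start=100, npages=2) is already free:
//	map[101] is t's last page, so the earlier-neighbor lookup finds t
//	s absorbs t:	s->start = 100, s->npages = 5
//	map[100] = s	// new first page of s
//	t's MSpan is returned to spanalloc
//	s is then inserted into h->free[5] (npages < nelem(h->free))
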
// Initialize a new span with the given start and npages.
void
-MSpan_Init(MSpan *span, PageID start, uintptr npages)
+runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
span->next = nil;
span->prev = nil;
// Initialize an empty doubly-linked list.
void
-MSpanList_Init(MSpan *list)
+runtime·MSpanList_Init(MSpan *list)
{
list->state = MSpanListHead;
list->next = list;
}
void
-MSpanList_Remove(MSpan *span)
+runtime·MSpanList_Remove(MSpan *span)
{
if(span->prev == nil && span->next == nil)
return;
}
bool
-MSpanList_IsEmpty(MSpan *list)
+runtime·MSpanList_IsEmpty(MSpan *list)
{
return list->next == list;
}
void
-MSpanList_Insert(MSpan *list, MSpan *span)
+runtime·MSpanList_Insert(MSpan *list, MSpan *span)
{
if(span->next != nil || span->prev != nil)
- throw("MSpanList_Insert");
+ runtime·throw("MSpanList_Insert");
span->next = list->next;
span->prev = list;
span->next->prev = span;
// 2-level radix tree mapping page ids to Span*.
void
-MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
+runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
{
m->allocator = allocator;
}
MSpan*
-MHeapMap_Get(MHeapMap *m, PageID k)
+runtime·MHeapMap_Get(MHeapMap *m, PageID k)
{
int32 i1, i2;
i1 = k & MHeapMap_Level1Mask;
k >>= MHeapMap_Level1Bits;
if(k != 0)
- throw("MHeapMap_Get");
+ runtime·throw("MHeapMap_Get");
return m->p[i1]->s[i2];
}
MSpan*
-MHeapMap_GetMaybe(MHeapMap *m, PageID k)
+runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k)
{
int32 i1, i2;
MHeapMapNode2 *p2;
i1 = k & MHeapMap_Level1Mask;
k >>= MHeapMap_Level1Bits;
if(k != 0)
- throw("MHeapMap_Get");
+ runtime·throw("MHeapMap_Get");
p2 = m->p[i1];
if(p2 == nil)
}
void
-MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
+runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
{
int32 i1, i2;
i1 = k & MHeapMap_Level1Mask;
k >>= MHeapMap_Level1Bits;
if(k != 0)
- throw("MHeapMap_Set");
+ runtime·throw("MHeapMap_Set");
m->p[i1]->s[i2] = s;
}
// Allocate the storage required for entries [k, k+1, ..., k+len-1]
// so that Get and Set calls need not check for nil pointers.
bool
-MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
+runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
{
uintptr end;
int32 i1;
MSpan *s[1<<MHeapMap_Level2Bits];
};
-void MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
-bool MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
-MSpan* MHeapMap_Get(MHeapMap *m, PageID k);
-MSpan* MHeapMap_GetMaybe(MHeapMap *m, PageID k);
-void MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
+void runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
+bool runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
+MSpan* runtime·MHeapMap_Get(MHeapMap *m, PageID k);
+MSpan* runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k);
+void runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
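
The split of a page id into per-level indexes is mechanical but worth one concrete pass. The MHeapMap_Level*Bits and *Mask constants are not part of this excerpt, so the widths below are assumptions chosen only to make the arithmetic visible:

// Assume two 10-bit levels (leaf level consumed first, top level last):
//	k  = 0x12345			// page id
//	leaf index = k & 0x3ff		// = 0x345, low bits select the leaf slot
//	k >>= 10			// = 0x48
//	top index  = k & 0x3ff		// = 0x048, selects the top-level node
//	k >>= 10			// = 0; any leftover bits make Get/Set throw
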
// 3-level radix tree mapping page ids to Span*.
void
-MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
+runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
{
m->allocator = allocator;
}
MSpan*
-MHeapMap_Get(MHeapMap *m, PageID k)
+runtime·MHeapMap_Get(MHeapMap *m, PageID k)
{
int32 i1, i2, i3;
i1 = k & MHeapMap_Level1Mask;
k >>= MHeapMap_Level1Bits;
if(k != 0)
- throw("MHeapMap_Get");
+ runtime·throw("MHeapMap_Get");
return m->p[i1]->p[i2]->s[i3];
}
MSpan*
-MHeapMap_GetMaybe(MHeapMap *m, PageID k)
+runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k)
{
int32 i1, i2, i3;
MHeapMapNode2 *p2;
i1 = k & MHeapMap_Level1Mask;
k >>= MHeapMap_Level1Bits;
if(k != 0)
- throw("MHeapMap_Get");
+ runtime·throw("MHeapMap_Get");
p2 = m->p[i1];
if(p2 == nil)
}
void
-MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
+runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
{
int32 i1, i2, i3;
i1 = k & MHeapMap_Level1Mask;
k >>= MHeapMap_Level1Bits;
if(k != 0)
- throw("MHeapMap_Set");
+ runtime·throw("MHeapMap_Set");
m->p[i1]->p[i2]->s[i3] = s;
}
// Allocate the storage required for entries [k, k+1, ..., k+len-1]
// so that Get and Set calls need not check for nil pointers.
bool
-MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
+runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
{
uintptr end;
int32 i1, i2;
MSpan *s[1<<MHeapMap_Level3Bits];
};
-void MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
-bool MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
-MSpan* MHeapMap_Get(MHeapMap *m, PageID k);
-MSpan* MHeapMap_GetMaybe(MHeapMap *m, PageID k);
-void MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
+void runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
+bool runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
+MSpan* runtime·MHeapMap_Get(MHeapMap *m, PageID k);
+MSpan* runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k);
+void runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
Bucket *b;
if(buckhash == nil) {
- buckhash = SysAlloc(BuckHashSize*sizeof buckhash[0]);
+ buckhash = runtime·SysAlloc(BuckHashSize*sizeof buckhash[0]);
mstats.buckhash_sys += BuckHashSize*sizeof buckhash[0];
}
i = h%BuckHashSize;
for(b = buckhash[i]; b; b=b->next)
if(b->hash == h && b->nstk == nstk &&
- mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
+ runtime·mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
return b;
- b = mallocgc(sizeof *b + nstk*sizeof stk[0], RefNoProfiling, 0, 1);
+ b = runtime·mallocgc(sizeof *b + nstk*sizeof stk[0], RefNoProfiling, 0, 1);
bucketmem += sizeof *b + nstk*sizeof stk[0];
- memmove(b->stk, stk, nstk*sizeof stk[0]);
+ runtime·memmove(b->stk, stk, nstk*sizeof stk[0]);
b->hash = h;
b->nstk = nstk;
b->next = buckhash[i];
if(ah->addr == (addr>>20))
goto found;
- ah = mallocgc(sizeof *ah, RefNoProfiling, 0, 1);
+ ah = runtime·mallocgc(sizeof *ah, RefNoProfiling, 0, 1);
addrmem += sizeof *ah;
ah->next = addrhash[h];
ah->addr = addr>>20;
found:
if((e = addrfree) == nil) {
- e = mallocgc(64*sizeof *e, RefNoProfiling, 0, 0);
+ e = runtime·mallocgc(64*sizeof *e, RefNoProfiling, 0, 0);
addrmem += 64*sizeof *e;
for(i=0; i+1<64; i++)
e[i].next = &e[i+1];
// Called by malloc to record a profiled block.
void
-MProf_Malloc(void *p, uintptr size)
+runtime·MProf_Malloc(void *p, uintptr size)
{
int32 nstk;
uintptr stk[32];
return;
m->nomemprof++;
- nstk = callers(1, stk, 32);
- lock(&proflock);
+ nstk = runtime·callers(1, stk, 32);
+ runtime·lock(&proflock);
b = stkbucket(stk, nstk);
b->allocs++;
b->alloc_bytes += size;
setaddrbucket((uintptr)p, b);
- unlock(&proflock);
+ runtime·unlock(&proflock);
m->nomemprof--;
}
// Called when freeing a profiled block.
void
-MProf_Free(void *p, uintptr size)
+runtime·MProf_Free(void *p, uintptr size)
{
Bucket *b;
return;
m->nomemprof++;
- lock(&proflock);
+ runtime·lock(&proflock);
b = getaddrbucket((uintptr)p);
if(b != nil) {
b->frees++;
b->free_bytes += size;
}
- unlock(&proflock);
+ runtime·unlock(&proflock);
m->nomemprof--;
}
Bucket *b;
Record *r;
- lock(&proflock);
+ runtime·lock(&proflock);
n = 0;
for(b=buckets; b; b=b->allnext)
if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
record(r++, b);
}
- unlock(&proflock);
+ runtime·unlock(&proflock);
}
#include "runtime.h"
#include "malloc.h"
-int32 class_to_size[NumSizeClasses];
-int32 class_to_allocnpages[NumSizeClasses];
-int32 class_to_transfercount[NumSizeClasses];
+int32 runtime·class_to_size[NumSizeClasses];
+int32 runtime·class_to_allocnpages[NumSizeClasses];
+int32 runtime·class_to_transfercount[NumSizeClasses];
// The SizeToClass lookup is implemented using two arrays,
"// one mapping sizes <= 1024 to their class and one mapping
"// larger sizes (up to MaxSmallSize) to their class in 128-byte steps.
static int32 size_to_class128[(MaxSmallSize-1024)/128 + 1];
int32
-SizeToClass(int32 size)
+runtime·SizeToClass(int32 size)
{
if(size > MaxSmallSize)
- throw("SizeToClass - invalid size");
+ runtime·throw("SizeToClass - invalid size");
if(size > 1024-8)
return size_to_class128[(size-1024+127) >> 7];
return size_to_class8[(size+7)>>3];
}
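
Two worked examples of the lookup above (the sizes are chosen for illustration; which class number comes back depends on the tables InitSizes builds below):

//	size = 42:	42 <= 1016, so class = size_to_class8[(42+7)>>3]  = size_to_class8[6]
//	size = 2000:	2000 > 1016, so class = size_to_class128[(2000-1024+127)>>7] = size_to_class128[8]
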
void
-InitSizes(void)
+runtime·InitSizes(void)
{
int32 align, sizeclass, size, osize, nextsize, n;
uint32 i;
uintptr allocsize, npages;
- // Initialize the class_to_size table (and choose class sizes in the process).
- class_to_size[0] = 0;
+ // Initialize the runtime·class_to_size table (and choose class sizes in the process).
+ runtime·class_to_size[0] = 0;
sizeclass = 1; // 0 means no class
align = 8;
for(size = align; size <= MaxSmallSize; size += align) {
align = 16; // required for x86 SSE instructions, if we want to use them
}
if((align&(align-1)) != 0)
- throw("InitSizes - bug");
+ runtime·throw("InitSizes - bug");
// Make the allocnpages big enough that
// the leftover is less than 1/8 of the total,
// use just this size instead of having two
// different sizes.
if(sizeclass > 1
- && npages == class_to_allocnpages[sizeclass-1]
- && allocsize/osize == allocsize/(class_to_size[sizeclass-1]+RefcountOverhead)) {
- class_to_size[sizeclass-1] = size;
+ && npages == runtime·class_to_allocnpages[sizeclass-1]
+ && allocsize/osize == allocsize/(runtime·class_to_size[sizeclass-1]+RefcountOverhead)) {
+ runtime·class_to_size[sizeclass-1] = size;
continue;
}
- class_to_allocnpages[sizeclass] = npages;
- class_to_size[sizeclass] = size;
+ runtime·class_to_allocnpages[sizeclass] = npages;
+ runtime·class_to_size[sizeclass] = size;
sizeclass++;
}
if(sizeclass != NumSizeClasses) {
- printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
- throw("InitSizes - bad NumSizeClasses");
+ runtime·printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
+ runtime·throw("InitSizes - bad NumSizeClasses");
}
// Initialize the size_to_class tables.
nextsize = 0;
for (sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
- for(; nextsize < 1024 && nextsize <= class_to_size[sizeclass]; nextsize+=8)
+ for(; nextsize < 1024 && nextsize <= runtime·class_to_size[sizeclass]; nextsize+=8)
size_to_class8[nextsize/8] = sizeclass;
if(nextsize >= 1024)
- for(; nextsize <= class_to_size[sizeclass]; nextsize += 128)
+ for(; nextsize <= runtime·class_to_size[sizeclass]; nextsize += 128)
size_to_class128[(nextsize-1024)/128] = sizeclass;
}
// Double-check SizeToClass.
if(0) {
for(n=0; n < MaxSmallSize; n++) {
- sizeclass = SizeToClass(n);
- if(sizeclass < 1 || sizeclass >= NumSizeClasses || class_to_size[sizeclass] < n) {
- printf("size=%d sizeclass=%d class_to_size=%d\n", n, sizeclass, class_to_size[sizeclass]);
- printf("incorrect SizeToClass");
+ sizeclass = runtime·SizeToClass(n);
+ if(sizeclass < 1 || sizeclass >= NumSizeClasses || runtime·class_to_size[sizeclass] < n) {
+ runtime·printf("size=%d sizeclass=%d runtime·class_to_size=%d\n", n, sizeclass, runtime·class_to_size[sizeclass]);
+ runtime·printf("incorrect SizeToClass");
goto dump;
}
- if(sizeclass > 1 && class_to_size[sizeclass-1] >= n) {
- printf("size=%d sizeclass=%d class_to_size=%d\n", n, sizeclass, class_to_size[sizeclass]);
- printf("SizeToClass too big");
+ if(sizeclass > 1 && runtime·class_to_size[sizeclass-1] >= n) {
+ runtime·printf("size=%d sizeclass=%d runtime·class_to_size=%d\n", n, sizeclass, runtime·class_to_size[sizeclass]);
+ runtime·printf("SizeToClass too big");
goto dump;
}
}
}
// Copy out for statistics table.
- for(i=0; i<nelem(class_to_size); i++)
- mstats.by_size[i].size = class_to_size[i];
+ for(i=0; i<nelem(runtime·class_to_size); i++)
+ mstats.by_size[i].size = runtime·class_to_size[i];
- // Initialize the class_to_transfercount table.
+ // Initialize the runtime·class_to_transfercount table.
for(sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
- n = 64*1024 / class_to_size[sizeclass];
+ n = 64*1024 / runtime·class_to_size[sizeclass];
if(n < 2)
n = 2;
if(n > 32)
n = 32;
- class_to_transfercount[sizeclass] = n;
+ runtime·class_to_transfercount[sizeclass] = n;
}
return;
dump:
if(1){
- printf("NumSizeClasses=%d\n", NumSizeClasses);
- printf("class_to_size:");
+ runtime·printf("NumSizeClasses=%d\n", NumSizeClasses);
+ runtime·printf("runtime·class_to_size:");
for(sizeclass=0; sizeclass<NumSizeClasses; sizeclass++)
- printf(" %d", class_to_size[sizeclass]);
- printf("\n\n");
- printf("size_to_class8:");
+ runtime·printf(" %d", runtime·class_to_size[sizeclass]);
+ runtime·printf("\n\n");
+ runtime·printf("size_to_class8:");
for(i=0; i<nelem(size_to_class8); i++)
- printf(" %d=>%d(%d)\n", i*8, size_to_class8[i], class_to_size[size_to_class8[i]]);
- printf("\n");
- printf("size_to_class128:");
+ runtime·printf(" %d=>%d(%d)\n", i*8, size_to_class8[i], runtime·class_to_size[size_to_class8[i]]);
+ runtime·printf("\n");
+ runtime·printf("size_to_class128:");
for(i=0; i<nelem(size_to_class128); i++)
- printf(" %d=>%d(%d)\n", i*128, size_to_class128[i], class_to_size[size_to_class128[i]]);
- printf("\n");
+ runtime·printf(" %d=>%d(%d)\n", i*128, size_to_class128[i], runtime·class_to_size[size_to_class128[i]]);
+ runtime·printf("\n");
}
- throw("InitSizes failed");
+ runtime·throw("InitSizes failed");
}
#define codeptr(p) *(ClosureData***)((byte*)(p)+2)
void
-finclosure(void *v)
+runtime·finclosure(void *v)
{
byte *p;
ClosureFreeList *f;
p = clos.code + f->index*ClosureSize;
*codeptr(p) = nil;
- lock(&clos);
+ runtime·lock(&clos);
f->next = clos.free;
clos.free = f;
- unlock(&clos);
+ runtime·unlock(&clos);
}
#pragma textflag 7
// fn func(arg0, arg1, arg2 *ptr, callerpc uintptr, xxx) yyy,
// arg0, arg1, arg2 *ptr) (func(xxx) yyy)
void
-·closure(int32 siz, byte *fn, byte *arg0)
+runtime·closure(int32 siz, byte *fn, byte *arg0)
{
byte *p, **ret;
int32 e, i, n, off;
ClosureFreeList *f;
if(siz < 0 || siz%4 != 0)
- throw("bad closure size");
+ runtime·throw("bad closure size");
ret = (byte**)((byte*)&arg0 + siz);
if(siz > 100) {
// TODO(rsc): implement stack growth preamble?
- throw("closure too big");
+ runtime·throw("closure too big");
}
- lock(&clos);
+ runtime·lock(&clos);
if(clos.free == nil) {
// Allocate more closures.
if(clos.code == nil) {
}
if(clos.ecode+ClosureChunk > rodata) {
// Last ditch effort: garbage collect and hope.
- unlock(&clos);
- gc(1);
- lock(&clos);
+ runtime·unlock(&clos);
+ runtime·gc(1);
+ runtime·lock(&clos);
if(clos.free != nil)
goto alloc;
- throw("ran out of room for closures in text segment");
+ runtime·throw("ran out of room for closures in text segment");
}
n = ClosureChunk/ClosureSize;
// Allocate the pointer block as opaque to the
// garbage collector. Finalizers will clean up.
- block = mallocgc(n*sizeof block[0], RefNoPointers, 1, 1);
+ block = runtime·mallocgc(n*sizeof block[0], RefNoPointers, 1, 1);
// Pointers into the pointer block are getting added
// to the text segment; keep a pointer here in the data
// segment so that the garbage collector doesn't free
// the block itself.
- l = mal(sizeof *l);
+ l = runtime·mal(sizeof *l);
l->block = block;
l->next = clos.datalist;
clos.datalist = l;
p = clos.buf;
off = (clos.ecode - clos.code)/ClosureSize;
for(i=0; i<n; i++) {
- f = mal(sizeof *f);
+ f = runtime·mal(sizeof *f);
f->index = off++;
f->next = clos.free;
clos.free = f;
// There are two hard-coded immediate values in
// the assembly that need to be pp+i, one 2 bytes in
// and one 2 bytes after the 32-byte boundary.
- mcpy(p, closasm, ClosureSize);
+ runtime·mcpy(p, closasm, ClosureSize);
*(ClosureData***)(p+2) = block+i;
*(ClosureData***)(p+32+2) = block+i;
p += ClosureSize;
}
if(p != clos.buf+sizeof clos.buf)
- throw("bad buf math in closure");
+ runtime·throw("bad buf math in closure");
- e = dyncode_copy(clos.ecode, clos.buf, ClosureChunk);
+ e = runtime·dyncode_copy(clos.ecode, clos.buf, ClosureChunk);
if(e != 0) {
fd = 2;
- printf("dyncode_copy: error %d\n", e);
- throw("dyncode_copy");
+ runtime·printf("dyncode_copy: error %d\n", e);
+ runtime·throw("dyncode_copy");
}
clos.ecode += ClosureChunk;
}
f->next = nil;
p = clos.code + f->index*ClosureSize;
- d = mal(sizeof(*d)+siz);
+ d = runtime·mal(sizeof(*d)+siz);
d->free = f;
d->fn = fn;
d->siz = siz;
- mcpy((byte*)(d+1), (byte*)&arg0, siz);
+ runtime·mcpy((byte*)(d+1), (byte*)&arg0, siz);
*codeptr(p) = d;
- addfinalizer(f, finclosure, 0);
- unlock(&clos);
+ runtime·addfinalizer(f, finclosure, 0);
+ runtime·unlock(&clos);
*ret = p;
}
// Native Client and Linux use the same linkage to main
-TEXT _rt0_386_nacl(SB),7,$0
+TEXT _rt0_386_nacl(SB),7,$0
JMP _rt0_386(SB)
#include "os.h"
void
-initsig(int32 queue)
+runtime·initsig(int32 queue)
{
}
#define SYSCALL(x) $(0x10000+SYS_/**/x * 32)
-TEXT exit(SB),7,$4
+TEXT runtime·exit(SB),7,$4
MOVL code+0(FP), AX
MOVL AX, 0(SP)
CALL SYSCALL(exit)
INT $3 // not reached
RET
-TEXT exit1(SB),7,$4
+TEXT runtime·exit1(SB),7,$4
MOVL code+0(FP), AX
MOVL AX, 0(SP)
CALL SYSCALL(thread_exit)
INT $3 // not reached
RET
-TEXT write(SB),7,$0
+TEXT runtime·write(SB),7,$0
JMP SYSCALL(write)
-TEXT close(SB),7,$0
+TEXT runtime·close(SB),7,$0
JMP SYSCALL(close)
-TEXT mutex_create(SB),7,$0
+TEXT runtime·mutex_create(SB),7,$0
JMP SYSCALL(mutex_create)
-TEXT mutex_lock(SB),7,$0
+TEXT runtime·mutex_lock(SB),7,$0
JMP SYSCALL(mutex_lock)
-TEXT mutex_unlock(SB),7,$0
+TEXT runtime·mutex_unlock(SB),7,$0
JMP SYSCALL(mutex_unlock)
-TEXT thread_create(SB),7,$0
+TEXT runtime·thread_create(SB),7,$0
JMP SYSCALL(thread_create)
-TEXT dyncode_copy(SB),7,$0
+TEXT runtime·dyncode_copy(SB),7,$0
JMP SYSCALL(dyncode_copy)
// For Native Client: a simple no-op function.
// Inserting a call to this no-op is a simple way
// to trigger an alignment.
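// (NaCl validates code in fixed 32-byte bundles, so call targets must be
// bundle-aligned.)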
-TEXT ·naclnop(SB),7,$0
+TEXT runtime·naclnop(SB),7,$0
RET
-TEXT ·mmap(SB),7,$24
+TEXT runtime·mmap(SB),7,$24
MOVL a1+0(FP), BX
MOVL a2+4(FP), CX // round up to 64 kB boundary; silences nacl warning
ADDL $(64*1024-1), CX
CMPL AX, $0xfffff001
JLS 6(PC)
MOVL $1, 0(SP)
- MOVL $mmap_failed(SB), 4(SP)
+ MOVL $runtime·mmap_failed(SB), 4(SP)
MOVL $12, 8(SP) // "mmap failed\n"
CALL SYSCALL(write)
INT $3
RET
-TEXT ·munmap(SB),7,$0
+TEXT runtime·munmap(SB),7,$0
JMP SYSCALL(munmap)
-TEXT gettime(SB),7,$32
+TEXT runtime·gettime(SB),7,$32
LEAL 8(SP), BX
MOVL BX, 0(SP)
MOVL $0, 4(SP)
RET
// setldt(int entry, int address, int limit)
-TEXT setldt(SB),7,$32
+TEXT runtime·setldt(SB),7,$32
// entry is ignored - nacl tells us the
// segment selector to use and stores it in GS.
MOVL address+4(FP), BX
CMPL AX, $0xfffff001
JLS 6(PC)
MOVL $1, 0(SP)
- MOVL $tls_init_failed(SB), 4(SP)
+ MOVL $runtime·tls_init_failed(SB), 4(SP)
MOVL $16, 8(SP) // "tls_init failed\n"
CALL SYSCALL(write)
INT $3
// broken NaCl process, so if something goes wrong,
// print an error string before dying.
-DATA mmap_failed(SB)/8, $"mmap fai"
+DATA runtime·mmap_failed(SB)/8, $"mmap fai"
-DATA mmap_failed+8(SB)/4, $"led\n"
+DATA runtime·mmap_failed+8(SB)/4, $"led\n"
-GLOBL mmap_failed(SB), $12
+GLOBL runtime·mmap_failed(SB), $12
-DATA tls_init_failed(SB)/8, $"tls_init"
+DATA runtime·tls_init_failed(SB)/8, $"tls_init"
-DATA tls_init_failed+8(SB)/8, $" failed\n"
+DATA runtime·tls_init_failed+8(SB)/8, $" failed\n"
-GLOBL tls_init_failed(SB), $16
+GLOBL runtime·tls_init_failed(SB), $16
};
void*
-SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n)
{
mstats.sys += n;
- return runtime_mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
+ return runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
}
void
-SysUnused(void *v, uintptr n)
+runtime·SysUnused(void *v, uintptr n)
{
USED(v);
USED(n);
}
void
-SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n)
{
// round to page size or else nacl prints annoying log messages
mstats.sys -= n;
n = (n+NaclPage-1) & ~(NaclPage-1);
- runtime_munmap(v, n);
+ runtime·munmap(v, n);
}
void
-SysMemInit(void)
+runtime·SysMemInit(void)
{
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-int32 thread_create(void(*fn)(void), void *stk, void *tls, int32 tlssize);
-void close(int32);
-int32 mutex_create(void);
-int32 mutex_lock(int32);
-int32 mutex_unlock(int32);
+int32 runtime·thread_create(void(*fn)(void), void *stk, void *tls, int32 tlssize);
+void runtime·close(int32);
+int32 runtime·mutex_create(void);
+int32 runtime·mutex_lock(int32);
+int32 runtime·mutex_unlock(int32);
if(*psema != 0) // already have one
return;
- sema = mutex_create();
+ sema = runtime·mutex_create();
if((int32)sema < 0) {
- printf("mutex_create failed\n");
- breakpoint();
+ runtime·printf("mutex_create failed\n");
+ runtime·breakpoint();
}
// mutex_create returns a file descriptor;
// shift it up and add the 1 bit so that we can
// tell that it has been initialized (a valid value is never 0).
sema = (sema<<1) | 1;
if(!cas(psema, 0, sema)){
// Someone else filled it in. Use theirs.
- close(sema);
+ runtime·close(sema);
return;
}
}
xlock(int32 fd)
{
if(mutex_lock(fd) < 0) {
- printf("mutex_lock failed\n");
- breakpoint();
+ runtime·printf("mutex_lock failed\n");
+ runtime·breakpoint();
}
}
xunlock(int32 fd)
{
if(mutex_unlock(fd) < 0) {
- printf("mutex_lock failed\n");
- breakpoint();
+ runtime·printf("mutex_lock failed\n");
+ runtime·breakpoint();
}
}
void
-lock(Lock *l)
+runtime·lock(Lock *l)
{
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
m->locks++;
if(l->sema == 0)
- initsema(&l->sema);
- xlock(l->sema>>1);
+ runtime·initsema(&l->sema);
+ runtime·xlock(l->sema>>1);
}
void
-unlock(Lock *l)
+runtime·unlock(Lock *l)
{
m->locks--;
if(m->locks < 0)
- throw("lock count");
- xunlock(l->sema>>1);
+ runtime·throw("lock count");
+ runtime·xunlock(l->sema>>1);
}
void
-destroylock(Lock*)
+runtime·destroylock(Lock*)
{
}
// Native Client does not require that the thread acquiring
// a lock be the thread that releases the lock, so this is safe.
void
-noteclear(Note *n)
+runtime·noteclear(Note *n)
{
if(n->lock.sema == 0)
- initsema(&n->lock.sema);
- xlock(n->lock.sema>>1);
+ runtime·initsema(&n->lock.sema);
+ runtime·xlock(n->lock.sema>>1);
}
void
-notewakeup(Note *n)
+runtime·notewakeup(Note *n)
{
if(n->lock.sema == 0) {
- printf("notewakeup without noteclear");
- breakpoint();
+ runtime·printf("notewakeup without noteclear");
+ runtime·breakpoint();
}
- xunlock(n->lock.sema>>1);
+ runtime·xunlock(n->lock.sema>>1);
}
void
-notesleep(Note *n)
+runtime·notesleep(Note *n)
{
if(n->lock.sema == 0) {
- printf("notesleep without noteclear");
- breakpoint();
+ runtime·printf("notesleep without noteclear");
+ runtime·breakpoint();
}
- xlock(n->lock.sema>>1);
- xunlock(n->lock.sema>>1); // Let other sleepers find out too.
+ runtime·xlock(n->lock.sema>>1);
+ runtime·xunlock(n->lock.sema>>1); // Let other sleepers find out too.
}
void
-newosproc(M *m, G *g, void *stk, void (*fn)(void))
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
{
void **vstk;
vstk = stk;
*--vstk = nil;
if(thread_create(fn, vstk, m->tls, sizeof m->tls) < 0) {
- printf("thread_create failed\n");
- breakpoint();
+ runtime·printf("thread_create failed\n");
+ runtime·breakpoint();
}
}
void
-osinit(void)
+runtime·osinit(void)
{
}
// Called to initialize a new m (including the bootstrap m).
void
-minit(void)
+runtime·minit(void)
{
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
TEXT _rt0_386_plan9(SB),7, $0
MOVL AX, _tos(SB)
// move arguments down to make room for
JMP _rt0_386(SB)
-DATA isplan9+0(SB)/4, $1
-GLOBL isplan9(SB), $4
+DATA runtime·isplan9(SB)/4, $1
+GLOBL runtime·isplan9(SB), $4
GLOBL _tos(SB), $4
#include "runtime.h"
void
-gettime(int64*, int32*)
+runtime·gettime(int64*, int32*)
{
}
#include "386/asm.h"
// setldt(int entry, int address, int limit)
-TEXT setldt(SB),7,$0
+TEXT runtime·setldt(SB),7,$0
RET
-TEXT write(SB),7,$0
+TEXT runtime·write(SB),7,$0
MOVL $20, AX
INT $64
RET
-TEXT exits(SB),7,$0
+TEXT runtime·exits(SB),7,$0
MOVL $8, AX
INT $64
RET
-TEXT brk_(SB),7,$0
+TEXT runtime·brk_(SB),7,$0
MOVL $24, AX
INT $64
RET
-TEXT plan9_semacquire(SB),7,$0
+TEXT runtime·plan9_semacquire(SB),7,$0
MOVL $37, AX
INT $64
RET
-TEXT plan9_semrelease(SB),7,$0
+TEXT runtime·plan9_semrelease(SB),7,$0
MOVL $38, AX
INT $64
RET
-TEXT rfork(SB),7,$0
+TEXT runtime·rfork(SB),7,$0
MOVL $19, AX // rfork
INT $64
MOVL 0xdfffeff8, AX
MOVL AX, m_procid(BX) // save pid as m->procid
- CALL stackcheck(SB) // smashes AX, CX
+ CALL runtime·stackcheck(SB) // smashes AX, CX
MOVL 0(DX), DX // paranoia; check they are not nil
MOVL 0(BX), BX
// more paranoia; check that stack splitting code works
PUSHAL
- CALL emptyfunc(SB)
+ CALL runtime·emptyfunc(SB)
POPAL
CALL SI // fn()
- CALL exit(SB)
+ CALL runtime·exit(SB)
RET
};
void*
-SysAlloc(uintptr ask)
+runtime·SysAlloc(uintptr ask)
{
uintptr bl;
}
void
-SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n)
{
// from tiny/mem.c
// Push pointer back if this is a free
}
void
-SysUnused(void *v, uintptr n)
+runtime·SysUnused(void *v, uintptr n)
{
USED(v, n);
}
void
-SysMemInit(void)
+runtime·SysMemInit(void)
{
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-extern int32 write(int32 fd, void* buffer, int32 nbytes);
-extern void exits(int8* msg);
-extern int32 brk_(void*);
+extern int32 runtime·write(int32 fd, void* buffer, int32 nbytes);
+extern void runtime·exits(int8* msg);
+extern int32 runtime·brk_(void*);
/* rfork */
enum
RFREND = (1<<13),
RFNOMNT = (1<<14)
};
-extern int32 rfork(int32 flags, void *stk, M *m, G *g, void (*fn)(void));
-extern int32 plan9_semacquire(uint32 *addr, int32 block);
-extern int32 plan9_semrelease(uint32 *addr, int32 count);
+extern int32 runtime·rfork(int32 flags, void *stk, M *m, G *g, void (*fn)(void));
+extern int32 runtime·plan9_semacquire(uint32 *addr, int32 block);
+extern int32 runtime·plan9_semrelease(uint32 *addr, int32 count);
int8 *goos = "plan9";
void
-minit(void)
+runtime·minit(void)
{
}
void
-osinit(void)
+runtime·osinit(void)
{
}
void
-initsig(int32 queue)
+runtime·initsig(int32 queue)
{
}
void
-exit(int32)
+runtime·exit(int32)
{
- exits(nil);
+ runtime·exits(nil);
}
void
-newosproc(M *m, G *g, void *stk, void (*fn)(void))
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
{
USED(m, g, stk, fn);
m->tls[0] = m->id; // so 386 asm can find it
if(0){
- printf("newosproc stk=%p m=%p g=%p fn=%p rfork=%p id=%d/%d ostk=%p\n",
+ runtime·printf("newosproc stk=%p m=%p g=%p fn=%p rfork=%p id=%d/%d ostk=%p\n",
stk, m, g, fn, rfork, m->id, m->tls[0], &m);
}
if (rfork(RFPROC | RFMEM, stk, m, g, fn) < 0 )
- throw("newosproc: rfork failed");
+ runtime·throw("newosproc: rfork failed");
}
// Blocking locks.
// in Plan 9's user-level locks.
void
-lock(Lock *l)
+runtime·lock(Lock *l)
{
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
m->locks++;
if(xadd(&l->key, 1) == 1)
}
void
-unlock(Lock *l)
+runtime·unlock(Lock *l)
{
m->locks--;
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
if(xadd(&l->key, -1) == 0)
return; // changed from 1 -> 0: no contention
- plan9_semrelease(&l->sema, 1);
+ runtime·plan9_semrelease(&l->sema, 1);
}
void
-destroylock(Lock *l)
+runtime·destroylock(Lock *l)
{
// nothing
}
// but when it's time to block, fall back on the kernel semaphore k.
// This is the same algorithm used in Plan 9.
void
-usemacquire(Usema *s)
+runtime·usemacquire(Usema *s)
{
if((int32)xadd(&s->u, -1) < 0)
while(plan9_semacquire(&s->k, 1) < 0) {
}
void
-usemrelease(Usema *s)
+runtime·usemrelease(Usema *s)
{
if((int32)xadd(&s->u, 1) <= 0)
- plan9_semrelease(&s->k, 1);
+ runtime·plan9_semrelease(&s->k, 1);
}
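// The count s->u tracks availability in user space: acquire touches the
// kernel semaphore s->k only when xadd(&s->u, -1) goes negative, and
// release posts s->k only when the add back leaves the count at or below
// zero, i.e. when some thread is blocked (or about to block) in the kernel.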
// Event notifications.
void
-noteclear(Note *n)
+runtime·noteclear(Note *n)
{
n->wakeup = 0;
}
void
-notesleep(Note *n)
+runtime·notesleep(Note *n)
{
while(!n->wakeup)
- usemacquire(&n->sema);
+ runtime·usemacquire(&n->sema);
}
void
-notewakeup(Note *n)
+runtime·notewakeup(Note *n)
{
n->wakeup = 1;
- usemrelease(&n->sema);
+ runtime·usemrelease(&n->sema);
}
static void vprintf(int8*, byte*);
void
-dump(byte *p, int32 n)
+runtime·dump(byte *p, int32 n)
{
int32 i;
for(i=0; i<n; i++) {
- ·printpointer((byte*)(p[i]>>4));
- ·printpointer((byte*)(p[i]&0xf));
+ runtime·printpointer((byte*)(p[i]>>4));
+ runtime·printpointer((byte*)(p[i]&0xf));
if((i&15) == 15)
- prints("\n");
+ runtime·prints("\n");
else
- prints(" ");
+ runtime·prints(" ");
}
if(n & 15)
- prints("\n");
+ runtime·prints("\n");
}
void
-prints(int8 *s)
+runtime·prints(int8 *s)
{
- write(fd, s, findnull((byte*)s));
+ runtime·write(runtime·fd, s, runtime·findnull((byte*)s));
}
#pragma textflag 7
void
-printf(int8 *s, ...)
+runtime·printf(int8 *s, ...)
{
byte *arg;
if(*p != '%')
continue;
if(p > lp)
- write(fd, lp, p-lp);
+ runtime·write(runtime·fd, lp, p-lp);
p++;
narg = nil;
switch(*p) {
}
switch(*p) {
case 'a':
- ·printslice(*(Slice*)arg);
+ runtime·printslice(*(Slice*)arg);
break;
case 'd':
- ·printint(*(int32*)arg);
+ runtime·printint(*(int32*)arg);
break;
case 'D':
- ·printint(*(int64*)arg);
+ runtime·printint(*(int64*)arg);
break;
case 'e':
- ·printeface(*(Eface*)arg);
+ runtime·printeface(*(Eface*)arg);
break;
case 'f':
- ·printfloat(*(float64*)arg);
+ runtime·printfloat(*(float64*)arg);
break;
case 'C':
- ·printcomplex(*(Complex128*)arg);
+ runtime·printcomplex(*(Complex128*)arg);
break;
case 'i':
- ·printiface(*(Iface*)arg);
+ runtime·printiface(*(Iface*)arg);
break;
case 'p':
- ·printpointer(*(void**)arg);
+ runtime·printpointer(*(void**)arg);
break;
case 's':
- prints(*(int8**)arg);
+ runtime·prints(*(int8**)arg);
break;
case 'S':
- ·printstring(*(String*)arg);
+ runtime·printstring(*(String*)arg);
break;
case 't':
- ·printbool(*(bool*)arg);
+ runtime·printbool(*(bool*)arg);
break;
case 'U':
- ·printuint(*(uint64*)arg);
+ runtime·printuint(*(uint64*)arg);
break;
case 'x':
- ·printhex(*(uint32*)arg);
+ runtime·printhex(*(uint32*)arg);
break;
case 'X':
- ·printhex(*(uint64*)arg);
+ runtime·printhex(*(uint64*)arg);
break;
- case '!':
- panic(-1);
}
arg = narg;
lp = p+1;
}
if(p > lp)
- write(fd, lp, p-lp);
+ runtime·write(runtime·fd, lp, p-lp);
// unlock(&debuglock);
}
#pragma textflag 7
void
-·printf(String s, ...)
+runtime·goprintf(String s, ...)
{
// Can assume s has terminating NUL because only
- // the Go compiler generates calls to ·printf, using
+ // the Go compiler generates calls to runtime·goprintf, using
// string constants, and all the string constants have NULs.
vprintf((int8*)s.str, (byte*)(&s+1));
}
void
-·printpc(void *p)
+runtime·printpc(void *p)
{
- prints("PC=");
- ·printhex((uint64)·getcallerpc(p));
+ runtime·prints("PC=");
+ runtime·printhex((uint64)runtime·getcallerpc(p));
}
void
-·printbool(bool v)
+runtime·printbool(bool v)
{
if(v) {
- write(fd, (byte*)"true", 4);
+ runtime·write(runtime·fd, (byte*)"true", 4);
return;
}
- write(fd, (byte*)"false", 5);
+ runtime·write(runtime·fd, (byte*)"false", 5);
}
void
-·printfloat(float64 v)
+runtime·printfloat(float64 v)
{
byte buf[20];
int32 e, s, i, n;
float64 h;
- if(isNaN(v)) {
- write(fd, "NaN", 3);
+ if(runtime·isNaN(v)) {
+ runtime·write(runtime·fd, "NaN", 3);
return;
}
- if(isInf(v, 1)) {
- write(fd, "+Inf", 4);
+ if(runtime·isInf(v, 1)) {
+ runtime·write(runtime·fd, "+Inf", 4);
return;
}
- if(isInf(v, -1)) {
- write(fd, "-Inf", 4);
+ if(runtime·isInf(v, -1)) {
+ runtime·write(runtime·fd, "-Inf", 4);
return;
}
buf[n+4] = (e/100) + '0';
buf[n+5] = (e/10)%10 + '0';
buf[n+6] = (e%10) + '0';
- write(fd, buf, n+7);
+ runtime·write(runtime·fd, buf, n+7);
}
void
-·printcomplex(Complex128 v)
+runtime·printcomplex(Complex128 v)
{
- write(fd, "(", 1);
- ·printfloat(v.real);
- ·printfloat(v.imag);
- write(fd, "i)", 2);
+ runtime·write(runtime·fd, "(", 1);
+ runtime·printfloat(v.real);
+ runtime·printfloat(v.imag);
+ runtime·write(runtime·fd, "i)", 2);
}
void
-·printuint(uint64 v)
+runtime·printuint(uint64 v)
{
byte buf[100];
int32 i;
break;
v = v/10;
}
- write(fd, buf+i, nelem(buf)-i);
+ runtime·write(runtime·fd, buf+i, nelem(buf)-i);
}
void
-·printint(int64 v)
+runtime·printint(int64 v)
{
if(v < 0) {
- write(fd, "-", 1);
+ runtime·write(runtime·fd, "-", 1);
v = -v;
}
- ·printuint(v);
+ runtime·printuint(v);
}
void
-·printhex(uint64 v)
+runtime·printhex(uint64 v)
{
static int8 *dig = "0123456789abcdef";
byte buf[100];
buf[--i] = '0';
buf[--i] = 'x';
buf[--i] = '0';
- write(fd, buf+i, nelem(buf)-i);
+ runtime·write(runtime·fd, buf+i, nelem(buf)-i);
}
void
-·printpointer(void *p)
+runtime·printpointer(void *p)
{
- ·printhex((uint64)p);
+ runtime·printhex((uint64)p);
}
void
-·printstring(String v)
+runtime·printstring(String v)
{
- extern int32 maxstring;
+ extern int32 runtime·maxstring;
- if(v.len > maxstring) {
- write(fd, "[invalid string]", 16);
+ if(v.len > runtime·maxstring) {
+ runtime·write(runtime·fd, "[invalid string]", 16);
return;
}
if(v.len > 0)
- write(fd, v.str, v.len);
+ runtime·write(runtime·fd, v.str, v.len);
}
void
-·printsp(void)
+runtime·printsp(void)
{
- write(fd, " ", 1);
+ runtime·write(runtime·fd, " ", 1);
}
void
-·printnl(void)
+runtime·printnl(void)
{
- write(fd, "\n", 1);
+ runtime·write(runtime·fd, "\n", 1);
}
void
-·typestring(Eface e, String s)
+runtime·typestring(Eface e, String s)
{
s = *e.type->string;
FLUSH(&s);
typedef struct Sched Sched;
-M m0;
-G g0; // idle goroutine for m0
+M runtime·m0;
+G runtime·g0; // idle goroutine for m0
static int32 debug = 0;
-int32 gcwaiting;
+int32 runtime·gcwaiting;
// Go scheduler
//
//
// Even a program that can run without deadlock in a single process
// might use more ms if given the chance. For example, the prime
-// sieve will use as many ms as there are primes (up to sched.mmax),
+// sieve will use as many ms as there are primes (up to runtime·sched.mmax),
// allowing different stages of the pipeline to execute in parallel.
// We could revisit this choice, only kicking off new ms for blocking
// system calls, but that would limit the amount of parallel computation
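// (An m is an OS thread and a g is a goroutine; runtime·sched.mcpumax,
// set from GOMAXPROCS in runtime·schedinit, bounds how many ms may be
// executing go code at any one time.)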
int32 waitstop; // after setting this flag
};
-Sched sched;
+Sched runtime·sched;
// Scheduling helpers. Sched must be locked.
static void gput(G*); // put/get on ghead/gtail
// call osinit
// call schedinit
// make & queue new G
-// call mstart
+// call runtime·mstart
//
// The new G does:
//
// call initdone
// call main·main
void
-schedinit(void)
+runtime·schedinit(void)
{
int32 n;
byte *p;
- allm = m;
+ runtime·allm = m;
m->nomemprof++;
- mallocinit();
- goargs();
+ runtime·mallocinit();
+ runtime·goargs();
// For debugging:
// Allocate internal symbol table representation now,
// so that we don't need to call malloc when we crash.
// findfunc(0);
- sched.gomaxprocs = 1;
- p = getenv("GOMAXPROCS");
- if(p != nil && (n = atoi(p)) != 0)
- sched.gomaxprocs = n;
- sched.mcpumax = sched.gomaxprocs;
- sched.mcount = 1;
- sched.predawn = 1;
+ runtime·sched.gomaxprocs = 1;
+ p = runtime·getenv("GOMAXPROCS");
+ if(p != nil && (n = runtime·atoi(p)) != 0)
+ runtime·sched.gomaxprocs = n;
+ runtime·sched.mcpumax = runtime·sched.gomaxprocs;
+ runtime·sched.mcount = 1;
+ runtime·sched.predawn = 1;
m->nomemprof--;
}
// Called after main·init_function; main·main will be called on return.
void
-initdone(void)
+runtime·initdone(void)
{
// Let's go.
- sched.predawn = 0;
+ runtime·sched.predawn = 0;
mstats.enablegc = 1;
// If main·init_function started other goroutines,
// kick off new ms to handle them, like ready
// would have, had it not been pre-dawn.
- lock(&sched);
+ runtime·lock(&runtime·sched);
matchmg();
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
}
void
-goexit(void)
+runtime·goexit(void)
{
g->status = Gmoribund;
- gosched();
+ runtime·gosched();
}
void
-tracebackothers(G *me)
+runtime·tracebackothers(G *me)
{
G *g;
- for(g = allg; g != nil; g = g->alllink) {
+ for(g = runtime·allg; g != nil; g = g->alllink) {
if(g == me || g->status == Gdead)
continue;
- printf("\ngoroutine %d [%d]:\n", g->goid, g->status);
- traceback(g->sched.pc, g->sched.sp, 0, g);
+ runtime·printf("\ngoroutine %d [%d]:\n", g->goid, g->status);
+ runtime·traceback(g->sched.pc, g->sched.sp, 0, g);
}
}
M *m;
// If g is wired, hand it off directly.
- if(sched.mcpu < sched.mcpumax && (m = g->lockedm) != nil) {
+ if(runtime·sched.mcpu < runtime·sched.mcpumax && (m = g->lockedm) != nil) {
mnextg(m, g);
return;
}
g->schedlink = nil;
- if(sched.ghead == nil)
- sched.ghead = g;
+ if(runtime·sched.ghead == nil)
+ runtime·sched.ghead = g;
else
- sched.gtail->schedlink = g;
- sched.gtail = g;
- sched.gwait++;
+ runtime·sched.gtail->schedlink = g;
+ runtime·sched.gtail = g;
+ runtime·sched.gwait++;
}
// Get from `g' queue. Sched must be locked.
{
G *g;
- g = sched.ghead;
+ g = runtime·sched.ghead;
if(g){
- sched.ghead = g->schedlink;
- if(sched.ghead == nil)
- sched.gtail = nil;
- sched.gwait--;
+ runtime·sched.ghead = g->schedlink;
+ if(runtime·sched.ghead == nil)
+ runtime·sched.gtail = nil;
+ runtime·sched.gwait--;
}
return g;
}
static void
mput(M *m)
{
- m->schedlink = sched.mhead;
- sched.mhead = m;
- sched.mwait++;
+ m->schedlink = runtime·sched.mhead;
+ runtime·sched.mhead = m;
+ runtime·sched.mwait++;
}
// Get an `m' to run `g'. Sched must be locked.
return m;
// otherwise use general m pool.
- if((m = sched.mhead) != nil){
- sched.mhead = m->schedlink;
- sched.mwait--;
+ if((m = runtime·sched.mhead) != nil){
+ runtime·sched.mhead = m->schedlink;
+ runtime·sched.mwait--;
}
return m;
}
// Mark g ready to run.
void
-ready(G *g)
+runtime·ready(G *g)
{
- lock(&sched);
+ runtime·lock(&runtime·sched);
readylocked(g);
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
}
// Mark g ready to run. Sched is already locked.
// Mark runnable.
if(g->status == Grunnable || g->status == Grunning || g->status == Grecovery)
- throw("bad g->status in ready");
+ runtime·throw("bad g->status in ready");
g->status = Grunnable;
gput(g);
- if(!sched.predawn)
+ if(!runtime·sched.predawn)
matchmg();
}
static void
mnextg(M *m, G *g)
{
- sched.mcpu++;
+ runtime·sched.mcpu++;
m->nextg = g;
if(m->waitnextg) {
m->waitnextg = 0;
- notewakeup(&m->havenextg);
+ runtime·notewakeup(&m->havenextg);
}
}
{
G *gp;
- if(sched.mcpu < 0)
- throw("negative sched.mcpu");
+ if(runtime·sched.mcpu < 0)
+ runtime·throw("negative runtime·sched.mcpu");
// If there is a g waiting as m->nextg,
- // mnextg took care of the sched.mcpu++.
+ // mnextg took care of the runtime·sched.mcpu++.
if(m->nextg != nil) {
gp = m->nextg;
m->nextg = nil;
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
return gp;
}
// We can only run one g, and it's not available.
// Make sure some other cpu is running to handle
// the ordinary run queue.
- if(sched.gwait != 0)
+ if(runtime·sched.gwait != 0)
matchmg();
} else {
// Look for work on global queue.
- while(sched.mcpu < sched.mcpumax && (gp=gget()) != nil) {
+ while(runtime·sched.mcpu < runtime·sched.mcpumax && (gp=gget()) != nil) {
if(gp->lockedm) {
mnextg(gp->lockedm, gp);
continue;
}
- sched.mcpu++; // this m will run gp
- unlock(&sched);
+ runtime·sched.mcpu++; // this m will run gp
+ runtime·unlock(&runtime·sched);
return gp;
}
// Otherwise, wait on global m queue.
mput(m);
}
- if(sched.mcpu == 0 && sched.msyscall == 0)
- throw("all goroutines are asleep - deadlock!");
+ if(runtime·sched.mcpu == 0 && runtime·sched.msyscall == 0)
+ runtime·throw("all goroutines are asleep - deadlock!");
m->nextg = nil;
m->waitnextg = 1;
- noteclear(&m->havenextg);
- if(sched.waitstop && sched.mcpu <= sched.mcpumax) {
- sched.waitstop = 0;
- notewakeup(&sched.stopped);
+ runtime·noteclear(&m->havenextg);
+ if(runtime·sched.waitstop && runtime·sched.mcpu <= runtime·sched.mcpumax) {
+ runtime·sched.waitstop = 0;
+ runtime·notewakeup(&runtime·sched.stopped);
}
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
- notesleep(&m->havenextg);
+ runtime·notesleep(&m->havenextg);
if((gp = m->nextg) == nil)
- throw("bad m->nextg in nextgoroutine");
+ runtime·throw("bad m->nextg in nextgoroutine");
m->nextg = nil;
return gp;
}
// TODO(rsc): Remove. This is only temporary,
// for the mark and sweep collector.
void
-stoptheworld(void)
+runtime·stoptheworld(void)
{
- lock(&sched);
- gcwaiting = 1;
- sched.mcpumax = 1;
- while(sched.mcpu > 1) {
+ runtime·lock(&runtime·sched);
+ runtime·gcwaiting = 1;
+ runtime·sched.mcpumax = 1;
+ while(runtime·sched.mcpu > 1) {
// It would be unsafe for multiple threads to be using
// the stopped note at once, but there is only
// ever one thread doing garbage collection,
// so this is okay.
- noteclear(&sched.stopped);
- sched.waitstop = 1;
- unlock(&sched);
- notesleep(&sched.stopped);
- lock(&sched);
+ runtime·noteclear(&runtime·sched.stopped);
+ runtime·sched.waitstop = 1;
+ runtime·unlock(&runtime·sched);
+ runtime·notesleep(&runtime·sched.stopped);
+ runtime·lock(&runtime·sched);
}
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
}
// TODO(rsc): Remove. This is only temporary,
// for the mark and sweep collector.
void
-starttheworld(void)
+runtime·starttheworld(void)
{
- lock(&sched);
- gcwaiting = 0;
- sched.mcpumax = sched.gomaxprocs;
+ runtime·lock(&runtime·sched);
+ runtime·gcwaiting = 0;
+ runtime·sched.mcpumax = runtime·sched.gomaxprocs;
matchmg();
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
}
// Called to start an M.
void
-mstart(void)
+runtime·mstart(void)
{
if(g != m->g0)
- throw("bad mstart");
+ runtime·throw("bad runtime·mstart");
if(m->mcache == nil)
- m->mcache = allocmcache();
- minit();
+ m->mcache = runtime·allocmcache();
+ runtime·minit();
scheduler();
}
if(m->mallocing || m->gcing)
return;
- while(sched.mcpu < sched.mcpumax && (g = gget()) != nil){
+ while(runtime·sched.mcpu < runtime·sched.mcpumax && (g = gget()) != nil){
M *m;
// Find the m that will run g.
if((m = mget(g)) == nil){
- m = malloc(sizeof(M));
- // Add to allm so garbage collector doesn't free m
+ m = runtime·malloc(sizeof(M));
+ // Add to runtime·allm so garbage collector doesn't free m
// when it is just in a register (R14 on amd64).
- m->alllink = allm;
- allm = m;
- m->id = sched.mcount++;
+ m->alllink = runtime·allm;
+ runtime·allm = m;
+ m->id = runtime·sched.mcount++;
if(libcgo_thread_start != nil) {
CgoThreadStart ts;
// pthread_create will make us a stack.
- m->g0 = malg(-1);
+ m->g0 = runtime·malg(-1);
ts.m = m;
ts.g = m->g0;
- ts.fn = mstart;
- runcgo(libcgo_thread_start, &ts);
+ ts.fn = runtime·mstart;
+ runtime·runcgo(libcgo_thread_start, &ts);
} else {
if(Windows)
// windows will layout sched stack on os stack
- m->g0 = malg(-1);
+ m->g0 = runtime·malg(-1);
else
- m->g0 = malg(8192);
- newosproc(m, m->g0, m->g0->stackbase, mstart);
+ m->g0 = runtime·malg(8192);
+ runtime·newosproc(m, m->g0, m->g0->stackbase, runtime·mstart);
}
}
mnextg(m, g);
{
G* gp;
- lock(&sched);
- if(gosave(&m->sched) != 0){
+ runtime·lock(&runtime·sched);
+ if(runtime·gosave(&m->sched) != 0){
gp = m->curg;
if(gp->status == Grecovery) {
// switched to scheduler to get stack unwound.
// each call to deferproc.
// (the pc we're returning to does pop pop
// before it tests the return value.)
- gp->sched.sp = getcallersp(d->sp - 2*sizeof(uintptr));
+ gp->sched.sp = runtime·getcallersp(d->sp - 2*sizeof(uintptr));
gp->sched.pc = d->pc;
gp->status = Grunning;
- free(d);
- gogo(&gp->sched, 1);
+ runtime·free(d);
+ runtime·gogo(&gp->sched, 1);
}
- // Jumped here via gosave/gogo, so didn't
- // execute lock(&sched) above.
- lock(&sched);
+ // Jumped here via runtime·gosave/gogo, so didn't
+ // execute lock(&runtime·sched) above.
+ runtime·lock(&runtime·sched);
- if(sched.predawn)
- throw("init sleeping");
+ if(runtime·sched.predawn)
+ runtime·throw("init sleeping");
// Just finished running gp.
gp->m = nil;
- sched.mcpu--;
+ runtime·sched.mcpu--;
- if(sched.mcpu < 0)
- throw("sched.mcpu < 0 in scheduler");
+ if(runtime·sched.mcpu < 0)
+ runtime·throw("runtime·sched.mcpu < 0 in scheduler");
switch(gp->status){
case Grunnable:
case Gdead:
// Shouldn't have been running!
- throw("bad gp->status in sched");
+ runtime·throw("bad gp->status in sched");
case Grunning:
gp->status = Grunnable;
gput(gp);
}
unwindstack(gp, nil);
gfput(gp);
- if(--sched.gcount == 0)
- exit(0);
+ if(--runtime·sched.gcount == 0)
+ runtime·exit(0);
break;
}
if(gp->readyonstop){
}
}
- // Find (or wait for) g to run. Unlocks sched.
+ // Find (or wait for) g to run. Unlocks runtime·sched.
gp = nextgandunlock();
gp->readyonstop = 0;
gp->status = Grunning;
m->curg = gp;
gp->m = m;
- if(gp->sched.pc == (byte*)goexit) { // kickoff
- gogocall(&gp->sched, (void(*)(void))gp->entry);
+ if(gp->sched.pc == (byte*)runtime·goexit) { // kickoff
+ runtime·gogocall(&gp->sched, (void(*)(void))gp->entry);
}
- gogo(&gp->sched, 1);
+ runtime·gogo(&gp->sched, 1);
}
// Enter scheduler. If g->status is Grunning,
// before running g again. If g->status is Gmoribund,
// kills off g.
void
-gosched(void)
+runtime·gosched(void)
{
if(m->locks != 0)
- throw("gosched holding locks");
+ runtime·throw("gosched holding locks");
if(g == m->g0)
- throw("gosched of g0");
- if(gosave(&g->sched) == 0)
- gogo(&m->sched, 1);
+ runtime·throw("gosched of g0");
+ if(runtime·gosave(&g->sched) == 0)
+ runtime·gogo(&m->sched, 1);
}
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
-// Entersyscall cannot split the stack: the gosave must
+// Entersyscall cannot split the stack: the runtime·gosave must
// make g->sched refer to the caller's stack pointer.
#pragma textflag 7
void
-·entersyscall(void)
+runtime·entersyscall(void)
{
- lock(&sched);
+ runtime·lock(&runtime·sched);
// Leave SP around for gc and traceback.
// Do before notewakeup so that gc
// never sees Gsyscall with wrong stack.
- gosave(&g->sched);
- if(sched.predawn) {
- unlock(&sched);
+ runtime·gosave(&g->sched);
+ if(runtime·sched.predawn) {
+ runtime·unlock(&runtime·sched);
return;
}
g->status = Gsyscall;
- sched.mcpu--;
- sched.msyscall++;
- if(sched.gwait != 0)
+ runtime·sched.mcpu--;
+ runtime·sched.msyscall++;
+ if(runtime·sched.gwait != 0)
matchmg();
- if(sched.waitstop && sched.mcpu <= sched.mcpumax) {
- sched.waitstop = 0;
- notewakeup(&sched.stopped);
+ if(runtime·sched.waitstop && runtime·sched.mcpu <= runtime·sched.mcpumax) {
+ runtime·sched.waitstop = 0;
+ runtime·notewakeup(&runtime·sched.stopped);
}
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
}
// The goroutine g exited its system call.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
-·exitsyscall(void)
+runtime·exitsyscall(void)
{
- lock(&sched);
- if(sched.predawn) {
- unlock(&sched);
+ runtime·lock(&runtime·sched);
+ if(runtime·sched.predawn) {
+ runtime·unlock(&runtime·sched);
return;
}
- sched.msyscall--;
- sched.mcpu++;
+ runtime·sched.msyscall--;
+ runtime·sched.mcpu++;
// Fast path - if there's room for this m, we're done.
- if(sched.mcpu <= sched.mcpumax) {
+ if(runtime·sched.mcpu <= runtime·sched.mcpumax) {
g->status = Grunning;
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
return;
}
// Tell the scheduler to put g back on the run queue:
// setting readyonstop rather than Grunnable directly keeps the
// garbage collector from thinking that g is running right now,
// which it's not.
g->readyonstop = 1;
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
// Slow path - all the cpus are taken.
// The scheduler will ready g and put this m to sleep.
// When the scheduler takes g away from m,
- // it will undo the sched.mcpu++ above.
- gosched();
+ // it will undo the runtime·sched.mcpu++ above.
+ runtime·gosched();
}
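// Taken together, entersyscall moves this m from the running count (mcpu)
// to the syscall count (msyscall) so that another m may be started, and
// exitsyscall moves it back; when that would exceed mcpumax, the g marks
// itself readyonstop and yields via runtime·gosched instead.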
// Start scheduling g1 again for a cgo callback.
void
-startcgocallback(G* g1)
+runtime·startcgocallback(G* g1)
{
- lock(&sched);
+ runtime·lock(&runtime·sched);
g1->status = Grunning;
- sched.msyscall--;
- sched.mcpu++;
- unlock(&sched);
+ runtime·sched.msyscall--;
+ runtime·sched.mcpu++;
+ runtime·unlock(&runtime·sched);
}
// Stop scheduling g1 after a cgo callback.
void
-endcgocallback(G* g1)
+runtime·endcgocallback(G* g1)
{
- lock(&sched);
+ runtime·lock(&runtime·sched);
g1->status = Gsyscall;
- sched.mcpu--;
- sched.msyscall++;
- unlock(&sched);
+ runtime·sched.mcpu--;
+ runtime·sched.msyscall++;
+ runtime·unlock(&runtime·sched);
}
/*
};
void
-oldstack(void)
+runtime·oldstack(void)
{
Stktop *top, old;
uint32 args;
args = old.args;
if(args > 0) {
sp -= args;
- mcpy(top->fp, sp, args);
+ runtime·mcpy(top->fp, sp, args);
}
goid = old.gobuf.g->goid; // fault if g is bad, before gogo
if(old.free)
- stackfree(g1->stackguard - StackGuard);
+ runtime·stackfree(g1->stackguard - StackGuard);
g1->stackbase = old.stackbase;
g1->stackguard = old.stackguard;
- gogo(&old.gobuf, m->cret);
+ runtime·gogo(&old.gobuf, m->cret);
}
void
-newstack(void)
+runtime·newstack(void)
{
int32 frame, args;
Stktop *top;
g1 = m->curg;
if(m->morebuf.sp < g1->stackguard - StackGuard)
- throw("split stack overflow");
+ runtime·throw("split stack overflow");
if(frame == 1 && args > 0 && m->morebuf.sp - sizeof(Stktop) - args - 32 > g1->stackguard) {
// special case: called from reflect.call (frame == 1)
if(frame < StackBig)
frame = StackBig;
frame += 1024; // room for more functions, Stktop.
- stk = stackalloc(frame);
+ stk = runtime·stackalloc(frame);
top = (Stktop*)(stk+frame-sizeof(*top));
free = true;
}
sp = (byte*)top;
if(args > 0) {
sp -= args;
- mcpy(sp, m->morefp, args);
+ runtime·mcpy(sp, m->morefp, args);
}
// Continue as if lessstack had just called m->morepc
// (the PC that decided to grow the stack).
label.sp = sp;
- label.pc = (byte*)·lessstack;
+ label.pc = (byte*)runtime·lessstack;
label.g = m->curg;
- gogocall(&label, m->morepc);
+ runtime·gogocall(&label, m->morepc);
*(int32*)345 = 123; // never return
}
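// newstack allocates a segment large enough for the requested frame (or,
// in the reflect.call special case, reuses the current one), copies the
// argument block across, and jumps to the function that needed more stack
// with runtime·lessstack as its return address; unwinding through
// lessstack reaches oldstack above, which copies the argument block back
// and frees the extra segment.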
G*
-malg(int32 stacksize)
+runtime·malg(int32 stacksize)
{
G *g;
byte *stk;
- g = malloc(sizeof(G));
+ g = runtime·malloc(sizeof(G));
if(stacksize >= 0) {
- stk = stackalloc(stacksize + StackGuard);
+ stk = runtime·stackalloc(stacksize + StackGuard);
g->stack0 = stk;
g->stackguard = stk + StackGuard;
g->stackbase = stk + StackGuard + stacksize - sizeof(Stktop);
- runtime_memclr(g->stackbase, sizeof(Stktop));
+ runtime·memclr(g->stackbase, sizeof(Stktop));
}
return g;
}
*/
#pragma textflag 7
void
-·newproc(int32 siz, byte* fn, ...)
+runtime·newproc(int32 siz, byte* fn, ...)
{
- newproc1(fn, (byte*)(&fn+1), siz, 0);
+ runtime·newproc1(fn, (byte*)(&fn+1), siz, 0);
}
G*
-newproc1(byte *fn, byte *argp, int32 narg, int32 nret)
+runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret)
{
byte *sp;
G *newg;
siz = narg + nret;
siz = (siz+7) & ~7;
if(siz > 1024)
- throw("runtime.newproc: too many args");
+ runtime·throw("runtime.newproc: too many args");
- lock(&sched);
+ runtime·lock(&runtime·sched);
if((newg = gfget()) != nil){
newg->status = Gwaiting;
if(newg->stackguard - StackGuard != newg->stack0)
- throw("invalid stack in newg");
+ runtime·throw("invalid stack in newg");
} else {
- newg = malg(4096);
+ newg = runtime·malg(4096);
newg->status = Gwaiting;
- newg->alllink = allg;
- allg = newg;
+ newg->alllink = runtime·allg;
+ runtime·allg = newg;
}
sp = newg->stackbase;
sp -= siz;
- mcpy(sp, argp, narg);
+ runtime·mcpy(sp, argp, narg);
newg->sched.sp = sp;
- newg->sched.pc = (byte*)goexit;
+ newg->sched.pc = (byte*)runtime·goexit;
newg->sched.g = newg;
newg->entry = fn;
- sched.gcount++;
- goidgen++;
- newg->goid = goidgen;
+ runtime·sched.gcount++;
+ runtime·goidgen++;
+ newg->goid = runtime·goidgen;
newprocreadylocked(newg);
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
return newg;
//printf(" goid=%d\n", newg->goid);
#pragma textflag 7
uintptr
-·deferproc(int32 siz, byte* fn, ...)
+runtime·deferproc(int32 siz, byte* fn, ...)
{
Defer *d;
- d = malloc(sizeof(*d) + siz - sizeof(d->args));
+ d = runtime·malloc(sizeof(*d) + siz - sizeof(d->args));
d->fn = fn;
d->sp = (byte*)(&fn+1);
d->siz = siz;
- d->pc = ·getcallerpc(&siz);
- mcpy(d->args, d->sp, d->siz);
+ d->pc = runtime·getcallerpc(&siz);
+ runtime·mcpy(d->args, d->sp, d->siz);
d->link = g->defer;
g->defer = d;
#pragma textflag 7
void
-·deferreturn(uintptr arg0)
+runtime·deferreturn(uintptr arg0)
{
Defer *d;
byte *sp, *fn;
d = g->defer;
if(d == nil)
return;
- sp = getcallersp(&arg0);
+ sp = runtime·getcallersp(&arg0);
if(d->sp != sp)
return;
- mcpy(d->sp, d->args, d->siz);
+ runtime·mcpy(d->sp, d->args, d->siz);
g->defer = d->link;
fn = d->fn;
- free(d);
- jmpdefer(fn, sp);
+ runtime·free(d);
+ runtime·jmpdefer(fn, sp);
}
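// deferproc saves the caller's argument block in d->args together with the
// caller's sp and pc; deferreturn, called on the way out of the deferring
// function, checks that the top Defer belongs to this frame (d->sp == sp),
// copies the saved arguments back onto the stack, and runtime·jmpdefer
// jumps to the deferred function arranged so that it returns to
// deferreturn again, draining the whole chain for the frame.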
static void
while((d = g->defer) != nil) {
g->defer = d->link;
reflect·call(d->fn, d->args, d->siz);
- free(d);
+ runtime·free(d);
}
}
// Must be called from a different goroutine, usually m->g0.
if(g == gp)
- throw("unwindstack on self");
+ runtime·throw("unwindstack on self");
while((top = (Stktop*)gp->stackbase) != nil && top->stackbase != nil) {
stk = gp->stackguard - StackGuard;
gp->stackbase = top->stackbase;
gp->stackguard = top->stackguard;
if(top->free)
- stackfree(stk);
+ runtime·stackfree(stk);
}
if(sp != nil && (sp < gp->stackguard - StackGuard || gp->stackbase < sp)) {
- printf("recover: %p not in [%p, %p]\n", sp, gp->stackguard - StackGuard, gp->stackbase);
- throw("bad unwindstack");
+ runtime·printf("recover: %p not in [%p, %p]\n", sp, gp->stackguard - StackGuard, gp->stackbase);
+ runtime·throw("bad unwindstack");
}
}
{
if(p->link) {
printpanics(p->link);
- printf("\t");
+ runtime·printf("\t");
}
- printf("panic: ");
- ·printany(p->arg);
+ runtime·printf("panic: ");
+ runtime·printany(p->arg);
if(p->recovered)
- printf(" [recovered]");
- printf("\n");
+ runtime·printf(" [recovered]");
+ runtime·printf("\n");
}
void
-·panic(Eface e)
+runtime·panic(Eface e)
{
Defer *d;
Panic *p;
- p = mal(sizeof *p);
+ p = runtime·mal(sizeof *p);
p->arg = e;
p->link = g->panic;
p->stackbase = g->stackbase;
reflect·call(d->fn, d->args, d->siz);
if(p->recovered) {
g->panic = p->link;
- free(p);
+ runtime·free(p);
// put recovering defer back on list
// for scheduler to find.
d->link = g->defer;
g->defer = d;
g->status = Grecovery;
- gosched();
- throw("recovery failed"); // gosched should not return
+ runtime·gosched();
+ runtime·throw("recovery failed"); // gosched should not return
}
- free(d);
+ runtime·free(d);
}
// ran out of deferred calls - old-school panic now
- fd = 2;
+ runtime·fd = 2;
printpanics(g->panic);
- panic(0);
+ runtime·dopanic(0);
}
#pragma textflag 7 /* no split, or else g->stackguard is not the stack for fp */
void
-·recover(byte *fp, Eface ret)
+runtime·recover(byte *fp, Eface ret)
{
Stktop *top, *oldtop;
Panic *p;
- fp = getcallersp(fp);
+ fp = runtime·getcallersp(fp);
// Must be a panic going on.
if((p = g->panic) == nil || p->recovered)
gfput(G *g)
{
if(g->stackguard - StackGuard != g->stack0)
- throw("invalid stack in gfput");
- g->schedlink = sched.gfree;
- sched.gfree = g;
+ runtime·throw("invalid stack in gfput");
+ g->schedlink = runtime·sched.gfree;
+ runtime·sched.gfree = g;
}
// Get from gfree list. Sched must be locked.
{
G *g;
- g = sched.gfree;
+ g = runtime·sched.gfree;
if(g)
- sched.gfree = g->schedlink;
+ runtime·sched.gfree = g->schedlink;
return g;
}
void
-·Breakpoint(void)
+runtime·Breakpoint(void)
{
- breakpoint();
+ runtime·breakpoint();
}
void
-·Goexit(void)
+runtime·Goexit(void)
{
rundefer();
- goexit();
+ runtime·goexit();
}
void
-·Gosched(void)
+runtime·Gosched(void)
{
- gosched();
+ runtime·gosched();
}
void
-·LockOSThread(void)
+runtime·LockOSThread(void)
{
- if(sched.predawn)
- throw("cannot wire during init");
+ if(runtime·sched.predawn)
+ runtime·throw("cannot wire during init");
m->lockedg = g;
g->lockedm = m;
}
// delete when scheduler is stronger
int32
-gomaxprocsfunc(int32 n)
+runtime·gomaxprocsfunc(int32 n)
{
int32 ret;
- lock(&sched);
- ret = sched.gomaxprocs;
+ runtime·lock(&runtime·sched);
+ ret = runtime·sched.gomaxprocs;
if (n <= 0)
n = ret;
- sched.gomaxprocs = n;
- sched.mcpumax = n;
+ runtime·sched.gomaxprocs = n;
+ runtime·sched.mcpumax = n;
// handle fewer procs?
- if(sched.mcpu > sched.mcpumax) {
- unlock(&sched);
+ if(runtime·sched.mcpu > runtime·sched.mcpumax) {
+ runtime·unlock(&runtime·sched);
// just give up the cpu.
// we'll only get rescheduled once the
// number has come down.
- gosched();
+ runtime·gosched();
return ret;
}
// handle more procs
matchmg();
- unlock(&sched);
+ runtime·unlock(&runtime·sched);
return ret;
}
void
-·UnlockOSThread(void)
+runtime·UnlockOSThread(void)
{
m->lockedg = nil;
g->lockedm = nil;
// for testing of wire, unwire
void
-·mid(uint32 ret)
+runtime·mid(uint32 ret)
{
ret = m->id;
FLUSH(&ret);
*/
func mapaccess(map *byte, key *byte, val *byte) (pres bool) {
- mapaccess((Hmap*)map, key, val, &pres);
+ runtime·mapaccess((Hmap*)map, key, val, &pres);
}
func mapassign(map *byte, key *byte, val *byte) {
- mapassign((Hmap*)map, key, val);
+ runtime·mapassign((Hmap*)map, key, val);
}
func maplen(map *byte) (len int32) {
}
func mapiterinit(map *byte) (it *byte) {
- it = (byte*)mapiterinit((Hmap*)map);
+ it = (byte*)runtime·newmapiterinit((Hmap*)map);
}
func mapiternext(it *byte) {
- mapiternext((struct hash_iter*)it);
+ runtime·mapiternext((struct hash_iter*)it);
}
func mapiterkey(it *byte, key *byte) (ok bool) {
- ok = mapiterkey((struct hash_iter*)it, key);
+ ok = runtime·mapiterkey((struct hash_iter*)it, key);
}
func makemap(typ *byte) (map *byte) {
MapType *t;
t = (MapType*)gettype(typ);
- map = (byte*)makemap(t->key, t->elem, 0);
+ map = (byte*)runtime·makemap_c(t->key, t->elem, 0);
}
/*
// in front of the raw ChanType. the -2 below backs up
// to the interface value header.
t = (ChanType*)gettype(typ);
- ch = (byte*)makechan(t->elem, size);
+ ch = (byte*)runtime·makechan_c(t->elem, size);
}
func chansend(ch *byte, val *byte, pres *bool) {
- chansend((Hchan*)ch, val, pres);
+ runtime·chansend((Hchan*)ch, val, pres);
}
func chanrecv(ch *byte, val *byte, pres *bool) {
- chanrecv((Hchan*)ch, val, pres);
+ runtime·chanrecv((Hchan*)ch, val, pres);
}
func chanclose(ch *byte) {
- chanclose((Hchan*)ch);
+ runtime·chanclose((Hchan*)ch);
}
func chanclosed(ch *byte) (r bool) {
- r = chanclosed((Hchan*)ch);
+ r = runtime·chanclosed((Hchan*)ch);
}
func chanlen(ch *byte) (r int32) {
- r = chanlen((Hchan*)ch);
+ r = runtime·chanlen((Hchan*)ch);
}
func chancap(ch *byte) (r int32) {
- r = chancap((Hchan*)ch);
+ r = runtime·chancap((Hchan*)ch);
}
((Iface*)ret)->data = nil;
return;
}
- ifaceE2I((InterfaceType*)gettype(typ), *(Eface*)x, (Iface*)ret);
+ runtime·ifaceE2I((InterfaceType*)gettype(typ), *(Eface*)x, (Iface*)ret);
}
Rune1 = (1<<(Bit1+0*Bitx))-1, /* 0000 0000 0111 1111 */
Rune2 = (1<<(Bit2+1*Bitx))-1, /* 0000 0111 1111 1111 */
Rune3 = (1<<(Bit3+2*Bitx))-1, /* 1111 1111 1111 1111 */
- Rune4 = (1<<(Bit4+3*Bitx))-1,
- /* 0001 1111 1111 1111 1111 1111 */
+ Rune4 = (1<<(Bit4+3*Bitx))-1, /* 0001 1111 1111 1111 1111 1111 */
Maskx = (1<<Bitx)-1, /* 0011 1111 */
Testx = Maskx ^ 0xFF, /* 1100 0000 */
* reasons, we return 1 instead of 0.
*/
int32
-charntorune(int32 *rune, uint8 *str, int32 length)
+runtime·charntorune(int32 *rune, uint8 *str, int32 length)
{
int32 c, c1, c2, c3, l;
}
int32
-runetochar(byte *str, int32 rune) /* note: in original, arg2 was pointer */
+runtime·runetochar(byte *str, int32 rune) /* note: in original, arg2 was pointer */
{
/* Runes are signed, so convert to unsigned for range check. */
uint32 c;
#include "runtime.h"
-int32 panicking = 0;
-int32 maxround = sizeof(uintptr);
-int32 fd = 1;
+enum {
+ maxround = sizeof(uintptr),
+};
+
+int32 runtime·panicking = 0;
+int32 runtime·fd = 1;
int32
-gotraceback(void)
+runtime·gotraceback(void)
{
byte *p;
- p = getenv("GOTRACEBACK");
+ p = runtime·getenv("GOTRACEBACK");
if(p == nil || p[0] == '\0')
return 1; // default is on
- return atoi(p);
+ return runtime·atoi(p);
}
void
-panic(int32 unused)
+runtime·dopanic(int32 unused)
{
- fd = 2;
- if(panicking) {
- printf("double panic\n");
- exit(3);
+ runtime·fd = 2;
+ if(runtime·panicking) {
+ runtime·printf("double panic\n");
+ runtime·exit(3);
}
- panicking++;
+ runtime·panicking++;
- printf("\npanic PC=%X\n", (uint64)(uintptr)&unused);
- if(gotraceback()){
- traceback(·getcallerpc(&unused), getcallersp(&unused), 0, g);
- tracebackothers(g);
+ runtime·printf("\npanic PC=%X\n", (uint64)(uintptr)&unused);
+ if(runtime·gotraceback()){
+ runtime·traceback(runtime·getcallerpc(&unused), runtime·getcallersp(&unused), 0, g);
+ runtime·tracebackothers(g);
}
- breakpoint(); // so we can grab it in a debugger
- exit(2);
+ runtime·breakpoint(); // so we can grab it in a debugger
+ runtime·exit(2);
}
void
-·panicindex(void)
+runtime·panicindex(void)
{
- panicstring("index out of range");
+ runtime·panicstring("index out of range");
}
void
-·panicslice(void)
+runtime·panicslice(void)
{
- panicstring("slice bounds out of range");
+ runtime·panicstring("slice bounds out of range");
}
void
-·throwreturn(void)
+runtime·throwreturn(void)
{
// can only happen if compiler is broken
- throw("no return at end of a typed function - compiler is broken");
+ runtime·throw("no return at end of a typed function - compiler is broken");
}
void
-·throwinit(void)
+runtime·throwinit(void)
{
// can only happen with linker skew
- throw("recursive call during initialization - linker skew");
+ runtime·throw("recursive call during initialization - linker skew");
}
void
-throw(int8 *s)
+runtime·throw(int8 *s)
{
- fd = 2;
- printf("throw: %s\n", s);
- panic(-1);
+ runtime·fd = 2;
+ runtime·printf("throw: %s\n", s);
+ runtime·dopanic(0);
*(int32*)0 = 0; // not reached
- exit(1); // even more not reached
+ runtime·exit(1); // even more not reached
}
void
-panicstring(int8 *s)
+runtime·panicstring(int8 *s)
{
Eface err;
- ·newErrorString(gostringnocopy((byte*)s), &err);
- ·panic(err);
+ runtime·newErrorString(runtime·gostringnocopy((byte*)s), &err);
+ runtime·panic(err);
}
void
-mcpy(byte *t, byte *f, uint32 n)
+runtime·mcpy(byte *t, byte *f, uint32 n)
{
while(n > 0) {
*t = *f;
}
int32
-mcmp(byte *s1, byte *s2, uint32 n)
+runtime·mcmp(byte *s1, byte *s2, uint32 n)
{
uint32 i;
byte c1, c2;
byte*
-mchr(byte *p, byte c, byte *ep)
+runtime·mchr(byte *p, byte c, byte *ep)
{
for(; p < ep; p++)
if(*p == c)
}
uint32
-rnd(uint32 n, uint32 m)
+runtime·rnd(uint32 n, uint32 m)
{
uint32 r;
Slice os·Envs;
void
-args(int32 c, uint8 **v)
+runtime·args(int32 c, uint8 **v)
{
argc = c;
argv = v;
}
-int32 isplan9;
+int32 runtime·isplan9;
void
-goargs(void)
+runtime·goargs(void)
{
String *gargv;
String *genvv;
int32 i, envc;
- if(isplan9)
+ if(runtime·isplan9)
envc=0;
else
for(envc=0; argv[argc+1+envc] != 0; envc++)
;
- gargv = malloc(argc*sizeof gargv[0]);
- genvv = malloc(envc*sizeof genvv[0]);
+ gargv = runtime·malloc(argc*sizeof gargv[0]);
+ genvv = runtime·malloc(envc*sizeof genvv[0]);
for(i=0; i<argc; i++)
- gargv[i] = gostringnocopy(argv[i]);
+ gargv[i] = runtime·gostringnocopy(argv[i]);
os·Args.array = (byte*)gargv;
os·Args.len = argc;
os·Args.cap = argc;
for(i=0; i<envc; i++)
- genvv[i] = gostringnocopy(argv[argc+1+i]);
+ genvv[i] = runtime·gostringnocopy(argv[argc+1+i]);
os·Envs.array = (byte*)genvv;
os·Envs.len = envc;
os·Envs.cap = envc;
// Atomic add and return new value.
uint32
-xadd(uint32 volatile *val, int32 delta)
+runtime·xadd(uint32 volatile *val, int32 delta)
{
uint32 oval, nval;
for(;;){
oval = *val;
nval = oval + delta;
- if(cas(val, oval, nval))
+ if(runtime·cas(val, oval, nval))
return nval;
}
}
byte*
-getenv(int8 *s)
+runtime·getenv(int8 *s)
{
int32 i, j, len;
byte *v, *bs;
int32 envc;
bs = (byte*)s;
- len = findnull(bs);
+ len = runtime·findnull(bs);
envv = (String*)os·Envs.array;
envc = os·Envs.len;
for(i=0; i<envc; i++){
}
void
-·getgoroot(String out)
+runtime·getgoroot(String out)
{
byte *p;
- p = getenv("GOROOT");
- out = gostringnocopy(p);
+ p = runtime·getenv("GOROOT");
+ out = runtime·gostringnocopy(p);
FLUSH(&out);
}
int32
-atoi(byte *p)
+runtime·atoi(byte *p)
{
int32 n;
}
void
-check(void)
+runtime·check(void)
{
int8 a;
uint8 b;
void* k;
uint16* l;
- if(sizeof(a) != 1) throw("bad a");
- if(sizeof(b) != 1) throw("bad b");
- if(sizeof(c) != 2) throw("bad c");
- if(sizeof(d) != 2) throw("bad d");
- if(sizeof(e) != 4) throw("bad e");
- if(sizeof(f) != 4) throw("bad f");
- if(sizeof(g) != 8) throw("bad g");
- if(sizeof(h) != 8) throw("bad h");
- if(sizeof(i) != 4) throw("bad i");
- if(sizeof(j) != 8) throw("bad j");
- if(sizeof(k) != sizeof(uintptr)) throw("bad k");
- if(sizeof(l) != sizeof(uintptr)) throw("bad l");
+ if(sizeof(a) != 1) runtime·throw("bad a");
+ if(sizeof(b) != 1) runtime·throw("bad b");
+ if(sizeof(c) != 2) runtime·throw("bad c");
+ if(sizeof(d) != 2) runtime·throw("bad d");
+ if(sizeof(e) != 4) runtime·throw("bad e");
+ if(sizeof(f) != 4) runtime·throw("bad f");
+ if(sizeof(g) != 8) runtime·throw("bad g");
+ if(sizeof(h) != 8) runtime·throw("bad h");
+ if(sizeof(i) != 4) runtime·throw("bad i");
+ if(sizeof(j) != 8) runtime·throw("bad j");
+ if(sizeof(k) != sizeof(uintptr)) runtime·throw("bad k");
+ if(sizeof(l) != sizeof(uintptr)) runtime·throw("bad l");
// prints("check ok\n");
uint32 z;
z = 1;
- if(!cas(&z, 1, 2))
- throw("cas1");
+ if(!runtime·cas(&z, 1, 2))
+ runtime·throw("cas1");
if(z != 2)
- throw("cas2");
+ runtime·throw("cas2");
z = 4;
- if(cas(&z, 5, 6))
- throw("cas3");
+ if(runtime·cas(&z, 5, 6))
+ runtime·throw("cas3");
if(z != 4)
- throw("cas4");
+ runtime·throw("cas4");
- initsig(0);
+ runtime·initsig(0);
}
/*
memequal(uint32 s, void *a, void *b)
{
byte *ba, *bb, *aend;
- uint32 i;
ba = a;
bb = b;
v = *(uint64*)a;
break;
}
- ·printint(v);
+ runtime·printint(v);
}
static void
strprint(uint32 s, String *a)
{
USED(s);
- ·printstring(*a);
+ runtime·printstring(*a);
}
static uintptr
interhash(uint32 s, Iface *a)
{
USED(s);
- return ifacehash(*a);
+ return runtime·ifacehash(*a);
}
static void
interprint(uint32 s, Iface *a)
{
USED(s);
- ·printiface(*a);
+ runtime·printiface(*a);
}
static uint32
interequal(uint32 s, Iface *a, Iface *b)
{
USED(s);
- return ifaceeq(*a, *b);
+ return runtime·ifaceeq_c(*a, *b);
}
static uintptr
nilinterhash(uint32 s, Eface *a)
{
USED(s);
- return efacehash(*a);
+ return runtime·efacehash(*a);
}
static void
nilinterprint(uint32 s, Eface *a)
{
USED(s);
- ·printeface(*a);
+ runtime·printeface(*a);
}
static uint32
nilinterequal(uint32 s, Eface *a, Eface *b)
{
USED(s);
- return efaceeq(*a, *b);
+ return runtime·efaceeq_c(*a, *b);
}
uintptr
-nohash(uint32 s, void *a)
+runtime·nohash(uint32 s, void *a)
{
USED(s);
USED(a);
- panicstring("hash of unhashable type");
+ runtime·panicstring("hash of unhashable type");
return 0;
}
uint32
-noequal(uint32 s, void *a, void *b)
+runtime·noequal(uint32 s, void *a, void *b)
{
USED(s);
USED(a);
USED(b);
- panicstring("comparing uncomparable types");
+ runtime·panicstring("comparing uncomparable types");
return 0;
}
Alg
-algarray[] =
+runtime·algarray[] =
{
[AMEM] { memhash, memequal, memprint, memcopy },
-[ANOEQ] { nohash, noequal, memprint, memcopy },
+[ANOEQ] { runtime·nohash, runtime·noequal, memprint, memcopy },
[ASTRING] { strhash, strequal, strprint, memcopy },
[AINTER] { interhash, interequal, interprint, memcopy },
[ANILINTER] { nilinterhash, nilinterequal, nilinterprint, memcopy },
[AMEMWORD] { memhash, memwordequal, memprint, memwordcopy },
};
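// runtime·algarray is indexed by the compiler-assigned algorithm kind
// (AMEM, ANOEQ, ASTRING, AINTER, ANILINTER, AMEMWORD); each entry supplies
// the hash, equality, print, and copy routines for values of that kind.
// runtime·nohash and runtime·noequal panic, which is how unhashable or
// uncomparable types fail at run time.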
-#pragma textflag 7
-void
-FLUSH(void *v)
-{
- USED(v);
-}
-
int64
-nanotime(void)
+runtime·nanotime(void)
{
int64 sec;
int32 usec;
sec = 0;
usec = 0;
- gettime(&sec, &usec);
+ runtime·gettime(&sec, &usec);
return sec*1000000000 + (int64)usec*1000;
}
void
-·Caller(int32 skip, uintptr retpc, String retfile, int32 retline, bool retbool)
+runtime·Caller(int32 skip, uintptr retpc, String retfile, int32 retline, bool retbool)
{
Func *f;
- if(callers(1+skip, &retpc, 1) == 0 || (f = findfunc(retpc-1)) == nil) {
- retfile = emptystring;
+ if(runtime·callers(1+skip, &retpc, 1) == 0 || (f = runtime·findfunc(retpc-1)) == nil) {
+ retfile = runtime·emptystring;
retline = 0;
retbool = false;
} else {
retfile = f->src;
- retline = funcline(f, retpc-1);
+ retline = runtime·funcline(f, retpc-1);
retbool = true;
}
FLUSH(&retfile);
}
void
-·Callers(int32 skip, Slice pc, int32 retn)
+runtime·Callers(int32 skip, Slice pc, int32 retn)
{
- retn = callers(skip, (uintptr*)pc.array, pc.len);
+ retn = runtime·callers(skip, (uintptr*)pc.array, pc.len);
FLUSH(&retn);
}
void
-·FuncForPC(uintptr pc, void *retf)
+runtime·FuncForPC(uintptr pc, void *retf)
{
- retf = findfunc(pc);
+ retf = runtime·findfunc(pc);
FLUSH(&retf);
}
/*
* external data
*/
-extern Alg algarray[Amax];
-extern String emptystring;
-G* allg;
-M* allm;
-int32 goidgen;
-extern int32 gomaxprocs;
-extern int32 panicking;
-extern int32 maxround;
-extern int32 fd; // usually 1; set to 2 when panicking
-extern int32 gcwaiting; // gc is waiting to run
-int8* goos;
+extern Alg runtime·algarray[Amax];
+extern String runtime·emptystring;
+G* runtime·allg;
+M* runtime·allm;
+int32 runtime·goidgen;
+extern int32 runtime·gomaxprocs;
+extern int32 runtime·panicking;
+extern int32 runtime·fd; // usually 1; set to 2 when panicking
+extern int32 runtime·gcwaiting; // gc is waiting to run
+int8* runtime·goos;
/*
* common functions and data
*/
-int32 strcmp(byte*, byte*);
-int32 findnull(byte*);
-int32 findnullw(uint16*);
-void dump(byte*, int32);
-int32 runetochar(byte*, int32);
-int32 charntorune(int32*, uint8*, int32);
+int32 runtime·strcmp(byte*, byte*);
+int32 runtime·findnull(byte*);
+int32 runtime·findnullw(uint16*);
+void runtime·dump(byte*, int32);
+int32 runtime·runetochar(byte*, int32);
+int32 runtime·charntorune(int32*, uint8*, int32);
/*
* very low level c-called
*/
-void gogo(Gobuf*, uintptr);
-void gogocall(Gobuf*, void(*)(void));
-uintptr gosave(Gobuf*);
-void ·lessstack(void);
-void goargs(void);
-void FLUSH(void*);
-void* getu(void);
-void throw(int8*);
-void panicstring(int8*);
-uint32 rnd(uint32, uint32);
-void prints(int8*);
-void printf(int8*, ...);
-byte* mchr(byte*, byte, byte*);
-void mcpy(byte*, byte*, uint32);
-int32 mcmp(byte*, byte*, uint32);
-void memmove(void*, void*, uint32);
-void* mal(uintptr);
-uint32 cmpstring(String, String);
-String catstring(String, String);
-String concatstring(int32, String*);
-String gostring(byte*);
-String gostringn(byte*, int32);
-String gostringnocopy(byte*);
-String gostringw(uint16*);
-void initsig(int32);
-int32 gotraceback(void);
-void traceback(uint8 *pc, uint8 *sp, uint8 *lr, G* gp);
-void tracebackothers(G*);
-int32 open(byte*, int32, ...);
-int32 write(int32, void*, int32);
-bool cas(uint32*, uint32, uint32);
-bool casp(void**, void*, void*);
-uint32 xadd(uint32 volatile*, int32);
-void jmpdefer(byte*, void*);
-void exit1(int32);
-void ready(G*);
-byte* getenv(int8*);
-int32 atoi(byte*);
-void newosproc(M *m, G *g, void *stk, void (*fn)(void));
-void signalstack(byte*, int32);
-G* malg(int32);
-void minit(void);
-Func* findfunc(uintptr);
-int32 funcline(Func*, uint64);
-void* stackalloc(uint32);
-void stackfree(void*);
-MCache* allocmcache(void);
-void mallocinit(void);
-bool ifaceeq(Iface, Iface);
-bool efaceeq(Eface, Eface);
-uintptr ifacehash(Iface);
-uintptr efacehash(Eface);
-uintptr nohash(uint32, void*);
-uint32 noequal(uint32, void*, void*);
-void* malloc(uintptr size);
-void free(void *v);
-void addfinalizer(void*, void(*fn)(void*), int32);
-void walkfintab(void (*fn)(void*));
-void runpanic(Panic*);
-void* getcallersp(void*);
-
-void exit(int32);
-void breakpoint(void);
-void gosched(void);
-void goexit(void);
-void runcgo(void (*fn)(void*), void*);
-void runcgocallback(G*, void*, void (*fn)());
-void ·entersyscall(void);
-void ·exitsyscall(void);
-void startcgocallback(G*);
-void endcgocallback(G*);
-G* newproc1(byte*, byte*, int32, int32);
-void siginit(void);
-bool sigsend(int32 sig);
-void gettime(int64*, int32*);
-int32 callers(int32, uintptr*, int32);
-int64 nanotime(void);
-void panic(int32);
-
-#pragma varargck argpos printf 1
-
+#define FLUSH(x) USED(x)
+
+void runtime·gogo(Gobuf*, uintptr);
+void runtime·gogocall(Gobuf*, void(*)(void));
+uintptr runtime·gosave(Gobuf*);
+void runtime·lessstack(void);
+void runtime·goargs(void);
+void* runtime·getu(void);
+void runtime·throw(int8*);
+void runtime·panicstring(int8*);
+uint32 runtime·rnd(uint32, uint32);
+void runtime·prints(int8*);
+void runtime·printf(int8*, ...);
+byte* runtime·mchr(byte*, byte, byte*);
+void runtime·mcpy(byte*, byte*, uint32);
+int32 runtime·mcmp(byte*, byte*, uint32);
+void runtime·memmove(void*, void*, uint32);
+void* runtime·mal(uintptr);
+String runtime·catstring(String, String);
+String runtime·gostring(byte*);
+String runtime·gostringn(byte*, int32);
+String runtime·gostringnocopy(byte*);
+String runtime·gostringw(uint16*);
+void runtime·initsig(int32);
+int32 runtime·gotraceback(void);
+void runtime·traceback(uint8 *pc, uint8 *sp, uint8 *lr, G* gp);
+void runtime·tracebackothers(G*);
+int32 runtime·write(int32, void*, int32);
+bool runtime·cas(uint32*, uint32, uint32);
+bool runtime·casp(void**, void*, void*);
+uint32 runtime·xadd(uint32 volatile*, int32);
+void runtime·jmpdefer(byte*, void*);
+void runtime·exit1(int32);
+void runtime·ready(G*);
+byte* runtime·getenv(int8*);
+int32 runtime·atoi(byte*);
+void runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void));
+void runtime·signalstack(byte*, int32);
+G* runtime·malg(int32);
+void runtime·minit(void);
+Func* runtime·findfunc(uintptr);
+int32 runtime·funcline(Func*, uint64);
+void* runtime·stackalloc(uint32);
+void runtime·stackfree(void*);
+MCache* runtime·allocmcache(void);
+void runtime·mallocinit(void);
+bool runtime·ifaceeq_c(Iface, Iface);
+bool runtime·efaceeq_c(Eface, Eface);
+uintptr runtime·ifacehash(Iface);
+uintptr runtime·efacehash(Eface);
+uintptr runtime·nohash(uint32, void*);
+uint32 runtime·noequal(uint32, void*, void*);
+void* runtime·malloc(uintptr size);
+void runtime·free(void *v);
+void runtime·addfinalizer(void*, void(*fn)(void*), int32);
+void runtime·walkfintab(void (*fn)(void*));
+void runtime·runpanic(Panic*);
+void* runtime·getcallersp(void*);
+
+void runtime·exit(int32);
+void runtime·breakpoint(void);
+void runtime·gosched(void);
+void runtime·goexit(void);
+void runtime·runcgo(void (*fn)(void*), void*);
+void runtime·runcgocallback(G*, void*, void (*fn)());
+void runtime·entersyscall(void);
+void runtime·exitsyscall(void);
+void runtime·startcgocallback(G*);
+void runtime·endcgocallback(G*);
+G* runtime·newproc1(byte*, byte*, int32, int32);
+void runtime·siginit(void);
+bool runtime·sigsend(int32 sig);
+void runtime·gettime(int64*, int32*);
+int32 runtime·callers(int32, uintptr*, int32);
+int64 runtime·nanotime(void);
+void runtime·dopanic(int32);
+
+#pragma varargck argpos runtime·printf 1
#pragma varargck type "d" int32
#pragma varargck type "d" uint32
#pragma varargck type "D" int64
// TODO(rsc): Remove. These are only temporary,
// for the mark and sweep collector.
-void stoptheworld(void);
-void starttheworld(void);
+void runtime·stoptheworld(void);
+void runtime·starttheworld(void);
/*
 * mutual exclusion locks. in the uncontended case,
 * as fast as spin locks (just a few user-level instructions),
 * but on the contention path they sleep in the kernel.
* a zeroed Lock is unlocked (no need to initialize each lock).
*/
-void lock(Lock*);
-void unlock(Lock*);
-void destroylock(Lock*);
+void runtime·lock(Lock*);
+void runtime·unlock(Lock*);
+void runtime·destroylock(Lock*);
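// A minimal usage sketch for the renamed lock API, assuming the Lock type and
// the declarations above from runtime.h; countlock, count and incref are
// hypothetical names.  A zeroed Lock is already unlocked, so no init is needed.
static Lock countlock;
static int32 count;

static void
incref(void)
{
	runtime·lock(&countlock);
	count++;
	runtime·unlock(&countlock);
}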
/*
* sleep and wakeup on one-time events.
* once notewakeup has been called, all the notesleeps
* will return. future notesleeps will return immediately.
*/
-void noteclear(Note*);
-void notesleep(Note*);
-void notewakeup(Note*);
-
-/*
- * Redefine methods for the benefit of gcc, which does not support
- * UTF-8 characters in identifiers.
- */
-#ifndef __GNUC__
-#define runtime_memclr ·memclr
-#define runtime_getcallerpc ·getcallerpc
-#define runtime_mmap ·mmap
-#define runtime_munmap ·munmap
-#define runtime_printslice ·printslice
-#define runtime_printbool ·printbool
-#define runtime_printfloat ·printfloat
-#define runtime_printhex ·printhex
-#define runtime_printint ·printint
-#define runtime_printiface ·printiface
-#define runtime_printeface ·printeface
-#define runtime_printpc ·printpc
-#define runtime_printpointer ·printpointer
-#define runtime_printstring ·printstring
-#define runtime_printuint ·printuint
-#define runtime_printcomplex ·printcomplex
-#define runtime_setcallerpc ·setcallerpc
-#endif
+void runtime·noteclear(Note*);
+void runtime·notesleep(Note*);
+void runtime·notewakeup(Note*);
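// A minimal sketch of the one-time event protocol with the renamed note
// functions, assuming the Note type from runtime.h; "done", finish and
// waitfinish are hypothetical names.
static Note done;

static void
finish(void)
{
	runtime·notewakeup(&done);	// all current and future notesleeps return
}

static void
waitfinish(void)
{
	runtime·noteclear(&done);	// clear before anything can call finish
	// ... start the work that will eventually call finish() ...
	runtime·notesleep(&done);
}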
/*
 * This is consistent across Linux and BSD.
 */
/*
* low level go-called
*/
-uint8* runtime_mmap(byte*, uintptr, int32, int32, int32, uint32);
-void runtime_munmap(uint8*, uintptr);
-void runtime_memclr(byte*, uint32);
-void runtime_setcallerpc(void*, void*);
-void* runtime_getcallerpc(void*);
+uint8* runtime·mmap(byte*, uintptr, int32, int32, int32, uint32);
+void runtime·munmap(uint8*, uintptr);
+void runtime·memclr(byte*, uint32);
+void runtime·setcallerpc(void*, void*);
+void* runtime·getcallerpc(void*);
/*
* runtime go-called
*/
-void runtime_printbool(bool);
-void runtime_printfloat(float64);
-void runtime_printint(int64);
-void runtime_printiface(Iface);
-void runtime_printeface(Eface);
-void runtime_printstring(String);
-void runtime_printpc(void*);
-void runtime_printpointer(void*);
-void runtime_printuint(uint64);
-void runtime_printhex(uint64);
-void runtime_printslice(Slice);
-void runtime_printcomplex(Complex128);
+void runtime·printbool(bool);
+void runtime·printfloat(float64);
+void runtime·printint(int64);
+void runtime·printiface(Iface);
+void runtime·printeface(Eface);
+void runtime·printstring(String);
+void runtime·printpc(void*);
+void runtime·printpointer(void*);
+void runtime·printuint(uint64);
+void runtime·printhex(uint64);
+void runtime·printslice(Slice);
+void runtime·printcomplex(Complex128);
void reflect·call(byte*, byte*, uint32);
-void ·panic(Eface);
-void ·panicindex(void);
-void ·panicslice(void);
+void runtime·panic(Eface);
+void runtime·panicindex(void);
+void runtime·panicslice(void);
/*
* runtime c-called (but written in Go)
*/
-void ·newError(String, Eface*);
-void ·printany(Eface);
-void ·newTypeAssertionError(Type*, Type*, Type*, String*, String*, String*, String*, Eface*);
-void ·newErrorString(String, Eface*);
-void ·fadd64c(uint64, uint64, uint64*);
-void ·fsub64c(uint64, uint64, uint64*);
-void ·fmul64c(uint64, uint64, uint64*);
-void ·fdiv64c(uint64, uint64, uint64*);
-void ·fneg64c(uint64, uint64*);
-void ·f32to64c(uint32, uint64*);
-void ·f64to32c(uint64, uint32*);
-void ·fcmp64c(uint64, uint64, int32*, bool*);
-void ·fintto64c(int64, uint64*);
-void ·f64tointc(uint64, int64*, bool*);
+void runtime·newError(String, Eface*);
+void runtime·printany(Eface);
+void runtime·newTypeAssertionError(Type*, Type*, Type*, String*, String*, String*, String*, Eface*);
+void runtime·newErrorString(String, Eface*);
+void runtime·fadd64c(uint64, uint64, uint64*);
+void runtime·fsub64c(uint64, uint64, uint64*);
+void runtime·fmul64c(uint64, uint64, uint64*);
+void runtime·fdiv64c(uint64, uint64, uint64*);
+void runtime·fneg64c(uint64, uint64*);
+void runtime·f32to64c(uint32, uint64*);
+void runtime·f64to32c(uint64, uint32*);
+void runtime·fcmp64c(uint64, uint64, int32*, bool*);
+void runtime·fintto64c(int64, uint64*);
+void runtime·f64tointc(uint64, int64*, bool*);
/*
* wrapped for go users
*/
-float64 Inf(int32 sign);
-float64 NaN(void);
-float32 float32frombits(uint32 i);
-uint32 float32tobits(float32 f);
-float64 float64frombits(uint64 i);
-uint64 float64tobits(float64 f);
-float64 frexp(float64 d, int32 *ep);
-bool isInf(float64 f, int32 sign);
-bool isNaN(float64 f);
-float64 ldexp(float64 d, int32 e);
-float64 modf(float64 d, float64 *ip);
-void semacquire(uint32*);
-void semrelease(uint32*);
-String signame(int32 sig);
-int32 gomaxprocsfunc(int32 n);
-
-
-void mapassign(Hmap*, byte*, byte*);
-void mapaccess(Hmap*, byte*, byte*, bool*);
-struct hash_iter* mapiterinit(Hmap*);
-void mapiternext(struct hash_iter*);
-bool mapiterkey(struct hash_iter*, void*);
-void mapiterkeyvalue(struct hash_iter*, void*, void*);
-Hmap* makemap(Type*, Type*, int64);
-
-Hchan* makechan(Type*, int64);
-void chansend(Hchan*, void*, bool*);
-void chanrecv(Hchan*, void*, bool*);
-void chanclose(Hchan*);
-bool chanclosed(Hchan*);
-int32 chanlen(Hchan*);
-int32 chancap(Hchan*);
-
-void ifaceE2I(struct InterfaceType*, Eface, Iface*);
+float64 runtime·Inf(int32 sign);
+float64 runtime·NaN(void);
+float32 runtime·float32frombits(uint32 i);
+uint32 runtime·float32tobits(float32 f);
+float64 runtime·float64frombits(uint64 i);
+uint64 runtime·float64tobits(float64 f);
+float64 runtime·frexp(float64 d, int32 *ep);
+bool runtime·isInf(float64 f, int32 sign);
+bool runtime·isNaN(float64 f);
+float64 runtime·ldexp(float64 d, int32 e);
+float64 runtime·modf(float64 d, float64 *ip);
+void runtime·semacquire(uint32*);
+void runtime·semrelease(uint32*);
+String runtime·signame(int32 sig);
+int32 runtime·gomaxprocsfunc(int32 n);
+
+void runtime·mapassign(Hmap*, byte*, byte*);
+void runtime·mapaccess(Hmap*, byte*, byte*, bool*);
+struct hash_iter* runtime·newmapiterinit(Hmap*);
+void runtime·mapiternext(struct hash_iter*);
+bool runtime·mapiterkey(struct hash_iter*, void*);
+void runtime·mapiterkeyvalue(struct hash_iter*, void*, void*);
+Hmap* runtime·makemap_c(Type*, Type*, int64);
+
+Hchan* runtime·makechan_c(Type*, int64);
+void runtime·chansend(Hchan*, void*, bool*);
+void runtime·chanrecv(Hchan*, void*, bool*);
+void runtime·chanclose(Hchan*);
+bool runtime·chanclosed(Hchan*);
+int32 runtime·chanlen(Hchan*);
+int32 runtime·chancap(Hchan*);
+
+void runtime·ifaceE2I(struct InterfaceType*, Eface, Iface*);
#include "runtime.h"
func GOMAXPROCS(n int32) (ret int32) {
- ret = gomaxprocsfunc(n);
+ ret = runtime·gomaxprocsfunc(n);
}
s->addr = addr;
s->g = nil;
- lock(&semlock);
+ runtime·lock(&semlock);
s->prev = semlast;
s->next = nil;
if(semlast)
else
semfirst = s;
semlast = s;
- unlock(&semlock);
+ runtime·unlock(&semlock);
}
static void
semdequeue(Sema *s)
{
- lock(&semlock);
+ runtime·lock(&semlock);
if(s->next)
s->next->prev = s->prev;
else
semfirst = s->next;
s->prev = nil;
s->next = nil;
- unlock(&semlock);
+ runtime·unlock(&semlock);
}
static void
{
Sema *s;
- lock(&semlock);
+ runtime·lock(&semlock);
for(s=semfirst; s; s=s->next) {
if(s->addr == addr && s->g) {
- ready(s->g);
+ runtime·ready(s->g);
s->g = nil;
break;
}
}
- unlock(&semlock);
+ runtime·unlock(&semlock);
}
// Step 1 of sleep: make ourselves available for wakeup.
static void
semsleep1(Sema *s)
{
- lock(&semlock);
+ runtime·lock(&semlock);
s->g = g;
- unlock(&semlock);
+ runtime·unlock(&semlock);
}
// Decided not to go through with it: undo step 1.
static void
semsleepundo1(Sema *s)
{
- lock(&semlock);
+ runtime·lock(&semlock);
if(s->g != nil) {
s->g = nil; // back ourselves out
} else {
*(int32*)0x555 = 555;
g->readyonstop = 0;
}
- unlock(&semlock);
+ runtime·unlock(&semlock);
}
// Step 2: wait for the wakeup.
{
USED(s);
g->status = Gwaiting;
- gosched();
+ runtime·gosched();
}
static int32
uint32 v;
while((v = *addr) > 0)
- if(cas(addr, v, v-1))
+ if(runtime·cas(addr, v, v-1))
return 1;
return 0;
}
// For now has no return value.
// Might return an ok (not interrupted) bool in the future?
void
-semacquire(uint32 *addr)
+runtime·semacquire(uint32 *addr)
{
Sema s;
}
void
-semrelease(uint32 *addr)
+runtime·semrelease(uint32 *addr)
{
uint32 v;
for(;;) {
v = *addr;
- if(cas(addr, v, v+1))
+ if(runtime·cas(addr, v, v+1))
break;
}
semwakeup(addr);
}
func Semacquire(addr *uint32) {
- semacquire(addr);
+ runtime·semacquire(addr);
}
func Semrelease(addr *uint32) {
- semrelease(addr);
+ runtime·semrelease(addr);
}
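// A minimal sketch of the semaphore protocol above, assuming the declarations
// from runtime.h; "slots" and useslot are hypothetical.  The uint32 is a
// counting semaphore: semacquire waits for a positive count and decrements it,
// semrelease increments it and wakes one sleeper.
static uint32 slots = 4;	// allow at most four concurrent users

static void
useslot(void)
{
	runtime·semacquire(&slots);
	// ... use the shared resource ...
	runtime·semrelease(&slots);
}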
} sig;
void
-siginit(void)
+runtime·siginit(void)
{
- noteclear(&sig);
+ runtime·noteclear(&sig);
}
// Called from sighandler to send a signal back out of the signal handling thread.
bool
-sigsend(int32 s)
+runtime·sigsend(int32 s)
{
uint32 bit, mask;
mask = sig.mask;
if(mask & bit)
break; // signal already in queue
- if(cas(&sig.mask, mask, mask|bit)) {
+ if(runtime·cas(&sig.mask, mask, mask|bit)) {
// Added to queue.
// Only send a wakeup for the first signal in each round.
if(mask == 0)
- notewakeup(&sig);
+ runtime·notewakeup(&sig);
break;
}
}
// Called to receive a bitmask of queued signals.
func Sigrecv() (m uint32) {
- ·entersyscall();
- notesleep(&sig);
- ·exitsyscall();
- noteclear(&sig);
+ runtime·entersyscall();
+ runtime·notesleep(&sig);
+ runtime·exitsyscall();
+ runtime·noteclear(&sig);
for(;;) {
m = sig.mask;
- if(cas(&sig.mask, m, 0))
+ if(runtime·cas(&sig.mask, m, 0))
break;
}
}
func Signame(sig int32) (name String) {
- name = signame(sig);
+ name = runtime·signame(sig);
}
func Siginit() {
- initsig(SigQueue);
+ runtime·initsig(SigQueue);
sig.inuse = true; // enable reception of signals; cannot disable
}
static int32 debug = 0;
-static void makeslice(SliceType*, int32, int32, Slice*);
- void ·slicecopy(Slice to, Slice fm, uintptr width, int32 ret);
+static void makeslice1(SliceType*, int32, int32, Slice*);
+ void runtime·slicecopy(Slice to, Slice fm, uintptr width, int32 ret);
// see also unsafe·NewArray
// makeslice(typ *Type, len, cap int64) (ary []any);
void
-·makeslice(SliceType *t, int64 len, int64 cap, Slice ret)
+runtime·makeslice(SliceType *t, int64 len, int64 cap, Slice ret)
{
if(len < 0 || (int32)len != len)
- panicstring("makeslice: len out of range");
+ runtime·panicstring("makeslice: len out of range");
if(cap < len || (int32)cap != cap || cap > ((uintptr)-1) / t->elem->size)
- panicstring("makeslice: cap out of range");
+ runtime·panicstring("makeslice: cap out of range");
- makeslice(t, len, cap, &ret);
+ makeslice1(t, len, cap, &ret);
if(debug) {
- printf("makeslice(%S, %D, %D); ret=",
+ runtime·printf("makeslice(%S, %D, %D); ret=",
*t->string, len, cap);
- ·printslice(ret);
+ runtime·printslice(ret);
}
}
static void
-makeslice(SliceType *t, int32 len, int32 cap, Slice *ret)
+makeslice1(SliceType *t, int32 len, int32 cap, Slice *ret)
{
uintptr size;
ret->cap = cap;
if((t->elem->kind&KindNoPointers))
- ret->array = mallocgc(size, RefNoPointers, 1, 1);
+ ret->array = runtime·mallocgc(size, RefNoPointers, 1, 1);
else
- ret->array = mal(size);
+ ret->array = runtime·mal(size);
}
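// A worked example of the cap guard in runtime·makeslice above: with a 32-bit
// uintptr and t->elem->size == 8, (uintptr)-1 / 8 is 536870911, so a requested
// cap of 536870912 would make cap*size wrap to 0 inside makeslice1; the check
// rejects it with "makeslice: cap out of range" before anything is allocated.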
-static void appendslice(SliceType*, Slice, Slice, Slice*);
+static void appendslice1(SliceType*, Slice, Slice, Slice*);
// append(type *Type, n int, old []T, ...,) []T
#pragma textflag 7
void
-·append(SliceType *t, int32 n, Slice old, ...)
+runtime·append(SliceType *t, int32 n, Slice old, ...)
{
Slice sl;
Slice *ret;
sl.len = n;
sl.array = (byte*)(&old+1);
ret = (Slice*)(sl.array + ((t->elem->size*n+sizeof(uintptr)-1) & ~(sizeof(uintptr)-1)));
- appendslice(t, old, sl, ret);
+ appendslice1(t, old, sl, ret);
}
// appendslice(type *Type, x, y, []T) []T
void
-·appendslice(SliceType *t, Slice x, Slice y, Slice ret)
+runtime·appendslice(SliceType *t, Slice x, Slice y, Slice ret)
{
- appendslice(t, x, y, &ret);
+ appendslice1(t, x, y, &ret);
}
static void
-appendslice(SliceType *t, Slice x, Slice y, Slice *ret)
+appendslice1(SliceType *t, Slice x, Slice y, Slice *ret)
{
Slice newx;
int32 m;
uintptr w;
if(x.len+y.len < x.len)
- throw("append: slice overflow");
+ runtime·throw("append: slice overflow");
w = t->elem->size;
if(x.len+y.len > x.cap) {
m += m/4;
} while(m < x.len+y.len);
}
- makeslice(t, x.len, m, &newx);
- memmove(newx.array, x.array, x.len*w);
+ makeslice1(t, x.len, m, &newx);
+ runtime·memmove(newx.array, x.array, x.len*w);
x = newx;
}
- memmove(x.array+x.len*w, y.array, y.len*w);
+ runtime·memmove(x.array+x.len*w, y.array, y.len*w);
x.len += y.len;
*ret = x;
}
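// A note on the growth loop in appendslice1 above: once x.cap is too small,
// the candidate capacity m keeps growing by a further quarter (m += m/4) until
// it covers x.len+y.len, e.g. 100 -> 125 -> 156 -> 195 -> ...; only then is a
// new backing array made with makeslice1 and the old contents moved over with
// runtime·memmove.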
// sliceslice(old []any, lb uint64, hb uint64, width uint64) (ary []any);
void
-·sliceslice(Slice old, uint64 lb, uint64 hb, uint64 width, Slice ret)
+runtime·sliceslice(Slice old, uint64 lb, uint64 hb, uint64 width, Slice ret)
{
if(hb > old.cap || lb > hb) {
if(debug) {
- prints("runtime.sliceslice: old=");
- ·printslice(old);
- prints("; lb=");
- ·printint(lb);
- prints("; hb=");
- ·printint(hb);
- prints("; width=");
- ·printint(width);
- prints("\n");
-
- prints("oldarray: nel=");
- ·printint(old.len);
- prints("; cap=");
- ·printint(old.cap);
- prints("\n");
+ runtime·prints("runtime.sliceslice: old=");
+ runtime·printslice(old);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; hb=");
+ runtime·printint(hb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("\n");
+
+ runtime·prints("oldarray: nel=");
+ runtime·printint(old.len);
+ runtime·prints("; cap=");
+ runtime·printint(old.cap);
+ runtime·prints("\n");
}
- ·panicslice();
+ runtime·panicslice();
}
// new array is inside old array
FLUSH(&ret);
if(debug) {
- prints("runtime.sliceslice: old=");
- ·printslice(old);
- prints("; lb=");
- ·printint(lb);
- prints("; hb=");
- ·printint(hb);
- prints("; width=");
- ·printint(width);
- prints("; ret=");
- ·printslice(ret);
- prints("\n");
+ runtime·prints("runtime.sliceslice: old=");
+ runtime·printslice(old);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; hb=");
+ runtime·printint(hb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("; ret=");
+ runtime·printslice(ret);
+ runtime·prints("\n");
}
}
// sliceslice1(old []any, lb uint64, width uint64) (ary []any);
void
-·sliceslice1(Slice old, uint64 lb, uint64 width, Slice ret)
+runtime·sliceslice1(Slice old, uint64 lb, uint64 width, Slice ret)
{
if(lb > old.len) {
if(debug) {
- prints("runtime.sliceslice: old=");
- ·printslice(old);
- prints("; lb=");
- ·printint(lb);
- prints("; width=");
- ·printint(width);
- prints("\n");
-
- prints("oldarray: nel=");
- ·printint(old.len);
- prints("; cap=");
- ·printint(old.cap);
- prints("\n");
+ runtime·prints("runtime.sliceslice: old=");
+ runtime·printslice(old);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("\n");
+
+ runtime·prints("oldarray: nel=");
+ runtime·printint(old.len);
+ runtime·prints("; cap=");
+ runtime·printint(old.cap);
+ runtime·prints("\n");
}
- ·panicslice();
+ runtime·panicslice();
}
// new array is inside old array
FLUSH(&ret);
if(debug) {
- prints("runtime.sliceslice: old=");
- ·printslice(old);
- prints("; lb=");
- ·printint(lb);
- prints("; width=");
- ·printint(width);
- prints("; ret=");
- ·printslice(ret);
- prints("\n");
+ runtime·prints("runtime.sliceslice: old=");
+ runtime·printslice(old);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("; ret=");
+ runtime·printslice(ret);
+ runtime·prints("\n");
}
}
// slicearray(old *any, nel uint64, lb uint64, hb uint64, width uint64) (ary []any);
void
-·slicearray(byte* old, uint64 nel, uint64 lb, uint64 hb, uint64 width, Slice ret)
+runtime·slicearray(byte* old, uint64 nel, uint64 lb, uint64 hb, uint64 width, Slice ret)
{
if(nel > 0 && old == nil) {
// crash if old == nil.
if(hb > nel || lb > hb) {
if(debug) {
- prints("runtime.slicearray: old=");
- ·printpointer(old);
- prints("; nel=");
- ·printint(nel);
- prints("; lb=");
- ·printint(lb);
- prints("; hb=");
- ·printint(hb);
- prints("; width=");
- ·printint(width);
- prints("\n");
+ runtime·prints("runtime.slicearray: old=");
+ runtime·printpointer(old);
+ runtime·prints("; nel=");
+ runtime·printint(nel);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; hb=");
+ runtime·printint(hb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("\n");
}
- ·panicslice();
+ runtime·panicslice();
}
// new array is inside old array
FLUSH(&ret);
if(debug) {
- prints("runtime.slicearray: old=");
- ·printpointer(old);
- prints("; nel=");
- ·printint(nel);
- prints("; lb=");
- ·printint(lb);
- prints("; hb=");
- ·printint(hb);
- prints("; width=");
- ·printint(width);
- prints("; ret=");
- ·printslice(ret);
- prints("\n");
+ runtime·prints("runtime.slicearray: old=");
+ runtime·printpointer(old);
+ runtime·prints("; nel=");
+ runtime·printint(nel);
+ runtime·prints("; lb=");
+ runtime·printint(lb);
+ runtime·prints("; hb=");
+ runtime·printint(hb);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("; ret=");
+ runtime·printslice(ret);
+ runtime·prints("\n");
}
}
// slicecopy(to any, fr any, wid uint32) int
void
-·slicecopy(Slice to, Slice fm, uintptr width, int32 ret)
+runtime·slicecopy(Slice to, Slice fm, uintptr width, int32 ret)
{
if(fm.len == 0 || to.len == 0 || width == 0) {
ret = 0;
if(ret == 1 && width == 1) { // common case worth about 2x to do here
*to.array = *fm.array; // known to be a byte pointer
} else {
- memmove(to.array, fm.array, ret*width);
+ runtime·memmove(to.array, fm.array, ret*width);
}
out:
FLUSH(&ret);
if(debug) {
- prints("main·copy: to=");
- ·printslice(to);
- prints("; fm=");
- ·printslice(fm);
- prints("; width=");
- ·printint(width);
- prints("; ret=");
- ·printint(ret);
- prints("\n");
+ runtime·prints("main·copy: to=");
+ runtime·printslice(to);
+ runtime·prints("; fm=");
+ runtime·printslice(fm);
+ runtime·prints("; width=");
+ runtime·printint(width);
+ runtime·prints("; ret=");
+ runtime·printint(ret);
+ runtime·prints("\n");
}
}
void
-·slicestringcopy(Slice to, String fm, int32 ret)
+runtime·slicestringcopy(Slice to, String fm, int32 ret)
{
if(fm.len == 0 || to.len == 0) {
ret = 0;
if(to.len < ret)
ret = to.len;
- memmove(to.array, fm.str, ret);
+ runtime·memmove(to.array, fm.str, ret);
out:
FLUSH(&ret);
}
void
-·printslice(Slice a)
+runtime·printslice(Slice a)
{
- prints("[");
- ·printint(a.len);
- prints("/");
- ·printint(a.cap);
- prints("]");
- ·printpointer(a.array);
+ runtime·prints("[");
+ runtime·printint(a.len);
+ runtime·prints("/");
+ runtime·printint(a.cap);
+ runtime·prints("]");
+ runtime·printpointer(a.array);
}
#include "runtime.h"
#include "malloc.h"
-String emptystring;
+String runtime·emptystring;
int32
-findnull(byte *s)
+runtime·findnull(byte *s)
{
int32 l;
}
int32
-findnullw(uint16 *s)
+runtime·findnullw(uint16 *s)
{
int32 l;
return l;
}
-int32 maxstring = 256;
+int32 runtime·maxstring = 256;
String
-gostringsize(int32 l)
+runtime·gostringsize(int32 l)
{
String s;
if(l == 0)
- return emptystring;
- s.str = mal(l+1); // leave room for NUL for C runtime (e.g., callers of getenv)
+ return runtime·emptystring;
+ s.str = runtime·mal(l+1); // leave room for NUL for C runtime (e.g., callers of getenv)
s.len = l;
- if(l > maxstring)
- maxstring = l;
+ if(l > runtime·maxstring)
+ runtime·maxstring = l;
return s;
}
String
-gostring(byte *str)
+runtime·gostring(byte *str)
{
int32 l;
String s;
- l = findnull(str);
- s = gostringsize(l);
- mcpy(s.str, str, l);
+ l = runtime·findnull(str);
+ s = runtime·gostringsize(l);
+ runtime·mcpy(s.str, str, l);
return s;
}
String
-gostringn(byte *str, int32 l)
+runtime·gostringn(byte *str, int32 l)
{
- String s;
+ String s;
- s = gostringsize(l);
- mcpy(s.str, str, l);
- return s;
+ s = runtime·gostringsize(l);
+ runtime·mcpy(s.str, str, l);
+ return s;
}
String
-gostringnocopy(byte *str)
+runtime·gostringnocopy(byte *str)
{
String s;
s.str = str;
- s.len = findnull(str);
+ s.len = runtime·findnull(str);
return s;
}
String
-gostringw(uint16 *str)
+runtime·gostringw(uint16 *str)
{
int32 n, i;
byte buf[8];
n = 0;
for(i=0; str[i]; i++)
- n += runetochar(buf, str[i]);
- s = gostringsize(n+4);
+ n += runtime·runetochar(buf, str[i]);
+ s = runtime·gostringsize(n+4);
n = 0;
for(i=0; str[i]; i++)
- n += runetochar(s.str+n, str[i]);
+ n += runtime·runetochar(s.str+n, str[i]);
s.len = n;
return s;
}
String
-catstring(String s1, String s2)
+runtime·catstring(String s1, String s2)
{
String s3;
if(s2.len == 0)
return s1;
- s3 = gostringsize(s1.len + s2.len);
- mcpy(s3.str, s1.str, s1.len);
- mcpy(s3.str+s1.len, s2.str, s2.len);
+ s3 = runtime·gostringsize(s1.len + s2.len);
+ runtime·mcpy(s3.str, s1.str, s1.len);
+ runtime·mcpy(s3.str+s1.len, s2.str, s2.len);
return s3;
}
-String
+static String
concatstring(int32 n, String *s)
{
int32 i, l;
l = 0;
for(i=0; i<n; i++) {
if(l + s[i].len < l)
- throw("string concatenation too long");
+ runtime·throw("string concatenation too long");
l += s[i].len;
}
- out = gostringsize(l);
+ out = runtime·gostringsize(l);
l = 0;
for(i=0; i<n; i++) {
- mcpy(out.str+l, s[i].str, s[i].len);
+ runtime·mcpy(out.str+l, s[i].str, s[i].len);
l += s[i].len;
}
return out;
(&s1)[n] = concatstring(n, &s1);
}
-uint32
+static int32
cmpstring(String s1, String s2)
{
uint32 i, l;
}
int32
-strcmp(byte *s1, byte *s2)
+runtime·strcmp(byte *s1, byte *s2)
{
uint32 i;
byte c1, c2;
if(lindex < 0 || lindex > si.len ||
hindex < lindex || hindex > si.len) {
- ·panicslice();
+ runtime·panicslice();
}
l = hindex-lindex;
int32 l;
if(lindex < 0 || lindex > si.len) {
- ·panicslice();
+ runtime·panicslice();
}
l = si.len-lindex;
}
func intstring(v int64) (s String) {
- s = gostringsize(8);
- s.len = runetochar(s.str, v);
+ s = runtime·gostringsize(8);
+ s.len = runtime·runetochar(s.str, v);
}
func slicebytetostring(b Slice) (s String) {
- s = gostringsize(b.len);
- mcpy(s.str, b.array, s.len);
+ s = runtime·gostringsize(b.len);
+ runtime·mcpy(s.str, b.array, s.len);
}
func stringtoslicebyte(s String) (b Slice) {
- b.array = mallocgc(s.len, RefNoPointers, 1, 1);
+ b.array = runtime·mallocgc(s.len, RefNoPointers, 1, 1);
b.len = s.len;
b.cap = s.len;
- mcpy(b.array, s.str, s.len);
+ runtime·mcpy(b.array, s.str, s.len);
}
func sliceinttostring(b Slice) (s String) {
a = (int32*)b.array;
siz1 = 0;
for(i=0; i<b.len; i++) {
- siz1 += runetochar(dum, a[i]);
+ siz1 += runtime·runetochar(dum, a[i]);
}
- s = gostringsize(siz1+4);
+ s = runtime·gostringsize(siz1+4);
siz2 = 0;
for(i=0; i<b.len; i++) {
// check for race
if(siz2 >= siz1)
break;
- siz2 += runetochar(s.str+siz2, a[i]);
+ siz2 += runtime·runetochar(s.str+siz2, a[i]);
}
s.len = siz2;
}
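// A note on the "check for race" guard above: the first pass over b measured
// siz1 bytes and gostringsize allocated only that much (plus a little slack);
// if another goroutine rewrites the rune values in b between the two passes,
// the second encoding pass could need more bytes, so it stops once siz2
// reaches siz1 instead of writing past the end of s.str.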
ep = s.str+s.len;
n = 0;
while(p < ep) {
- p += charntorune(&dum, p, ep-p);
+ p += runtime·charntorune(&dum, p, ep-p);
n++;
}
- b.array = mallocgc(n*sizeof(r[0]), RefNoPointers, 1, 1);
+ b.array = runtime·mallocgc(n*sizeof(r[0]), RefNoPointers, 1, 1);
b.len = n;
b.cap = n;
p = s.str;
r = (int32*)b.array;
while(p < ep)
- p += charntorune(r++, p, ep-p);
+ p += runtime·charntorune(r++, p, ep-p);
}
enum
}
// multi-char rune
- retk = k + charntorune(&l, s.str+k, s.len-k);
+ retk = k + runtime·charntorune(&l, s.str+k, s.len-k);
out:
}
}
// multi-char rune
- retk = k + charntorune(&retv, s.str+k, s.len-k);
+ retk = k + runtime·charntorune(&retv, s.str+k, s.len-k);
out:
}
}
p = q+2;
}else{
- q = mchr(p, '\0', ep);
+ q = runtime·mchr(p, '\0', ep);
if(q == nil)
break;
p = q+1;
case 'T':
case 'l':
case 'L':
- if(strcmp(sym->name, (byte*)"etext") == 0)
+ if(runtime·strcmp(sym->name, (byte*)"etext") == 0)
break;
if(func == nil) {
nfunc++;
break;
}
f = &func[nfunc++];
- f->name = gostringnocopy(sym->name);
+ f->name = runtime·gostringnocopy(sym->name);
f->entry = sym->value;
if(sym->symtype == 'L' || sym->symtype == 'l')
f->frame = -sizeof(uintptr);
if(fname == nil) {
if(sym->value >= nfname) {
if(sym->value >= 0x10000) {
- printf("invalid symbol file index %p\n", sym->value);
- throw("mangled symbol table");
+ runtime·printf("invalid symbol file index %p\n", sym->value);
+ runtime·throw("mangled symbol table");
}
nfname = sym->value+1;
}
if(n >= nfname)
break;
q = fname[n];
- len = findnull(q);
+ len = runtime·findnull(q);
if(p+1+len >= ep)
break;
if(p > buf && p[-1] != '/')
*p++ = '/';
- mcpy(p, q, len+1);
+ runtime·mcpy(p, q, len+1);
p += len;
}
}
switch(sym->symtype) {
case 't':
case 'T':
- if(strcmp(sym->name, (byte*)"etext") == 0)
+ if(runtime·strcmp(sym->name, (byte*)"etext") == 0)
break;
f = &func[nfunc++];
// find source file
nfile = 0;
if(nfile == nelem(files))
return;
- files[nfile].srcstring = gostring(srcbuf);
+ files[nfile].srcstring = runtime·gostring(srcbuf);
files[nfile].aline = 0;
files[nfile++].delta = 0;
} else {
incstart = sym->value;
if(nhist == 0 && nfile < nelem(files)) {
// new top-level file
- files[nfile].srcstring = gostring(srcbuf);
+ files[nfile].srcstring = runtime·gostring(srcbuf);
files[nfile].aline = sym->value;
// this is "line 0"
files[nfile++].delta = sym->value - 1;
// (Source file is f->src.)
// NOTE(rsc): If you edit this function, also edit extern.go:/FileLine
int32
-funcline(Func *f, uint64 targetpc)
+runtime·funcline(Func *f, uint64 targetpc)
{
byte *p, *ep;
uintptr pc;
walksymtab(dofunc);
// initialize tables
- func = mal((nfunc+1)*sizeof func[0]);
+ func = runtime·mal((nfunc+1)*sizeof func[0]);
func[nfunc].entry = (uint64)etext;
- fname = mal(nfname*sizeof fname[0]);
+ fname = runtime·mal(nfname*sizeof fname[0]);
nfunc = 0;
walksymtab(dofunc);
}
Func*
-findfunc(uintptr addr)
+runtime·findfunc(uintptr addr)
{
Func *f;
int32 nf, n;
- lock(&funclock);
+ runtime·lock(&funclock);
if(func == nil)
buildfuncs();
- unlock(&funclock);
+ runtime·unlock(&funclock);
if(nfunc == 0)
return nil;
// that the address was in the table bounds.
// this can only happen if the table isn't sorted
// by address or if the binary search above is buggy.
- prints("findfunc unreachable\n");
+ runtime·prints("findfunc unreachable\n");
return nil;
}
MOVL AX, SP
// Set up memory hardware.
- CALL msetup(SB)
+ CALL runtime·msetup(SB)
// _rt0_386 expects to find argc, argv, envv on stack.
// Set up argv=["kernel"] and envv=[].
SUBL $64, SP
MOVL $1, 0(SP)
- MOVL $kernel(SB), 4(SP)
+ MOVL $runtime·kernel(SB), 4(SP)
MOVL $0, 8(SP)
MOVL $0, 12(SP)
JMP _rt0_386(SB)
-DATA kernel+0(SB)/7, $"kernel\z"
-GLOBL kernel(SB), $7
+DATA runtime·kernel+0(SB)/7, $"kernel\z"
+GLOBL runtime·kernel(SB), $7
#include "runtime.h"
-extern void ·write(int32 fd, void *v, int32 len, int32 cap); // slice, spelled out
+extern void runtime·write(int32 fd, void *v, int32 len, int32 cap); // slice, spelled out
int32
-write(int32 fd, void *v, int32 len)
+runtime·write(int32 fd, void *v, int32 len)
{
- ·write(fd, v, len, len);
+ runtime·write(fd, v, len, len);
return len;
}
void
-gettime(int64*, int32*)
+runtime·gettime(int64*, int32*)
{
}
// Called to set up memory hardware.
// Already running in 32-bit mode thanks to boot block,
// but we need to install our new GDT that we can modify.
-TEXT msetup(SB), 7, $0
- MOVL gdtptr(SB), GDTR
+TEXT runtime·msetup(SB), 7, $0
+ MOVL runtime·gdtptr(SB), GDTR
MOVL $(1*8+0), AX
MOVW AX, DS
MOVW AX, ES
// long jmp to cs:mret
BYTE $0xEA
- LONG $mret(SB)
+ LONG $runtime·mret(SB)
WORD $(2*8+0)
-TEXT mret(SB), 7, $0
+TEXT runtime·mret(SB), 7, $0
RET
// GDT memory
-TEXT gdt(SB), 7, $0
+TEXT runtime·gdt(SB), 7, $0
// null segment
LONG $0
LONG $0
LONG $0
// GDT pseudo-descriptor
-TEXT gdtptr(SB), 7, $0
+TEXT runtime·gdtptr(SB), 7, $0
WORD $(4*8)
- LONG $gdt(SB)
+ LONG $runtime·gdt(SB)
// Called to establish the per-thread segment.
// Write to gdt[3] and reload the gdt register.
// setldt(int entry, int address, int limit)
-TEXT setldt(SB),7,$32
+TEXT runtime·setldt(SB),7,$32
MOVL address+4(FP), BX // aka base
MOVL limit+8(FP), CX
MOVB CX, 6(AX)
MOVB $0xF2, 5(AX) // r/w data descriptor, dpl=3, present
- MOVL gdtptr(SB), GDTR
+ MOVL runtime·gdtptr(SB), GDTR
// Compute segment selector - (entry*8+0)
MOVL $(3*8+0), AX
static byte *allocp;
void*
-SysAlloc(uintptr ask)
+runtime·SysAlloc(uintptr ask)
{
extern byte end[];
byte *q;
q = allocp;
allocp += ask;
- ·memclr(q, ask);
+ runtime·memclr(q, ask);
return q;
}
void
-SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n)
{
// Push pointer back if this is a free
// of the most recent SysAlloc.
}
void
-SysUnused(void *v, uintptr n)
+runtime·SysUnused(void *v, uintptr n)
{
USED(v, n);
}
void
-SysMemInit(void)
+runtime·SysMemInit(void)
{
}
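// A sketch of how the tiny port's bump allocator above behaves; p1 and p2 are
// hypothetical.  Allocations are carved off sequentially from the linker
// symbol `end`, and SysFree only rolls allocp back when it is handed the most
// recent allocation:
//
//	p1 = runtime·SysAlloc(4096);
//	p2 = runtime·SysAlloc(4096);
//	runtime·SysFree(p1, 4096);	// not the most recent allocation: a no-op leak
//	runtime·SysFree(p2, 4096);	// most recent: allocp rolls back over p2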
int8 *goos = "tiny";
void
-minit(void)
+runtime·minit(void)
{
}
void
-osinit(void)
+runtime·osinit(void)
{
}
void
-initsig(int32 queue)
+runtime·initsig(int32 queue)
{
}
void
-exit(int32)
+runtime·exit(int32)
{
for(;;);
}
// so no need for real concurrency or atomicity
void
-newosproc(M *m, G *g, void *stk, void (*fn)(void))
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
{
USED(m, g, stk, fn);
- throw("newosproc");
+ runtime·throw("newosproc");
}
void
-lock(Lock *l)
+runtime·lock(Lock *l)
{
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
m->locks++;
if(l->key != 0)
- throw("deadlock");
+ runtime·throw("deadlock");
l->key = 1;
}
void
-unlock(Lock *l)
+runtime·unlock(Lock *l)
{
m->locks--;
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
if(l->key != 1)
- throw("unlock of unlocked lock");
+ runtime·throw("unlock of unlocked lock");
l->key = 0;
}
void
-destroylock(Lock *l)
+runtime·destroylock(Lock *l)
{
- // nothing
+ // nothing
}
void
-noteclear(Note *n)
+runtime·noteclear(Note *n)
{
n->lock.key = 0;
}
void
-notewakeup(Note *n)
+runtime·notewakeup(Note *n)
{
n->lock.key = 1;
}
void
-notesleep(Note *n)
+runtime·notesleep(Note *n)
{
if(n->lock.key != 1)
- throw("notesleep");
+ runtime·throw("notesleep");
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-TEXT _rt0_386_windows(SB),7,$0
+TEXT _rt0_386_windows(SB),7,$0
JMP _rt0_386(SB)
#include "runtime.h"
void
-initsig(int32)
+runtime·initsig(int32)
{
}
String
-signame(int32)
+runtime·signame(int32)
{
- return emptystring;
+ return runtime·emptystring;
}
#include "386/asm.h"
-TEXT get_kernel_module(SB),7,$0
+TEXT runtime·get_kernel_module(SB),7,$0
MOVL 0x30(FS), AX // get PEB
MOVL 0x0c(AX), AX // get PEB_LDR_DATA
MOVL 0x1c(AX), AX // get init order module list
RET
// void *stdcall_raw(void *fn, int32 count, uintptr *args)
-TEXT stdcall_raw(SB),7,$4
+TEXT runtime·stdcall_raw(SB),7,$4
// Copy arguments from stack.
MOVL fn+0(FP), AX
MOVL count+4(FP), CX // words
RET
// void tstart(M *newm);
-TEXT tstart(SB),7,$0
+TEXT runtime·tstart(SB),7,$0
MOVL newm+4(SP), CX // m
MOVL m_g0(CX), DX // g
PUSHL DI // original stack
- CALL stackcheck(SB) // clobbers AX,CX
+ CALL runtime·stackcheck(SB) // clobbers AX,CX
- CALL mstart(SB)
+ CALL runtime·mstart(SB)
POPL DI // original stack
MOVL DI, SP
RET
// uint32 tstart_stdcall(M *newm);
-TEXT tstart_stdcall(SB),7,$0
+TEXT runtime·tstart_stdcall(SB),7,$0
MOVL newm+4(SP), BX
PUSHL BX
- CALL tstart+0(SB)
+ CALL runtime·tstart(SB)
POPL BX
// Adjust stack for stdcall to return properly.
RET
// setldt(int entry, int address, int limit)
-TEXT setldt(SB),7,$0
+TEXT runtime·setldt(SB),7,$0
MOVL address+4(FP), CX
MOVL CX, 0x2c(FS)
RET
// for now, return 0,0. only used for internal performance monitoring.
-TEXT gettime(SB),7,$0
+TEXT runtime·gettime(SB),7,$0
MOVL sec+0(FP), DI
MOVL $0, (DI)
MOVL $0, 4(DI) // zero extend 32 -> 64 bits
};
void*
-SysAlloc(uintptr n)
+runtime·SysAlloc(uintptr n)
{
- return stdcall(VirtualAlloc, 4, nil, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
+ return runtime·stdcall(runtime·VirtualAlloc, 4, nil, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
}
void
-SysUnused(void *v, uintptr n)
+runtime·SysUnused(void *v, uintptr n)
{
USED(v);
USED(n);
}
void
-SysFree(void *v, uintptr n)
+runtime·SysFree(void *v, uintptr n)
{
- stdcall(VirtualFree, 3, v, n, MEM_RELEASE);
+ runtime·stdcall(runtime·VirtualFree, 3, v, n, MEM_RELEASE);
}
void
-SysMemInit(void)
+runtime·SysMemInit(void)
{
}
// The following function allows one to dynamically
// resolve DLL function names.
// The arguments are strings.
-void *get_proc_addr(void *library, void *name);
+void *runtime·get_proc_addr(void *library, void *name);
-extern void *VirtualAlloc;
-extern void *VirtualFree;
-extern void *LoadLibraryEx;
-extern void *GetProcAddress;
-extern void *GetLastError;
+extern void *runtime·VirtualAlloc;
+extern void *runtime·VirtualFree;
+extern void *runtime·LoadLibraryEx;
+extern void *runtime·GetProcAddress;
+extern void *runtime·GetLastError;
#define goargs windows_goargs
-void windows_goargs(void);
+void runtime·windows_goargs(void);
// Get start address of symbol data in memory.
-void *get_symdat_addr(void);
+void *runtime·get_symdat_addr(void);
// Call a Windows function with stdcall conventions,
// and switch to os stack during the call.
-void *stdcall_raw(void *fn, int32 count, uintptr *args);
-void *stdcall(void *fn, int32 count, ...);
+void *runtime·stdcall_raw(void *fn, int32 count, uintptr *args);
+void *runtime·stdcall(void *fn, int32 count, ...);
// Function to be called by windows CreateThread
// to start new os thread.
-uint32 tstart_stdcall(M *newm);
+uint32 runtime·tstart_stdcall(M *newm);
// Call stdcall Windows function StdcallParams.fn
// with params StdcallParams.args,
uintptr err;
};
-void syscall(StdcallParams *p);
+void runtime·syscall(StdcallParams *p);
func loadlibraryex(filename uintptr) (handle uint32) {
StdcallParams p;
- p.fn = (void*)LoadLibraryEx;
+ p.fn = (void*)runtime·LoadLibraryEx;
p.args[0] = filename;
p.args[1] = 0;
p.args[2] = 0;
p.n = 3;
- syscall(&p);
+ runtime·syscall(&p);
handle = p.r;
}
func getprocaddress(handle uint32, procname uintptr) (proc uintptr) {
StdcallParams p;
- p.fn = (void*)GetProcAddress;
+ p.fn = (void*)runtime·GetProcAddress;
p.args[0] = handle;
p.args[1] = procname;
p.n = 2;
- syscall(&p);
+ runtime·syscall(&p);
proc = p.r;
}
p.args[1] = a2;
p.args[2] = a3;
p.n = 3;
- syscall(&p);
+ runtime·syscall(&p);
r1 = p.r;
r2 = 0;
err = p.err;
p.args[4] = a5;
p.args[5] = a6;
p.n = 6;
- syscall(&p);
+ runtime·syscall(&p);
r1 = p.r;
r2 = 0;
err = p.err;
p.args[7] = a8;
p.args[8] = a9;
p.n = 9;
- syscall(&p);
+ runtime·syscall(&p);
r1 = p.r;
r2 = 0;
lasterr = p.err;
p.args[10] = a11;
p.args[11] = a12;
p.n = 12;
- syscall(&p);
+ runtime·syscall(&p);
r1 = p.r;
r2 = 0;
lasterr = p.err;
p.args[1] = a2;
p.args[2] = a3;
p.n = 3;
- syscall(&p);
+ runtime·syscall(&p);
r1 = p.r;
r2 = 0;
err = p.err;
#include "runtime.h"
#include "os.h"
-extern void *get_kernel_module(void);
+extern void *runtime·get_kernel_module(void);
// Also referenced by external packages
-void *CloseHandle;
-void *ExitProcess;
-void *GetStdHandle;
-void *SetEvent;
-void *WriteFile;
-void *VirtualAlloc;
-void *VirtualFree;
-void *LoadLibraryEx;
-void *GetProcAddress;
-void *GetLastError;
-void *SetLastError;
+void *runtime·CloseHandle;
+void *runtime·ExitProcess;
+void *runtime·GetStdHandle;
+void *runtime·SetEvent;
+void *runtime·WriteFile;
+void *runtime·VirtualAlloc;
+void *runtime·VirtualFree;
+void *runtime·LoadLibraryEx;
+void *runtime·GetProcAddress;
+void *runtime·GetLastError;
+void *runtime·SetLastError;
static void *CreateEvent;
static void *CreateThread;
ordinals = (uint16*)(base+*(uint32*)(exports+0x24));
for(i=0; i<entries; i++) {
byte *s = base+names[i];
- if(!strcmp(name, s))
+ if(runtime·strcmp(name, s) == 0)
break;
}
if(i == entries)
}
void
-osinit(void)
+runtime·osinit(void)
{
void *base;
- base = get_kernel_module();
- GetProcAddress = get_proc_addr2(base, (byte*)"GetProcAddress");
- LoadLibraryEx = get_proc_addr2(base, (byte*)"LoadLibraryExA");
- CloseHandle = get_proc_addr("kernel32.dll", "CloseHandle");
- CreateEvent = get_proc_addr("kernel32.dll", "CreateEventA");
- CreateThread = get_proc_addr("kernel32.dll", "CreateThread");
- ExitProcess = get_proc_addr("kernel32.dll", "ExitProcess");
- GetStdHandle = get_proc_addr("kernel32.dll", "GetStdHandle");
- SetEvent = get_proc_addr("kernel32.dll", "SetEvent");
- VirtualAlloc = get_proc_addr("kernel32.dll", "VirtualAlloc");
- VirtualFree = get_proc_addr("kernel32.dll", "VirtualFree");
- WaitForSingleObject = get_proc_addr("kernel32.dll", "WaitForSingleObject");
- WriteFile = get_proc_addr("kernel32.dll", "WriteFile");
- GetLastError = get_proc_addr("kernel32.dll", "GetLastError");
- SetLastError = get_proc_addr("kernel32.dll", "SetLastError");
+ base = runtime·get_kernel_module();
+ runtime·GetProcAddress = get_proc_addr2(base, (byte*)"GetProcAddress");
+ runtime·LoadLibraryEx = get_proc_addr2(base, (byte*)"LoadLibraryExA");
+ runtime·CloseHandle = runtime·get_proc_addr("kernel32.dll", "CloseHandle");
+ CreateEvent = runtime·get_proc_addr("kernel32.dll", "CreateEventA");
+ CreateThread = runtime·get_proc_addr("kernel32.dll", "CreateThread");
+ runtime·ExitProcess = runtime·get_proc_addr("kernel32.dll", "ExitProcess");
+ runtime·GetStdHandle = runtime·get_proc_addr("kernel32.dll", "GetStdHandle");
+ runtime·SetEvent = runtime·get_proc_addr("kernel32.dll", "SetEvent");
+ runtime·VirtualAlloc = runtime·get_proc_addr("kernel32.dll", "VirtualAlloc");
+ runtime·VirtualFree = runtime·get_proc_addr("kernel32.dll", "VirtualFree");
+ WaitForSingleObject = runtime·get_proc_addr("kernel32.dll", "WaitForSingleObject");
+ runtime·WriteFile = runtime·get_proc_addr("kernel32.dll", "WriteFile");
+ runtime·GetLastError = runtime·get_proc_addr("kernel32.dll", "GetLastError");
+ runtime·SetLastError = runtime·get_proc_addr("kernel32.dll", "SetLastError");
}
// The arguments are strings.
void*
-get_proc_addr(void *library, void *name)
+runtime·get_proc_addr(void *library, void *name)
{
void *base;
- base = stdcall(LoadLibraryEx, 3, library, 0, 0);
- return stdcall(GetProcAddress, 2, base, name);
+ base = runtime·stdcall(runtime·LoadLibraryEx, 3, library, 0, 0);
+ return runtime·stdcall(runtime·GetProcAddress, 2, base, name);
}
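// A minimal sketch of resolving a Windows export at run time through the
// helper above; "Sleep" and sleepms are hypothetical examples, any kernel32
// export would do.
static void *Sleep;

static void
sleepms(int32 ms)
{
	if(Sleep == nil)
		Sleep = runtime·get_proc_addr("kernel32.dll", "Sleep");
	runtime·stdcall(Sleep, 1, ms);
}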
void
-windows_goargs(void)
+runtime·windows_goargs(void)
{
extern Slice os·Args;
extern Slice os·Envs;
int32 i, argc, envc;
uint16 *envp;
- gcl = get_proc_addr("kernel32.dll", "GetCommandLineW");
- clta = get_proc_addr("shell32.dll", "CommandLineToArgvW");
- ges = get_proc_addr("kernel32.dll", "GetEnvironmentStringsW");
- fes = get_proc_addr("kernel32.dll", "FreeEnvironmentStringsW");
+ gcl = runtime·get_proc_addr("kernel32.dll", "GetCommandLineW");
+ clta = runtime·get_proc_addr("shell32.dll", "CommandLineToArgvW");
+ ges = runtime·get_proc_addr("kernel32.dll", "GetEnvironmentStringsW");
+ fes = runtime·get_proc_addr("kernel32.dll", "FreeEnvironmentStringsW");
- cmd = stdcall(gcl, 0);
- env = stdcall(ges, 0);
- argv = stdcall(clta, 2, cmd, &argc);
+ cmd = runtime·stdcall(gcl, 0);
+ env = runtime·stdcall(ges, 0);
+ argv = runtime·stdcall(clta, 2, cmd, &argc);
envc = 0;
for(envp=env; *envp; envc++)
- envp += findnullw(envp)+1;
+ envp += runtime·findnullw(envp)+1;
- gargv = malloc(argc*sizeof gargv[0]);
- genvv = malloc(envc*sizeof genvv[0]);
+ gargv = runtime·malloc(argc*sizeof gargv[0]);
+ genvv = runtime·malloc(envc*sizeof genvv[0]);
for(i=0; i<argc; i++)
- gargv[i] = gostringw(argv[i]);
+ gargv[i] = runtime·gostringw(argv[i]);
os·Args.array = (byte*)gargv;
os·Args.len = argc;
os·Args.cap = argc;
envp = env;
for(i=0; i<envc; i++) {
- genvv[i] = gostringw(envp);
- envp += findnullw(envp)+1;
+ genvv[i] = runtime·gostringw(envp);
+ envp += runtime·findnullw(envp)+1;
}
os·Envs.array = (byte*)genvv;
os·Envs.len = envc;
os·Envs.cap = envc;
- stdcall(fes, 1, env);
+ runtime·stdcall(fes, 1, env);
}
void
-exit(int32 code)
+runtime·exit(int32 code)
{
- stdcall(ExitProcess, 1, code);
+ runtime·stdcall(runtime·ExitProcess, 1, code);
}
int32
-write(int32 fd, void *buf, int32 n)
+runtime·write(int32 fd, void *buf, int32 n)
{
void *handle;
uint32 written;
written = 0;
switch(fd) {
case 1:
- handle = stdcall(GetStdHandle, 1, -11);
+ handle = runtime·stdcall(runtime·GetStdHandle, 1, -11);
break;
case 2:
- handle = stdcall(GetStdHandle, 1, -12);
+ handle = runtime·stdcall(runtime·GetStdHandle, 1, -12);
break;
default:
return -1;
}
- stdcall(WriteFile, 5, handle, buf, n, &written, 0);
+ runtime·stdcall(runtime·WriteFile, 5, handle, buf, n, &written, 0);
return written;
}
{
void *event;
- event = stdcall(CreateEvent, 4, 0, 0, 0, 0);
- if(!casp(pevent, 0, event)) {
+ event = runtime·stdcall(CreateEvent, 4, 0, 0, 0, 0);
+ if(!runtime·casp(pevent, 0, event)) {
// Someone else filled it in. Use theirs.
- stdcall(CloseHandle, 1, event);
+ runtime·stdcall(runtime·CloseHandle, 1, event);
}
}
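// A minimal sketch of the lazy-initialization idiom used by initevent above,
// assuming the runtime·casp and runtime·stdcall declarations; "handle" and
// inithandle are hypothetical.  The loser of the race closes its duplicate.
static void *handle;

static void
inithandle(void)
{
	void *h;

	if(handle != nil)
		return;
	h = runtime·stdcall(CreateEvent, 4, 0, 0, 0, 0);
	if(!runtime·casp(&handle, nil, h))
		runtime·stdcall(runtime·CloseHandle, 1, h);	// lost the race; use theirs
}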
if(l->event == 0)
initevent(&l->event);
- if(xadd(&l->key, 1) > 1) // someone else has it; wait
- stdcall(WaitForSingleObject, 2, l->event, -1);
+ if(runtime·xadd(&l->key, 1) > 1) // someone else has it; wait
+ runtime·stdcall(WaitForSingleObject, 2, l->event, -1);
}
static void
eventunlock(Lock *l)
{
- if(xadd(&l->key, -1) > 0) // someone else is waiting
- stdcall(SetEvent, 1, l->event);
+ if(runtime·xadd(&l->key, -1) > 0) // someone else is waiting
+ runtime·stdcall(runtime·SetEvent, 1, l->event);
}
void
-lock(Lock *l)
+runtime·lock(Lock *l)
{
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
m->locks++;
eventlock(l);
}
void
-unlock(Lock *l)
+runtime·unlock(Lock *l)
{
m->locks--;
if(m->locks < 0)
- throw("lock count");
+ runtime·throw("lock count");
eventunlock(l);
}
void
-destroylock(Lock *l)
+runtime·destroylock(Lock *l)
{
if(l->event != 0)
- stdcall(CloseHandle, 1, l->event);
+ runtime·stdcall(runtime·CloseHandle, 1, l->event);
}
void
-noteclear(Note *n)
+runtime·noteclear(Note *n)
{
eventlock(&n->lock);
}
void
-notewakeup(Note *n)
+runtime·notewakeup(Note *n)
{
eventunlock(&n->lock);
}
void
-notesleep(Note *n)
+runtime·notesleep(Note *n)
{
eventlock(&n->lock);
eventunlock(&n->lock); // Let other sleepers find out too.
}
void
-newosproc(M *m, G *g, void *stk, void (*fn)(void))
+runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
{
USED(stk);
USED(g); // assuming g = m->g0
USED(fn); // assuming fn = mstart
- stdcall(CreateThread, 6, 0, 0, tstart_stdcall, m, 0, 0);
+ runtime·stdcall(CreateThread, 6, 0, 0, runtime·tstart_stdcall, m, 0, 0);
}
// Called to initialize a new m (including the bootstrap m).
void
-minit(void)
+runtime·minit(void)
{
}
// Calling stdcall on os stack.
#pragma textflag 7
void *
-stdcall(void *fn, int32 count, ...)
+runtime·stdcall(void *fn, int32 count, ...)
{
- return stdcall_raw(fn, count, (uintptr*)(&count + 1));
+ return runtime·stdcall_raw(fn, count, (uintptr*)(&count + 1));
}
void
-syscall(StdcallParams *p)
+runtime·syscall(StdcallParams *p)
{
uintptr a;
- ·entersyscall();
+ runtime·entersyscall();
// TODO(brainman): Move calls to SetLastError and GetLastError
// to stdcall_raw to speed up syscall.
a = 0;
- stdcall_raw(SetLastError, 1, &a);
- p->r = (uintptr)stdcall_raw((void*)p->fn, p->n, p->args);
- p->err = (uintptr)stdcall_raw(GetLastError, 0, &a);
- ·exitsyscall();
+ runtime·stdcall_raw(runtime·SetLastError, 1, &a);
+ p->r = (uintptr)runtime·stdcall_raw((void*)p->fn, p->n, p->args);
+ p->err = (uintptr)runtime·stdcall_raw(runtime·GetLastError, 0, &a);
+ runtime·exitsyscall();
}
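// A minimal sketch of driving runtime·syscall from a .goc wrapper, mirroring
// loadlibraryex and getprocaddress above; callgetstdhandle and the choice of
// GetStdHandle are hypothetical.  p.err carries GetLastError from the call.
static uintptr
callgetstdhandle(int32 which)
{
	StdcallParams p;

	p.fn = (void*)runtime·GetStdHandle;
	p.args[0] = which;
	p.n = 1;
	runtime·syscall(&p);	// wraps the stdcall in entersyscall/exitsyscall
	return p.r;
}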