short type;
short version;
uchar dupok;
- uchar cfunc;
uchar external;
uchar nosplit;
uchar reachable;
errorexit();
}
}
-
-void
-checkgo(void)
-{
- LSym *s;
- Reloc *r;
- int i;
- int changed;
-
- if(!debug['C'])
- return;
-
- // TODO(rsc,khr): Eventually we want to get to no Go-called C functions at all,
- // which would simplify this logic quite a bit.
-
- // Mark every Go-called C function with cfunc=2, recursively.
- do {
- changed = 0;
- for(s = ctxt->textp; s != nil; s = s->next) {
- if(s->cfunc == 0 || (s->cfunc == 2 && s->nosplit)) {
- for(i=0; i<s->nr; i++) {
- r = &s->r[i];
- if(r->sym == nil)
- continue;
- if((r->type == R_CALL || r->type == R_CALLARM) && r->sym->type == STEXT) {
- if(r->sym->cfunc == 1) {
- changed = 1;
- r->sym->cfunc = 2;
- }
- }
- }
- }
- }
- }while(changed);
-
- // Complain about Go-called C functions that can split the stack
- // (that can be preempted for garbage collection or trigger a stack copy).
- for(s = ctxt->textp; s != nil; s = s->next) {
- if(s->cfunc == 0 || (s->cfunc == 2 && s->nosplit)) {
- for(i=0; i<s->nr; i++) {
- r = &s->r[i];
- if(r->sym == nil)
- continue;
- if((r->type == R_CALL || r->type == R_CALLARM) && r->sym->type == STEXT) {
- if(s->cfunc == 0 && r->sym->cfunc == 2 && !r->sym->nosplit)
- print("Go %s calls C %s\n", s->name, r->sym->name);
- else if(s->cfunc == 2 && s->nosplit && !r->sym->nosplit)
- print("Go calls C %s calls %s\n", s->name, r->sym->name);
- }
- }
- }
- }
-}
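
The deleted checkgo pass was a fixed-point computation over the linker's call graph: starting from Go functions, it marked every directly reachable C function (cfunc=1) as Go-called (cfunc=2), re-walking the text symbols until no mark changed, then reported any Go-called C function that could still split the stack. A minimal Go sketch of the same propagation, with Sym and Reloc as simplified stand-ins for liblink's LSym and Reloc types:

	// Sketch of checkgo's fixed-point marking; types are illustrative,
	// not liblink's real ones.
	type Reloc struct{ Sym *Sym }

	type Sym struct {
		Name    string
		CFunc   int // 0 = Go, 1 = C, 2 = C reachable from Go
		NoSplit bool
		Relocs  []Reloc
		Next    *Sym
	}

	func markGoCalledC(textp *Sym) {
		for changed := true; changed; {
			changed = false
			for s := textp; s != nil; s = s.Next {
				// Go functions, and already-marked nosplit C functions,
				// propagate the mark to their C callees.
				if s.CFunc == 0 || (s.CFunc == 2 && s.NoSplit) {
					for _, r := range s.Relocs {
						if r.Sym != nil && r.Sym.CFunc == 1 {
							r.Sym.CFunc = 2
							changed = true
						}
					}
				}
			}
		}
	}

The second loop in the deleted code then flagged any marked C function that was not nosplit, since such a function could be preempted for garbage collection or trigger a stack copy while C frames were on the stack. With the cfunc distinction gone, the pass has nothing left to check.
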
uint32 be32(uchar *b);
uint64 be64(uchar *b);
void callgraph(void);
-void checkgo(void);
void cflush(void);
void codeblk(int64 addr, int64 size);
vlong cpos(void);
mark(linklookup(ctxt, "runtime.read_tls_fallback", 0));
}
- checkgo();
deadcode();
callgraph();
paramspace = "SP"; /* (FP) now (SP) on output */
p->as = AMOVW;
p->from.type = D_OREG;
p->from.reg = REGG;
- p->from.offset = 4*ctxt->arch->ptrsize; // G.panic
+ p->from.offset = 3*ctxt->arch->ptrsize; // G.panic
p->to.type = D_REG;
p->to.reg = 1;
p->as = AMOVW;
p->from.type = D_OREG;
p->from.reg = REGG;
- p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
- if(ctxt->cursym->cfunc)
- p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
+ p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
p->to.type = D_REG;
p->to.reg = 1;
p->as = ABL;
p->scond = C_SCOND_LS;
p->to.type = D_BRANCH;
- if(ctxt->cursym->cfunc)
- p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
- else
- p->to.sym = ctxt->symmorestack[noctxt];
+ p->to.sym = ctxt->symmorestack[noctxt];
// BLS start
p = appendp(ctxt, p);
p = appendp(ctxt, p);
p->as = AMOVQ;
p->from.type = D_INDIR+D_CX;
- p->from.offset = 4*ctxt->arch->ptrsize; // G.panic
+ p->from.offset = 3*ctxt->arch->ptrsize; // G.panic
p->to.type = D_BX;
if(ctxt->headtype == Hnacl) {
p->as = AMOVL;
p->as = cmp;
p->from.type = D_SP;
indir_cx(ctxt, &p->to);
- p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
- if(ctxt->cursym->cfunc)
- p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
+ p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
} else if(framesize <= StackBig) {
// large stack: SP-framesize <= stackguard-StackSmall
// LEAQ -xxx(SP), AX
p->as = cmp;
p->from.type = D_AX;
indir_cx(ctxt, &p->to);
- p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
- if(ctxt->cursym->cfunc)
- p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
+ p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
} else {
// For such a large stack we need to protect against wraparound.
// If SP is close to zero:
p = appendp(ctxt, p);
p->as = mov;
indir_cx(ctxt, &p->from);
- p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
- if(ctxt->cursym->cfunc)
- p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
+ p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
p->to.type = D_SI;
p = appendp(ctxt, p);
p = appendp(ctxt, p);
p->as = ACALL;
p->to.type = D_BRANCH;
- if(ctxt->cursym->cfunc)
- p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
- else
- p->to.sym = ctxt->symmorestack[noctxt];
+ p->to.sym = ctxt->symmorestack[noctxt];
p = appendp(ctxt, p);
p->as = AJMP;
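
All four prologue generators (ARM above, amd64 here, 386 and ppc64 below) now load the single G.stackguard field and call the one morestack variant, and they share the three-regime check described by the comments in this hunk. Rendered as Go-style pseudocode — the real check is emitted instructions, and the constants here are assumed stand-ins for the values in runtime/stack:

	// needMorestack mirrors the emitted stack check; true means the
	// prologue branches to morestack. Constant values are assumed.
	const (
		_StackSmall  = 128
		_StackBig    = 4096
		_StackGuard  = 640
		stackPreempt = ^uintptr(1313) // 0x...fade, the runtime's sentinel
	)

	func needMorestack(sp, stackguard uintptr, framesize int32) bool {
		switch {
		case framesize <= _StackSmall:
			// small stack: SP <= stackguard
			return sp <= stackguard
		case framesize <= _StackBig:
			// large stack: SP-(framesize-StackSmall) <= stackguard
			return sp-uintptr(framesize-_StackSmall) <= stackguard
		default:
			// huge stack: check the preempt sentinel explicitly, then use
			// SP-stackguard+StackGuard <= framesize+(StackGuard-StackSmall),
			// which stays correct under unsigned wraparound even when SP
			// is close to zero.
			if stackguard == stackPreempt {
				return true
			}
			return sp-stackguard+_StackGuard <= uintptr(framesize)+(_StackGuard-_StackSmall)
		}
	}
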
p = appendp(ctxt, p);
p->as = AMOVL;
p->from.type = D_INDIR+D_CX;
- p->from.offset = 4*ctxt->arch->ptrsize; // G.panic
+ p->from.offset = 3*ctxt->arch->ptrsize; // G.panic
p->to.type = D_BX;
p = appendp(ctxt, p);
p->as = ACMPL;
p->from.type = D_SP;
p->to.type = D_INDIR+D_CX;
- p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
- if(ctxt->cursym->cfunc)
- p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
+ p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
} else if(framesize <= StackBig) {
// large stack: SP-framesize <= stackguard-StackSmall
// LEAL -(framesize-StackSmall)(SP), AX
p->as = ACMPL;
p->from.type = D_AX;
p->to.type = D_INDIR+D_CX;
- p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
- if(ctxt->cursym->cfunc)
- p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
+ p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
} else {
// For such a large stack we need to protect against wraparound
// if SP is close to zero.
p->as = AMOVL;
p->from.type = D_INDIR+D_CX;
p->from.offset = 0;
- p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
- if(ctxt->cursym->cfunc)
- p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
+ p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
p->to.type = D_SI;
p = appendp(ctxt, p);
p = appendp(ctxt, p);
p->as = ACALL;
p->to.type = D_BRANCH;
- if(ctxt->cursym->cfunc)
- p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
- else
- p->to.sym = ctxt->symmorestack[noctxt];
+ p->to.sym = ctxt->symmorestack[noctxt];
p = appendp(ctxt, p);
p->as = AJMP;
q->as = AMOVD;
q->from.type = D_OREG;
q->from.reg = REGG;
- q->from.offset = 4*ctxt->arch->ptrsize; // G.panic
+ q->from.offset = 3*ctxt->arch->ptrsize; // G.panic
q->to.type = D_REG;
q->to.reg = 3;
p->as = AMOVD;
p->from.type = D_OREG;
p->from.reg = REGG;
- p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
- if(ctxt->cursym->cfunc)
- p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
+ p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
p->to.type = D_REG;
p->to.reg = 3;
p = appendp(ctxt, p);
p->as = ABL;
p->to.type = D_BRANCH;
- if(ctxt->cursym->cfunc)
- p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
- else
- p->to.sym = ctxt->symmorestack[noctxt];
+ p->to.sym = ctxt->symmorestack[noctxt];
// BR start
p = appendp(ctxt, p);
Bprint(ctxt->bso, "t=%d ", s->type);
if(s->dupok)
Bprint(ctxt->bso, "dupok ");
- if(s->cfunc)
- Bprint(ctxt->bso, "cfunc ");
if(s->nosplit)
Bprint(ctxt->bso, "nosplit ");
Bprint(ctxt->bso, "size=%lld value=%lld", (vlong)s->size, (vlong)s->value);
wrint(b, s->args);
wrint(b, s->locals);
wrint(b, s->nosplit);
- wrint(b, s->leaf | s->cfunc<<1);
+ wrint(b, s->leaf);
n = 0;
for(a = s->autom; a != nil; a = a->link)
n++;
s->nosplit = rdint(f);
v = rdint(f);
s->leaf = v&1;
- s->cfunc = v&2;
n = rdint(f);
for(i=0; i<n; i++) {
a = emallocz(sizeof *a);
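
On the object-file side the change is symmetric: the writer used to pack two flags into one integer (leaf in bit 0, cfunc in bit 1) and now writes just the leaf flag, while the reader drops the matching v&2 extraction. A sketch of the before/after encoding, with hypothetical helper names:

	// Hypothetical helpers showing the flag-encoding change.
	func packFlagsOld(leaf, cfunc bool) (v int) {
		if leaf {
			v |= 1 // s->leaf
		}
		if cfunc {
			v |= 2 // s->cfunc<<1
		}
		return v
	}

	func packFlagsNew(leaf bool) int {
		if leaf {
			return 1 // only the leaf bit remains
		}
		return 0
	}

Because writer and reader change together, the object-file format stays self-consistent; a rebuild picks up the new encoding on both sides.
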
Bprint(ctxt->bso, "t=%d ", s->type);
if(s->dupok)
Bprint(ctxt->bso, "dupok ");
- if(s->cfunc)
- Bprint(ctxt->bso, "cfunc ");
if(s->nosplit)
Bprint(ctxt->bso, "nosplit ");
Bprint(ctxt->bso, "size=%lld value=%lld", (vlong)s->size, (vlong)s->value);
// _cgo_init may update stackguard.
MOVL $runtime·g0(SB), BP
LEAL (-64*1024+104)(SP), BX
- MOVL BX, g_stackguard0(BP)
- MOVL BX, g_stackguard1(BP)
+ MOVL BX, g_stackguard(BP)
MOVL BX, (g_stack+stack_lo)(BP)
MOVL SP, (g_stack+stack_hi)(BP)
MOVL $runtime·g0(SB), CX
MOVL (g_stack+stack_lo)(CX), AX
ADDL $const__StackGuard, AX
- MOVL AX, g_stackguard0(CX)
- MOVL AX, g_stackguard1(CX)
+ MOVL AX, g_stackguard(CX)
// skip runtime·ldt0setup(SB) and tls test after _cgo_init for non-windows
CMPL runtime·iswindows(SB), $0
// _cgo_init may update stackguard.
MOVQ $runtime·g0(SB), DI
LEAQ (-64*1024+104)(SP), BX
- MOVQ BX, g_stackguard0(DI)
- MOVQ BX, g_stackguard1(DI)
+ MOVQ BX, g_stackguard(DI)
MOVQ BX, (g_stack+stack_lo)(DI)
MOVQ SP, (g_stack+stack_hi)(DI)
MOVQ $runtime·g0(SB), CX
MOVQ (g_stack+stack_lo)(CX), AX
ADDQ $const__StackGuard, AX
- MOVQ AX, g_stackguard0(CX)
- MOVQ AX, g_stackguard1(CX)
+ MOVQ AX, g_stackguard(CX)
CMPL runtime·iswindows(SB), $0
JEQ ok
// create istack out of the given (operating system) stack.
MOVL $runtime·g0(SB), DI
LEAL (-64*1024+104)(SP), BX
- MOVL BX, g_stackguard0(DI)
- MOVL BX, g_stackguard1(DI)
+ MOVL BX, g_stackguard(DI)
MOVL BX, (g_stack+stack_lo)(DI)
MOVL SP, (g_stack+stack_hi)(DI)
// create istack out of the OS stack
MOVW $(-8192+104)(R13), R0
- MOVW R0, g_stackguard0(g)
- MOVW R0, g_stackguard1(g)
+ MOVW R0, g_stackguard(g)
MOVW R0, (g_stack+stack_lo)(g)
MOVW R13, (g_stack+stack_hi)(g)
// update stackguard after _cgo_init
MOVW (g_stack+stack_lo)(g), R0
ADD $const__StackGuard, R0
- MOVW R0, g_stackguard0(g)
- MOVW R0, g_stackguard1(g)
+ MOVW R0, g_stackguard(g)
BL runtime·checkgoarm(SB)
BL runtime·check(SB)
MOVD $runtime·g0(SB), g
MOVD $(-64*1024), R31
ADD R31, R1, R3
- MOVD R3, g_stackguard0(g)
- MOVD R3, g_stackguard1(g)
+ MOVD R3, g_stackguard(g)
MOVD R3, (g_stack+stack_lo)(g)
MOVD R1, (g_stack+stack_hi)(g)
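
Every bootstrap hunk above computes the guard the same way: carve a fixed-size system stack below the current SP, record stack.lo and stack.hi, and derive the guard from stack.lo — provisionally equal to it at first, then re-derived as stack.lo plus _StackGuard once _cgo_init has had a chance to move the stack. A worked example of the arithmetic, assuming the 64 KB stack used in several hunks and an illustrative _StackGuard value:

	// Worked example of the boot-time guard arithmetic; the 64 KB size
	// matches the hunks above, the _StackGuard value is assumed.
	const _StackGuard = 640 // illustrative; see runtime/stack for the real value

	func bootStackBounds(sp uintptr) (lo, hi, guard uintptr) {
		hi = sp
		lo = sp - 64*1024 + 104 // 64 KB stack with 104 bytes of slack
		guard = lo              // provisional guard, before _cgo_init runs
		return lo, hi, guard
	}

	func afterCgoInit(lo uintptr) uintptr {
		return lo + _StackGuard // ADD $const__StackGuard, guard
	}
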
throw("runtime·unlock: lock count")
}
if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
- gp.stackguard0 = stackPreempt
+ gp.stackguard = stackPreempt
}
}
throw("runtime·unlock: lock count")
}
if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
- gp.stackguard0 = stackPreempt
+ gp.stackguard = stackPreempt
}
}
}
mp.mallocing = 1
if mp.curg != nil {
- mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad
+ mp.curg.stackguard = ^uintptr(0xfff) | 0xbad
}
}
}
mp.mallocing = 0
if mp.curg != nil {
- mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
+ mp.curg.stackguard = mp.curg.stack.lo + _StackGuard
}
// Note: one releasem for the acquirem just above.
// The other for the acquirem at start of malloc.
}
mp.mallocing = 0
if mp.curg != nil {
- mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
+ mp.curg.stackguard = mp.curg.stack.lo + _StackGuard
}
// Note: one releasem for the acquirem just above.
// The other for the acquirem at start of malloc.
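
The malloc path shows the sentinel discipline the single stackguard field now carries alone: on entry the guard is poisoned with an unmistakable value so that any attempt to grow the stack while mallocing is set trips the very next prologue check, and on exit the real guard is restored. A sketch of the pattern, using the same poison value as the hunk (gSketch stands in for the runtime's g, which user code cannot construct):

	type gSketch struct {
		stackguard uintptr
		stackLo    uintptr
	}

	const _StackGuard = 640 // illustrative value

	func withMallocGuard(curg *gSketch, f func()) {
		// Poison: every stack-growth check now trips, and the 0x...bad
		// pattern is easy to spot in a crash dump.
		curg.stackguard = ^uintptr(0xfff) | 0xbad
		f()
		// Restore the real guard on the way out.
		curg.stackguard = curg.stackLo + _StackGuard
	}
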
sched.mcount++
checkmcount()
mpreinit(mp)
- if mp.gsignal != nil {
- mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
- }
// Add to allm so garbage collector doesn't free g->m
// when it is just in a register or thread-local storage.
}
_g_.m.locks--
if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
+ _g_.stackguard = stackPreempt
}
}
if !gp.gcworkdone {
gp.preemptscan = true
gp.preempt = true
- gp.stackguard0 = stackPreempt
+ gp.stackguard = stackPreempt
}
// Unclaim.
gp.gcworkdone = true // scan is a noop
break
}
- if status == _Grunning && gp.stackguard0 == uintptr(stackPreempt) && notetsleep(&sched.stopnote, 100*1000) { // nanosecond arg
+ if status == _Grunning && gp.stackguard == uintptr(stackPreempt) && notetsleep(&sched.stopnote, 100*1000) { // nanosecond arg
noteclear(&sched.stopnote)
} else {
stopscanstart(gp)
}
_g_.m.locks--
if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
+ _g_.stackguard = stackPreempt
}
}
}
- // Initialize stack guards so that we can start calling
- // both Go and C functions with stack growth prologues.
+ // Initialize the stack guard so that we can start calling
+ // functions with stack growth prologues.
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
- _g_.stackguard1 = _g_.stackguard0
+ _g_.stackguard = _g_.stack.lo + _StackGuard
mstart1()
}
}
_g_.m.locks--
if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
+ _g_.stackguard = stackPreempt
}
return mp
_g_ := getg()
_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ _g_.stackguard = _g_.stack.lo + _StackGuard
// Initialize this thread to use the m.
asminit()
casgstatus(gp, _Grunnable, _Grunning)
gp.waitsince = 0
gp.preempt = false
- gp.stackguard0 = gp.stack.lo + _StackGuard
+ gp.stackguard = gp.stack.lo + _StackGuard
_g_.m.p.schedtick++
_g_.m.curg = gp
gp.m = _g_.m
// (See details in comment above.)
// Catch calls that might, by replacing the stack guard with something that
// will trip any stack check and leaving a flag to tell newstack to die.
- _g_.stackguard0 = stackPreempt
+ _g_.stackguard = stackPreempt
_g_.throwsplit = true
// Leave SP around for GC and traceback.
// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
// We set stackguard to stackPreempt so that the first split stack check calls morestack.
// Morestack detects this case and throws.
- _g_.stackguard0 = stackPreempt
+ _g_.stackguard = stackPreempt
_g_.m.locks--
}
_g_.m.locks++ // see comment in entersyscall
_g_.throwsplit = true
- _g_.stackguard0 = stackPreempt // see comment in entersyscall
+ _g_.stackguard = stackPreempt // see comment in entersyscall
// Leave SP around for GC and traceback.
pc := getcallerpc(unsafe.Pointer(&dummy))
_g_.m.locks--
if _g_.preempt {
// restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
+ _g_.stackguard = stackPreempt
} else {
// otherwise restore the real stack guard; we spoiled it in entersyscall/entersyscallblock
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ _g_.stackguard = _g_.stack.lo + _StackGuard
}
_g_.throwsplit = false
return
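
The syscall entry/exit pair uses the same single field for a different sentinel: entersyscall stores stackPreempt so that any split attempt while in Gsyscall status reaches morestack, which sees throwsplit and throws; exitsyscall restores either the sentinel (if a preemption request arrived in the meantime) or the real guard. Compressed into a sketch, with illustrative names and constant values:

	type gSyscall struct {
		stackguard uintptr
		stackLo    uintptr
		throwsplit bool
		preempt    bool
	}

	const (
		stackPreempt = ^uintptr(1313) // 0x...fade
		_StackGuard  = 640            // illustrative
	)

	func enterSyscall(g *gSyscall) {
		g.throwsplit = true
		g.stackguard = stackPreempt // a split here must throw, not grow
	}

	func exitSyscall(g *gSyscall) {
		if g.preempt {
			g.stackguard = stackPreempt // keep the pending request armed
		} else {
			g.stackguard = g.stackLo + _StackGuard // real guard back
		}
		g.throwsplit = false
	}
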
// Code between fork and exec must not allocate memory nor even try to grow stack.
// Here we spoil g->stackguard to reliably detect any attempts to grow stack.
// runtime_AfterFork will undo this in parent process, but not in child.
- gp.stackguard0 = stackFork
+ gp.stackguard = stackFork
}
// Called from syscall package before fork.
gp := getg().m.curg
// See the comment in beforefork.
- gp.stackguard0 = gp.stack.lo + _StackGuard
+ gp.stackguard = gp.stack.lo + _StackGuard
hz := sched.profilehz
if hz != 0 {
systemstack(func() {
newg.stack = stackalloc(uint32(stacksize))
})
- newg.stackguard0 = newg.stack.lo + _StackGuard
- newg.stackguard1 = ^uintptr(0)
+ newg.stackguard = newg.stack.lo + _StackGuard
}
return newg
}
}
_g_.m.locks--
if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
+ _g_.stackguard = stackPreempt
}
return newg
}
stackfree(gp.stack)
gp.stack.lo = 0
gp.stack.hi = 0
- gp.stackguard0 = 0
+ gp.stackguard = 0
}
gp.schedlink = _p_.gfree
systemstack(func() {
gp.stack = stackalloc(_FixedStack)
})
- gp.stackguard0 = gp.stack.lo + _StackGuard
+ gp.stackguard = gp.stack.lo + _StackGuard
} else {
if raceenabled {
racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
gp.preempt = true
// Every call in a goroutine checks for stack overflow by
- // comparing the current stack pointer to gp->stackguard0.
- // Setting gp->stackguard0 to StackPreempt folds
+ // comparing the current stack pointer to gp->stackguard.
+ // Setting gp->stackguard to StackPreempt folds
// preemption into the normal stack overflow check.
- gp.stackguard0 = stackPreempt
+ gp.stackguard = stackPreempt
return true
}
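
This comment is the core of the whole scheme: because the compiler plants the same guard comparison at the top of every splittable function, a single store of stackPreempt turns every function entry into a preemption point. Schematically:

	// How preemption folds into the overflow check. sp is the stack
	// pointer at function entry; true means control enters morestack,
	// where newstack notices gp.preempt and deschedules the goroutine.
	func prologueTrips(sp, stackguard uintptr) bool {
		return sp <= stackguard
	}

	// stackPreempt is greater than any real sp, so after
	//     gp.stackguard = stackPreempt
	// prologueTrips is true at the next function call — no polling
	// is needed anywhere else.

The cost of this design is that a goroutine that never calls a splittable function (a tight loop) cannot be preempted this way, which is unchanged by this patch.
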
mp.locks--
if mp.locks == 0 && _g_.preempt {
// restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
+ _g_.stackguard = stackPreempt
}
}
type g struct {
// Stack parameters.
// stack describes the actual stack memory: [stack.lo, stack.hi).
- // stackguard0 is the stack pointer compared in the Go stack growth prologue.
+ // stackguard is the stack pointer compared in the Go stack growth prologue.
// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
- // stackguard1 is the stack pointer compared in the C stack growth prologue.
- // It is stack.lo+StackGuard on g0 and gsignal stacks.
- // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
- stack stack // offset known to runtime/cgo
- stackguard0 uintptr // offset known to liblink
- stackguard1 uintptr // offset known to liblink
+ stack stack // offset known to runtime/cgo
+ stackguard uintptr // offset known to liblink
_panic *_panic // innermost panic - offset known to liblink
_defer *_defer // innermost defer
waitreason string // if status==gwaiting
schedlink *g
issystem bool // do not output in stack dump, ignore in deadlock detector
- preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
+ preempt bool // preemption signal, duplicates stackguard = stackpreempt
paniconfault bool // panic (instead of crash) on unexpected fault address
preemptscan bool // preempted g does scan for gc
gcworkdone bool // debug: cleared at beginning of gc work phase cycle, set by gcphasework, tested at end of cycle
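
After the change the struct carries one guard with several meanings: stack.lo+_StackGuard in the steady state, or a sentinel (stackPreempt, stackFork, the malloc poison) whenever the runtime wants the next prologue check to trip. As a sketch of the invariant, with assumed constant values:

	type stackBounds struct{ lo, hi uintptr }

	const (
		_StackGuard  = 640            // assumed
		stackPreempt = ^uintptr(1313) // 0x...fade
		stackFork    = ^uintptr(1233) // 0x...fb2e
	)

	func guardStateOK(stk stackBounds, stackguard uintptr) bool {
		switch stackguard {
		case stk.lo + _StackGuard:
			return true // normal: checks pass for in-range SPs
		case stackPreempt, stackFork, ^uintptr(0xfff) | 0xbad:
			return true // sentinel: the next check must trip
		default:
			return false
		}
	}
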
poisonStack = uintptrMask & 0x6868686868686868
// Goroutine preemption request.
- // Stored into g->stackguard0 to cause split stack check failure.
+ // Stored into g->stackguard to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
stackPreempt = uintptrMask & -1314
// Thread is forking.
- // Stored into g->stackguard0 to cause split stack check failure.
+ // Stored into g->stackguard to cause split stack check failure.
// Must be greater than any real sp.
stackFork = uintptrMask & -1234
)
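
The sentinel values themselves are just small negative numbers masked to pointer width, which guarantees all high bits are set and hence that they exceed any real stack pointer. A quick check of the arithmetic the comment cites, assuming a 32-bit uintptrMask:

	package main

	import "fmt"

	func main() {
		const uintptrMask = 1<<32 - 1 // 32-bit; the runtime sizes this by ptrSize
		const stackPreempt = uintptrMask & -1314
		const stackFork = uintptrMask & -1234
		fmt.Printf("%#x %#x\n", int64(stackPreempt), int64(stackFork))
		// prints: 0xfffffade 0xfffffb2e
	}
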
// Swap out old stack for new one
gp.stack = new
- gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
+ gp.stackguard = new.lo + _StackGuard // NOTE: might clobber a preempt request
gp.sched.sp = new.hi - used
// free old stack
func newstack() {
thisg := getg()
// TODO: double check all gp. shouldn't be getg().
- if thisg.m.morebuf.g.stackguard0 == stackFork {
+ if thisg.m.morebuf.g.stackguard == stackFork {
throw("stack growth after fork")
}
if thisg.m.morebuf.g != thisg.m.curg {
writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
}
- if gp.stackguard0 == stackPreempt {
+ if gp.stackguard == stackPreempt {
if gp == thisg.m.g0 {
throw("runtime: preempt g0")
}
gcphasework(gp)
casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
casgstatus(gp, _Gwaiting, _Grunning)
- gp.stackguard0 = gp.stack.lo + _StackGuard
+ gp.stackguard = gp.stack.lo + _StackGuard
gp.preempt = false
gp.preemptscan = false // Tells the GC that preemption was successful.
gogo(&gp.sched) // never return
if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.gcing != 0 || thisg.m.p.status != _Prunning {
// Let the goroutine keep running for now.
// gp->preempt is set, so it will be preempted next time.
- gp.stackguard0 = gp.stack.lo + _StackGuard
+ gp.stackguard = gp.stack.lo + _StackGuard
casgstatus(gp, _Gwaiting, _Grunning)
gogo(&gp.sched) // never return
}
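
newstack is where a tripped check is finally disambiguated, and the hunks above show each branch: a stackFork guard is fatal; a stackPreempt guard at an unsafe point (locks held, mallocing, GC in progress) is deferred by re-arming the real guard while gp.preempt stays set; a safe preemption resets the guard and parks or scans the goroutine; anything else is a genuine overflow. As a decision sketch — all names are stand-ins, and the lock/malloc/GC flags actually live on the m, not the g:

	type gNewstack struct {
		stackguard, stackLo     uintptr
		locks, mallocing, gcing int
	}

	const (
		_StackGuard  = 640            // assumed
		stackPreempt = ^uintptr(1313) // 0x...fade
		stackFork    = ^uintptr(1233) // 0x...fb2e
	)

	func onCheckTrip(gp *gNewstack) string {
		switch {
		case gp.stackguard == stackFork:
			return "throw: stack growth after fork"
		case gp.stackguard != stackPreempt:
			return "real overflow: copy to a larger stack"
		case gp.locks != 0 || gp.mallocing != 0 || gp.gcing != 0:
			// Unsafe point: re-arm the guard; gp.preempt stays set,
			// so a later check trips again.
			gp.stackguard = gp.stackLo + _StackGuard
			return "preemption deferred"
		default:
			gp.stackguard = gp.stackLo + _StackGuard
			return "preempt: park (or GC scan) the goroutine"
		}
	}
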
s = t
}
}
-
-//go:nosplit
-func morestackc() {
- systemstack(func() {
- throw("attempt to execute C code on Go stack")
- })
-}
)
// Goroutine preemption request.
-// Stored into g->stackguard0 to cause split stack check failure.
+// Stored into g->stackguard to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
const (
MOVL AX, (g_stack+stack_hi)(DX)
SUBL $(64*1024), AX // stack size
MOVL AX, (g_stack+stack_lo)(DX)
- MOVL AX, g_stackguard0(DX)
- MOVL AX, g_stackguard1(DX)
+ MOVL AX, g_stackguard(DX)
// Initialize procid from TOS struct.
MOVL _tos(SB), AX
MOVQ AX, (g_stack+stack_hi)(DX)
SUBQ $(64*1024), AX // stack size
MOVQ AX, (g_stack+stack_lo)(DX)
- MOVQ AX, g_stackguard0(DX)
- MOVQ AX, g_stackguard1(DX)
+ MOVQ AX, g_stackguard(DX)
// Initialize procid from TOS struct.
MOVQ _tos(SB), AX
SUBQ $(0x100000), AX // stack size
MOVQ AX, (g_stack+stack_lo)(DX)
ADDQ $const__StackGuard, AX
- MOVQ AX, g_stackguard0(DX)
- MOVQ AX, g_stackguard1(DX)
+ MOVQ AX, g_stackguard(DX)
// Someday the convention will be D is always cleared.
CLD
LEAL -8192(SP), CX
MOVL CX, (g_stack+stack_lo)(SP)
ADDL $const__StackGuard, CX
- MOVL CX, g_stackguard0(SP)
- MOVL CX, g_stackguard1(SP)
+ MOVL CX, g_stackguard(SP)
MOVL DX, (g_stack+stack_hi)(SP)
PUSHL 16(BP) // arg for handler
SUBL $(64*1024), AX // stack size
MOVL AX, (g_stack+stack_lo)(DX)
ADDL $const__StackGuard, AX
- MOVL AX, g_stackguard0(DX)
- MOVL AX, g_stackguard1(DX)
+ MOVL AX, g_stackguard(DX)
// Set up tls.
LEAL m_tls(CX), SI
LEAQ -8192(SP), CX
MOVQ CX, (g_stack+stack_lo)(SP)
ADDQ $const__StackGuard, CX
- MOVQ CX, g_stackguard0(SP)
- MOVQ CX, g_stackguard1(SP)
+ MOVQ CX, g_stackguard(SP)
MOVQ DX, (g_stack+stack_hi)(SP)
PUSHQ 32(BP) // arg for handler
SUBQ $(64*1024), AX // stack size
MOVQ AX, (g_stack+stack_lo)(DX)
ADDQ $const__StackGuard, AX
- MOVQ AX, g_stackguard0(DX)
- MOVQ AX, g_stackguard1(DX)
+ MOVQ AX, g_stackguard(DX)
// Set up tls.
LEAQ m_tls(CX), SI