This reverts CL 486381.

Reason for revert: it was submitted out of order and breaks bootstrap.
Change-Id: Ia472111cb966e884a48f8ee3893b3bf4b4f4f875
Reviewed-on: https://go-review.googlesource.com/c/go/+/486915
Reviewed-by: David Chase <drchase@google.com>
TryBot-Bypass: Austin Clements <austin@google.com>
// update stackguard after _cgo_init
MOVL $runtime·g0(SB), CX
MOVL (g_stack+stack_lo)(CX), AX
- ADDL $const_stackGuard, AX
+ ADDL $const__StackGuard, AX
MOVL AX, g_stackguard0(CX)
MOVL AX, g_stackguard1(CX)
// update stackguard after _cgo_init
MOVQ $runtime·g0(SB), CX
MOVQ (g_stack+stack_lo)(CX), AX
- ADDQ $const_stackGuard, AX
+ ADDQ $const__StackGuard, AX
MOVQ AX, g_stackguard0(CX)
MOVQ AX, g_stackguard1(CX)
// update stackguard after _cgo_init
MOVW (g_stack+stack_lo)(g), R0
- ADD $const_stackGuard, R0
+ ADD $const__StackGuard, R0
MOVW R0, g_stackguard0(g)
MOVW R0, g_stackguard1(g)
BL runtime·save_g(SB)
// update stackguard after _cgo_init
MOVD (g_stack+stack_lo)(g), R0
- ADD $const_stackGuard, R0
+ ADD $const__StackGuard, R0
MOVD R0, g_stackguard0(g)
MOVD R0, g_stackguard1(g)
nocgo:
// update stackguard after _cgo_init
MOVV (g_stack+stack_lo)(g), R19
- ADDV $const_stackGuard, R19
+ ADDV $const__StackGuard, R19
MOVV R19, g_stackguard0(g)
MOVV R19, g_stackguard1(g)
nocgo:
// update stackguard after _cgo_init
MOVV (g_stack+stack_lo)(g), R1
- ADDV $const_stackGuard, R1
+ ADDV $const__StackGuard, R1
MOVV R1, g_stackguard0(g)
MOVV R1, g_stackguard1(g)
nocgo:
// update stackguard after _cgo_init
MOVW (g_stack+stack_lo)(g), R1
- ADD $const_stackGuard, R1
+ ADD $const__StackGuard, R1
MOVW R1, g_stackguard0(g)
MOVW R1, g_stackguard1(g)
nocgo:
// update stackguard after _cgo_init
MOVD (g_stack+stack_lo)(g), R3
- ADD $const_stackGuard, R3
+ ADD $const__StackGuard, R3
MOVD R3, g_stackguard0(g)
MOVD R3, g_stackguard1(g)
nocgo:
// update stackguard after _cgo_init
MOV (g_stack+stack_lo)(g), T0
- ADD $const_stackGuard, T0
+ ADD $const__StackGuard, T0
MOV T0, g_stackguard0(g)
MOV T0, g_stackguard1(g)
nocgo:
// update stackguard after _cgo_init
MOVD (g_stack+stack_lo)(g), R2
- ADD $const_stackGuard, R2
+ ADD $const__StackGuard, R2
MOVD R2, g_stackguard0(g)
MOVD R2, g_stackguard1(g)
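Each of the ten per-architecture hunks above reverts the same one-token rename in the guard update that runs after _cgo_init may have moved g0's stack bounds onto the OS thread's real stack. In Go terms the assembly computes the following (a minimal sketch; reguardG0 is a hypothetical name, the real code is the assembly itself):

    // reguardG0 recomputes both guards from the refreshed stack.lo.
    func reguardG0(gp *g) {
        guard := gp.stack.lo + _StackGuard
        gp.stackguard0 = guard // checked by ordinary Go function prologues
        gp.stackguard1 = guard // checked by //go:systemstack (and C) prologues
    }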
throw("bad g0 stack")
}
g0.stack.lo = base
- g0.stackguard0 = g0.stack.lo + stackGuard
+ g0.stackguard0 = g0.stack.lo + _StackGuard
g0.stackguard1 = g0.stackguard0
// Sanity check the SP.
stackcheck()
// _Gscan bit and thus own the stack.
gp.preemptStop = false
gp.preempt = false
- gp.stackguard0 = gp.stack.lo + stackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
// The goroutine was already at a safe-point
// and we've now locked that in.
mpreinit(mp)
if mp.gsignal != nil {
- mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
+ mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
}
// Add to allm so garbage collector doesn't free g->m
}
// Initialize stack guard so that we can start calling regular
// Go code.
- gp.stackguard0 = gp.stack.lo + stackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
// This is the g0, so we can also call go:systemstack
// functions, which check stackguard1.
gp.stackguard1 = gp.stackguard0
gp := getg()
gp.stack.hi = getcallersp() + 1024
gp.stack.lo = getcallersp() - 32*1024
- gp.stackguard0 = gp.stack.lo + stackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
// Initialize this thread to use the m.
asminit()
casgstatus(gp, _Grunnable, _Grunning)
gp.waitsince = 0
gp.preempt = false
- gp.stackguard0 = gp.stack.lo + stackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
if !inheritTime {
mp.p.ptr().schedtick++
}
// restore the preemption request in case we've cleared it in newstack
gp.stackguard0 = stackPreempt
} else {
- // otherwise restore the real stackGuard, we've spoiled it in entersyscall/entersyscallblock
- gp.stackguard0 = gp.stack.lo + stackGuard
+ // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
+ gp.stackguard0 = gp.stack.lo + _StackGuard
}
gp.throwsplit = false
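This branch is the heart of cooperative preemption: stackguard0 doubles as a flag word, because every function prologue already compares the stack pointer against it. A minimal sketch of the two states, using only values that appear in this diff:

    // Poisoned: stackPreempt is above any real stack address, so the
    // next prologue check fails and the goroutine enters morestack,
    // which parks it instead of growing the stack.
    gp.stackguard0 = stackPreempt

    // Normal: the genuine limit; morestack runs only when a frame
    // would actually dip into the guard area.
    gp.stackguard0 = gp.stack.lo + _StackGuard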
// This function is called before fork in syscall package.
// Code between fork and exec must not allocate memory nor even try to grow stack.
- // Here we spoil g.stackguard0 to reliably detect any attempts to grow stack.
+ // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack.
// runtime_AfterFork will undo this in parent process, but not in child.
gp.stackguard0 = stackFork
}
gp := getg().m.curg
// See the comments in beforefork.
- gp.stackguard0 = gp.stack.lo + stackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
msigrestore(gp.m.sigmask)
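Together, beforefork and afterfork bracket the fork/exec window with the same guard-poisoning trick, but with a poison that throws rather than parks. A condensed sketch of the pairing (the real bodies also manage signal masks, as the msigrestore call above shows):

    func beforefork() {
        // Any prologue-triggered stack growth between fork and exec
        // now reaches morestack with a recognizable poison and throws.
        getg().m.curg.stackguard0 = stackFork
    }

    func afterfork() {
        // Parent only: restore the real limit. The child is expected
        // to exec, so its guard stays poisoned.
        gp := getg().m.curg
        gp.stackguard0 = gp.stack.lo + _StackGuard
    }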
func malg(stacksize int32) *g {
newg := new(g)
if stacksize >= 0 {
- stacksize = round2(stackSystem + stacksize)
+ stacksize = round2(_StackSystem + stacksize)
systemstack(func() {
newg.stack = stackalloc(uint32(stacksize))
})
- newg.stackguard0 = newg.stack.lo + stackGuard
+ newg.stackguard0 = newg.stack.lo + _StackGuard
newg.stackguard1 = ^uintptr(0)
// Clear the bottom word of the stack. We record g
// there on gsignal stack during VDSO on ARM and ARM64.
pp := mp.p.ptr()
newg := gfget(pp)
if newg == nil {
- newg = malg(stackMin)
+ newg = malg(_StackMin)
casgstatus(newg, _Gidle, _Gdead)
allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
}
systemstack(func() {
gp.stack = stackalloc(startingStackSize)
})
- gp.stackguard0 = gp.stack.lo + stackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
} else {
if raceenabled {
racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
testAtomic64()
- if fixedStack != round2(fixedStack) {
+ if _FixedStack != round2(_FixedStack) {
throw("FixedStack is not power-of-2")
}
stsp := uintptr(unsafe.Pointer(st.ss_sp))
gp.m.gsignal.stack.lo = stsp
gp.m.gsignal.stack.hi = stsp + st.ss_size
- gp.m.gsignal.stackguard0 = stsp + stackGuard
- gp.m.gsignal.stackguard1 = stsp + stackGuard
+ gp.m.gsignal.stackguard0 = stsp + _StackGuard
+ gp.m.gsignal.stackguard1 = stsp + _StackGuard
}
// restoreGsignalStack restores the gsignal stack to the value it had
// g0 stack bounds so we have room to print the traceback. If
// this somehow overflows the stack, the OS will trap it.
g0.stack.lo = 0
- g0.stackguard0 = g0.stack.lo + stackGuard
+ g0.stackguard0 = g0.stack.lo + _StackGuard
g0.stackguard1 = g0.stackguard0
print("Exception ", hex(info.exceptioncode), " ", hex(info.exceptioninformation[0]), " ", hex(info.exceptioninformation[1]), " ", hex(r.ip()), "\n")
*/
const (
- // stackSystem is a number of additional bytes to add
+ // StackSystem is a number of additional bytes to add
// to each stack below the usual guard area for OS-specific
// purposes like signal handling. Used on Windows, Plan 9,
// and iOS because they do not use a separate stack.
- stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
+ _StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
// The minimum size of stack used by Go code
- stackMin = 2048
+ _StackMin = 2048
// The minimum stack size to allocate.
- // The hackery here rounds fixedStack0 up to a power of 2.
- fixedStack0 = stackMin + stackSystem
- fixedStack1 = fixedStack0 - 1
- fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
- fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
- fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
- fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
- fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
- fixedStack = fixedStack6 + 1
+ // The hackery here rounds FixedStack0 up to a power of 2.
+ _FixedStack0 = _StackMin + _StackSystem
+ _FixedStack1 = _FixedStack0 - 1
+ _FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
+ _FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
+ _FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
+ _FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
+ _FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
+ _FixedStack = _FixedStack6 + 1
// stackNosplit is the maximum number of bytes that a chain of NOSPLIT
// functions can use.
// The guard leaves enough room for a stackNosplit chain of NOSPLIT calls
// plus one stackSmall frame plus stackSystem bytes for the OS.
// This arithmetic must match that in cmd/internal/objabi/stack.go:StackLimit.
- stackGuard = stackNosplit + stackSystem + abi.StackSmall
+ _StackGuard = stackNosplit + _StackSystem + abi.StackSmall
)
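The _FixedStack1.._FixedStack6 cascade is the classic round-up-to-a-power-of-2 bit trick: subtract one, OR the highest set bit into every lower position, then add one back. A worked instance under two OS configurations implied by the _StackSystem formula (linux/amd64 contributes 0; windows/amd64 contributes 512*8 = 4096):

    // linux/amd64:   _FixedStack0 = 2048 + 0    = 2048, already 2^11
    // windows/amd64: _FixedStack0 = 2048 + 4096 = 6144
    //   _FixedStack1 = 6143 = 0b1_0111_1111_1111
    //   the ORs smear the top bit down: 0b1_1111_1111_1111 = 8191
    //   _FixedStack  = 8191 + 1 = 8192, the next power of 2

round2, called elsewhere in this diff, performs the same rounding at run time; one standalone way to write it (a sketch, not necessarily the runtime's implementation):

    func roundUpPow2(x int32) int32 { // hypothetical helper
        x--
        x |= x >> 1
        x |= x >> 2
        x |= x >> 4
        x |= x >> 8
        x |= x >> 16
        return x + 1
    }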
const (
throw("bad manualFreeList")
}
osStackAlloc(s)
- s.elemsize = fixedStack << order
+ s.elemsize = _FixedStack << order
for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
x := gclinkptr(s.base() + i)
x.ptr().next = s.manualFreeList
x := stackpoolalloc(order)
x.ptr().next = list
list = x
- size += fixedStack << order
+ size += _FixedStack << order
}
unlock(&stackpool[order].item.mu)
c.stackcache[order].list = list
y := x.ptr().next
stackpoolfree(x, order)
x = y
- size -= fixedStack << order
+ size -= _FixedStack << order
}
unlock(&stackpool[order].item.mu)
c.stackcache[order].list = x
// If we need a stack of a bigger size, we fall back on allocating
// a dedicated span.
var v unsafe.Pointer
- if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
+ if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
order := uint8(0)
n2 := n
- for n2 > fixedStack {
+ for n2 > _FixedStack {
order++
n2 >>= 1
}
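The loop above selects the stack size class: the smallest order such that _FixedStack<<order >= n. A worked example, assuming _FixedStack == 2048 and _NumStackOrders == 4 (the common configuration):

    // n = 2048  -> n2 never exceeds _FixedStack -> order 0 (2 KiB class)
    // n = 4096  -> one halving                  -> order 1 (4 KiB class)
    // n = 8192  -> two halvings                 -> order 2 (8 KiB class)
    // n = 16384 -> three halvings               -> order 3 (16 KiB class)
    // n = 32768 fails n < _FixedStack<<_NumStackOrders and falls
    // through to a dedicated span allocation instead.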
if asanenabled {
asanpoison(v, n)
}
- if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
+ if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
order := uint8(0)
n2 := n
- for n2 > fixedStack {
+ for n2 > _FixedStack {
order++
n2 >>= 1
}
// Swap out old stack for new one
gp.stack = new
- gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request
+ gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
gp.sched.sp = new.hi - used
gp.stktopsp += adjinfo.delta
if !canPreemptM(thisg.m) {
// Let the goroutine keep running for now.
// gp->preempt is set, so it will be preempted next time.
- gp.stackguard0 = gp.stack.lo + stackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
gogo(&gp.sched) // never return
}
}
// recheck the bounds on return.)
if f := findfunc(gp.sched.pc); f.valid() {
max := uintptr(funcMaxSPDelta(f))
- needed := max + stackGuard
+ needed := max + _StackGuard
used := gp.stack.hi - gp.sched.sp
for newsize-used < needed {
newsize *= 2
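The doubling loop guarantees the new stack holds the bytes already in use plus the largest frame the function can still push plus the guard. Worked numbers, assuming _StackGuard == 928 (the value the reverted formula yields on linux/amd64):

    // used = 3000, funcMaxSPDelta(f) = 2000
    // needed = 2000 + 928 = 2928
    // newsize = 4096: 4096 - 3000 = 1096  < 2928 -> double
    // newsize = 8192: 8192 - 3000 = 5192 >= 2928 -> done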
newsize := oldsize / 2
// Don't shrink the allocation below the minimum-sized stack
// allocation.
- if newsize < fixedStack {
+ if newsize < _FixedStack {
return
}
// Compute how much of the stack is currently in use and only
// It is a power of 2, and between _FixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC.
-var startingStackSize uint32 = fixedStack
+var startingStackSize uint32 = _FixedStack
func gcComputeStartingStackSize() {
if debug.adaptivestackstart == 0 {
p.scannedStacks = 0
}
if scannedStacks == 0 {
- startingStackSize = fixedStack
+ startingStackSize = _FixedStack
return
}
- avg := scannedStackSize/scannedStacks + stackGuard
- // Note: we add stackGuard to ensure that a goroutine that
+ avg := scannedStackSize/scannedStacks + _StackGuard
+ // Note: we add _StackGuard to ensure that a goroutine that
// uses the average space will not trigger a growth.
if avg > uint64(maxstacksize) {
avg = uint64(maxstacksize)
}
- if avg < fixedStack {
- avg = fixedStack
+ if avg < _FixedStack {
+ avg = _FixedStack
}
// Note: maxstacksize fits in 30 bits, so avg also does.
startingStackSize = uint32(round2(int32(avg)))
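End to end, gcComputeStartingStackSize turns last cycle's scan statistics into the next cycle's allocation size. A worked pass, assuming _FixedStack == 2048 and _StackGuard == 928:

    // scannedStackSize = 600000, scannedStacks = 200
    // avg = 600000/200 + 928 = 3928   (the guard is added so a
    //       goroutine of average size won't immediately grow)
    // clamps: 2048 <= 3928 <= maxstacksize, so avg is unchanged
    // round2(3928) = 4096 -> startingStackSize = 4096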
MOVD R3, (g_stack+stack_hi)(g)
SUB $(const_threadStackSize), R3 // stack size
MOVD R3, (g_stack+stack_lo)(g)
- ADD $const_stackGuard, R3
+ ADD $const__StackGuard, R3
MOVD R3, g_stackguard0(g)
MOVD R3, g_stackguard1(g)
MOVQ AX, (g_stack+stack_hi)(DX)
SUBQ $(0x100000), AX // stack size
MOVQ AX, (g_stack+stack_lo)(DX)
- ADDQ $const_stackGuard, AX
+ ADDQ $const__StackGuard, AX
MOVQ AX, g_stackguard0(DX)
MOVQ AX, g_stackguard1(DX)
MOVL AX, (g_stack+stack_hi)(DX)
SUBL $(64*1024), AX // initial stack size (adjusted later)
MOVL AX, (g_stack+stack_lo)(DX)
- ADDL $const_stackGuard, AX
+ ADDL $const__StackGuard, AX
MOVL AX, g_stackguard0(DX)
MOVL AX, g_stackguard1(DX)
MOVQ AX, (g_stack+stack_hi)(DX)
SUBQ $(64*1024), AX // initial stack size (adjusted later)
MOVQ AX, (g_stack+stack_lo)(DX)
- ADDQ $const_stackGuard, AX
+ ADDQ $const__StackGuard, AX
MOVQ AX, g_stackguard0(DX)
MOVQ AX, g_stackguard1(DX)