stackFromSystem = 0 // allocate stacks from system memory instead of the heap
stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
-
- stackCache = 1
+ stackNoCache = 0 // disable per-P small stack caches
// check the BP links during traceback.
debugCheckBP = false
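Note that the flag's sense is inverted relative to the old constant: stackCache = 1 meant "caching enabled", while stackNoCache = 0 means "caching not disabled", so default behavior is unchanged and the check simply moves from the size test into the fall-back-to-global-pool condition in the hunks below. A minimal sketch of that pattern (hypothetical names, not the runtime's code): because stackNoCache is a compile-time constant 0, the `stackNoCache != 0 ||` term is constant-false and the fast path costs nothing.

// A minimal sketch, assuming a constant debug flag like stackNoCache above.
package main

import "fmt"

const stackNoCache = 0 // flip to 1 to force every small stack through the global pool

// useGlobalPool mirrors the shape of the condition in stackalloc/stackfree:
// a constant-false first term compiles away, leaving only the runtime checks.
func useGlobalPool(haveCache bool) bool {
	return stackNoCache != 0 || !haveCache
}

func main() {
	fmt.Println(useGlobalPool(true))  // false: take the per-P cache fast path
	fmt.Println(useGlobalPool(false)) // true: fall back to the locked global pool
}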
// Small stacks are allocated with a fixed-size free-list allocator.
// If we need a stack of a bigger size, we fall back on allocating
// a dedicated span.
var v unsafe.Pointer
- if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
+ if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
order := uint8(0)
n2 := n
for n2 > _FixedStack {
	order++
	n2 >>= 1
}
var x gclinkptr
c := thisg.m.mcache
- if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
+ if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
// c == nil can happen in the guts of exitsyscall or
// procresize. Just get a stack from the global pool.
// Also don't touch stackcache during gc
// as it's flushed concurrently.
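The loop above picks which fixed-size free list serves the request: the order is the number of times n must be halved to reach _FixedStack. A self-contained sketch under assumed constant values (roughly, _FixedStack = 2048 and _NumStackOrders = 4; both differ on some platforms, and the real check also bounds n by _StackCacheSize):

// A sketch of the order computation, with assumed values for the constants.
package main

import "fmt"

const (
	fixedStack     = 2048 // assumed _FixedStack
	numStackOrders = 4    // assumed _NumStackOrders
)

// smallStackOrder reports which free-list order would serve a stack of n
// bytes, and whether n is small enough to be served from the free lists.
func smallStackOrder(n uintptr) (order uint8, ok bool) {
	if n >= fixedStack<<numStackOrders {
		return 0, false // too big: allocate a dedicated span instead
	}
	n2 := n
	for n2 > fixedStack {
		order++
		n2 >>= 1
	}
	return order, true
}

func main() {
	for _, n := range []uintptr{2048, 4096, 8192, 65536} {
		order, ok := smallStackOrder(n)
		fmt.Println(n, order, ok) // 2048 0 true, 4096 1 true, 8192 2 true, 65536 0 false
	}
}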
if msanenabled {
msanfree(v, n)
}
- if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
+ if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
order := uint8(0)
n2 := n
for n2 > _FixedStack {
	order++
	n2 >>= 1
}
x := gclinkptr(v)
c := gp.m.mcache
- if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
+ if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
lock(&stackpoolmu)
stackpoolfree(x, order)
unlock(&stackpoolmu)
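The free path mirrors allocation: unless stackNoCache or the other preconditions force it to the global pool, a small stack goes back onto the per-P cache without locking, while stackpoolfree must run under stackpoolmu because the pool is shared by all Ps. A minimal sketch of that design choice (hypothetical types, not the runtime's):

// A sketch contrasting a shared, mutex-guarded pool with a per-worker cache
// that needs no lock because only its owner ever touches it.
package main

import (
	"fmt"
	"sync"
)

type globalPool struct {
	mu   sync.Mutex
	free []uintptr
}

func (p *globalPool) put(v uintptr) {
	p.mu.Lock() // shared by everyone, so serialize access
	p.free = append(p.free, v)
	p.mu.Unlock()
}

type perPCache struct {
	free []uintptr // only the owning P touches this, so no lock is needed
}

func (c *perPCache) put(v uintptr) { c.free = append(c.free, v) }

func main() {
	var g globalPool
	var c perPCache
	g.put(0x1000)
	c.put(0x2000)
	fmt.Println(len(g.free), len(c.free)) // 1 1
}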