_RootSpans = 3
_RootFlushCaches = 4
_RootCount = 5
+
+ // firstStackBarrierOffset is the approximate byte offset at
+ // which to place the first stack barrier from the current SP.
+ // This is a lower bound on how much stack will have to be
+ // re-scanned during mark termination. Subsequent barriers are
+ // placed at firstStackBarrierOffset * 2^n offsets.
+ //
+ // For debugging, this can be set to 0, which will install a
+ // stack barrier at every frame. If you do this, you may also
+ // have to raise _StackMin, since the stack barrier
+ // bookkeeping will use a large amount of each stack.
+ firstStackBarrierOffset = 1024
+ debugStackBarrier = false
)
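
// Illustration: a minimal sketch (not part of this patch) of the
// placement schedule described above. exampleBarrierOffsets is a
// hypothetical helper; with firstStackBarrierOffset = 1024 and a
// 32 KiB stack it yields offsets 1024, 2048, 4096, 8192, and 16384
// below SP.
func exampleBarrierOffsets(stackSize uintptr) []uintptr {
	var offsets []uintptr
	for off := uintptr(firstStackBarrierOffset); off < stackSize; off *= 2 {
		offsets = append(offsets, off) // each barrier doubles the previous offset
	}
	return offsets
}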
// heapminimum is the minimum heap size at which to trigger GC.
}
}
+// gcMaxStackBarriers returns the maximum number of stack barriers
+// that can be installed in a stack of stackSize bytes.
+func gcMaxStackBarriers(stackSize int) (n int) {
+ if firstStackBarrierOffset == 0 {
+ // Special debugging case for inserting stack barriers
+ // at every frame. Steal half of the stack for the
+ // []stkbar. Technically, if the stack were to consist
+ // solely of return PCs we would need two thirds of
+ // the stack, but stealing that much breaks things and
+ // this doesn't happen in practice.
+ return stackSize / 2 / int(unsafe.Sizeof(stkbar{}))
+ }
+
+ offset := firstStackBarrierOffset
+ for offset < stackSize {
+ n++
+ offset *= 2
+ }
+ return n + 1
+}
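
// Worked example (not part of this patch): with the default
// firstStackBarrierOffset of 1024, a 32 KiB stack doubles through
// 1024, 2048, 4096, 8192, and 16384 (all < 32768), so the loop counts
// n = 5 and gcMaxStackBarriers returns 6. A stkbar entry is two
// uintptrs (16 bytes on 64-bit), so at most 96 bytes of that stack go
// to barrier bookkeeping.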
+
// TODO(austin): Can we consolidate the gcDrain* functions?
// gcDrain scans objects in work buffers, blackening grey
// objects until all work buffers have been drained.
if stacksize >= 0 {
stacksize = round2(_StackSystem + stacksize)
systemstack(func() {
- newg.stack = stackalloc(uint32(stacksize))
+ newg.stack, newg.stkbar = stackalloc(uint32(stacksize))
})
newg.stackguard0 = newg.stack.lo + _StackGuard
newg.stackguard1 = ^uintptr(0)
gp.stack.lo = 0
gp.stack.hi = 0
gp.stackguard0 = 0
+ gp.stkbar = nil
+ gp.stkbarPos = 0
}
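	// Note (illustrative, not from this patch): the stkbar backing array
	// is carved out of the top of the stack allocation itself (see the
	// stackalloc change below), so it dies with the stack and the slice
	// must be cleared along with it.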
gp.schedlink.set(_p_.gfree)
if gp.stack.lo == 0 {
// Stack was deallocated in gfput. Allocate a new one.
systemstack(func() {
- gp.stack = stackalloc(_FixedStack)
+ gp.stack, gp.stkbar = stackalloc(_FixedStack)
})
gp.stackguard0 = gp.stack.lo + _StackGuard
gp.stackAlloc = _FixedStack
hi uintptr
}
+// stkbar records the state of a G's stack barrier.
+type stkbar struct {
+ savedLRPtr uintptr // location overwritten by stack barrier PC
+ savedLRVal uintptr // value overwritten at savedLRPtr
+}
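
// Illustration: a minimal sketch (not part of this patch) of how a
// stkbar entry is used, following the field comments above: installing
// a barrier saves the return PC found at a frame's LR slot and replaces
// it with the barrier's entry point; removing it restores the original
// value. installBarrier, removeBarrier, and barrierPC are hypothetical
// names.
func installBarrier(b *stkbar, lrSlot *uintptr, barrierPC uintptr) {
	b.savedLRPtr = uintptr(unsafe.Pointer(lrSlot))
	b.savedLRVal = *lrSlot // the real return PC
	*lrSlot = barrierPC    // returning through this frame now enters the barrier
}

func removeBarrier(b *stkbar) {
	*(*uintptr)(unsafe.Pointer(b.savedLRPtr)) = b.savedLRVal
}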
+
type g struct {
// Stack parameters.
// stack describes the actual stack memory: [stack.lo, stack.hi).
sched gobuf
syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
+ stkbar []stkbar // stack barriers, from low to high
+ stkbarPos uintptr // index of lowest stack barrier not hit
param unsafe.Pointer // passed parameter on wakeup
atomicstatus uint32
goid int64
unlock(&stackpoolmu)
}
-func stackalloc(n uint32) stack {
+func stackalloc(n uint32) (stack, []stkbar) {
// Stackalloc must be called on scheduler stack, so that we
// never try to grow the stack during the code that stackalloc runs.
// Doing so would cause a deadlock (issue 1547).
print("stackalloc ", n, "\n")
}
+	// Compute the size of the stack barrier array.
+ maxstkbar := gcMaxStackBarriers(int(n))
+ nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)
+
if debug.efence != 0 || stackFromSystem != 0 {
v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
if v == nil {
throw("out of memory (stackalloc)")
}
- return stack{uintptr(v), uintptr(v) + uintptr(n)}
+ top := uintptr(n) - nstkbar
+ stkbarSlice := slice{add(v, top), 0, maxstkbar}
+ return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}
// Small stacks are allocated with a fixed-size free-list allocator.
if stackDebug >= 1 {
print(" allocated ", v, "\n")
}
- return stack{uintptr(v), uintptr(v) + uintptr(n)}
+ top := uintptr(n) - nstkbar
+ stkbarSlice := slice{add(v, top), 0, maxstkbar}
+ return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}
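
// Layout note (illustrative, not from this patch): stackalloc now splits
// an n-byte allocation starting at v into
//
//	[v, v+top)    usable stack, where top = n - maxstkbar*unsafe.Sizeof(stkbar{})
//	[v+top, v+n)  backing store for the []stkbar (len 0, cap maxstkbar)
//
// The slice header is assembled by hand from the runtime's slice struct
// because the backing memory comes from the stack allocator rather than
// the heap, so a plain make([]stkbar, 0, maxstkbar) is not an option.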
func stackfree(stk stack, n uintptr) {
used := old.hi - gp.sched.sp
// allocate new stack
- new := stackalloc(uint32(newsize))
+ new, newstkbar := stackalloc(uint32(newsize))
if stackPoisonCopy != 0 {
fillstack(new, 0xfd)
}
}
memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)
+	// Copy old stack barriers over to the new stack barrier array.
+ newstkbar = newstkbar[:len(gp.stkbar)]
+ copy(newstkbar, gp.stkbar)
+
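	// Note (illustrative, not from this patch): newstkbar comes back from
	// stackalloc with len 0 and cap gcMaxStackBarriers(newsize), so the
	// reslice above re-exposes the entries for the barriers already
	// installed. gp.stkbarPos indexes this array rather than the stack,
	// so it carries over unchanged; the savedLRPtr fields still point
	// into the old stack and must be adjusted like any other stack
	// pointer.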
// Swap out old stack for new one
gp.stack = new
gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
gp.sched.sp = new.hi - used
oldsize := gp.stackAlloc
gp.stackAlloc = newsize
+ gp.stkbar = newstkbar
// free old stack
if stackPoisonCopy != 0 {
stackfree(gp.stack, gp.stackAlloc)
gp.stack.lo = 0
gp.stack.hi = 0
+ gp.stkbar = nil
+ gp.stkbarPos = 0
}
return
}