}
// Called by malloc to record a profiled block.
-func mProf_Malloc(p unsafe.Pointer, size uintptr) {
- var stk [maxStack]uintptr
- nstk := callers(4, stk[:])
-
+func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
+ nstk := callers(4, mp.profStack)
index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))
- b := stkbucket(memProfile, size, stk[:nstk], true)
- mp := b.mp()
- mpc := &mp.future[index]
+ b := stkbucket(memProfile, size, mp.profStack[:nstk], true)
+ mr := b.mp()
+ mpc := &mr.future[index]
lock(&profMemFutureLock[index])
mpc.allocs++
}
func saveblockevent(cycles, rate int64, skip int, which bucketType) {
- gp := getg()
var nstk int
- var stk [maxStack]uintptr
+ gp := getg()
+ mp := acquirem() // we must not be preempted while accessing profstack
if gp.m.curg == nil || gp.m.curg == gp {
- nstk = callers(skip, stk[:])
+ nstk = callers(skip, mp.profStack)
} else {
- nstk = gcallers(gp.m.curg, skip, stk[:])
+ nstk = gcallers(gp.m.curg, skip, mp.profStack)
}
- saveBlockEventStack(cycles, rate, stk[:nstk], which)
+ saveBlockEventStack(cycles, rate, mp.profStack[:nstk], which)
+ releasem(mp)
}
// lockTimer assists with profiling contention on runtime-internal locks.
}
type mLockProfile struct {
- waitTime   atomic.Int64      // total nanoseconds spent waiting in runtime.lockWithRank
- stack      [maxStack]uintptr // stack that experienced contention in runtime.lockWithRank
- pending    uintptr           // *mutex that experienced contention (to be traceback-ed)
- cycles     int64             // cycles attributable to "pending" (if set), otherwise to "stack"
- cyclesLost int64             // contention for which we weren't able to record a call stack
- disabled   bool              // attribute all time to "lost"
+ waitTime   atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank
+ stack      []uintptr    // stack that experienced contention in runtime.lockWithRank
+ pending    uintptr      // *mutex that experienced contention (to be traceback-ed)
+ cycles     int64        // cycles attributable to "pending" (if set), otherwise to "stack"
+ cyclesLost int64        // contention for which we weren't able to record a call stack
+ disabled   bool         // attribute all time to "lost"
}
func (prof *mLockProfile) recordLock(cycles int64, l *mutex) {
systemstack(func() {
var u unwinder
u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack)
- nstk = tracebackPCs(&u, skip, prof.stack[:])
+ nstk = tracebackPCs(&u, skip, prof.stack)
})
if nstk < len(prof.stack) {
prof.stack[nstk] = 0
if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
mp.cgoCallers = new(cgoCallers)
}
+ mProfStackInit(mp)
+}
+
+// mProfStackInit is used to eagerly initialize stack trace buffers for
+// profiling. Lazy allocation would have to deal with reentrancy issues in
+// malloc and runtime locks for mLockProfile.
+// TODO(mknyszek): Implement lazy allocation if this becomes a problem.
+func mProfStackInit(mp *m) {
+ mp.profStack = make([]uintptr, maxStack)
+ mp.mLockProfile.stack = make([]uintptr, maxStack)
}
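
For illustration only (not part of this diff): a minimal standalone sketch of the pattern the change adopts, where a per-worker stack buffer is allocated eagerly, once, so the hot path that captures stacks never allocates. The worker type and helper names below are hypothetical stand-ins for the runtime's per-M state.

package main

import (
	"fmt"
	"runtime"
)

// worker stands in for the runtime's per-M state; its profStack buffer is
// allocated once up front, mirroring what mProfStackInit does above.
type worker struct {
	profStack []uintptr // reused for every capture; never reallocated
}

func newWorker(depth int) *worker {
	return &worker{profStack: make([]uintptr, depth)}
}

// capture records the current call stack into the preallocated buffer,
// analogous to callers(skip, mp.profStack) in the hunks above.
func (w *worker) capture(skip int) []uintptr {
	n := runtime.Callers(skip+2, w.profStack) // +2 skips Callers and capture itself
	return w.profStack[:n]
}

func main() {
	w := newWorker(32)
	frames := runtime.CallersFrames(w.capture(0))
	for {
		fr, more := frames.Next()
		fmt.Println(fr.Function)
		if !more {
			break
		}
	}
}
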
func (mp *m) becomeSpinning() {