asanunpoison(x, userSize)
}
+ // Note cache c only valid while m acquired; see #47302
+ //
+ // N.B. Use the full size because that matches how the GC
+ // will update the mem profile on the "free" side.
+ //
// TODO(mknyszek): We should really count the header as part
// of gc_sys or something. The code below just pretends it is
// internal fragmentation and matches the GC's accounting by
// using the whole allocation slot.
fullSize := span.elemsize
- if rate := MemProfileRate; rate > 0 {
- // Note cache c only valid while m acquired; see #47302
- //
- // N.B. Use the full size because that matches how the GC
- // will update the mem profile on the "free" side.
- if rate != 1 && fullSize < c.nextSample {
- c.nextSample -= fullSize
- } else {
- profilealloc(mp, x, fullSize)
- }
+ c.nextSample -= int64(fullSize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, fullSize)
}
mp.mallocing = 0
releasem(mp)
return newarray(typ, n)
}
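
The new fast path above replaces the old per-allocation rate check with a countdown: every allocation subtracts its full slot size from a signed per-mcache budget, and a sample is recorded once the budget underflows or MemProfileRate has changed since the budget was drawn. A minimal standalone sketch of that pattern (hypothetical names; drawBudget stands in for nextSample, globalRate for MemProfileRate, and none of this is runtime code):

package main

import "fmt"

// globalRate stands in for runtime.MemProfileRate.
var globalRate = 512 * 1024

type sampler struct {
	nextSample int64 // remaining byte budget before the next sample
	rate       int   // rate the current budget was drawn against
}

// drawBudget stands in for nextSample(); a fixed budget keeps the sketch simple.
func drawBudget() int64 { return int64(globalRate) }

func (s *sampler) malloc(size int64) {
	s.nextSample -= size
	// Sample when the budget underflows, or when the global rate changed
	// after the budget was drawn (mirrors MemProfileRate != c.memProfRate).
	if s.nextSample < 0 || globalRate != s.rate {
		fmt.Printf("sample: allocation of %d bytes\n", size)
		s.rate = globalRate
		s.nextSample = drawBudget()
	}
}

func main() {
	s := &sampler{rate: globalRate, nextSample: drawBudget()}
	for i := 0; i < 2048; i++ {
		s.malloc(4096)
	}
}

With a 512 KiB rate and 4 KiB allocations, the loop ends up recording roughly sixteen samples across 8 MiB of allocation, which is the behavior the countdown is meant to preserve.
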
+// profilealloc resets the current mcache's nextSample counter and
+// records a memory profile sample.
+//
+// The caller must be non-preemptible and have a P.
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
c := getMCache(mp)
if c == nil {
throw("profilealloc called without a P or outside bootstrapping")
}
+ c.memProfRate = MemProfileRate
c.nextSample = nextSample()
mProf_Malloc(mp, x, size)
}
// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution (exp(MemProfileRate)), so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
-func nextSample() uintptr {
+func nextSample() int64 {
+ if MemProfileRate == 0 {
+ // Basically never sample.
+ return maxInt64
+ }
if MemProfileRate == 1 {
- // Callers assign our return value to
- // mcache.next_sample, but next_sample is not used
- // when the rate is 1. So avoid the math below and
- // just return something.
+ // Sample immediately.
return 0
}
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if gp := getg(); gp == gp.m.gsignal {
			return nextSampleNoFP()
		}
	}
- return uintptr(fastexprand(MemProfileRate))
+ return int64(fastexprand(MemProfileRate))
}
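
The doc comment models sampling as a Poisson process, so the distance to the next sample is exponentially distributed with mean MemProfileRate; fastexprand approximates that draw cheaply inside the runtime. A rough illustration (using math/rand rather than the runtime's generator, with an assumed rate) that a scaled exponential draw has the intended mean:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const rate = 512 * 1024 // stand-in for MemProfileRate
	const n = 1_000_000

	// ExpFloat64 draws from an exponential distribution with mean 1;
	// scaling by rate gives a mean of roughly rate bytes between samples.
	var sum float64
	for i := 0; i < n; i++ {
		sum += rand.ExpFloat64() * rate
	}
	fmt.Printf("empirical mean gap: %.0f bytes (want ~%d)\n", sum/n, rate)
}
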
// fastexprand returns a random number from an exponential distribution with
// the specified mean.
// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
-func nextSampleNoFP() uintptr {
+func nextSampleNoFP() int64 {
// Set first allocation sample size.
rate := MemProfileRate
if rate > 0x3fffffff { // make 2*rate not overflow
rate = 0x3fffffff
}
if rate != 0 {
- return uintptr(cheaprandn(uint32(2 * rate)))
+ return int64(cheaprandn(uint32(2 * rate)))
}
return 0
}
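
The no-FP fallback replaces the exponential draw with a uniform draw in [0, 2*rate), which preserves the mean sampling period of rate even though the distribution's shape differs. A quick sketch (again with math/rand and an assumed rate, not cheaprandn) checking that mean:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const rate = 512 * 1024 // stand-in for MemProfileRate
	const n = 1_000_000

	// A uniform draw in [0, 2*rate) has mean rate, matching the average
	// sampling period of the exponential draw used on other platforms.
	var sum int64
	for i := 0; i < n; i++ {
		sum += rand.Int63n(2 * rate)
	}
	fmt.Printf("empirical mean gap: %d bytes (want ~%d)\n", sum/n, rate)
}
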
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
- nextSample uintptr // trigger heap sample after allocating this many bytes
- scanAlloc uintptr // bytes of scannable heap allocated
+ nextSample int64 // trigger heap sample after allocating this many bytes
+ memProfRate int // cached mem profile rate, used to detect changes
+ scanAlloc uintptr // bytes of scannable heap allocated
// Allocator cache for tiny objects w/o pointers.
// See "Tiny allocator" comment in malloc.go.