package runtime
import (
+ "math/bits"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
slow.HeapReleased += uint64(pg) * pageSize
}
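+ // Pages that were scavenged when they entered a P's page cache
+ // are tracked by pcache.scav rather than the global bitmap, so
+ // count them here as well.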
+ for _, p := range allp {
+ pg := bits.OnesCount64(p.pcache.scav)
+ slow.HeapReleased += uint64(pg) * pageSize
+ }
// Unused space in the current arena also counts as released space.
slow.HeapReleased += uint64(mheap_.curArena.end - mheap_.curArena.base)
})
return
}
+
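+// PageCachePagesLeaked counts pages still held by the page caches of
+// dead Ps. The runtime is expected to flush a P's page cache when the
+// P is destroyed, so a nonzero result indicates leaked pages; it is
+// exported for runtime tests.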
+func PageCachePagesLeaked() (leaked uintptr) {
+ stopTheWorld("PageCachePagesLeaked")
+
+ // Walk over destroyed Ps and look for unflushed caches.
+ deadp := allp[len(allp):cap(allp)]
+ for _, p := range deadp {
+ // Since we're going past len(allp) we may see nil Ps.
+ // Just ignore them.
+ if p != nil {
+ leaked += uintptr(bits.OnesCount64(p.pcache.cache))
+ }
+ }
+
+ startTheWorld()
+ return
+}
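+
+// Illustrative only (not part of this change): a test in package
+// runtime_test might assert that no pages leak, e.g.
+//
+//	if n := runtime.PageCachePagesLeaked(); n != 0 {
+//		t.Errorf("leaked %d pages in dead Ps' page caches", n)
+//	}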
gp := getg()
base, scav := uintptr(0), uintptr(0)
- // Try to allocate a cached span.
- s = h.tryAllocMSpan()
+ // If the allocation is small enough, try the page cache!
+ pp := gp.m.p.ptr()
+ if pp != nil && npages < pageCachePages/4 {
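+ // Note: pp may be nil (e.g. very early in bootstrap, before any
+ // P exists). pageCachePages is the cache's capacity (64 pages);
+ // capping requests below a quarter of that keeps a single
+ // allocation from draining the cache.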
+ c := &pp.pcache
- // We failed to do what we need to do without the lock.
- lock(&h.lock)
+ // If the cache is empty, refill it.
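+ // allocToCache carves an aligned 64-page window out of the
+ // heap's page bitmap; the returned cache's bits record which
+ // pages in that window are free (and which are scavenged).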
+ if c.empty() {
+ lock(&h.lock)
+ *c = h.pages.allocToCache()
+ unlock(&h.lock)
+ }
- // Try to acquire a base address.
- base, scav = h.pages.alloc(npages)
- if base != 0 {
- goto HaveBase
- }
- if !h.grow(npages) {
- unlock(&h.lock)
- return nil
- }
- base, scav = h.pages.alloc(npages)
- if base != 0 {
- goto HaveBase
+ // Try to allocate from the cache.
+ base, scav = c.alloc(npages)
+ if base != 0 {
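+ // tryAllocMSpan attempts to take an mspan object from the P's
+ // local mspan cache without the heap lock; it returns nil if
+ // the cache is empty.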
+ s = h.tryAllocMSpan()
+
+ if s != nil && gcBlackenEnabled == 0 && (manual || spanclass.sizeclass() != 0) {
+ goto HaveSpan
+ }
+ // We're either running during GC, failed to acquire an mspan,
+ // or the allocation is for a large object. This means we
+ // have to lock the heap and do a bunch of extra work,
+ // so fall through to the locked path below.
+ //
+ // We must do this during GC to avoid skew with heap_scan
+ // since we flush mcache stats whenever we lock.
+ //
+ // TODO(mknyszek): It would be nice to not have to
+ // lock the heap if it's a large allocation, but
+ // it's fine for now. The critical section here is
+ // short and large object allocations are relatively
+ // infrequent.
+ }
}
- throw("grew heap, but no adequate free space found")
-HaveBase:
+ // For one reason or another, we couldn't get the
+ // whole job done without the heap lock.
+ lock(&h.lock)
+
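+ // If the page cache already handed us a base address above, we
+ // only needed the lock for the bookkeeping below; otherwise we
+ // still have to find pages.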
+ if base == 0 {
+ // Try to acquire a base address.
+ base, scav = h.pages.alloc(npages)
+ if base == 0 {
+ if !h.grow(npages) {
+ unlock(&h.lock)
+ return nil
+ }
+ base, scav = h.pages.alloc(npages)
+ if base == 0 {
+ throw("grew heap, but no adequate free space found")
+ }
+ }
+ }
if s == nil {
// We failed to get an mspan earlier, so grab
// one now that we have the heap lock.
s = h.allocMSpanLocked()
}
unlock(&h.lock)
- // Initialize the span.
+HaveSpan:
+ // At this point, both s != nil and base != 0, and the heap
+ // lock is no longer held. Initialize the span.
s.init(base, npages)
if h.allocNeedsZero(base, npages) {
s.needzero = 1