"unsafe"
)
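+// OldPageAllocator reports whether this runtime was built to use the
+// old treap-based page allocator; tests use it to pick the matching checks.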
+const OldPageAllocator = oldPageAllocator
+
var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
slow.BySize[i].Frees = bySize[i].Frees
}
- for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
- slow.HeapReleased += uint64(i.span().released())
+ if oldPageAllocator {
+ for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
+ slow.HeapReleased += uint64(i.span().released())
+ }
+ } else {
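+ // Each set bit in a chunk's scavenged bitmap is one page currently
+ // returned to the OS, so count the bits and multiply by the page size.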
+ for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
+ pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
+ slow.HeapReleased += uint64(pg) * pageSize
+ }
}
// Unused space in the current arena also counts as released space.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}
+
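+// BitsMismatch records one 64-page region whose scavenged bitmap
+// disagrees with its allocation bitmap: Got is the scavenged word as
+// found, Want is that word with the allocated pages' bits cleared.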
+type BitsMismatch struct {
+ Base uintptr
+ Got, Want uint64
+}
+
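+// CheckScavengedBitsCleared walks all chunks known to the page allocator
+// and verifies that no page is marked both allocated and scavenged,
+// recording up to len(mismatches) offending regions. It returns the
+// number of mismatches recorded and whether the bitmaps are consistent.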
+func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
+ ok = true
+
+ // Run on the system stack to avoid stack growth allocation.
+ systemstack(func() {
+ getg().m.mallocing++
+
+ // Lock so that we can safely access the bitmap.
+ lock(&mheap_.lock)
+ chunkLoop:
+ for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
+ chunk := &mheap_.pages.chunks[i]
+ for j := 0; j < pallocChunkPages/64; j++ {
+ // Run over each 64-bit bitmap section and ensure
+ // scavenged is being cleared properly on allocation.
+ // If a used bit and scavenged bit are both set, that's
+ // an error, and could indicate a larger problem, or
+ // an accounting problem.
+ want := chunk.scavenged[j] &^ chunk.pallocBits[j]
+ got := chunk.scavenged[j]
+ if want != got {
+ ok = false
+ if n >= len(mismatches) {
+ break chunkLoop
+ }
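+ // Each 64-bit bitmap word covers 64 pages, so the word's base
+ // address is the chunk base plus j*64 pages.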
+ mismatches[n] = BitsMismatch{
+ Base: chunkBase(i) + uintptr(j)*64*pageSize,
+ Got: got,
+ Want: want,
+ }
+ n++
+ }
+ }
+ }
+ unlock(&mheap_.lock)
+
+ getg().m.mallocing--
+ })
+ return
+}
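For orientation, a runtime test could drive this hook roughly as follows. This is a sketch, not part of the change: the test name and the 128-entry buffer are illustrative, and it only builds inside the runtime package's own tests, where export_test.go's exports are visible via the dot import.

package runtime_test

import (
	. "runtime" // export_test.go makes BitsMismatch and CheckScavengedBitsCleared visible here
	"testing"
)

func TestScavengedBitsCleared(t *testing.T) {
	// Bound how many offending regions we report.
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
		for _, m := range mismatches[:n] {
			t.Logf("\t@ base 0x%x", m.Base)
			t.Logf("\t|  got: %064b", m.Got)
			t.Logf("\t| want: %064b", m.Want)
		}
	}
}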
// lock must only be acquired on the system stack, otherwise a g
// could self-deadlock if its stack grows with the lock held.
lock mutex
- free mTreap // free spans
- sweepgen uint32 // sweep generation, see comment in mspan
- sweepdone uint32 // all spans are swept
- sweepers uint32 // number of active sweepone calls
+ free mTreap // free spans
+ pages pageAlloc // page allocation data structure
+ sweepgen uint32 // sweep generation, see comment in mspan
+ sweepdone uint32 // all spans are swept
+ sweepers uint32 // number of active sweepone calls
// allspans is a slice of all mspans ever created. Each mspan
// appears exactly once.
for i := range h.central {
h.central[i].mcentral.init(spanClass(i))
}
+
+ if !oldPageAllocator {
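+ // Set up the new page allocator's metadata, accounting it against gc_sys.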
+ h.pages.init(&h.lock, &memstats.gc_sys)
+ }
}
// reclaim sweeps and reclaims at least npage pages into the heap.
// The returned span has been removed from the
// free structures, but its state is still mSpanFree.
func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
+ if oldPageAllocator {
+ return h.allocSpanLockedOld(npage, stat)
+ }
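+ // Ask the page allocator for npage contiguous pages: base is the start
+ // address (0 if nothing suitable was found) and scav is how many bytes
+ // in that range had been scavenged (returned to the OS).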
+ base, scav := h.pages.alloc(npage)
+ if base != 0 {
+ goto HaveBase
+ }
+ if !h.grow(npage) {
+ return nil
+ }
+ base, scav = h.pages.alloc(npage)
+ if base != 0 {
+ goto HaveBase
+ }
+ throw("grew heap, but no adequate free space found")
+
+HaveBase:
+ if scav != 0 {
+ // sysUsed all the pages that are actually available
+ // in the span.
+ sysUsed(unsafe.Pointer(base), npage*pageSize)
+ memstats.heap_released -= uint64(scav)
+ }
+
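+ // Build an mspan describing the region and record it in the span map
+ // so lookups by address resolve to it.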
+ s := (*mspan)(h.spanalloc.alloc())
+ s.init(base, npage)
+ // TODO(mknyszek): Add code to compute whether the newly-allocated
+ // region needs to be zeroed.
+ s.needzero = 1
+ h.setSpans(s.base(), npage, s)
+
+ *stat += uint64(npage << _PageShift)
+ memstats.heap_idle -= uint64(npage << _PageShift)
+
+ return s
+}
+
+// Allocates a span of the given size. h must be locked.
+// The returned span has been removed from the
+// free structures, but its state is still mSpanFree.
+func (h *mheap) allocSpanLockedOld(npage uintptr, stat *uint64) *mspan {
t := h.free.find(npage)
if t.valid() {
goto HaveSpan
// h must be locked.
func (h *mheap) grow(npage uintptr) bool {
ask := npage << _PageShift
+ if !oldPageAllocator {
+ // We must grow the heap in whole palloc chunks.
+ ask = alignUp(ask, pallocChunkBytes)
+ }
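+ // Track how much address space this call hands to the page allocator;
+ // the inline scavenge below is bounded by it.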
+ totalGrowth := uintptr(0)
nBase := alignUp(h.curArena.base+ask, physPageSize)
if nBase > h.curArena.end {
// Not enough room in the current arena. Allocate more
// remains of the current space and switch to
// the new space. This should be rare.
if size := h.curArena.end - h.curArena.base; size != 0 {
- h.growAddSpan(unsafe.Pointer(h.curArena.base), size)
+ if oldPageAllocator {
+ h.growAddSpan(unsafe.Pointer(h.curArena.base), size)
+ } else {
+ h.pages.grow(h.curArena.base, size)
+ }
+ totalGrowth += size
}
// Switch to the new space.
h.curArena.base = uintptr(av)
// Grow into the current arena.
v := h.curArena.base
h.curArena.base = nBase
- h.growAddSpan(unsafe.Pointer(v), nBase-v)
+ if oldPageAllocator {
+ h.growAddSpan(unsafe.Pointer(v), nBase-v)
+ } else {
+ h.pages.grow(v, nBase-v)
+ totalGrowth += nBase - v
+
+ // We just caused a heap growth, so scavenge down what will soon be used.
+ // By scavenging inline we deal with the failure to allocate out of
+ // memory fragments by scavenging the memory fragments that are least
+ // likely to be re-used.
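+ // Scavenge only as much as is needed to bring retained memory back
+ // down to the goal, and never more than this growth.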
+ if retained := heapRetained(); retained+uint64(totalGrowth) > h.scavengeGoal {
+ todo := totalGrowth
+ if overage := uintptr(retained + uint64(totalGrowth) - h.scavengeGoal); todo > overage {
+ todo = overage
+ }
+ h.pages.scavenge(todo, true)
+ }
+ }
return true
}
if acctidle {
memstats.heap_idle += uint64(s.npages << _PageShift)
}
- s.state.set(mSpanFree)
- // Coalesce span with neighbors.
- h.coalesce(s)
- // Insert s into the treap.
- h.free.insert(s)
+ if oldPageAllocator {
+ s.state.set(mSpanFree)
+ // Coalesce span with neighbors.
+ h.coalesce(s)
+
+ // Insert s into the treap.
+ h.free.insert(s)
+ return
+ }
+
+ // Mark the space as free.
+ h.pages.free(s.base(), s.npages)
+
+ // Free the span structure. We no longer have a use for it.
+ s.state.set(mSpanDead)
+ h.spanalloc.free(unsafe.Pointer(s))
}
// scavengeSplit takes t.span() and attempts to split off a span containing size
gp := getg()
gp.m.mallocing++
lock(&h.lock)
- released := h.scavengeLocked(^uintptr(0))
+ var released uintptr
+ if oldPageAllocator {
+ released = h.scavengeLocked(^uintptr(0))
+ } else {
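+ // ^uintptr(0) places no limit on the bytes to release, so the page
+ // allocator works through every free, unscavenged page it has.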
+ released = h.pages.scavenge(^uintptr(0), true)
+ }
unlock(&h.lock)
gp.m.mallocing--