// scavenge scavenges nbytes worth of free pages, starting with the
// highest address first.
//
// Returns the amount of memory scavenged in bytes.
//
-// s.mheapLock must be held, but may be temporarily released if
+// p.mheapLock must be held, but may be temporarily released if
// mayUnlock == true.
//
-// Must run on the system stack because s.mheapLock must be held.
+// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
-func (s *pageAlloc) scavenge(nbytes uintptr, mayUnlock bool) uintptr {
+func (p *pageAlloc) scavenge(nbytes uintptr, mayUnlock bool) uintptr {
var (
addrs addrRange
gen uint32
)
released := uintptr(0)
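// Reserve shards of the scavenger's search space and scavenge from them
// until we've released at least nbytes or the reserved space runs out.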
for released < nbytes {
if addrs.size() == 0 {
- if addrs, gen = s.scavengeReserve(); addrs.size() == 0 {
+ if addrs, gen = p.scavengeReserve(); addrs.size() == 0 {
break
}
}
- r, a := s.scavengeOne(addrs, nbytes-released, mayUnlock)
+ r, a := p.scavengeOne(addrs, nbytes-released, mayUnlock)
released += r
addrs = a
}
// Only unreserve the space which hasn't been scavenged or searched
// to ensure we always make progress.
- s.scavengeUnreserve(addrs, gen)
+ p.scavengeUnreserve(addrs, gen)
return released
}
// scavengeStartGen starts a new scavenge generation, resetting
// the scavenger's search space to the full in-use address space.
//
-// s.mheapLock must be held.
+// p.mheapLock must be held.
//
-// Must run on the system stack because s.mheapLock must be held.
+// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
-func (s *pageAlloc) scavengeStartGen() {
+func (p *pageAlloc) scavengeStartGen() {
if debug.scavtrace > 0 {
- printScavTrace(s.scav.gen, s.scav.released, false)
+ printScavTrace(p.scav.gen, p.scav.released, false)
}
- s.inUse.cloneInto(&s.scav.inUse)
+ p.inUse.cloneInto(&p.scav.inUse)
// Pick the new starting address for the scavenger cycle.
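// The scavenger works top-down: reservations are taken from the top of
// p.scav.inUse (see scavengeReserve's removeLast), so startAddr is effectively
// the upper bound of this cycle's search.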
var startAddr offAddr
- if s.scav.scavLWM.lessThan(s.scav.freeHWM) {
+ if p.scav.scavLWM.lessThan(p.scav.freeHWM) {
// The "free" high watermark exceeds the "scavenged" low watermark,
// so there are free scavengable pages in parts of the address space
// that the scavenger already searched, the high watermark being the
// highest one. Pick that as our new starting point to ensure we
// see those pages.
- startAddr = s.scav.freeHWM
+ startAddr = p.scav.freeHWM
} else {
// The "free" high watermark does not exceed the "scavenged" low
// watermark. This means the allocator didn't free any memory in
// the range we scavenged last cycle, so we might as well continue
// scavenging from where we were.
- startAddr = s.scav.scavLWM
+ startAddr = p.scav.scavLWM
}
- s.scav.inUse.removeGreaterEqual(startAddr.addr())
+ p.scav.inUse.removeGreaterEqual(startAddr.addr())
- // reservationBytes may be zero if s.inUse.totalBytes is small, or if
+ // reservationBytes may be zero if p.inUse.totalBytes is small, or if
// scavengeReservationShards is large. This case is fine as the scavenger
// will simply be turned off, but it does mean that scavengeReservationShards,
// in concert with pallocChunkBytes, dictates the minimum heap size at which
// the scavenger triggers. In practice this minimum is generally less than an
// arena in size, so virtually every heap has the scavenger on.
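// As a rough illustration (assuming pallocChunkBytes is 4 MiB and
// scavengeReservationShards is 64): a 1 GiB in-use address space yields
// reservations of about 16 MiB each.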
- s.scav.reservationBytes = alignUp(s.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
- s.scav.gen++
- s.scav.released = 0
- s.scav.freeHWM = minOffAddr
- s.scav.scavLWM = maxOffAddr
+ p.scav.reservationBytes = alignUp(p.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
+ p.scav.gen++
+ p.scav.released = 0
+ p.scav.freeHWM = minOffAddr
+ p.scav.scavLWM = maxOffAddr
}
// scavengeReserve reserves a contiguous range of the address space
// for scavenging. The maximum amount of space it reserves is proportional
// to the size of the heap.
//
// Returns the reserved range and the scavenge generation number for it.
//
-// s.mheapLock must be held.
+// p.mheapLock must be held.
//
-// Must run on the system stack because s.mheapLock must be held.
+// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
-func (s *pageAlloc) scavengeReserve() (addrRange, uint32) {
+func (p *pageAlloc) scavengeReserve() (addrRange, uint32) {
// Start by reserving the minimum.
- r := s.scav.inUse.removeLast(s.scav.reservationBytes)
+ r := p.scav.inUse.removeLast(p.scav.reservationBytes)
// Return early if the size is zero; we don't want to use
// the bogus address below.
if r.size() == 0 {
- return r, s.scav.gen
+ return r, p.scav.gen
}
// The scavenger requires that base be aligned to a
// palloc chunk because that's the unit of operation for
// the scavenger, so align down, potentially extending
// the range.
newBase := alignDown(r.base.addr(), pallocChunkBytes)
// Remove from inUse however much extra we just pulled out.
- s.scav.inUse.removeGreaterEqual(newBase)
+ p.scav.inUse.removeGreaterEqual(newBase)
r.base = offAddr{newBase}
- return r, s.scav.gen
+ return r, p.scav.gen
}
// scavengeUnreserve returns an unscavenged portion of a range that was
// previously reserved with scavengeReserve.
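// If gen does not match the current scavenge generation, the reservation is
// stale and is dropped, since the search space has already been reset.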
//
-// s.mheapLock must be held.
+// p.mheapLock must be held.
//
-// Must run on the system stack because s.mheapLock must be held.
+// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
-func (s *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
- if r.size() == 0 || gen != s.scav.gen {
+func (p *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
+ if r.size() == 0 || gen != p.scav.gen {
return
}
if r.base.addr()%pallocChunkBytes != 0 {
throw("unreserving unaligned region")
}
- s.scav.inUse.add(r)
+ p.scav.inUse.add(r)
}
// scavengeOne walks over address range work until it finds
// a range of free and unscavenged pages, and then scavenges that
// range by calling sysUnused.
//
// work's base address must be aligned to pallocChunkBytes.
//
-// s.mheapLock must be held, but may be temporarily released if
+// p.mheapLock must be held, but may be temporarily released if
// mayUnlock == true.
//
-// Must run on the system stack because s.mheapLock must be held.
+// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
-func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
+func (p *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
// Defensively check if we've received an empty address range.
// If so, just return.
if work.size() == 0 {
// Helpers for locking and unlocking only if mayUnlock == true.
lockHeap := func() {
if mayUnlock {
- lock(s.mheapLock)
+ lock(p.mheapLock)
}
}
unlockHeap := func() {
if mayUnlock {
- unlock(s.mheapLock)
+ unlock(p.mheapLock)
}
}
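// Fast path: check the chunk containing the top of the work range first,
// before falling back to a wider search over the chunk summaries.
//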
// work.limit is exclusive, so get an inclusive upper bound on the range
// by subtracting 1.
maxAddr := work.limit.addr() - 1
maxChunk := chunkIndex(maxAddr)
- if s.summary[len(s.summary)-1][maxChunk].max() >= uint(minPages) {
+ if p.summary[len(p.summary)-1][maxChunk].max() >= uint(minPages) {
// We only bother looking for a candidate if there are at least
// minPages free pages at all.
- base, npages := s.chunkOf(maxChunk).findScavengeCandidate(chunkPageIndex(maxAddr), minPages, maxPages)
+ base, npages := p.chunkOf(maxChunk).findScavengeCandidate(chunkPageIndex(maxAddr), minPages, maxPages)
// If we found something, scavenge it and return!
if npages != 0 {
- work.limit = offAddr{s.scavengeRangeLocked(maxChunk, base, npages)}
+ work.limit = offAddr{p.scavengeRangeLocked(maxChunk, base, npages)}
return uintptr(npages) * pageSize, work
}
}
// that's fine. We're being optimistic anyway.
// Check quickly if there are enough free pages at all.
- if s.summary[len(s.summary)-1][i].max() < uint(minPages) {
+ if p.summary[len(p.summary)-1][i].max() < uint(minPages) {
continue
}
// Load the l2 pointer atomically to avoid races with heap growth.
// It may or may not be possible to also
// see a nil pointer in this case if we do race with heap growth, but
// just defensively ignore the nils. This operation is optimistic anyway.
- l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&s.chunks[i.l1()])))
+ l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&p.chunks[i.l1()])))
if l2 != nil && l2[i.l2()].hasScavengeCandidate(minPages) {
return i, true
}
}
// Find, verify, and scavenge if we can.
- chunk := s.chunkOf(candidateChunkIdx)
+ chunk := p.chunkOf(candidateChunkIdx)
base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
if npages > 0 {
- work.limit = offAddr{s.scavengeRangeLocked(candidateChunkIdx, base, npages)}
+ work.limit = offAddr{p.scavengeRangeLocked(candidateChunkIdx, base, npages)}
return uintptr(npages) * pageSize, work
}
// scavengeRangeLocked scavenges the given range of pages within chunk ci and
// marks it as scavenged in the chunk's bitmap.
//
// Returns the base address of the scavenged region.
//
-// s.mheapLock must be held.
-func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr {
- s.chunkOf(ci).scavenged.setRange(base, npages)
+// p.mheapLock must be held.
+func (p *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr {
+ p.chunkOf(ci).scavenged.setRange(base, npages)
// Compute the full address for the start of the range.
addr := chunkBase(ci) + uintptr(base)*pageSize
// Update the scavenge low watermark.
- if oAddr := (offAddr{addr}); oAddr.lessThan(s.scav.scavLWM) {
- s.scav.scavLWM = oAddr
+ if oAddr := (offAddr{addr}); oAddr.lessThan(p.scav.scavLWM) {
+ p.scav.scavLWM = oAddr
}
// Only perform the actual scavenging if we're not in a test.
// It's dangerous to do so otherwise.
- if s.test {
+ if p.test {
return addr
}
sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
test bool
}
-func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
+func (p *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
if levelLogPages[0] > logMaxPackedValue {
// We can't represent 1<<levelLogPages[0] pages, the maximum number
// of pages we need to represent at the root level, in a summary, which
// is a big problem. Throw.
print("runtime: summary max pages = ", maxPackedValue, "\n")
throw("root level max pages doesn't fit in summary")
}
- s.sysStat = sysStat
+ p.sysStat = sysStat
- // Initialize s.inUse.
- s.inUse.init(sysStat)
+ // Initialize p.inUse.
+ p.inUse.init(sysStat)
// System-dependent initialization.
- s.sysInit()
+ p.sysInit()
// Start with the searchAddr in a state indicating there's no free memory.
- s.searchAddr = maxSearchAddr
+ p.searchAddr = maxSearchAddr
// Set the mheapLock.
- s.mheapLock = mheapLock
+ p.mheapLock = mheapLock
// Initialize scavenge tracking state.
- s.scav.scavLWM = maxSearchAddr
+ p.scav.scavLWM = maxSearchAddr
}
// tryChunkOf returns the bitmap data for the given chunk.
//
// Returns nil if the chunk data has not been mapped.
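// Unlike chunkOf below, it is safe to call for chunk indices whose l1 entry
// has not been mapped yet.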
-func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
- l2 := s.chunks[ci.l1()]
+func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+ l2 := p.chunks[ci.l1()]
if l2 == nil {
return nil
}
// chunkOf returns the chunk at the given chunk index.
//
// The chunk index must be valid or this method may throw.
-func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
- return &s.chunks[ci.l1()][ci.l2()]
+func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
+ return &p.chunks[ci.l1()][ci.l2()]
}
// grow sets up the metadata for the address range [base, base+size).
-// It may allocate metadata, in which case *s.sysStat will be updated.
+// It may allocate metadata, in which case *p.sysStat will be updated.
//
-// s.mheapLock must be held.
-func (s *pageAlloc) grow(base, size uintptr) {
+// p.mheapLock must be held.
+func (p *pageAlloc) grow(base, size uintptr) {
// Round up to chunks, since we can't deal with increments smaller
// than chunks. Also, sysGrow expects aligned values.
limit := alignUp(base+size, pallocChunkBytes)
// Grow the summary levels in a system-dependent manner.
// We just update a bunch of additional metadata here.
- s.sysGrow(base, limit)
+ p.sysGrow(base, limit)
- // Update s.start and s.end.
+ // Update p.start and p.end.
// If no growth happened yet, start == 0. This is generally
// safe since the zero page is unmapped.
- firstGrowth := s.start == 0
+ firstGrowth := p.start == 0
start, end := chunkIndex(base), chunkIndex(limit)
- if firstGrowth || start < s.start {
- s.start = start
+ if firstGrowth || start < p.start {
+ p.start = start
}
- if end > s.end {
- s.end = end
+ if end > p.end {
+ p.end = end
}
// Note that [base, limit) will never overlap with any existing
// range inUse because grow only ever adds never-used memory
// regions to the page allocator.
- s.inUse.add(makeAddrRange(base, limit))
+ p.inUse.add(makeAddrRange(base, limit))
// A grow operation is a lot like a free operation, so if our
- // chunk ends up below s.searchAddr, update s.searchAddr to the
+ // chunk ends up below p.searchAddr, update p.searchAddr to the
// new address, just like in free.
- if b := (offAddr{base}); b.lessThan(s.searchAddr) {
- s.searchAddr = b
+ if b := (offAddr{base}); b.lessThan(p.searchAddr) {
+ p.searchAddr = b
}
// Add entries into chunks, which is sparse, if needed. Then,
// initialize the bitmap.
//
// Newly-grown memory is always considered scavenged.
// Set all the bits in the scavenged bitmaps high.
for c := chunkIndex(base); c < chunkIndex(limit); c++ {
- if s.chunks[c.l1()] == nil {
+ if p.chunks[c.l1()] == nil {
// Create the necessary l2 entry.
//
// Store it atomically to avoid races with readers which
// don't acquire the heap lock.
- r := sysAlloc(unsafe.Sizeof(*s.chunks[0]), s.sysStat)
- atomic.StorepNoWB(unsafe.Pointer(&s.chunks[c.l1()]), r)
+ r := sysAlloc(unsafe.Sizeof(*p.chunks[0]), p.sysStat)
+ atomic.StorepNoWB(unsafe.Pointer(&p.chunks[c.l1()]), r)
}
- s.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
+ p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
}
// Update summaries accordingly. The grow acts like a free, so
// we need to ensure this newly-free memory is visible in the
// summaries.
- s.update(base, size/pageSize, true, false)
+ p.update(base, size/pageSize, true, false)
}
// update updates heap metadata. It must be called each time the bitmap
// is updated.
//
// If contig is true, update does some optimizations assuming that there was
// a contiguous allocation or free between addr and addr+npages. alloc indicates
// whether the operation performed was an allocation or a free.
//
-// s.mheapLock must be held.
-func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
+// p.mheapLock must be held.
+func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
// base, limit, start, and end are inclusive.
limit := base + npages*pageSize - 1
sc, ec := chunkIndex(base), chunkIndex(limit)
if sc == ec {
// Fast path: the allocation doesn't span more than one chunk,
// so update this one and if the summary didn't change, return.
- x := s.summary[len(s.summary)-1][sc]
- y := s.chunkOf(sc).summarize()
+ x := p.summary[len(p.summary)-1][sc]
+ y := p.chunkOf(sc).summarize()
if x == y {
return
}
- s.summary[len(s.summary)-1][sc] = y
+ p.summary[len(p.summary)-1][sc] = y
} else if contig {
// Slow contiguous path: the allocation spans more than one chunk
// and at least one summary is guaranteed to change.
- summary := s.summary[len(s.summary)-1]
+ summary := p.summary[len(p.summary)-1]
// Update the summary for chunk sc.
- summary[sc] = s.chunkOf(sc).summarize()
+ summary[sc] = p.chunkOf(sc).summarize()
// Update the summaries for chunks in between, which are
// either totally allocated or freed.
- whole := s.summary[len(s.summary)-1][sc+1 : ec]
+ whole := p.summary[len(p.summary)-1][sc+1 : ec]
if alloc {
// Should optimize into a memclr.
for i := range whole {
}
// Update the summary for chunk ec.
- summary[ec] = s.chunkOf(ec).summarize()
+ summary[ec] = p.chunkOf(ec).summarize()
} else {
// Slow general path: the allocation spans more than one chunk
// and at least one summary is guaranteed to change.
//
// We can't assume a contiguous allocation happened, so walk over
// every chunk in the range and manually recompute the summary.
- summary := s.summary[len(s.summary)-1]
+ summary := p.summary[len(p.summary)-1]
for c := sc; c <= ec; c++ {
- summary[c] = s.chunkOf(c).summarize()
+ summary[c] = p.chunkOf(c).summarize()
}
}
// Walk up the radix tree and update the summaries appropriately.
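// Once a level sees no changes, no coarser level can change either, so the
// changed flag lets the loop stop early.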
changed := true
- for l := len(s.summary) - 2; l >= 0 && changed; l-- {
+ for l := len(p.summary) - 2; l >= 0 && changed; l-- {
// Update summaries at level l from summaries at level l+1.
changed = false
// Iterate over each block, updating the corresponding summary in the less-granular level.
for i := lo; i < hi; i++ {
- children := s.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
+ children := p.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
sum := mergeSummaries(children, logMaxPages)
- old := s.summary[l][i]
+ old := p.summary[l][i]
if old != sum {
changed = true
- s.summary[l][i] = sum
+ p.summary[l][i] = sum
}
}
}
// allocRange marks the range of memory [base, base+npages*pageSize) as
// allocated and updates the summaries accordingly.
//
// Returns the amount of scavenged memory in bytes present in the
// allocated range.
//
-// s.mheapLock must be held.
-func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
+// p.mheapLock must be held.
+func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
limit := base + npages*pageSize - 1
sc, ec := chunkIndex(base), chunkIndex(limit)
si, ei := chunkPageIndex(base), chunkPageIndex(limit)
scav := uint(0)
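// scav counts how many pages in the allocated range were previously
// scavenged; the total is returned in bytes below.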
if sc == ec {
// The range doesn't cross any chunk boundaries.
- chunk := s.chunkOf(sc)
+ chunk := p.chunkOf(sc)
scav += chunk.scavenged.popcntRange(si, ei+1-si)
chunk.allocRange(si, ei+1-si)
} else {
// The range crosses at least one chunk boundary.
- chunk := s.chunkOf(sc)
+ chunk := p.chunkOf(sc)
scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
chunk.allocRange(si, pallocChunkPages-si)
for c := sc + 1; c < ec; c++ {
- chunk := s.chunkOf(c)
+ chunk := p.chunkOf(c)
scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
chunk.allocAll()
}
- chunk = s.chunkOf(ec)
+ chunk = p.chunkOf(ec)
scav += chunk.scavenged.popcntRange(0, ei+1)
chunk.allocRange(0, ei+1)
}
- s.update(base, npages, true, true)
+ p.update(base, npages, true, true)
return uintptr(scav) * pageSize
}
// findMappedAddr returns the smallest mapped offAddr that is
// >= addr. That is, if addr refers to mapped memory, then it is
// returned. If addr is higher than any mapped region, then
// it returns maxOffAddr.
//
-// s.mheapLock must be held.
-func (s *pageAlloc) findMappedAddr(addr offAddr) offAddr {
+// p.mheapLock must be held.
+func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
// If we're not in a test, validate first by checking mheap_.arenas.
// This is a fast path which is only safe to use outside of testing.
ai := arenaIndex(addr.addr())
- if s.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
- vAddr, ok := s.inUse.findAddrGreaterEqual(addr.addr())
+ if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
+ vAddr, ok := p.inUse.findAddrGreaterEqual(addr.addr())
if ok {
return offAddr{vAddr}
} else {
// find searches for the first (address-ordered) contiguous free region of
// npages in size and returns a base address for that region.
//
-// It uses s.searchAddr to prune its search and assumes that no palloc chunks
-// below chunkIndex(s.searchAddr) contain any free memory at all.
+// It uses p.searchAddr to prune its search and assumes that no palloc chunks
+// below chunkIndex(p.searchAddr) contain any free memory at all.
//
-// find also computes and returns a candidate s.searchAddr, which may or
-// may not prune more of the address space than s.searchAddr already does.
-// This candidate is always a valid s.searchAddr.
+// find also computes and returns a candidate p.searchAddr, which may or
+// may not prune more of the address space than p.searchAddr already does.
+// This candidate is always a valid p.searchAddr.
//
// find represents the slow path and the full radix tree search.
//
// Returns a base address of 0 on failure, in which case the candidate
// searchAddr returned is invalid and must be ignored.
//
-// s.mheapLock must be held.
-func (s *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
+// p.mheapLock must be held.
+func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
// Search algorithm.
//
// This algorithm walks each level l of the radix tree from the root level
// down to the leaf level, narrowing the search at each level to a block of
// entries that could contain a run of npages free pages.
lastSumIdx := -1
nextLevel:
- for l := 0; l < len(s.summary); l++ {
+ for l := 0; l < len(p.summary); l++ {
// For the root level, entriesPerBlock is the whole level.
entriesPerBlock := 1 << levelBits[l]
logMaxPages := levelLogPages[l]
i <<= levelBits[l]
// Slice out the block of entries we care about.
- entries := s.summary[l][i : i+entriesPerBlock]
+ entries := p.summary[l][i : i+entriesPerBlock]
// Determine j0, the first index we should start iterating from.
// The searchAddr may help us eliminate iterations if we followed the
// searchAddr on the previous level or we're on the root level, in which
// case the searchAddr should be the same as i after levelShift.
j0 := 0
- if searchIdx := offAddrToLevelIndex(l, s.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
+ if searchIdx := offAddrToLevelIndex(l, p.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
j0 = searchIdx & (entriesPerBlock - 1)
}
// We found a sufficiently large run of free pages straddling
// some boundary, so compute the address and return it.
addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
- return addr, s.findMappedAddr(firstFree.base)
+ return addr, p.findMappedAddr(firstFree.base)
}
if l == 0 {
// We're at level zero, so that means we've exhausted our search.
// lied to us. In either case, dump some useful state and throw.
print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
- print("runtime: s.searchAddr = ", hex(s.searchAddr.addr()), ", i = ", i, "\n")
+ print("runtime: p.searchAddr = ", hex(p.searchAddr.addr()), ", i = ", i, "\n")
print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
for j := 0; j < len(entries); j++ {
sum := entries[j]
// After iterating over all levels, i must contain a chunk index which
// is what the final level represents.
ci := chunkIdx(i)
- j, searchIdx := s.chunkOf(ci).find(npages, 0)
+ j, searchIdx := p.chunkOf(ci).find(npages, 0)
if j == ^uint(0) {
// We couldn't find any space in this chunk despite the summaries telling
// us it should be there. There's likely a bug, so dump some state and throw.
- sum := s.summary[len(s.summary)-1][i]
- print("runtime: summary[", len(s.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
+ sum := p.summary[len(p.summary)-1][i]
+ print("runtime: summary[", len(p.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
print("runtime: npages = ", npages, "\n")
throw("bad summary data")
}
// Since we actually searched the chunk, we may have
// found an even narrower free window.
searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
- return addr, s.findMappedAddr(firstFree.base)
+ return addr, p.findMappedAddr(firstFree.base)
}
// alloc allocates npages worth of memory from the page heap, returning the base
// address for the allocation and the amount of scavenged memory in bytes
// contained in the region [base address, base address + npages*pageSize).
//
// Returns a 0 base address on failure, in which case other returned values
// should be ignored.
//
-// s.mheapLock must be held.
-func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
+// p.mheapLock must be held.
+func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
// If the searchAddr refers to a region which has a higher address than
// any known chunk, then we know we're out of memory.
- if chunkIndex(s.searchAddr.addr()) >= s.end {
+ if chunkIndex(p.searchAddr.addr()) >= p.end {
return 0, 0
}
// If npages has a chance of fitting in the chunk where the searchAddr is,
// search it directly.
searchAddr := minOffAddr
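// pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) is the number of pages
// remaining in searchAddr's chunk, so the check below asks whether npages
// could fit there.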
- if pallocChunkPages-chunkPageIndex(s.searchAddr.addr()) >= uint(npages) {
+ if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
// npages is guaranteed to be no greater than pallocChunkPages here.
- i := chunkIndex(s.searchAddr.addr())
- if max := s.summary[len(s.summary)-1][i].max(); max >= uint(npages) {
- j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr.addr()))
+ i := chunkIndex(p.searchAddr.addr())
+ if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
+ j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
if j == ^uint(0) {
print("runtime: max = ", max, ", npages = ", npages, "\n")
- print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr.addr()), ", s.searchAddr = ", hex(s.searchAddr.addr()), "\n")
+ print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
throw("bad summary data")
}
addr = chunkBase(i) + uintptr(j)*pageSize
}
// We failed to use a searchAddr for one reason or another, so try
// the slow path.
- addr, searchAddr = s.find(npages)
+ addr, searchAddr = p.find(npages)
if addr == 0 {
if npages == 1 {
// We failed to find a single free page, the smallest unit
// of allocation. This means we know the heap is completely
// exhausted. Otherwise, the heap still might have free
// space in it, just not enough contiguous space to
// accommodate npages.
- s.searchAddr = maxSearchAddr
+ p.searchAddr = maxSearchAddr
}
return 0, 0
}
Found:
// Go ahead and actually mark the bits now that we have an address.
- scav = s.allocRange(addr, npages)
+ scav = p.allocRange(addr, npages)
// If we found a higher searchAddr, we know that all the
// heap memory before that searchAddr in an offset address space is
- // allocated, so bump s.searchAddr up to the new one.
- if s.searchAddr.lessThan(searchAddr) {
- s.searchAddr = searchAddr
+ // allocated, so bump p.searchAddr up to the new one.
+ if p.searchAddr.lessThan(searchAddr) {
+ p.searchAddr = searchAddr
}
return addr, scav
}
// free returns npages worth of memory starting at base back to the page heap.
//
-// s.mheapLock must be held.
-func (s *pageAlloc) free(base, npages uintptr) {
- // If we're freeing pages below the s.searchAddr, update searchAddr.
- if b := (offAddr{base}); b.lessThan(s.searchAddr) {
- s.searchAddr = b
+// p.mheapLock must be held.
+func (p *pageAlloc) free(base, npages uintptr) {
+ // If we're freeing pages below the p.searchAddr, update searchAddr.
+ if b := (offAddr{base}); b.lessThan(p.searchAddr) {
+ p.searchAddr = b
}
// Update the free high watermark for the scavenger.
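// The next scavenge generation uses this watermark when choosing where to
// start searching (see scavengeStartGen).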
limit := base + npages*pageSize - 1
- if offLimit := (offAddr{limit}); s.scav.freeHWM.lessThan(offLimit) {
- s.scav.freeHWM = offLimit
+ if offLimit := (offAddr{limit}); p.scav.freeHWM.lessThan(offLimit) {
+ p.scav.freeHWM = offLimit
}
if npages == 1 {
// Fast path: we're clearing a single bit, and we know exactly
// where it is, so mark it directly.
i := chunkIndex(base)
- s.chunkOf(i).free1(chunkPageIndex(base))
+ p.chunkOf(i).free1(chunkPageIndex(base))
} else {
// Slow path: we're clearing more bits so we may need to iterate.
sc, ec := chunkIndex(base), chunkIndex(limit)
si, ei := chunkPageIndex(base), chunkPageIndex(limit)
if sc == ec {
// The range doesn't cross any chunk boundaries.
- s.chunkOf(sc).free(si, ei+1-si)
+ p.chunkOf(sc).free(si, ei+1-si)
} else {
// The range crosses at least one chunk boundary.
- s.chunkOf(sc).free(si, pallocChunkPages-si)
+ p.chunkOf(sc).free(si, pallocChunkPages-si)
for c := sc + 1; c < ec; c++ {
- s.chunkOf(c).freeAll()
+ p.chunkOf(c).freeAll()
}
- s.chunkOf(ec).free(0, ei+1)
+ p.chunkOf(ec).free(0, ei+1)
}
}
- s.update(base, npages, true, false)
+ p.update(base, npages, true, false)
}
const (