dumpfields(firstmoduledata.gcbssmask)
// MSpan.types
- allspans := mheap_.allspans
- for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
- s := allspans[spanidx]
+ for _, s := range mheap_.allspans {
if s.state == _MSpanInUse {
// Finalizers
for sp := s.specials; sp != nil; sp = sp.next {
var freemark [_PageSize / 8]bool
func dumpobjs() {
- for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
- s := mheap_.allspans[i]
+ for _, s := range mheap_.allspans {
if s.state != _MSpanInUse {
continue
}
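
The pattern in each of these hunks is the same: the indexed loop needs mheap_.nspan to be kept in lockstep with len(mheap_.allspans), while the range form reads the length from the slice itself. A minimal standalone sketch of the two forms (span and inUse are placeholder names, not the runtime's types):

    package main

    import "fmt"

    type span struct{ inUse bool }

    func main() {
        spans := []*span{{true}, {false}, {true}}

        // Old form: a separately maintained uint32 counter indexes the slice.
        nspan := uint32(len(spans))
        for i := uint32(0); i < nspan; i++ {
            fmt.Println(spans[i].inUse)
        }

        // New form: range the slice; the length comes from the slice header,
        // so there is no counter to keep in sync.
        for _, s := range spans {
            fmt.Println(s.inUse)
        }
    }

Both loops visit the same elements; the range form simply removes the redundant counter and the uint32 conversions around it.
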
func dumpmemprof() {
iterate_memprof(dumpmemprof_callback)
- allspans := mheap_.allspans
- for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
- s := allspans[spanidx]
+ for _, s := range mheap_.allspans {
if s.state != _MSpanInUse {
continue
}
func mdump() {
// make sure we're done sweeping
- for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
- s := mheap_.allspans[i]
+ for _, s := range mheap_.allspans {
if s.state == _MSpanInUse {
s.ensureSwept()
}
// Accesses during STW might not hold the lock, but
// must ensure that allocation cannot happen around the
// access (since that may free the backing store).
allspans []*mspan // all spans out there
- nspan uint32
// span lookup
spans **mspan
spans_mapped uintptr
- _ uint32 // align uint64 fields on 32-bit for atomics
-
// Proportional sweep
pagesInUse uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
spanBytesAlloc uint64 // bytes of spans allocated this cycle; updated atomically
}
}
h.allspans = append(h.allspans, s)
- h.nspan = uint32(len(h.allspans))
}
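
With the counter gone, recordspan no longer has a second assignment to forget: append keeps len(h.allspans) current by itself. A minimal sketch of the invariant, assuming stand-in mheap/mspan types rather than the runtime's:

    package main

    import "fmt"

    type mspan struct{}

    type mheap struct {
        allspans []*mspan
    }

    // record appends a span. len(h.allspans) is the span count; there is
    // no separate field that could drift out of sync with the slice.
    func (h *mheap) record(s *mspan) {
        h.allspans = append(h.allspans, s)
    }

    func main() {
        var h mheap
        h.record(new(mspan))
        h.record(new(mspan))
        fmt.Println(len(h.allspans)) // 2
    }
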
// inheap reports whether b is a pointer into a (potentially dead) heap object.
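
For context on this doc comment: in the runtime of this era, inheap first rejects pointers outside the heap arena bounds and only then consults the span table. A much-simplified sketch of the bounds step alone (illustrative names, not the runtime's API):

    package main

    import "fmt"

    // inHeapRange mirrors only inheap's first step, the arena bounds
    // test. The real function goes on to look up the span containing b
    // and to check that the span is in use.
    func inHeapRange(b, arenaStart, arenaUsed uintptr) bool {
        return b != 0 && b >= arenaStart && b < arenaUsed
    }

    func main() {
        fmt.Println(inHeapRange(0x1000, 0x800, 0x2000)) // true
        fmt.Println(inHeapRange(0x4000, 0x800, 0x2000)) // false
    }
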
// Scan all spans and count number of alive objects.
lock(&mheap_.lock)
- for i := uint32(0); i < mheap_.nspan; i++ {
- s := mheap_.allspans[i]
+ for _, s := range mheap_.allspans {
if s.state != mSpanInUse {
continue
}
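
This scan holds mheap_.lock for the reason given in the allspans comment above: if allocation grows allspans during the walk, the old backing store can be freed out from under the iterator. A minimal sketch of the lock-guarded scan, with sync.Mutex standing in for the runtime's mheap.lock:

    package main

    import (
        "fmt"
        "sync"
    )

    type mspan struct{ inUse bool }

    type heap struct {
        lock     sync.Mutex
        allspans []*mspan
    }

    // countInUse walks allspans under the heap lock so a concurrent
    // append cannot reallocate (and free) the backing array mid-walk.
    func (h *heap) countInUse() int {
        h.lock.Lock()
        defer h.lock.Unlock()
        n := 0
        for _, s := range h.allspans {
            if s.inUse {
                n++
            }
        }
        return n
    }

    func main() {
        h := &heap{allspans: []*mspan{{true}, {false}, {true}}}
        fmt.Println(h.countInUse()) // 2
    }
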