From 66e849b168eef36a4159a4b038fe89eecd2f22e3 Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Tue, 4 Oct 2016 15:56:19 -0400
Subject: [PATCH] runtime: eliminate mheap.nspan and use range loops

This was necessary in the C days when allspans was an mspan**, but
now that allspans is a Go slice, this is redundant with len(allspans)
and we can use range loops over allspans.

Change-Id: Ie1dc39611e574e29a896e01690582933f4c5be7e
Reviewed-on: https://go-review.googlesource.com/30531
Run-TryBot: Austin Clements
TryBot-Result: Gobot Gobot
Reviewed-by: Rick Hudson
---
 src/runtime/heapdump.go | 14 ++++----------
 src/runtime/mheap.go    |  4 ----
 src/runtime/mstats.go   |  3 +--
 3 files changed, 5 insertions(+), 16 deletions(-)

diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 3ad83532cf..8cdccb877a 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -437,9 +437,7 @@ func dumproots() {
 	dumpfields(firstmoduledata.gcbssmask)
 
 	// MSpan.types
-	allspans := mheap_.allspans
-	for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
-		s := allspans[spanidx]
+	for _, s := range mheap_.allspans {
 		if s.state == _MSpanInUse {
 			// Finalizers
 			for sp := s.specials; sp != nil; sp = sp.next {
@@ -462,8 +460,7 @@ func dumproots() {
 var freemark [_PageSize / 8]bool
 
 func dumpobjs() {
-	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
-		s := mheap_.allspans[i]
+	for _, s := range mheap_.allspans {
 		if s.state != _MSpanInUse {
 			continue
 		}
@@ -608,9 +605,7 @@ func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs,
 
 func dumpmemprof() {
 	iterate_memprof(dumpmemprof_callback)
-	allspans := mheap_.allspans
-	for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
-		s := allspans[spanidx]
+	for _, s := range mheap_.allspans {
 		if s.state != _MSpanInUse {
 			continue
 		}
@@ -631,8 +626,7 @@ var dumphdr = []byte("go1.7 heap dump\n")
 
 func mdump() {
 	// make sure we're done sweeping
-	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
-		s := mheap_.allspans[i]
+	for _, s := range mheap_.allspans {
 		if s.state == _MSpanInUse {
 			s.ensureSwept()
 		}
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 9d02343dbe..f6ad4a170e 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -48,14 +48,11 @@ type mheap struct {
 	// must ensure that allocation cannot happen around the
 	// access (since that may free the backing store).
 	allspans []*mspan // all spans out there
-	nspan    uint32
 
 	// span lookup
 	spans        **mspan
 	spans_mapped uintptr
 
-	_ uint32 // align uint64 fields on 32-bit for atomics
-
 	// Proportional sweep
 	pagesInUse     uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
 	spanBytesAlloc uint64 // bytes of spans allocated this cycle; updated atomically
@@ -282,7 +279,6 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
 		}
 	}
 	h.allspans = append(h.allspans, s)
-	h.nspan = uint32(len(h.allspans))
 }
 
 // inheap reports whether b is a pointer into a (potentially dead) heap object.
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 38ae45bd1d..f921f02f5a 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -528,8 +528,7 @@ func updatememstats(stats *gcstats) {
 
 	// Scan all spans and count number of alive objects.
 	lock(&mheap_.lock)
-	for i := uint32(0); i < mheap_.nspan; i++ {
-		s := mheap_.allspans[i]
+	for _, s := range mheap_.allspans {
 		if s.state != mSpanInUse {
 			continue
 		}
-- 
2.48.1
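
For context, a minimal standalone sketch (not part of the patch) of the loop rewrite
applied throughout: a counting loop bounded by a separately maintained length field
versus a range loop over the slice itself. The spanTable type and its fields below are
hypothetical stand-ins for mheap.allspans and mheap.nspan, chosen only for illustration.

package main

import "fmt"

// spanTable mimics the relevant shape of mheap: a slice plus a
// redundant element count kept alongside it.
type spanTable struct {
	items []string // analogous to mheap.allspans (a Go slice)
	count uint32   // analogous to mheap.nspan; redundant with len(items)
}

func main() {
	t := spanTable{items: []string{"a", "b", "c"}}
	t.count = uint32(len(t.items)) // extra bookkeeping the patch removes

	// Old style: index loop bounded by the separate count field.
	for i := uint32(0); i < t.count; i++ {
		fmt.Println(t.items[i])
	}

	// New style: range directly over the slice; its length is already
	// tracked by the slice header, so no separate count is needed.
	for _, s := range t.items {
		fmt.Println(s)
	}
}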