From c8638498008f9874dc5a48734418e0fbea08cee9 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Fri, 24 Jul 2020 19:58:31 +0000
Subject: [PATCH] runtime: rename mcache fields to match Go style

This change renames the malloc statistics stored in the mcache that
are all named with the "local_" prefix. It also renames the largeAlloc
method to allocLarge to prevent a naming conflict with the new
largeAlloc field, and renames next_sample to nextSample so that it
does not remain the last mcache field in the old C naming style.

Change-Id: I29695cb83b397a435ede7e9ad5c3c9be72767ea3
Reviewed-on: https://go-review.googlesource.com/c/go/+/246969
Trust: Michael Knyszek
Run-TryBot: Michael Knyszek
TryBot-Result: Go Bot
Reviewed-by: Michael Pratt
---
 src/runtime/export_test.go      | 14 +++---
 src/runtime/malloc.go           | 12 ++---
 src/runtime/mcache.go           | 78 ++++++++++++++++-----------------
 src/runtime/mgc.go              |  8 ++--
 src/runtime/mgcsweep.go         |  6 +--
 src/runtime/mstats.go           | 22 +++++-----
 src/runtime/pprof/mprof_test.go |  2 +-
 7 files changed, 71 insertions(+), 71 deletions(-)

diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index d71b180f76..47cbc286f6 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -346,18 +346,18 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 			continue
 		}
 		// Collect large allocation stats.
-		largeFree += uint64(c.local_largefree)
-		slow.Frees += uint64(c.local_nlargefree)
+		largeFree += uint64(c.largeFree)
+		slow.Frees += uint64(c.largeFreeCount)
 
 		// Collect tiny allocation stats.
-		tinyAllocs += uint64(c.local_tinyallocs)
+		tinyAllocs += uint64(c.tinyAllocCount)
 
 		// Collect per-sizeclass stats.
 		for i := 0; i < _NumSizeClasses; i++ {
-			slow.Frees += uint64(c.local_nsmallfree[i])
-			bySize[i].Frees += uint64(c.local_nsmallfree[i])
-			bySize[i].Mallocs += uint64(c.local_nsmallfree[i])
-			smallFree += uint64(c.local_nsmallfree[i]) * uint64(class_to_size[i])
+			slow.Frees += uint64(c.smallFreeCount[i])
+			bySize[i].Frees += uint64(c.smallFreeCount[i])
+			bySize[i].Mallocs += uint64(c.smallFreeCount[i])
+			smallFree += uint64(c.smallFreeCount[i]) * uint64(class_to_size[i])
 		}
 	}
 	slow.Frees += tinyAllocs
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index ec601ccb39..0f48d7f68e 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1040,7 +1040,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 				// The object fits into existing tiny block.
 				x = unsafe.Pointer(c.tiny + off)
 				c.tinyoffset = off + size
-				c.local_tinyallocs++
+				c.tinyAllocCount++
 				mp.mallocing = 0
 				releasem(mp)
 				return x
@@ -1082,7 +1082,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 		}
 	} else {
 		shouldhelpgc = true
-		span = c.largeAlloc(size, needzero, noscan)
+		span = c.allocLarge(size, needzero, noscan)
 		span.freeindex = 1
 		span.allocCount = 1
 		x = unsafe.Pointer(span.base())
@@ -1111,7 +1111,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 		} else {
 			scanSize = typ.ptrdata
 		}
-		c.local_scan += scanSize
+		c.scanAlloc += scanSize
 	}
 
 	// Ensure that the stores above that initialize x to
@@ -1153,8 +1153,8 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	}
 
 	if rate := MemProfileRate; rate > 0 {
-		if rate != 1 && size < c.next_sample {
-			c.next_sample -= size
+		if rate != 1 && size < c.nextSample {
+			c.nextSample -= size
 		} else {
 			mp := acquirem()
 			profilealloc(mp, x, size)
@@ -1221,7 +1221,7 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
 			throw("profilealloc called with no P")
 		}
 	}
-	c.next_sample = nextSample()
+	c.nextSample = nextSample()
 	mProf_Malloc(x, size)
 }
 
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index b8e388cc4f..c3e0e5e1f7 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -20,8 +20,8 @@ import (
 type mcache struct {
 	// The following members are accessed on every malloc,
 	// so they are grouped here for better caching.
-	next_sample uintptr // trigger heap sample after allocating this many bytes
-	local_scan  uintptr // bytes of scannable heap allocated
+	nextSample uintptr // trigger heap sample after allocating this many bytes
+	scanAlloc  uintptr // bytes of scannable heap allocated
 
 	// Allocator cache for tiny objects w/o pointers.
 	// See "Tiny allocator" comment in malloc.go.
@@ -48,13 +48,13 @@
 	// When read with stats from other mcaches and with the world
 	// stopped, the result will accurately reflect the state of the
 	// application.
-	local_tinyallocs  uintptr                  // number of tiny allocs not counted in other stats
-	local_largealloc  uintptr                  // bytes allocated for large objects
-	local_nlargealloc uintptr                  // number of large object allocations
-	local_nsmallalloc [_NumSizeClasses]uintptr // number of allocs for small objects
-	local_largefree   uintptr                  // bytes freed for large objects (>maxsmallsize)
-	local_nlargefree  uintptr                  // number of frees for large objects (>maxsmallsize)
-	local_nsmallfree  [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
+	tinyAllocCount  uintptr                  // number of tiny allocs not counted in other stats
+	largeAlloc      uintptr                  // bytes allocated for large objects
+	largeAllocCount uintptr                  // number of large object allocations
+	smallAllocCount [_NumSizeClasses]uintptr // number of allocs for small objects
+	largeFree       uintptr                  // bytes freed for large objects (>maxSmallSize)
+	largeFreeCount  uintptr                  // number of frees for large objects (>maxSmallSize)
+	smallFreeCount  [_NumSizeClasses]uintptr // number of frees for small objects (<=maxSmallSize)
 
 	// flushGen indicates the sweepgen during which this mcache
 	// was last flushed. If flushGen != mheap_.sweepgen, the spans
@@ -103,7 +103,7 @@ func allocmcache() *mcache {
 	for i := range c.alloc {
 		c.alloc[i] = &emptymspan
 	}
-	c.next_sample = nextSample()
+	c.nextSample = nextSample()
 	return c
 }
 
@@ -134,26 +134,26 @@ func freemcache(c *mcache, recipient *mcache) {
 // donate flushes data and resources which have no global
 // pool to another mcache.
 func (c *mcache) donate(d *mcache) {
-	// local_scan is handled separately because it's not
+	// scanAlloc is handled separately because it's not
 	// like these stats -- it's used for GC pacing.
-	d.local_largealloc += c.local_largealloc
-	c.local_largealloc = 0
-	d.local_nlargealloc += c.local_nlargealloc
-	c.local_nlargealloc = 0
-	for i := range c.local_nsmallalloc {
-		d.local_nsmallalloc[i] += c.local_nsmallalloc[i]
-		c.local_nsmallalloc[i] = 0
+	d.largeAlloc += c.largeAlloc
+	c.largeAlloc = 0
+	d.largeAllocCount += c.largeAllocCount
+	c.largeAllocCount = 0
+	for i := range c.smallAllocCount {
+		d.smallAllocCount[i] += c.smallAllocCount[i]
+		c.smallAllocCount[i] = 0
 	}
-	d.local_largefree += c.local_largefree
-	c.local_largefree = 0
-	d.local_nlargefree += c.local_nlargefree
-	c.local_nlargefree = 0
-	for i := range c.local_nsmallfree {
-		d.local_nsmallfree[i] += c.local_nsmallfree[i]
-		c.local_nsmallfree[i] = 0
+	d.largeFree += c.largeFree
+	c.largeFree = 0
+	d.largeFreeCount += c.largeFreeCount
+	c.largeFreeCount = 0
+	for i := range c.smallFreeCount {
+		d.smallFreeCount[i] += c.smallFreeCount[i]
+		c.smallFreeCount[i] = 0
 	}
-	d.local_tinyallocs += c.local_tinyallocs
-	c.local_tinyallocs = 0
+	d.tinyAllocCount += c.tinyAllocCount
+	c.tinyAllocCount = 0
 }
 
 // refill acquires a new span of span class spc for c. This span will
@@ -192,16 +192,16 @@ func (c *mcache) refill(spc spanClass) {
 
 	// Assume all objects from this span will be allocated in the
 	// mcache. If it gets uncached, we'll adjust this.
-	c.local_nsmallalloc[spc.sizeclass()] += uintptr(s.nelems) - uintptr(s.allocCount)
+	c.smallAllocCount[spc.sizeclass()] += uintptr(s.nelems) - uintptr(s.allocCount)
 
 	// Update heap_live with the same assumption.
 	usedBytes := uintptr(s.allocCount) * s.elemsize
 	atomic.Xadd64(&memstats.heap_live, int64(s.npages*pageSize)-int64(usedBytes))
 
-	// While we're here, flush local_scan, since we have to call
+	// While we're here, flush scanAlloc, since we have to call
 	// revise anyway.
-	atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
-	c.local_scan = 0
+	atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
+	c.scanAlloc = 0
 
 	if trace.enabled {
 		// heap_live changed.
@@ -215,8 +215,8 @@
 	c.alloc[spc] = s
 }
 
-// largeAlloc allocates a span for a large object.
-func (c *mcache) largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
+// allocLarge allocates a span for a large object.
+func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
 	if size+_PageSize < size {
 		throw("out of memory")
 	}
@@ -235,8 +235,8 @@
 	if s == nil {
 		throw("out of memory")
 	}
-	c.local_largealloc += npages * pageSize
-	c.local_nlargealloc++
+	c.largeAlloc += npages * pageSize
+	c.largeAllocCount++
 
 	// Update heap_live and revise pacing if needed.
 	atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
@@ -257,9 +257,9 @@
 }
 
 func (c *mcache) releaseAll() {
-	// Take this opportunity to flush local_scan.
-	atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
-	c.local_scan = 0
+	// Take this opportunity to flush scanAlloc.
+	atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
+	c.scanAlloc = 0
 
 	sg := mheap_.sweepgen
 	for i := range c.alloc {
@@ -267,7 +267,7 @@
 		if s != &emptymspan {
-			// Adjust nsmallalloc in case the span wasn't fully allocated.
+			// Adjust smallAllocCount in case the span wasn't fully allocated.
 			n := uintptr(s.nelems) - uintptr(s.allocCount)
-			c.local_nsmallalloc[spanClass(i).sizeclass()] -= n
+			c.smallAllocCount[spanClass(i).sizeclass()] -= n
 			if s.sweepgen != sg+1 {
 				// refill conservatively counted unallocated slots in heap_live.
 				// Undo this.
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 55554c117c..540c376f1c 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -2086,16 +2086,16 @@ func gcMark(start_time int64) {
 	// Update the marked heap stat.
 	memstats.heap_marked = work.bytesMarked
 
-	// Flush local_scan from each mcache since we're about to modify
-	// heap_scan directly. If we were to flush this later, then local_scan
+	// Flush scanAlloc from each mcache since we're about to modify
+	// heap_scan directly. If we were to flush this later, then scanAlloc
 	// might have incorrect information.
 	for _, p := range allp {
 		c := p.mcache
 		if c == nil {
 			continue
 		}
-		memstats.heap_scan += uint64(c.local_scan)
-		c.local_scan = 0
+		memstats.heap_scan += uint64(c.scanAlloc)
+		c.scanAlloc = 0
 	}
 
 	// Update other GC heap size stats. This must happen after
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 6b8c56ce35..7103b08455 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -503,7 +503,7 @@ func (s *mspan) sweep(preserve bool) bool {
 			// wasn't totally filled, but then swept, still has all of its
 			// free slots zeroed.
 			s.needzero = 1
-			c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
+			c.smallFreeCount[spc.sizeclass()] += uintptr(nfreed)
 		}
 		if !preserve {
 			// The caller may not have removed this span from whatever
@@ -548,8 +548,8 @@
 		} else {
 			mheap_.freeSpan(s)
 		}
-		c.local_nlargefree++
-		c.local_largefree += size
+		c.largeFreeCount++
+		c.largeFree += size
 		return true
 	}
 
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 5eeb173640..64687c24e5 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -565,25 +565,25 @@ func updatememstats() {
 			continue
 		}
 		// Collect large allocation stats.
-		memstats.nmalloc += uint64(c.local_nlargealloc)
-		totalAlloc += uint64(c.local_largealloc)
-		totalFree += uint64(c.local_largefree)
-		memstats.nfree += uint64(c.local_nlargefree)
+		memstats.nmalloc += uint64(c.largeAllocCount)
+		totalAlloc += uint64(c.largeAlloc)
+		totalFree += uint64(c.largeFree)
+		memstats.nfree += uint64(c.largeFreeCount)
 
 		// Collect tiny allocation stats.
-		memstats.tinyallocs += uint64(c.local_tinyallocs)
+		memstats.tinyallocs += uint64(c.tinyAllocCount)
 
 		// Collect per-sizeclass stats.
 		for i := 0; i < _NumSizeClasses; i++ {
 			// Malloc stats.
-			memstats.nmalloc += uint64(c.local_nsmallalloc[i])
-			memstats.by_size[i].nmalloc += uint64(c.local_nsmallalloc[i])
-			totalAlloc += uint64(c.local_nsmallalloc[i]) * uint64(class_to_size[i])
+			memstats.nmalloc += uint64(c.smallAllocCount[i])
+			memstats.by_size[i].nmalloc += uint64(c.smallAllocCount[i])
+			totalAlloc += uint64(c.smallAllocCount[i]) * uint64(class_to_size[i])
 
 			// Free stats.
-			memstats.nfree += uint64(c.local_nsmallfree[i])
-			memstats.by_size[i].nfree += uint64(c.local_nsmallfree[i])
-			smallFree += uint64(c.local_nsmallfree[i]) * uint64(class_to_size[i])
+			memstats.nfree += uint64(c.smallFreeCount[i])
+			memstats.by_size[i].nfree += uint64(c.smallFreeCount[i])
+			smallFree += uint64(c.smallFreeCount[i]) * uint64(class_to_size[i])
 		}
 	}
 
diff --git a/src/runtime/pprof/mprof_test.go b/src/runtime/pprof/mprof_test.go
index f253f07def..c11a45fd69 100644
--- a/src/runtime/pprof/mprof_test.go
+++ b/src/runtime/pprof/mprof_test.go
@@ -70,7 +70,7 @@ func TestMemoryProfiler(t *testing.T) {
 		runtime.MemProfileRate = oldRate
 	}()
 
-	// Allocate a meg to ensure that mcache.next_sample is updated to 1.
+	// Allocate a meg to ensure that mcache.nextSample is updated to 1.
 	for i := 0; i < 1024; i++ {
 		memSink = make([]byte, 1024)
 	}
-- 
2.48.1
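The conflict behind the largeAlloc -> allocLarge rename is a Go language
rule: a type cannot have a field and a method with the same name. Once
local_largealloc becomes the largeAlloc field, an mcache method named
largeAlloc would no longer compile. A minimal sketch of the collision,
using a hypothetical cache type rather than the runtime's mcache:

	package main

	// cache stands in for mcache, reduced to the two names in question.
	type cache struct {
		largeAlloc uintptr // bytes allocated for large objects
	}

	// Uncommenting this method alongside the field above fails to
	// compile with an error like:
	//
	//	type cache has both field and method named largeAlloc
	//
	// func (c *cache) largeAlloc(size uintptr) {}

	// allocLarge sidesteps the collision, mirroring the rename in this
	// patch.
	func (c *cache) allocLarge(size uintptr) {
		c.largeAlloc += size
	}

	func main() {
		c := new(cache)
		c.allocLarge(1 << 20) // record a 1 MiB large allocation
	}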
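The nextSample field renamed here is the heap profiler's byte countdown:
in the mallocgc hunk above, each allocation either shrinks the budget
(c.nextSample -= size) or triggers profilealloc, which records a sample
and draws a fresh nextSample. A simplified sketch of that countdown,
using a fixed hypothetical sampling period in place of the runtime's
exponential draw (and omitting the rate == 1 sample-everything case):

	package main

	import "fmt"

	// sampler mimics the mcache.nextSample countdown; the names and the
	// fixed period are illustrative, not the runtime's implementation.
	type sampler struct {
		nextSample uintptr // bytes left until the next heap sample
	}

	const period = 512 * 1024 // stand-in for runtime.MemProfileRate

	// malloc reports whether an allocation of size bytes gets sampled,
	// updating the countdown the same way mallocgc does.
	func (s *sampler) malloc(size uintptr) bool {
		if size < s.nextSample {
			s.nextSample -= size
			return false
		}
		s.nextSample = period // the runtime redraws this randomly
		return true
	}

	func main() {
		s := &sampler{nextSample: period}
		sampled := 0
		for i := 0; i < 4096; i++ {
			if s.malloc(1 << 10) { // 4096 one-KiB allocations
				sampled++
			}
		}
		// ~8 samples: 4 MiB allocated / 512 KiB sampling period.
		fmt.Println("sampled:", sampled)
	}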