}
} else {
shouldhelpgc = true
- systemstack(func() {
- span = largeAlloc(size, needzero, noscan)
- })
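+ // Note: mheap_.alloc switches to the system stack itself before
+ // locking the heap, so the explicit systemstack wrapper is no
+ // longer needed here.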
+ span = c.largeAlloc(size, needzero, noscan)
span.freeindex = 1
span.allocCount = 1
x = unsafe.Pointer(span.base())
size = span.elemsize
}
-func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
- // print("largeAlloc size=", size, "\n")
-
- if size+_PageSize < size {
- throw("out of memory")
- }
- npages := size >> _PageShift
- if size&_PageMask != 0 {
- npages++
- }
-
- // Deduct credit for this span allocation and sweep if
- // necessary. mHeap_Alloc will also sweep npages, so this only
- // pays the debt down to npage pages.
- deductSweepCredit(npages*_PageSize, npages)
-
- spc := makeSpanClass(0, noscan)
- s := mheap_.alloc(npages, spc, needzero)
- if s == nil {
- throw("out of memory")
- }
- // Put the large span in the mcentral swept list so that it's
- // visible to the background sweeper.
- mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
- s.limit = s.base() + size
- heapBitsForAddr(s.base()).initSpan(s)
- return s
-}
-
// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function
func newobject(typ *_type) unsafe.Pointer {
return mallocgc(typ.size, typ, true)
}
// Per-thread (in Go, per-P) cache for small objects.
+// This includes a small object cache and local allocation stats.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
// When read with stats from other mcaches and with the world
// stopped, the result will accurately reflect the state of the
// application.
- local_largefree uintptr // bytes freed for large objects (>maxsmallsize)
- local_nlargefree uintptr // number of frees for large objects (>maxsmallsize)
- local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
+ local_largealloc uintptr // bytes allocated for large objects
+ local_nlargealloc uintptr // number of large object allocations
+ local_largefree uintptr // bytes freed for large objects (>maxsmallsize)
+ local_nlargefree uintptr // number of frees for large objects (>maxsmallsize)
+ local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
// flushGen indicates the sweepgen during which this mcache
// was last flushed. If flushGen != mheap_.sweepgen, the spans
// in this mcache are stale and need to be flushed so they
// can be swept. This is done in acquirep.
flushGen uint32
}

// donate flushes data and resources which have no global
// pool to another mcache.
func (c *mcache) donate(d *mcache) {
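+ // Hand off the new large-allocation counters to d and reset them
+ // on c, mirroring the free counters below.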
+ d.local_largealloc += c.local_largealloc
+ c.local_largealloc = 0
+ d.local_nlargealloc += c.local_nlargealloc
+ c.local_nlargealloc = 0
d.local_largefree += c.local_largefree
c.local_largefree = 0
d.local_nlargefree += c.local_nlargefree
c.local_nlargefree = 0
for i := range c.local_nsmallfree {
d.local_nsmallfree[i] += c.local_nsmallfree[i]
c.local_nsmallfree[i] = 0
}
}
c.alloc[spc] = s
}
+// largeAlloc allocates a span for a large object.
+func (c *mcache) largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
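+ // Guard against uintptr overflow when rounding size up to a
+ // whole number of pages below.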
+ if size+_PageSize < size {
+ throw("out of memory")
+ }
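+ // Round the requested size up to a whole number of pages.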
+ npages := size >> _PageShift
+ if size&_PageMask != 0 {
+ npages++
+ }
+
+ // Deduct credit for this span allocation and sweep if
+ // necessary. mHeap_Alloc will also sweep npages, so this only
+ // pays the debt down to npage pages.
+ deductSweepCredit(npages*_PageSize, npages)
+
+ spc := makeSpanClass(0, noscan)
+ s := mheap_.alloc(npages, spc, needzero)
+ if s == nil {
+ throw("out of memory")
+ }
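+ // Account for the allocation in this P's local stats; these are
+ // merged into the global stats later (see donate above), instead
+ // of being updated on mheap_ under the heap lock.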
+ c.local_largealloc += npages * pageSize
+ c.local_nlargealloc++
+
+ // Update heap_live and revise pacing if needed.
+ atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
+ if trace.enabled {
+ // Trace that a heap alloc occurred because heap_live changed.
+ traceHeapAlloc()
+ }
+ if gcBlackenEnabled != 0 {
+ gcController.revise()
+ }
+
+ // Put the large span in the mcentral swept list so that it's
+ // visible to the background sweeper.
+ mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
+ s.limit = s.base() + size
+ heapBitsForAddr(s.base()).initSpan(s)
+ return s
+}
+
func (c *mcache) releaseAll() {
for i := range c.alloc {
s := c.alloc[i]
// This is accessed atomically.
reclaimCredit uintptr
- // Malloc stats.
- largealloc uint64 // bytes allocated for large objects
- nlargealloc uint64 // number of large object allocations
-
// arenas is the heap arena map. It points to the metadata for
// the heap for every arena frame of the entire usable virtual
// address space.
memstats.tinyallocs += uint64(c.local_tinyallocs)
c.local_tinyallocs = 0
- // Do some additional accounting if it's a large allocation.
- if spanclass.sizeclass() == 0 {
- mheap_.largealloc += uint64(npages * pageSize)
- mheap_.nlargealloc++
- atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
- }
-
- // Either heap_live or heap_scan could have been updated.
+ // heap_scan could have been updated.
if gcBlackenEnabled != 0 {
gcController.revise()
}
// Update related page sweeper stats.
atomic.Xadd64(&h.pagesInUse, int64(npages))
-
- if trace.enabled {
- // Trace that a heap alloc occurred.
- traceHeapAlloc()
- }
}
// Make sure the newly allocated span will be observed