if freeIndex == s.nelems {
// The span is full.
- if uintptr(s.ref) != s.nelems {
- throw("s.ref != s.nelems && freeIndex == s.nelems")
+ if uintptr(s.allocCount) != s.nelems {
+ throw("s.allocCount != s.nelems && freeIndex == s.nelems")
}
systemstack(func() {
c.refill(int32(sizeclass))
})
v = gclinkptr(freeIndex*s.elemsize + s.base())
// Advance the freeIndex.
s.freeindex = freeIndex + 1
- s.ref++
- if uintptr(s.ref) > s.nelems {
- throw("s.ref > s.nelems")
+ s.allocCount++
+ if uintptr(s.allocCount) > s.nelems {
+ throw("s.allocCount > s.nelems")
}
return
}
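
Everything above hinges on the invariant the rename makes explicit: s.allocCount counts allocated objects, stays in step with the free-index scan, and can never exceed s.nelems. A minimal standalone sketch of that invariant, with toy types rather than the runtime's actual mspan:

    package main

    import "fmt"

    type span struct {
        nelems     uintptr
        freeindex  uintptr // next slot to start scanning from
        allocCount uint16  // number of allocated objects in the span
        allocated  []bool  // stand-in for the runtime's allocation bitmap
    }

    // nextFree mirrors the shape of the allocation path above: scan from
    // freeindex, claim the first free slot, and keep allocCount in sync.
    func (s *span) nextFree() (uintptr, bool) {
        for i := s.freeindex; i < s.nelems; i++ {
            if !s.allocated[i] {
                s.allocated[i] = true
                s.freeindex = i + 1
                s.allocCount++
                if uintptr(s.allocCount) > s.nelems {
                    panic("s.allocCount > s.nelems")
                }
                return i, false
            }
        }
        // Span is full: allocCount must equal nelems, as the throw above checks.
        if uintptr(s.allocCount) != s.nelems {
            panic("s.allocCount != s.nelems && freeIndex == s.nelems")
        }
        return 0, true
    }

    func main() {
        s := &span{nelems: 4, allocated: make([]bool, 4)}
        for {
            i, full := s.nextFree()
            if full {
                break
            }
            fmt.Println("slot", i, "allocCount", s.allocCount)
        }
        fmt.Println("span full:", s.allocCount, "of", s.nelems)
    }
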
// Return the current cached span to the central lists.
s := c.alloc[sizeclass]
- if uintptr(s.ref) != s.nelems {
+ if uintptr(s.allocCount) != s.nelems {
throw("refill of span with free space remaining")
}
throw("out of memory")
}
- if uintptr(s.ref) == s.nelems {
+ if uintptr(s.allocCount) == s.nelems {
throw("span has no free space")
}
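
refill only ever swaps spans at the two extremes, which is what the pair of throws above enforces: the outgoing cached span must be completely allocated, and its replacement must still have free space. A hedged sketch of just those two checks (simplified types and an invented refill helper, not the runtime's API):

    package main

    import "fmt"

    type cachedSpan struct {
        nelems     uintptr
        allocCount uint16
    }

    // refill returns fresh in place of old, enforcing both checks from
    // the diff: old must be full, fresh must not be.
    func refill(old, fresh *cachedSpan) (*cachedSpan, error) {
        if uintptr(old.allocCount) != old.nelems {
            return nil, fmt.Errorf("refill of span with free space remaining")
        }
        if uintptr(fresh.allocCount) == fresh.nelems {
            return nil, fmt.Errorf("span has no free space")
        }
        return fresh, nil
    }

    func main() {
        old := &cachedSpan{nelems: 128, allocCount: 128}
        fresh := &cachedSpan{nelems: 128}
        s, err := refill(old, fresh)
        fmt.Println(s, err)
    }
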
// c is unlocked.
havespan:
cap := int32((s.npages << _PageShift) / s.elemsize)
- n := cap - int32(s.ref)
+ n := cap - int32(s.allocCount)
if n == 0 {
- throw("empty span")
+ throw("span has no free objects")
}
- usedBytes := uintptr(s.ref) * s.elemsize
+ usedBytes := uintptr(s.allocCount) * s.elemsize
if usedBytes > 0 {
reimburseSweepCredit(usedBytes)
}
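
The accounting above is easy to verify with concrete numbers: capacity is the span's byte size over elemsize, the free-object count is capacity minus allocCount, and the bytes already allocated are what get reimbursed as sweep credit. A worked example with made-up values (one 8KB page, 64-byte objects):

    package main

    import "fmt"

    func main() {
        const pageShift = 13 // assume 8KB pages for the example
        var (
            npages     uintptr = 1
            elemsize   uintptr = 64
            allocCount uint16  = 100
        )
        capacity := int32((npages << pageShift) / elemsize) // 128 objects
        n := capacity - int32(allocCount)                   // 28 free objects
        usedBytes := uintptr(allocCount) * elemsize         // 6400 bytes already allocated
        fmt.Println("cap:", capacity, "free:", n, "usedBytes:", usedBytes)
    }
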
s.incache = false
- if s.ref == 0 {
- throw("uncaching full span")
+ if s.allocCount == 0 {
+ throw("uncaching span but s.allocCount == 0")
}
cap := int32((s.npages << _PageShift) / s.elemsize)
- n := cap - int32(s.ref)
+ n := cap - int32(s.allocCount)
if n > 0 {
c.empty.remove(s)
c.nonempty.insert(s)
throw("freeSpan given cached span")
}
- s.ref -= uint16(n)
+ s.allocCount -= uint16(n)
if preserve {
// preserve is set only when called from MCentral_CacheSpan above,
// the span must be in the empty list.
atomic.Store(&s.sweepgen, mheap_.sweepgen)
- if s.ref != 0 {
+ if s.allocCount != 0 {
unlock(&c.lock)
return false
}
sweepgen uint32
divMul uint32 // for divide by elemsize - divMagic.mul
- ref uint16 // capacity - number of objects in freelist
+ allocCount uint16 // number of allocated objects
sizeclass uint8 // size class
incache bool // being used by an mcache
state uint8 // _MSpanInUse etc
// Record span info, because gc needs to be
// able to map interior pointer to containing span.
atomic.Store(&s.sweepgen, h.sweepgen)
s.state = _MSpanInUse
- s.ref = 0
+ s.allocCount = 0
s.sizeclass = uint8(sizeclass)
if sizeclass == 0 {
s.elemsize = s.npages << _PageShift
if s != nil {
s.state = _MSpanStack
s.stackfreelist = 0
- s.ref = 0
+ s.allocCount = 0
memstats.stacks_inuse += uint64(s.npages << _PageShift)
}
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
switch s.state {
case _MSpanStack:
- if s.ref != 0 {
+ if s.allocCount != 0 {
throw("MHeap_FreeSpanLocked - invalid stack free")
}
case _MSpanInUse:
- if s.ref != 0 || s.sweepgen != h.sweepgen {
- print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
+ if s.allocCount != 0 || s.sweepgen != h.sweepgen {
+ print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
throw("MHeap_FreeSpanLocked - invalid free")
}
h.pagesInUse -= uint64(s.npages)
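
Both arms of the switch enforce the same rule: whether the span held heap objects or stack memory, it may be returned to the heap only once allocCount has dropped to zero. A compact restatement with illustrative types:

    package main

    import "fmt"

    type spanState uint8

    const (
        spanStack spanState = iota
        spanInUse
    )

    type heapSpan struct {
        state      spanState
        allocCount uint16
    }

    // checkFree mirrors the switch above: freeing a span with live
    // objects is a fatal error in either state.
    func checkFree(s *heapSpan) error {
        switch s.state {
        case spanStack, spanInUse:
            if s.allocCount != 0 {
                return fmt.Errorf("invalid free: allocCount = %d", s.allocCount)
            }
        }
        return nil
    }

    func main() {
        fmt.Println(checkFree(&heapSpan{state: spanInUse, allocCount: 3}))
        fmt.Println(checkFree(&heapSpan{state: spanStack, allocCount: 0}))
    }
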
span.list = nil
span.start = start
span.npages = npages
- span.ref = 0
+ span.allocCount = 0
span.sizeclass = 0
span.incache = false
span.elemsize = 0
if s.sizeclass == 0 {
memstats.nmalloc++
memstats.alloc += uint64(s.elemsize)
} else {
- memstats.nmalloc += uint64(s.ref)
- memstats.by_size[s.sizeclass].nmalloc += uint64(s.ref)
- memstats.alloc += uint64(s.ref) * uint64(s.elemsize)
+ memstats.nmalloc += uint64(s.allocCount)
+ memstats.by_size[s.sizeclass].nmalloc += uint64(s.allocCount)
+ memstats.alloc += uint64(s.allocCount) * uint64(s.elemsize)
}
}
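
Large-object spans (sizeclass 0) hold exactly one object, so the counters advance by one per span; for small-object spans, allocCount feeds directly into nmalloc and the byte totals. A self-contained sketch of that aggregation (field names invented for the example):

    package main

    import "fmt"

    type spanStat struct {
        sizeclass  uint8
        elemsize   uint64
        allocCount uint16
    }

    func main() {
        spans := []spanStat{
            {sizeclass: 0, elemsize: 16384, allocCount: 1}, // large object: one alloc per span
            {sizeclass: 5, elemsize: 64, allocCount: 100},  // small objects: allocCount of them
        }
        var nmalloc, alloc uint64
        for _, s := range spans {
            if s.sizeclass == 0 {
                nmalloc++
                alloc += s.elemsize
            } else {
                nmalloc += uint64(s.allocCount)
                alloc += uint64(s.allocCount) * s.elemsize
            }
        }
        fmt.Println("nmalloc:", nmalloc, "alloc:", alloc)
    }
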
unlock(&mheap_.lock)
if s == nil {
throw("out of memory")
}
- if s.ref != 0 {
- throw("bad ref")
+ if s.allocCount != 0 {
+ throw("bad allocCount")
}
if s.stackfreelist.ptr() != nil {
throw("bad stackfreelist")
throw("span has no free stacks")
}
s.stackfreelist = x.ptr().next
- s.ref++
+ s.allocCount++
if s.stackfreelist.ptr() == nil {
// all stacks in s are allocated.
list.remove(s)
}
x.ptr().next = s.stackfreelist
s.stackfreelist = x
- s.ref--
- if gcphase == _GCoff && s.ref == 0 {
+ s.allocCount--
+ if gcphase == _GCoff && s.allocCount == 0 {
// Span is completely free. Return it to the heap
// immediately if we're sweeping.
list := &stackpool[order]
for s := list.first; s != nil; {
next := s.next
- if s.ref == 0 {
+ if s.allocCount == 0 {
list.remove(s)
s.stackfreelist = 0
mheap_.freeStack(s)
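
Taken together, the stack hunks use allocCount exactly like the heap does: popping from stackfreelist increments it, pushing decrements it, and a span whose count reaches zero while GC is off can be returned to the heap. A toy model of that life cycle, using a slice in place of the linked free list and invented names throughout:

    package main

    import "fmt"

    // stackSpan is an illustrative stand-in for an mspan used by the
    // stack allocator; the runtime uses a linked gclinkptr list, not a
    // slice.
    type stackSpan struct {
        freelist   []int
        allocCount uint16
    }

    // alloc pops a free stack and bumps allocCount, like stackpoolalloc.
    func (s *stackSpan) alloc() int {
        if len(s.freelist) == 0 {
            panic("span has no free stacks")
        }
        x := s.freelist[len(s.freelist)-1]
        s.freelist = s.freelist[:len(s.freelist)-1]
        s.allocCount++
        return x
    }

    // free pushes a stack back and drops allocCount, like stackpoolfree.
    func (s *stackSpan) free(x int) {
        s.freelist = append(s.freelist, x)
        s.allocCount--
    }

    func main() {
        s := &stackSpan{freelist: []int{0, 1, 2, 3}}
        a, b := s.alloc(), s.alloc()
        fmt.Println("after two allocs, allocCount =", s.allocCount)
        s.free(a)
        s.free(b)
        if s.allocCount == 0 {
            fmt.Println("span completely free; eligible to go back to the heap")
        }
    }
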