}
// Initialize a single central free list.
-func mCentral_Init(c *mcentral, sizeclass int32) {
+func (c *mcentral) init(sizeclass int32) {
c.sizeclass = sizeclass
- mSpanList_Init(&c.nonempty)
- mSpanList_Init(&c.empty)
+ c.nonempty.init()
+ c.empty.init()
}
// Allocate a span to use in an MCache.
-func mCentral_CacheSpan(c *mcentral) *mspan {
+func (c *mcentral) cacheSpan() *mspan {
// Deduct credit for this span allocation and sweep if necessary.
deductSweepCredit(uintptr(class_to_size[c.sizeclass]), 0)
var s *mspan
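// sg is the heap's sweep generation (mheap_.sweepgen): sweepgen == sg-2 means
// the span still needs sweeping, sg-1 means it is currently being swept, and
// sg means it is swept and ready to use. The heap's sweepgen is incremented
// by 2 after every GC.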
for s = c.nonempty.first; s != nil; s = s.next {
if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
- mSpanList_Remove(&c.nonempty, s)
- mSpanList_InsertBack(&c.empty, s)
+ c.nonempty.remove(s)
+ c.empty.insertBack(s)
unlock(&c.lock)
- mSpan_Sweep(s, true)
+ s.sweep(true)
goto havespan
}
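// The span is already being swept by the background sweeper; skip it.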
if s.sweepgen == sg-1 {
continue
}
// we have a nonempty span that does not require sweeping, allocate from it
- mSpanList_Remove(&c.nonempty, s)
- mSpanList_InsertBack(&c.empty, s)
+ c.nonempty.remove(s)
+ c.empty.insertBack(s)
unlock(&c.lock)
goto havespan
}
if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
// we have an empty span that requires sweeping,
// sweep it and see if we can free some space in it
- mSpanList_Remove(&c.empty, s)
+ c.empty.remove(s)
// swept spans are at the end of the list
- mSpanList_InsertBack(&c.empty, s)
+ c.empty.insertBack(s)
unlock(&c.lock)
- mSpan_Sweep(s, true)
+ s.sweep(true)
if s.freelist.ptr() != nil {
goto havespan
}
unlock(&c.lock)
// Replenish central list if empty.
- s = mCentral_Grow(c)
+ s = c.grow()
if s == nil {
return nil
}
lock(&c.lock)
- mSpanList_InsertBack(&c.empty, s)
+ c.empty.insertBack(s)
unlock(&c.lock)
// At this point s is a non-empty span, queued at the end of the empty list,
// c is unlocked.
}
// Return span from an MCache.
-func mCentral_UncacheSpan(c *mcentral, s *mspan) {
+func (c *mcentral) uncacheSpan(s *mspan) {
lock(&c.lock)
s.incache = false
cap := int32((s.npages << _PageShift) / s.elemsize)
n := cap - int32(s.ref)
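// n is the number of free objects left in the span; if any remain, the span
// belongs back on the nonempty list.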
if n > 0 {
- mSpanList_Remove(&c.empty, s)
- mSpanList_Insert(&c.nonempty, s)
+ c.empty.remove(s)
+ c.nonempty.insert(s)
}
unlock(&c.lock)
}
// Free n objects from span s back into the central free list c, updating
// s's sweepgen to the latest generation.
// If preserve=true, don't return the span to heap nor relink in MCentral lists;
// caller takes care of it.
-func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
+func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
if s.incache {
throw("freespan into cached span")
}
if preserve {
// preserve is set only when called from MCentral_CacheSpan above,
// the span must be in the empty list.
- if !mSpan_InList(s) {
+ if !s.inList() {
throw("can't preserve unlinked span")
}
atomic.Store(&s.sweepgen, mheap_.sweepgen)
// Move to nonempty if necessary.
if wasempty {
- mSpanList_Remove(&c.empty, s)
- mSpanList_Insert(&c.nonempty, s)
+ c.empty.remove(s)
+ c.nonempty.insert(s)
}
// delay updating sweepgen until here. This is the signal that
// the span may be used in an MCache, so it must be done
// before the span is released.
}
// s is completely freed, return it to the heap.
- mSpanList_Remove(&c.nonempty, s)
+ c.nonempty.remove(s)
s.needzero = 1
s.freelist = 0
unlock(&c.lock)
heapBitsForSpan(s.base()).initSpan(s.layout())
- mHeap_Free(&mheap_, s, 0)
+ mheap_.freeSpan(s, 0)
return true
}
// Fetch a new span from the heap and carve into objects for the free list.
-func mCentral_Grow(c *mcentral) *mspan {
+func (c *mcentral) grow() *mspan {
npages := uintptr(class_to_allocnpages[c.sizeclass])
size := uintptr(class_to_size[c.sizeclass])
n := (npages << _PageShift) / size
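// n is how many objects of this size class fit into a span of npages pages,
// using the class_to_allocnpages and class_to_size tables.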
- s := mHeap_Alloc(&mheap_, npages, c.sizeclass, false, true)
+ s := mheap_.alloc(npages, c.sizeclass, false, true)
if s == nil {
return nil
}
unlock(&mheap_.lock)
}
- s := mHeap_LookupMaybe(&mheap_, unsafe.Pointer(v))
+ s := mheap_.lookupMaybe(unsafe.Pointer(v))
if sp != nil {
*sp = s
}
}
// Initialize the heap.
-func mHeap_Init(h *mheap, spans_size uintptr) {
- fixAlloc_Init(&h.spanalloc, unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
- fixAlloc_Init(&h.cachealloc, unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
- fixAlloc_Init(&h.specialfinalizeralloc, unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
- fixAlloc_Init(&h.specialprofilealloc, unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
+func (h *mheap) init(spans_size uintptr) {
+ h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
+ h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
+ h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
+ h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
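// Each fixalloc above is a simple free-list allocator dedicated to one
// fixed-size runtime structure (mspan, mcache, special records).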
// h->mapcache needs no init
for i := range h.free {
- mSpanList_Init(&h.free[i])
- mSpanList_Init(&h.busy[i])
+ h.free[i].init()
+ h.busy[i].init()
}
- mSpanList_Init(&h.freelarge)
- mSpanList_Init(&h.busylarge)
+ h.freelarge.init()
+ h.busylarge.init()
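// free[i] and busy[i] hold spans of exactly i pages; spans too large for the
// fixed-size arrays go on freelarge and busylarge instead.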
for i := range h.central {
- mCentral_Init(&h.central[i].mcentral, int32(i))
+ h.central[i].mcentral.init(int32(i))
}
sp := (*slice)(unsafe.Pointer(&h_spans))
// Waiting to update arena_used until after the memory has been mapped
// avoids faults when other threads try to access the bitmap immediately
// after observing the change to arena_used.
-func mHeap_MapSpans(h *mheap, arena_used uintptr) {
+func (h *mheap) mapSpans(arena_used uintptr) {
// Map spans array, PageSize at a time.
n := arena_used
n -= h.arena_start
// Sweeps spans in list until reclaims at least npages into heap.
// Returns the actual number of pages reclaimed.
-func mHeap_ReclaimList(h *mheap, list *mSpanList, npages uintptr) uintptr {
+func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
n := uintptr(0)
sg := mheap_.sweepgen
retry:
for s := list.first; s != nil; s = s.next {
if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
- mSpanList_Remove(list, s)
+ list.remove(s)
// swept spans are at the end of the list
- mSpanList_InsertBack(list, s)
+ list.insertBack(s)
unlock(&h.lock)
snpages := s.npages
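// Record the page count before sweeping: if sweep returns the span to the
// heap, s must not be inspected afterwards.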
- if mSpan_Sweep(s, false) {
+ if s.sweep(false) {
n += snpages
}
lock(&h.lock)
// Sweeps and reclaims at least npage pages into heap.
// Called before allocating npage pages.
-func mHeap_Reclaim(h *mheap, npage uintptr) {
+func (h *mheap) reclaim(npage uintptr) {
// First try to sweep busy spans with large objects of size >= npage,
// this has good chances of reclaiming the necessary space.
for i := int(npage); i < len(h.busy); i++ {
- if mHeap_ReclaimList(h, &h.busy[i], npage) != 0 {
+ if h.reclaimList(&h.busy[i], npage) != 0 {
return // Bingo!
}
}
// Then -- even larger objects.
- if mHeap_ReclaimList(h, &h.busylarge, npage) != 0 {
+ if h.reclaimList(&h.busylarge, npage) != 0 {
return // Bingo!
}
// One such object is not enough, so we need to reclaim several of them.
reclaimed := uintptr(0)
for i := 0; i < int(npage) && i < len(h.busy); i++ {
- reclaimed += mHeap_ReclaimList(h, &h.busy[i], npage-reclaimed)
+ reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
if reclaimed >= npage {
return
}
// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
-func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan {
+func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
_g_ := getg()
if _g_ != _g_.m.g0 {
throw("_mheap_alloc not on g0 stack")
// If GC kept a bit for whether there were any marks
// in a span, we could release these free spans
// at the end of GC and eliminate this entirely.
- mHeap_Reclaim(h, npage)
+ h.reclaim(npage)
}
// transfer stats from cache to global
memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
_g_.m.mcache.local_tinyallocs = 0
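// Both counters above are flushed while the heap lock is held, which
// protects the global memstats fields.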
- s := mHeap_AllocSpanLocked(h, npage)
+ s := h.allocSpanLocked(npage)
if s != nil {
// Record span info, because gc needs to be
// able to map interior pointer to containing span.
memstats.heap_live += uint64(npage << _PageShift)
// Swept spans are at the end of lists.
if s.npages < uintptr(len(h.free)) {
- mSpanList_InsertBack(&h.busy[s.npages], s)
+ h.busy[s.npages].insertBack(s)
} else {
- mSpanList_InsertBack(&h.busylarge, s)
+ h.busylarge.insertBack(s)
}
}
}
return s
}
-func mHeap_Alloc(h *mheap, npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
+func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
// Don't do any operations that lock the heap on the G stack.
// It might trigger stack growth, and the stack growth code needs
// to be able to allocate heap.
var s *mspan
systemstack(func() {
- s = mHeap_Alloc_m(h, npage, sizeclass, large)
+ s = h.alloc_m(npage, sizeclass, large)
})
if s != nil {
return s
}
-func mHeap_AllocStack(h *mheap, npage uintptr) *mspan {
+func (h *mheap) allocStack(npage uintptr) *mspan {
_g_ := getg()
if _g_ != _g_.m.g0 {
throw("mheap_allocstack not on g0 stack")
}
lock(&h.lock)
- s := mHeap_AllocSpanLocked(h, npage)
+ s := h.allocSpanLocked(npage)
if s != nil {
s.state = _MSpanStack
s.freelist = 0
// Allocates a span of the given size. h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
-func mHeap_AllocSpanLocked(h *mheap, npage uintptr) *mspan {
+func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
var list *mSpanList
var s *mspan
// Try in fixed-size lists up to max.
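// free is indexed by page count, so the first non-empty list at index >= npage
// yields a span at least as large as requested; any excess is trimmed below.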
for i := int(npage); i < len(h.free); i++ {
list = &h.free[i]
- if !mSpanList_IsEmpty(list) {
+ if !list.isEmpty() {
s = list.first
goto HaveSpan
}
// Best fit in list of large spans.
list = &h.freelarge
- s = mHeap_AllocLarge(h, npage)
+ s = h.allocLarge(npage)
if s == nil {
- if !mHeap_Grow(h, npage) {
+ if !h.grow(npage) {
return nil
}
- s = mHeap_AllocLarge(h, npage)
+ s = h.allocLarge(npage)
if s == nil {
return nil
}
if s.npages < npage {
throw("MHeap_AllocLocked - bad npages")
}
- mSpanList_Remove(list, s)
- if mSpan_InList(s) {
+ list.remove(s)
+ if s.inList() {
throw("still in list")
}
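// npreleased pages were previously returned to the OS and must be marked as
// in use again before the span can be handed out.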
if s.npreleased > 0 {
if s.npages > npage {
// Trim extra and put it back in the heap.
- t := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
- mSpan_Init(t, s.start+pageID(npage), s.npages-npage)
+ t := (*mspan)(h.spanalloc.alloc())
+ t.init(s.start+pageID(npage), s.npages-npage)
s.npages = npage
p := uintptr(t.start)
p -= (h.arena_start >> _PageShift)
t.needzero = s.needzero
s.state = _MSpanStack // prevent coalescing with s
t.state = _MSpanStack
- mHeap_FreeSpanLocked(h, t, false, false, s.unusedsince)
+ h.freeSpanLocked(t, false, false, s.unusedsince)
s.state = _MSpanFree
}
s.unusedsince = 0
memstats.heap_idle -= uint64(npage << _PageShift)
//println("spanalloc", hex(s.start<<_PageShift))
- if mSpan_InList(s) {
+ if s.inList() {
throw("still in list")
}
return s
}
// Allocate a span of exactly npage pages from the list of large spans.
-func mHeap_AllocLarge(h *mheap, npage uintptr) *mspan {
+func (h *mheap) allocLarge(npage uintptr) *mspan {
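// bestFit returns the smallest span on the list with at least npage pages,
// preferring the lower start address when sizes tie.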
return bestFit(&h.freelarge, npage, nil)
}
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
// h must be locked.
-func mHeap_Grow(h *mheap, npage uintptr) bool {
+func (h *mheap) grow(npage uintptr) bool {
// Ask for a big chunk, to reduce the number of mappings
// the operating system needs to track; also amortizes
// the overhead of an operating system mapping.
ask = _HeapAllocChunk
}
- v := mHeap_SysAlloc(h, ask)
+ v := h.sysAlloc(ask)
if v == nil {
if ask > npage<<_PageShift {
ask = npage << _PageShift
- v = mHeap_SysAlloc(h, ask)
+ v = h.sysAlloc(ask)
}
if v == nil {
print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
// Create a fake "in use" span and free it, so that the
// right coalescing happens.
- s := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
- mSpan_Init(s, pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
+ s := (*mspan)(h.spanalloc.alloc())
+ s.init(pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
p := uintptr(s.start)
p -= (h.arena_start >> _PageShift)
for i := p; i < p+s.npages; i++ {
atomic.Store(&s.sweepgen, h.sweepgen)
s.state = _MSpanInUse
h.pagesInUse += uint64(npage)
- mHeap_FreeSpanLocked(h, s, false, true, 0)
+ h.freeSpanLocked(s, false, true, 0)
return true
}
// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
-func mHeap_Lookup(h *mheap, v unsafe.Pointer) *mspan {
+func (h *mheap) lookup(v unsafe.Pointer) *mspan {
p := uintptr(v)
p -= h.arena_start
return h_spans[p>>_PageShift]
// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
-func mHeap_LookupMaybe(h *mheap, v unsafe.Pointer) *mspan {
+func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
return nil
}
}
// Free the span back into the heap.
-func mHeap_Free(h *mheap, s *mspan, acct int32) {
+func (h *mheap) freeSpan(s *mspan, acct int32) {
systemstack(func() {
mp := getg().m
lock(&h.lock)
if gcBlackenEnabled != 0 {
gcController.revise()
}
- mHeap_FreeSpanLocked(h, s, true, true, 0)
+ h.freeSpanLocked(s, true, true, 0)
if trace.enabled {
traceHeapAlloc()
}
})
}
-func mHeap_FreeStack(h *mheap, s *mspan) {
+func (h *mheap) freeStack(s *mspan) {
_g_ := getg()
if _g_ != _g_.m.g0 {
throw("mheap_freestack not on g0 stack")
s.needzero = 1
lock(&h.lock)
memstats.stacks_inuse -= uint64(s.npages << _PageShift)
- mHeap_FreeSpanLocked(h, s, true, true, 0)
+ h.freeSpanLocked(s, true, true, 0)
unlock(&h.lock)
}
// s must be on a busy list (h.busy or h.busylarge) or unlinked.
-func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsince int64) {
+func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
switch s.state {
case _MSpanStack:
if s.ref != 0 {
memstats.heap_idle += uint64(s.npages << _PageShift)
}
s.state = _MSpanFree
- if mSpan_InList(s) {
- mSpanList_Remove(mHeap_BusyList(h, s.npages), s)
+ if s.inList() {
+ h.busyList(s.npages).remove(s)
}
// Stamp newly unused spans. The scavenger will use that
// info to potentially give back some pages to the OS.
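// Coalesce with the preceding span when it is free: s absorbs its pages and
// the stale span t is unlinked and returned to the span fixalloc.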
s.needzero |= t.needzero
p -= t.npages
h_spans[p] = s
- mSpanList_Remove(mHeap_FreeList(h, t.npages), t)
+ h.freeList(t.npages).remove(t)
t.state = _MSpanDead
- fixAlloc_Free(&h.spanalloc, unsafe.Pointer(t))
+ h.spanalloc.free(unsafe.Pointer(t))
}
}
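// Coalesce with the following span in the same way, provided the spans map
// extends that far and the neighbor is free.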
if (p+s.npages)*ptrSize < h.spans_mapped {
s.npreleased += t.npreleased
s.needzero |= t.needzero
h_spans[p+s.npages-1] = s
- mSpanList_Remove(mHeap_FreeList(h, t.npages), t)
+ h.freeList(t.npages).remove(t)
t.state = _MSpanDead
- fixAlloc_Free(&h.spanalloc, unsafe.Pointer(t))
+ h.spanalloc.free(unsafe.Pointer(t))
}
}
// Insert s into appropriate list.
- mSpanList_Insert(mHeap_FreeList(h, s.npages), s)
+ h.freeList(s.npages).insert(s)
}
-func mHeap_FreeList(h *mheap, npages uintptr) *mSpanList {
+func (h *mheap) freeList(npages uintptr) *mSpanList {
if npages < uintptr(len(h.free)) {
return &h.free[npages]
}
return &h.freelarge
}
-func mHeap_BusyList(h *mheap, npages uintptr) *mSpanList {
+func (h *mheap) busyList(npages uintptr) *mSpanList {
if npages < uintptr(len(h.free)) {
return &h.busy[npages]
}
return &h.busylarge
}
- if mSpanList_IsEmpty(list) {
+ if list.isEmpty() {
return 0
}
return sumreleased
}
-func mHeap_Scavenge(k int32, now, limit uint64) {
- h := &mheap_
+func (h *mheap) scavenge(k int32, now, limit uint64) {
lock(&h.lock)
var sumreleased uintptr
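// Walk the free lists and release pages from spans that have sat unused
// longer than limit back to the operating system, accumulating the total in
// sumreleased.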
for i := 0; i < len(h.free); i++ {
//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
gcStart(gcForceBlockMode, false)
- systemstack(func() { mHeap_Scavenge(-1, ^uint64(0), 0) })
+ systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
}
// Initialize a new span with the given start and npages.
-func mSpan_Init(span *mspan, start pageID, npages uintptr) {
+func (span *mspan) init(start pageID, npages uintptr) {
span.next = nil
span.prev = nil
span.list = nil
span.needzero = 0
}
-func mSpan_InList(span *mspan) bool {
+func (span *mspan) inList() bool {
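// A span linked into an mSpanList always has a non-nil prev (the first
// element's prev points at the list's first field), and remove clears it.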
return span.prev != nil
}
// Initialize an empty doubly-linked list.
-func mSpanList_Init(list *mSpanList) {
+func (list *mSpanList) init() {
list.first = nil
list.last = &list.first
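// last points at the next field of the final span, or at first when the list
// is empty, so insertBack can append without walking the list.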
}
-func mSpanList_Remove(list *mSpanList, span *mspan) {
+func (list *mSpanList) remove(span *mspan) {
if span.prev == nil || span.list != list {
println("failed MSpanList_Remove", span, span.prev, span.list, list)
throw("MSpanList_Remove")
span.list = nil
}
-func mSpanList_IsEmpty(list *mSpanList) bool {
+func (list *mSpanList) isEmpty() bool {
return list.first == nil
}
-func mSpanList_Insert(list *mSpanList, span *mspan) {
+func (list *mSpanList) insert(span *mspan) {
if span.next != nil || span.prev != nil || span.list != nil {
println("failed MSpanList_Insert", span, span.next, span.prev, span.list)
throw("MSpanList_Insert")
span.list = list
}
-func mSpanList_InsertBack(list *mSpanList, span *mspan) {
+func (list *mSpanList) insertBack(span *mspan) {
if span.next != nil || span.prev != nil || span.list != nil {
println("failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
throw("MSpanList_InsertBack")
// (The add will fail only if a record with the same p and s->kind
// already exists.)
func addspecial(p unsafe.Pointer, s *special) bool {
- span := mHeap_LookupMaybe(&mheap_, p)
+ span := mheap_.lookupMaybe(p)
if span == nil {
throw("addspecial on invalid pointer")
}
// Sweeping accesses the specials list w/o locks, so we have
// to synchronize with it. And it's just much safer.
mp := acquirem()
- mSpan_EnsureSwept(span)
+ span.ensureSwept()
offset := uintptr(p) - uintptr(span.start<<_PageShift)
kind := s.kind
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
- span := mHeap_LookupMaybe(&mheap_, p)
+ span := mheap_.lookupMaybe(p)
if span == nil {
throw("removespecial on invalid pointer")
}
// Sweeping accesses the specials list w/o locks, so we have
// to synchronize with it. And it's just much safer.
mp := acquirem()
- mSpan_EnsureSwept(span)
+ span.ensureSwept()
offset := uintptr(p) - uintptr(span.start<<_PageShift)
// Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
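// Special records are carved from dedicated fixalloc pools; mheap_.speciallock
// guards those pools.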
lock(&mheap_.speciallock)
- s := (*specialfinalizer)(fixAlloc_Alloc(&mheap_.specialfinalizeralloc))
+ s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
unlock(&mheap_.speciallock)
s.special.kind = _KindSpecialFinalizer
s.fn = f
// There was an old finalizer
lock(&mheap_.speciallock)
- fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(s))
+ mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
unlock(&mheap_.speciallock)
return false
}
return // there wasn't a finalizer to remove
}
lock(&mheap_.speciallock)
- fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(s))
+ mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
unlock(&mheap_.speciallock)
}
// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
lock(&mheap_.speciallock)
- s := (*specialprofile)(fixAlloc_Alloc(&mheap_.specialprofilealloc))
+ s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
unlock(&mheap_.speciallock)
s.special.kind = _KindSpecialProfile
s.b = b
sf := (*specialfinalizer)(unsafe.Pointer(s))
queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
lock(&mheap_.speciallock)
- fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(sf))
+ mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
unlock(&mheap_.speciallock)
case _KindSpecialProfile:
sp := (*specialprofile)(unsafe.Pointer(s))
mProf_Free(sp.b, size)
lock(&mheap_.speciallock)
- fixAlloc_Free(&mheap_.specialprofilealloc, unsafe.Pointer(sp))
+ mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
unlock(&mheap_.speciallock)
default:
throw("bad special kind")