Currently, the '_FixAllocChunk % fixalloc.size' tail bytes of each chunk
can never be used when allocating from persistentalloc.
Wasted bytes on darwin/amd64:
_FixAllocChunk % mheap_.spanalloc.size = 64
_FixAllocChunk % mheap_.cachealloc.size = 784
_FixAllocChunk % mheap_.specialfinalizeralloc.size = 16
_FixAllocChunk % mheap_.specialprofilealloc.size = 16
_FixAllocChunk % mheap_.specialReachableAlloc.size = 16
_FixAllocChunk % mheap_.arenaHintAlloc.size = 16
After this commit, fixalloc allocates exactly '_FixAllocChunk / fixalloc.size'
objects per chunk with zero tail waste. Sizeof(fixalloc{}) is unchanged.
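
As a rough illustration (not runtime code), assuming _FixAllocChunk is
16 KiB and picking a made-up object size of 1200 bytes, the rounding
works out like this:

// Minimal sketch; the chunk constant and the 1200-byte object size are
// assumptions for illustration, not values taken from the runtime sources.
package main

import "fmt"

const fixAllocChunk = 16 << 10 // assumed chunk size (16384 bytes)

func main() {
	size := uintptr(1200) // hypothetical fixalloc object size

	// Before: each persistentalloc request is exactly fixAllocChunk bytes,
	// so the fixAllocChunk % size tail bytes of every chunk are stranded.
	fmt.Println("tail waste per chunk:", fixAllocChunk%size) // 784

	// After: request an exact multiple of size instead.
	nalloc := fixAllocChunk / size * size
	fmt.Println("rounded chunk size:", nalloc)         // 15600
	fmt.Println("tail waste per chunk:", nalloc%size)  // 0
}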
Change-Id: Ifc551f5b7aa9d842fa559abbe532ffcfb4d3540c
GitHub-Last-Rev: e08b4c66b82bc7be9d14fb7eb7580504d777481e
GitHub-Pull-Request: golang/go#47439
Reviewed-on: https://go-review.googlesource.com/c/go/+/338090
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Cherry Mui <cherryyz@google.com>
arg unsafe.Pointer
list *mlink
chunk uintptr // use uintptr instead of unsafe.Pointer to avoid write barriers
- nchunk uint32
+ nchunk uint32 // bytes remaining in current chunk
+ nalloc uint32 // size of new chunks in bytes
inuse uintptr // in-use bytes now
stat *sysMemStat
zero bool // zero allocations
f.list = nil
f.chunk = 0
f.nchunk = 0
+ f.nalloc = uint32(_FixAllocChunk / size * size) // Round _FixAllocChunk down to an exact multiple of size to eliminate tail waste
f.inuse = 0
f.stat = stat
f.zero = true
return v
}
if uintptr(f.nchunk) < f.size {
- f.chunk = uintptr(persistentalloc(_FixAllocChunk, 0, f.stat))
- f.nchunk = _FixAllocChunk
+ f.chunk = uintptr(persistentalloc(uintptr(f.nalloc), 0, f.stat))
+ f.nchunk = f.nalloc
}
v := unsafe.Pointer(f.chunk)
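
For illustration, a rough standalone simulation of the refill path in the
hunk above; fakeFixAlloc, the 1200-byte object size, and the 16 KiB chunk
constant are hypothetical stand-ins, not the runtime implementation:

package main

import "fmt"

const fixAllocChunk = 16 << 10 // assumed value of _FixAllocChunk (16 KiB)

type fakeFixAlloc struct {
	size   uintptr
	nchunk uint32 // bytes remaining in the current chunk
	nalloc uint32 // size of new chunks in bytes
}

// alloc mirrors the refill-then-carve shape of fixalloc.alloc, counting
// chunk refills instead of calling persistentalloc.
func (f *fakeFixAlloc) alloc(refills *int) {
	if uintptr(f.nchunk) < f.size {
		*refills++
		f.nchunk = f.nalloc
	}
	f.nchunk -= uint32(f.size)
}

func main() {
	size := uintptr(1200) // hypothetical fixalloc object size
	f := &fakeFixAlloc{size: size, nalloc: uint32(fixAllocChunk / size * size)}

	refills := 0
	perChunk := int(uintptr(f.nalloc) / size) // 13 objects per 15600-byte chunk
	for i := 0; i < perChunk; i++ {
		f.alloc(&refills)
	}
	// Because nalloc is an exact multiple of size, the chunk is consumed
	// completely: no tail bytes are stranded before the next refill.
	fmt.Println("refills:", refills, "bytes left in chunk:", f.nchunk) // 1 0
}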