"addb",
"adjustpanics",
"adjustpointer",
+ "alignDown",
+ "alignUp",
"bucketMask",
"bucketShift",
"chanbuf",
"readUnaligned32",
"readUnaligned64",
"releasem",
- "round",
"roundupsize",
"stackmapdata",
"stringStructOf",
if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
p = mheap_.heapArenaAlloc.end
}
- p = round(p+(256<<10), heapArenaBytes)
+ p = alignUp(p+(256<<10), heapArenaBytes)
// Because we're worried about fragmentation on
// 32-bit, we try to make a large initial reservation.
arenaSizes := []uintptr{
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
- n = round(n, heapArenaBytes)
+ n = alignUp(n, heapArenaBytes)
// First, try the arena pre-reservation.
v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
// re-reserve the aligned sub-region. This may race,
// so we may have to try again.
sysFree(unsafe.Pointer(p), size+align, nil)
- p = round(p, align)
+ p = alignUp(p, align)
p2 := sysReserve(unsafe.Pointer(p), size)
if p != uintptr(p2) {
// Must have raced. Try again.
return p2, size
default:
// Trim off the unaligned parts.
- pAligned := round(p, align)
+ pAligned := alignUp(p, align)
sysFree(unsafe.Pointer(p), pAligned-p, nil)
end := pAligned + size
endLen := (p + size + align) - end
off := c.tinyoffset
// Align tiny pointer for required (conservative) alignment.
if size&7 == 0 {
- off = round(off, 8)
+ off = alignUp(off, 8)
} else if size&3 == 0 {
- off = round(off, 4)
+ off = alignUp(off, 4)
} else if size&1 == 0 {
- off = round(off, 2)
+ off = alignUp(off, 2)
}
if off+size <= maxTinySize && c.tiny != 0 {
// The object fits into existing tiny block.
lock(&globalAlloc.mutex)
persistent = &globalAlloc.persistentAlloc
}
- persistent.off = round(persistent.off, align)
+ persistent.off = alignUp(persistent.off, align)
if persistent.off+size > persistentChunkSize || persistent.base == nil {
persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
if persistent.base == nil {
break
}
}
- persistent.off = round(sys.PtrSize, align)
+ persistent.off = alignUp(sys.PtrSize, align)
}
p := persistent.base.add(persistent.off)
persistent.off += size
}
func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
- p := round(l.next, align)
+ p := alignUp(l.next, align)
if p+size > l.end {
return nil
}
l.next = p + size
- if pEnd := round(l.next-1, physPageSize); pEnd > l.mapped {
+ if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
// Transition from Reserved to Prepared to Ready.
sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
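Illustrative note (a toy over a byte slice, not the runtime's reserved-memory version): the bump-allocation pattern used by linearAlloc and persistentalloc above is to round the cursor up to the requested alignment, check that the block fits, then advance the cursor past it.

package main

import "fmt"

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

// bump is a toy linear allocator over a fixed buffer.
type bump struct {
	buf  []byte
	next uintptr // offset of the next free byte
}

// alloc returns the offset of a size-byte block aligned to align, or false if it does not fit.
func (b *bump) alloc(size, align uintptr) (uintptr, bool) {
	p := alignUp(b.next, align)
	if p+size > uintptr(len(b.buf)) {
		return 0, false
	}
	b.next = p + size
	return p, true
}

func main() {
	b := &bump{buf: make([]byte, 64)}
	fmt.Println(b.alloc(3, 1))  // 0 true
	fmt.Println(b.alloc(8, 8))  // 8 true: cursor rounded up from 3 to 8
	fmt.Println(b.alloc(64, 8)) // 0 false: does not fit
}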
var head, tail uintptr
if uintptr(v)&(physHugePageSize-1) != 0 {
// Compute huge page containing v.
- head = uintptr(v) &^ (physHugePageSize - 1)
+ head = alignDown(uintptr(v), physHugePageSize)
}
if (uintptr(v)+n)&(physHugePageSize-1) != 0 {
// Compute huge page containing v+n-1.
- tail = (uintptr(v) + n - 1) &^ (physHugePageSize - 1)
+ tail = alignDown(uintptr(v)+n-1, physHugePageSize)
}
// Note that madvise will return EINVAL if the flag is
func sysHugePage(v unsafe.Pointer, n uintptr) {
if physHugePageSize != 0 {
// Round v up to a huge page boundary.
- beg := (uintptr(v) + (physHugePageSize - 1)) &^ (physHugePageSize - 1)
+ beg := alignUp(uintptr(v), physHugePageSize)
// Round v+n down to a huge page boundary.
- end := (uintptr(v) + n) &^ (physHugePageSize - 1)
+ end := alignDown(uintptr(v)+n, physHugePageSize)
if beg < end {
madvise(unsafe.Pointer(beg), end-beg, _MADV_HUGEPAGE)
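Illustrative note (assumed 2 MB huge pages and made-up addresses, not from the runtime): the inward rounding above means only huge pages lying entirely inside [v, v+n) get advised, so the start is rounded up and the end rounded down.

package main

import "fmt"

func alignUp(n, a uintptr) uintptr   { return (n + a - 1) &^ (a - 1) }
func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

func main() {
	const hugePageSize = 2 << 20        // assumed huge page size: 2 MB
	v := uintptr(0x301000)              // example region start, not huge-page aligned
	n := uintptr(5 << 20)               // example region length: 5 MB
	beg := alignUp(v, hugePageSize)     // 0x400000: start of first fully contained huge page
	end := alignDown(v+n, hugePageSize) // 0x800000: end of last fully contained huge page
	fmt.Printf("advise [%#x, %#x): %d huge pages\n", beg, end, (end-beg)/hugePageSize) // 2 huge pages
}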
// compute size needed for return parameters
nret := uintptr(0)
for _, t := range ft.out() {
- nret = round(nret, uintptr(t.align)) + uintptr(t.size)
+ nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
}
- nret = round(nret, sys.PtrSize)
+ nret = alignUp(nret, sys.PtrSize)
// make sure we have a finalizer goroutine
createfing()
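Illustrative note (hypothetical return types, not from the runtime): the same layout rule in isolation; each return value starts at an offset aligned to its type, and the finished frame is rounded up to a pointer-size multiple.

package main

import "fmt"

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

// retType stands in for the (size, align) of a finalizer return type.
type retType struct{ size, align uintptr }

func main() {
	const ptrSize = 8                         // assumed 64-bit target
	outs := []retType{{1, 1}, {8, 8}, {4, 4}} // e.g. (int8, int64, int32)
	nret := uintptr(0)
	for _, t := range outs {
		nret = alignUp(nret, t.align) + t.size
	}
	nret = alignUp(nret, ptrSize)
	fmt.Println(nret) // 24: offsets 0, 8, 16; total 20 rounded up to 24
}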
end := start + s.npages<<_PageShift
if physPageSize > _PageSize {
// Round start and end in.
- start = (start + physPageSize - 1) &^ (physPageSize - 1)
- end &^= physPageSize - 1
+ start = alignUp(start, physPageSize)
+ end = alignDown(end, physPageSize)
}
return start, end
}
// scavenged span.
boundary := b.startAddr
if a.scavenged {
- boundary &^= (physPageSize - 1)
+ boundary = alignDown(boundary, physPageSize)
} else {
- boundary = (boundary + physPageSize - 1) &^ (physPageSize - 1)
+ boundary = alignUp(boundary, physPageSize)
}
a.npages = (boundary - a.startAddr) / pageSize
b.npages = (b.startAddr + b.npages*pageSize - boundary) / pageSize
end := start + s.npages*pageSize
if physHugePageSize > pageSize {
// Round start and end in.
- start = (start + physHugePageSize - 1) &^ (physHugePageSize - 1)
- end &^= physHugePageSize - 1
+ start = alignUp(start, physHugePageSize)
+ end = alignDown(end, physHugePageSize)
}
if start < end {
return (end - start) >> physHugePageShift
func (h *mheap) grow(npage uintptr) bool {
ask := npage << _PageShift
- nBase := round(h.curArena.base+ask, physPageSize)
+ nBase := alignUp(h.curArena.base+ask, physPageSize)
if nBase > h.curArena.end {
// Not enough room in the current arena. Allocate more
// arena space. This may not be contiguous with the
memstats.heap_idle += uint64(asize)
// Recalculate nBase
- nBase = round(h.curArena.base+ask, physPageSize)
+ nBase = alignUp(h.curArena.base+ask, physPageSize)
}
// Grow into the current arena.
if base <= start {
return nil
}
- if physHugePageSize > pageSize && base&^(physHugePageSize-1) >= start {
+ if physHugePageSize > pageSize && alignDown(base, physHugePageSize) >= start {
// We're in danger of breaking apart a huge page, so include the entire
// huge page in the bound by rounding down to the huge page size.
// base should still be aligned to pageSize.
- base &^= physHugePageSize - 1
+ base = alignDown(base, physHugePageSize)
}
if base == start {
// After all that we rounded base down to s.base(), so no need to split.
if size+_PageSize < size {
return size
}
- return round(size, _PageSize)
+ return alignUp(size, _PageSize)
}
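Illustrative note (assumed 8 KB pages, not from the runtime): why the overflow guard above matters; when size is within one page of the uintptr maximum, the n + a - 1 step inside alignUp wraps around, so the unguarded result would be a tiny bogus size.

package main

import "fmt"

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

func main() {
	const pageSize = 8192     // assumed page size
	size := ^uintptr(0) - 100 // within one page of the maximum uintptr
	fmt.Println(alignUp(size, pageSize)) // 0: the addition wrapped around
	if size+pageSize < size {
		fmt.Println("overflow; return size unrounded") // the branch the guard above takes
	}
}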
if end < firstmoduledata.ebss {
end = firstmoduledata.ebss
}
- size := round(end-start, _PageSize)
+ size := alignUp(end-start, _PageSize)
racecall(&__tsan_map_shadow, start, size, 0, 0)
racedatastart = start
racedataend = start + size
}
if debug.efence != 0 || stackFromSystem != 0 {
- n = uint32(round(uintptr(n), physPageSize))
+ n = uint32(alignUp(uintptr(n), physPageSize))
v := sysAlloc(uintptr(n), &memstats.stacks_sys)
if v == nil {
throw("out of memory (stackalloc)")
func systemstack_switch()
-// round n up to a multiple of a. a must be a power of 2.
-func round(n, a uintptr) uintptr {
+// alignUp rounds n up to a multiple of a. a must be a power of 2.
+func alignUp(n, a uintptr) uintptr {
return (n + a - 1) &^ (a - 1)
}
+// alignDown rounds n down to a multiple of a. a must be a power of 2.
+func alignDown(n, a uintptr) uintptr {
+ return n &^ (a - 1)
+}
+
// checkASM reports whether assembly runtime checks have passed.
func checkASM() bool
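Illustrative note (not part of the patch): a minimal standalone program showing how the two helpers behave for a power-of-two alignment. The mask a-1 selects the low bits: alignDown clears them, and alignUp first adds a-1 so that the clear rounds up instead of down.

package main

import "fmt"

// Local copies of the helpers added above, for illustration only.
func alignUp(n, a uintptr) uintptr   { return (n + a - 1) &^ (a - 1) }
func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

func main() {
	fmt.Println(alignUp(13, 8))   // 16: next multiple of 8 at or above 13
	fmt.Println(alignUp(16, 8))   // 16: already-aligned values are unchanged
	fmt.Println(alignDown(13, 8)) // 8: previous multiple of 8 at or below 13
	fmt.Println(alignDown(16, 8)) // 16: already-aligned values are unchanged
}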
// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
- n = round(n, sys.PtrSize)
+ n = alignUp(n, sys.PtrSize)
if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
if n > uintptr(len(a.head.ptr().data)) {
throw("trace: alloc too large")