func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) }
func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
+// memAlloc allocates n bytes from the brk reservation or, if it's full,
+// from the system.
+//
+// memlock must be held.
+//
+// memAlloc must be called on the system stack, otherwise a stack growth
+// could cause us to call back into it. Since memlock is held, that could
+// lead to a self-deadlock.
+//
+//go:systemstack
func memAlloc(n uintptr) unsafe.Pointer {
if p := memAllocNoGrow(n); p != nil {
return p
}
return sbrk(n)
}
+// memAllocNoGrow attempts to allocate n bytes from the existing brk.
+//
+// memlock must be held.
+//
+// memAllocNoGrow must be called on the system stack, otherwise a stack
+// growth could cause us to call back into it. Since memlock is held, that
+// could lead to a self-deadlock.
+//
+//go:systemstack
func memAllocNoGrow(n uintptr) unsafe.Pointer {
n = memRound(n)
var prevp *memHdr
return nil
}
+// memFree makes [ap, ap+n) available for reallocation by memAlloc.
+//
+// memlock must be held.
+//
+// memFree must be called on the system stack, otherwise a stack growth
+// could cause us to call back into it. Since memlock is held, that could
+// lead to a self-deadlock.
+//
+//go:systemstack
func memFree(ap unsafe.Pointer, n uintptr) {
n = memRound(n)
memclrNoHeapPointers(ap, n)
}
}
+// memCheck checks invariants around free list management.
+//
+// memlock must be held.
+//
+// memCheck must be called on the system stack, otherwise a stack growth
+// could cause us to call back into it. Since memlock is held, that could
+// lead to a self-deadlock.
+//
+//go:systemstack
func memCheck() {
if !memDebug {
return
}
func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
- lock(&memlock)
- p := memAlloc(n)
- memCheck()
- unlock(&memlock)
- return p
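+ // memAlloc and memCheck must run on the system stack with memlock held
+ // (see their doc comments); p carries the result back out of the
+ // systemstack closure.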
+ var p uintptr
+ systemstack(func() {
+ lock(&memlock)
+ p = uintptr(memAlloc(n))
+ memCheck()
+ unlock(&memlock)
+ })
+ return unsafe.Pointer(p)
}
func sysFreeOS(v unsafe.Pointer, n uintptr) {
- lock(&memlock)
- if uintptr(v)+n == bloc {
- // Address range being freed is at the end of memory,
- // so record a new lower value for end of memory.
- // Can't actually shrink address space because segment is shared.
- memclrNoHeapPointers(v, n)
- bloc -= n
- } else {
- memFree(v, n)
- memCheck()
- }
- unlock(&memlock)
+ systemstack(func() {
+ lock(&memlock)
+ if uintptr(v)+n == bloc {
+ // Address range being freed is at the end of memory,
+ // so record a new lower value for end of memory.
+ // Can't actually shrink address space because segment is shared.
+ memclrNoHeapPointers(v, n)
+ bloc -= n
+ } else {
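+ // Not at the end of memory: return the range to the free list.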
+ memFree(v, n)
+ memCheck()
+ }
+ unlock(&memlock)
+ })
}
func sysUnusedOS(v unsafe.Pointer, n uintptr) {
}
func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
- lock(&memlock)
- var p unsafe.Pointer
- if uintptr(v) == bloc {
- // Address hint is the current end of memory,
- // so try to extend the address space.
- p = sbrk(n)
- }
- if p == nil && v == nil {
- p = memAlloc(n)
- memCheck()
- }
- unlock(&memlock)
- return p
+ var p uintptr
+ systemstack(func() {
+ lock(&memlock)
+ if uintptr(v) == bloc {
+ // Address hint is the current end of memory,
+ // so try to extend the address space.
+ p = uintptr(sbrk(n))
+ }
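+ // Couldn't (or didn't) extend the address space. If no address
+ // hint was given, satisfy the reservation from the free list.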
+ if p == 0 && v == nil {
+ p = uintptr(memAlloc(n))
+ memCheck()
+ }
+ unlock(&memlock)
+ })
+ return unsafe.Pointer(p)
}
func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
- lock(&memlock)
- if p := memAllocNoGrow(size + align); p != nil {
- // We can satisfy the reservation from the free list.
- // Trim off the unaligned parts.
- pAligned := alignUp(uintptr(p), align)
- if startLen := pAligned - uintptr(p); startLen > 0 {
- memFree(p, startLen)
+ var p uintptr
+ systemstack(func() {
+ lock(&memlock)
+ if base := memAllocNoGrow(size + align); base != nil {
+ // We can satisfy the reservation from the free list.
+ // Trim off the unaligned parts.
+ start := alignUp(uintptr(base), align)
+ if startLen := start - uintptr(base); startLen > 0 {
+ memFree(base, startLen)
+ }
+ end := start + size
+ if endLen := (uintptr(base) + size + align) - end; endLen > 0 {
+ memFree(unsafe.Pointer(end), endLen)
+ }
+ memCheck()
+ unlock(&memlock)
+ p = start
+ return
}
- end := pAligned + size
- if endLen := (uintptr(p) + size + align) - end; endLen > 0 {
- memFree(unsafe.Pointer(end), endLen)
+
+ // Round up bloc to align, then allocate size.
+ p = alignUp(bloc, align)
+ r := sbrk(p + size - bloc)
+ if r == nil {
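+ // sbrk failed; zero p and size so the caller sees a nil, 0 reservation.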
+ p, size = 0, 0
+ } else if l := p - uintptr(r); l > 0 {
+ // Free the area we skipped over for alignment.
+ memFree(r, l)
+ memCheck()
}
- memCheck()
unlock(&memlock)
- return unsafe.Pointer(pAligned), size
- }
-
- // Round up bloc to align, then allocate size.
- p := alignUp(bloc, align)
- r := sbrk(p + size - bloc)
- if r == nil {
- p, size = 0, 0
- } else if l := p - uintptr(r); l > 0 {
- // Free the area we skipped over for alignment.
- memFree(r, l)
- memCheck()
- }
- unlock(&memlock)
+ })
return unsafe.Pointer(p), size
}