diff --git a/src/runtime/mem_js.go b/src/runtime/mem_js.go
--- a/src/runtime/mem_js.go
+++ b/src/runtime/mem_js.go
package runtime
-import (
- "unsafe"
-)
+import "unsafe"
-// Don't split the stack as this function may be invoked without a valid G,
-// which prevents us from allocating more stack.
-//
-//go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
- p := sysReserveOS(nil, n)
- sysMapOS(p, n)
- return p
-}
-
-func sysUnusedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysUsedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysHugePageOS(v unsafe.Pointer, n uintptr) {
-}
-
-// Don't split the stack as this function may be invoked without a valid G,
-// which prevents us from allocating more stack.
-//
-//go:nosplit
-func sysFreeOS(v unsafe.Pointer, n uintptr) {
-}
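+// sbrk grows the WebAssembly linear memory by n bytes, rounded up to
+// whole 64 KiB pages, and returns the start of the newly grown region
+// (the previous end of memory), or nil if memory.grow fails.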
+func sbrk(n uintptr) unsafe.Pointer {
+ grow := divRoundUp(n, physPageSize)
+ size := currentMemory()
-func sysFaultOS(v unsafe.Pointer, n uintptr) {
-}
-
-var reserveEnd uintptr
-
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
- // TODO(neelance): maybe unify with mem_plan9.go, depending on how https://github.com/WebAssembly/design/blob/master/FutureFeatures.md#finer-grained-control-over-memory turns out
-
- if v != nil {
- // The address space of WebAssembly's linear memory is contiguous,
- // so requesting specific addresses is not supported. We could use
- // a different address, but then mheap.sysAlloc discards the result
- // right away and we don't reuse chunks passed to sysFree.
+ if growMemory(int32(grow)) < 0 {
return nil
}
- // Round up the initial reserveEnd to 64 KiB so that
- // reservations are always aligned to the page size.
- initReserveEnd := alignUp(lastmoduledatap.end, physPageSize)
- if reserveEnd < initReserveEnd {
- reserveEnd = initReserveEnd
- }
- v = unsafe.Pointer(reserveEnd)
- reserveEnd += alignUp(n, physPageSize)
-
- current := currentMemory()
- // reserveEnd is always at a page boundary.
- needed := int32(reserveEnd / physPageSize)
- if current < needed {
- if growMemory(needed-current) == -1 {
- return nil
- }
- resetMemoryDataView()
- }
-
- return v
+ resetMemoryDataView()
+ return unsafe.Pointer(uintptr(size) * physPageSize)
}
func currentMemory() int32
func growMemory(pages int32) int32

// resetMemoryDataView signals the JS front-end that WebAssembly's memory has grown.
//
//go:wasmimport gojs runtime.resetMemoryDataView
func resetMemoryDataView()
-
-func sysMapOS(v unsafe.Pointer, n uintptr) {
-}
diff --git a/src/runtime/mem_plan9.go b/src/runtime/mem_plan9.go
--- a/src/runtime/mem_plan9.go
+++ b/src/runtime/mem_plan9.go
import "unsafe"
-const memDebug = false
-
-var bloc uintptr
-var blocMax uintptr
-var memlock mutex
-
-type memHdr struct {
- next memHdrPtr
- size uintptr
-}
-
-var memFreelist memHdrPtr // sorted in ascending order
-
-type memHdrPtr uintptr
-
-func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) }
-func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
-
-func memAlloc(n uintptr) unsafe.Pointer {
- n = memRound(n)
- var prevp *memHdr
- for p := memFreelist.ptr(); p != nil; p = p.next.ptr() {
- if p.size >= n {
- if p.size == n {
- if prevp != nil {
- prevp.next = p.next
- } else {
- memFreelist = p.next
- }
- } else {
- p.size -= n
- p = (*memHdr)(add(unsafe.Pointer(p), p.size))
- }
- *p = memHdr{}
- return unsafe.Pointer(p)
- }
- prevp = p
- }
- return sbrk(n)
-}
-
-func memFree(ap unsafe.Pointer, n uintptr) {
- n = memRound(n)
- memclrNoHeapPointers(ap, n)
- bp := (*memHdr)(ap)
- bp.size = n
- bpn := uintptr(ap)
- if memFreelist == 0 {
- bp.next = 0
- memFreelist.set(bp)
- return
- }
- p := memFreelist.ptr()
- if bpn < uintptr(unsafe.Pointer(p)) {
- memFreelist.set(bp)
- if bpn+bp.size == uintptr(unsafe.Pointer(p)) {
- bp.size += p.size
- bp.next = p.next
- *p = memHdr{}
- } else {
- bp.next.set(p)
- }
- return
- }
- for ; p.next != 0; p = p.next.ptr() {
- if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) {
- break
- }
- }
- if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) {
- bp.size += p.next.ptr().size
- bp.next = p.next.ptr().next
- *p.next.ptr() = memHdr{}
- } else {
- bp.next = p.next
- }
- if uintptr(unsafe.Pointer(p))+p.size == bpn {
- p.size += bp.size
- p.next = bp.next
- *bp = memHdr{}
- } else {
- p.next.set(bp)
- }
-}
-
-func memCheck() {
- if !memDebug {
- return
- }
- for p := memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() {
- if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) {
- print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n")
- throw("mem: infinite loop")
- }
- if uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) {
- print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n")
- throw("mem: unordered list")
- }
- if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) {
- print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n")
- throw("mem: overlapping blocks")
- }
- for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) {
- if *(*byte)(b) != 0 {
- print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n")
- throw("mem: uninitialised memory")
- }
- }
- }
-}
-
-func memRound(p uintptr) uintptr {
- return (p + _PAGESIZE - 1) &^ (_PAGESIZE - 1)
-}
-
-func initBloc() {
- bloc = memRound(firstmoduledata.end)
- blocMax = bloc
-}
-
func sbrk(n uintptr) unsafe.Pointer {
	// Plan 9 sbrk from /sys/src/libc/9sys/sbrk.c
	bl := bloc
	n = memRound(n)
	if bl+n > blocMax {
		if brk_(unsafe.Pointer(bl+n)) < 0 {
			return nil
		}
		blocMax = bl + n
	}
	bloc += n
	return unsafe.Pointer(bl)
}
-
-func sysAllocOS(n uintptr) unsafe.Pointer {
- lock(&memlock)
- p := memAlloc(n)
- memCheck()
- unlock(&memlock)
- return p
-}
-
-func sysFreeOS(v unsafe.Pointer, n uintptr) {
- lock(&memlock)
- if uintptr(v)+n == bloc {
- // Address range being freed is at the end of memory,
- // so record a new lower value for end of memory.
- // Can't actually shrink address space because segment is shared.
- memclrNoHeapPointers(v, n)
- bloc -= n
- } else {
- memFree(v, n)
- memCheck()
- }
- unlock(&memlock)
-}
-
-func sysUnusedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysUsedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysHugePageOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysMapOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysFaultOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
- lock(&memlock)
- var p unsafe.Pointer
- if uintptr(v) == bloc {
- // Address hint is the current end of memory,
- // so try to extend the address space.
- p = sbrk(n)
- }
- if p == nil && v == nil {
- p = memAlloc(n)
- memCheck()
- }
- unlock(&memlock)
- return p
-}
diff --git a/src/runtime/mem_sbrk.go b/src/runtime/mem_sbrk.go
new file mode 100644
--- /dev/null
+++ b/src/runtime/mem_sbrk.go
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9 || wasm
+
+package runtime
+
+import "unsafe"
+
+const memDebug = false
+
+var bloc uintptr
+var blocMax uintptr
+var memlock mutex
+
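+// A memHdr is written over the first words of every free block,
+// linking it into the free list.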
+type memHdr struct {
+ next memHdrPtr
+ size uintptr
+}
+
+var memFreelist memHdrPtr // sorted in ascending order
+
+type memHdrPtr uintptr
+
+func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) }
+func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
+
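+// memAlloc performs a first-fit search of the free list, carving the
+// request off the tail of a larger block, and falls back to growing
+// memory with sbrk when no block fits.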
+func memAlloc(n uintptr) unsafe.Pointer {
+ n = memRound(n)
+ var prevp *memHdr
+ for p := memFreelist.ptr(); p != nil; p = p.next.ptr() {
+ if p.size >= n {
+ if p.size == n {
+ if prevp != nil {
+ prevp.next = p.next
+ } else {
+ memFreelist = p.next
+ }
+ } else {
+ p.size -= n
+ p = (*memHdr)(add(unsafe.Pointer(p), p.size))
+ }
+ *p = memHdr{}
+ return unsafe.Pointer(p)
+ }
+ prevp = p
+ }
+ return sbrk(n)
+}
+
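+// memFree zeroes the returned block and inserts it into the
+// address-ordered free list, coalescing it with adjacent free blocks.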
+func memFree(ap unsafe.Pointer, n uintptr) {
+ n = memRound(n)
+ memclrNoHeapPointers(ap, n)
+ bp := (*memHdr)(ap)
+ bp.size = n
+ bpn := uintptr(ap)
+ if memFreelist == 0 {
+ bp.next = 0
+ memFreelist.set(bp)
+ return
+ }
+ p := memFreelist.ptr()
+ if bpn < uintptr(unsafe.Pointer(p)) {
+ memFreelist.set(bp)
+ if bpn+bp.size == uintptr(unsafe.Pointer(p)) {
+ bp.size += p.size
+ bp.next = p.next
+ *p = memHdr{}
+ } else {
+ bp.next.set(p)
+ }
+ return
+ }
+ for ; p.next != 0; p = p.next.ptr() {
+ if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) {
+ break
+ }
+ }
+ if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) {
+ bp.size += p.next.ptr().size
+ bp.next = p.next.ptr().next
+ *p.next.ptr() = memHdr{}
+ } else {
+ bp.next = p.next
+ }
+ if uintptr(unsafe.Pointer(p))+p.size == bpn {
+ p.size += bp.size
+ p.next = bp.next
+ *bp = memHdr{}
+ } else {
+ p.next.set(bp)
+ }
+}
+
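+// memCheck validates the free list when memDebug is enabled: it throws
+// on cycles, misordered or overlapping blocks, and on non-zero bytes
+// in memory that memFree should have cleared.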
+func memCheck() {
+ if !memDebug {
+ return
+ }
+ for p := memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() {
+ if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) {
+ print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n")
+ throw("mem: infinite loop")
+ }
+ if uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) {
+ print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n")
+ throw("mem: unordered list")
+ }
+ if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) {
+ print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n")
+ throw("mem: overlapping blocks")
+ }
+ for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) {
+ if *(*byte)(b) != 0 {
+ print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n")
+ throw("mem: uninitialised memory")
+ }
+ }
+ }
+}
+
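+// memRound rounds p up to the next multiple of the physical page size.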
+func memRound(p uintptr) uintptr {
+ return alignUp(p, physPageSize)
+}
+
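+// initBloc sets the initial program break to the first page boundary
+// at or past the end of the module's static data.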
+func initBloc() {
+ bloc = memRound(firstmoduledata.end)
+ blocMax = bloc
+}
+
+func sysAllocOS(n uintptr) unsafe.Pointer {
+ lock(&memlock)
+ p := memAlloc(n)
+ memCheck()
+ unlock(&memlock)
+ return p
+}
+
+func sysFreeOS(v unsafe.Pointer, n uintptr) {
+ lock(&memlock)
+ if uintptr(v)+n == bloc {
+ // Address range being freed is at the end of memory,
+ // so record a new lower value for end of memory.
+ // Can't actually shrink address space because segment is shared.
+ memclrNoHeapPointers(v, n)
+ bloc -= n
+ } else {
+ memFree(v, n)
+ memCheck()
+ }
+ unlock(&memlock)
+}
+
+func sysUnusedOS(v unsafe.Pointer, n uintptr) {
+}
+
+func sysUsedOS(v unsafe.Pointer, n uintptr) {
+}
+
+func sysHugePageOS(v unsafe.Pointer, n uintptr) {
+}
+
+func sysMapOS(v unsafe.Pointer, n uintptr) {
+}
+
+func sysFaultOS(v unsafe.Pointer, n uintptr) {
+}
+
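+// sysReserveOS honors the address hint only when it matches the
+// current program break, in which case the break is simply extended;
+// with no hint at all, it falls back to the free list.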
+func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+ lock(&memlock)
+ var p unsafe.Pointer
+ if uintptr(v) == bloc {
+ // Address hint is the current end of memory,
+ // so try to extend the address space.
+ p = sbrk(n)
+ }
+ if p == nil && v == nil {
+ p = memAlloc(n)
+ memCheck()
+ }
+ unlock(&memlock)
+ return p
+}
diff --git a/src/runtime/os_wasm.go b/src/runtime/os_wasm.go
--- a/src/runtime/os_wasm.go
+++ b/src/runtime/os_wasm.go
}
func osinit() {
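+ // initBloc below rounds the break to physPageSize, so the page size
+ // must be initialized first. Wasm memory is managed in 64 KiB pages: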
+ // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
+ physPageSize = 64 * 1024
+ initBloc()
ncpu = 1
getg().m.procid = 2
- physPageSize = 64 * 1024
}
// wasm has no signals
}
diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go
--- a/src/runtime/os_plan9.go
+++ b/src/runtime/os_plan9.go
func osinit() {
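+ // physPageSize is needed by initBloc's rounding, so set it before
+ // anything else.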
+ physPageSize = getPageSize()
initBloc()
ncpu = getproccount()
- physPageSize = getPageSize()
getg().m.procid = getpid()
}