MOVQ AX, ret+16(FP)
RET
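+// uintptr arguments are 64 bits wide here (note the $0-24 frame), so
+// xadduintptr can simply tail-jump to the 64-bit xadd64.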
+TEXT runtime·xadduintptr(SB), NOSPLIT, $0-24
+ JMP runtime·xadd64(SB)
+
TEXT runtime·xchg(SB), NOSPLIT, $0-20
MOVQ ptr+0(FP), BX
MOVL new+8(FP), AX
MOVQ AX, ret+16(FP)
RET
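+// Pointers are 32 bits wide here (note the $0-12 frame), so xadduintptr
+// matches the 32-bit xadd.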
+TEXT runtime·xadduintptr(SB), NOSPLIT, $0-12
+ JMP runtime·xadd(SB)
+
TEXT runtime·xchg(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
}
}
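+// uintptr and uint32 have the same size on this port, so xadduintptr can be
+// aliased directly to the existing 32-bit xadd via go:linkname.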
+//go:noescape
+//go:linkname xadduintptr runtime.xadd
+func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
//go:nosplit
func xchg64(ptr *uint64, new uint64) uint64 {
for {
//go:noescape
func xadd64(ptr *uint64, delta int64) uint64
+//go:noescape
+func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
//go:noescape
func xchg(ptr *uint32, new uint32) uint32
}
}
+//go:noescape
+//go:linkname xadduintptr runtime.xadd
+func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
//go:nosplit
func xchg(addr *uint32, v uint32) uint32 {
for {
//go:noescape
func xadd64(ptr *uint64, delta int64) uint64
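+// uintptr is 64 bits wide on this port, so alias xadduintptr to xadd64.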
+//go:noescape
+//go:linkname xadduintptr runtime.xadd64
+func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
//go:noescape
func xchg(ptr *uint32, new uint32) uint32
//go:noescape
func xadd64(ptr *uint64, delta int64) uint64
+//go:noescape
+//go:linkname xadduintptr runtime.xadd64
+func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
//go:noescape
func xchg(ptr *uint32, new uint32) uint32
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "testing"
+ "unsafe"
+)
+
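+// runParallel runs f for iter iterations in each of N goroutines, with
+// GOMAXPROCS set to N while the goroutines are running.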
+func runParallel(N, iter int, f func()) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(N))
+ done := make(chan bool)
+ for i := 0; i < N; i++ {
+ go func() {
+ for j := 0; j < iter; j++ {
+ f()
+ }
+ done <- true
+ }()
+ }
+ for i := 0; i < N; i++ {
+ <-done
+ }
+}
+
+func TestXadduintptr(t *testing.T) {
+ const N = 20
+ const iter = 100000
+ inc := uintptr(100)
+ total := uintptr(0)
+ runParallel(N, iter, func() {
+ runtime.Xadduintptr(&total, inc)
+ })
+ if want := uintptr(N * iter * inc); want != total {
+ t.Fatalf("xadduintpr error, want %d, got %d", want, total)
+ }
+ total = 0
+ runParallel(N, iter, func() {
+ runtime.Xadduintptr(&total, inc)
+ runtime.Xadduintptr(&total, uintptr(-int64(inc)))
+ })
+ if total != 0 {
+ t.Fatalf("xadduintpr total error, want %d, got %d", 0, total)
+ }
+}
+
+// Tests that xadduintptr correctly updates 64-bit values. The place where
+// we actually do so is mstats.go, in the functions mSysStat{Inc,Dec}.
+func TestXadduintptrOnUint64(t *testing.T) {
+ if runtime.BigEndian != 0 {
+ // On big endian architectures, we never use xadduintptr to update
+ // 64-bit values and hence we skip the test. (Note that functions
+ // mSysStat{Inc,Dec} in mstats.go have explicit checks for
+ // big-endianness.)
+ return
+ }
+ const inc = 100
+ val := uint64(0)
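+ // The uintptr-sized add below lands on the low-order bits of val only on
+ // little-endian systems, which is why big-endian systems bail out above.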
+ runtime.Xadduintptr((*uintptr)(unsafe.Pointer(&val)), inc)
+ if inc != val {
+ t.Fatalf("xadduintptr should increase lower-order bits, want %d, got %d", inc, val)
+ }
+}
var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
+var Xadduintptr = xadduintptr
var FuncPC = funcPC
func Envs() []string { return envs }
func SetEnvs(e []string) { envs = e }
+
+var BigEndian = _BigEndian
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
-func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
+func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
const (
chunk = 256 << 10
maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
}
if size >= maxBlock {
- return sysAlloc(size, stat)
+ return sysAlloc(size, sysStat)
}
mp := acquirem()
unlock(&globalAlloc.mutex)
}
- if stat != &memstats.other_sys {
- xadd64(stat, int64(size))
- xadd64(&memstats.other_sys, -int64(size))
+ if sysStat != &memstats.other_sys {
+ mSysStatInc(sysStat, size)
+ mSysStatDec(&memstats.other_sys, size)
}
return p
}
import "unsafe"
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
//go:nosplit
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
v := unsafe.Pointer(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
if uintptr(v) < 4096 {
return nil
}
- xadd64(stat, int64(n))
+ mSysStatInc(sysStat, n)
return v
}
func sysUsed(v unsafe.Pointer, n uintptr) {
}
-func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
- xadd64(stat, -int64(n))
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//go:nosplit
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+ mSysStatDec(sysStat, n)
munmap(v, n)
}
return p
}
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
const _ENOMEM = 12
- xadd64(stat, int64(n))
+ mSysStatInc(sysStat, n)
// On 64-bit, we don't actually have v reserved, so tread carefully.
if !reserved {
import "unsafe"
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
//go:nosplit
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
v := (unsafe.Pointer)(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
if uintptr(v) < 4096 {
return nil
}
- xadd64(stat, int64(n))
+ mSysStatInc(sysStat, n)
return v
}
func sysUsed(v unsafe.Pointer, n uintptr) {
}
-func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
- xadd64(stat, -int64(n))
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//go:nosplit
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+ mSysStatDec(sysStat, n)
munmap(v, n)
}
_ENOMEM = 12
)
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
- xadd64(stat, int64(n))
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+ mSysStatInc(sysStat, n)
p := (unsafe.Pointer)(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
if uintptr(p) == _ENOMEM {
throw("runtime: out of memory")
return p
}
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
//go:nosplit
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
p := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if uintptr(p) < 4096 {
if uintptr(p) == _EACCES {
}
return nil
}
- xadd64(stat, int64(n))
+ mSysStatInc(sysStat, n)
return p
}
}
}
-func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
- xadd64(stat, -int64(n))
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//go:nosplit
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+ mSysStatDec(sysStat, n)
munmap(v, n)
}
return p
}
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
- xadd64(stat, int64(n))
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+ mSysStatInc(sysStat, n)
// On 64-bit, we don't actually have v reserved, so tread carefully.
if !reserved {
return unsafe.Pointer(bl)
}
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
lock(&memlock)
p := memAlloc(n)
memCheck()
unlock(&memlock)
if p != nil {
- xadd64(stat, int64(n))
+ mSysStatInc(sysStat, n)
}
return p
}
-func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
- xadd64(stat, -int64(n))
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+ mSysStatDec(sysStat, n)
lock(&memlock)
memFree(v, n)
memCheck()
func sysUsed(v unsafe.Pointer, n uintptr) {
}
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
// sysReserve has already allocated all heap memory,
// but has not adjusted stats.
- xadd64(stat, int64(n))
+ mSysStatInc(sysStat, n)
}
func sysFault(v unsafe.Pointer, n uintptr) {
_PAGE_NOACCESS = 0x0001
)
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
//go:nosplit
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
- xadd64(stat, int64(n))
+func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+ mSysStatInc(sysStat, n)
return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
}
}
}
-func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
- xadd64(stat, -int64(n))
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//go:nosplit
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+ mSysStatDec(sysStat, n)
r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
if r == 0 {
throw("runtime: failed to release pages")
return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
}
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
- xadd64(stat, int64(n))
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+ mSysStatInc(sysStat, n)
p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
if p != uintptr(v) {
throw("runtime: cannot map pages in arena address space")
c.local_nsmallfree[i] = 0
}
}
+
+// Atomically increases a given *system* memory stat. We are counting on this
+// stat never overflowing a uintptr, so this function must only be used for
+// system memory stats.
+//
+// The current implementation for little endian architectures is based on
+// xadduintptr(), which is less than ideal: xadd64() should really be used.
+// Using xadduintptr() is a stop-gap solution until arm supports a lock-free
+// xadd64(). (Locks are a problem as they require a valid G, which
+// restricts their usability.)
+//
+// A side-effect of using xadduintptr() is that we need to check for
+// overflow errors.
+//go:nosplit
+func mSysStatInc(sysStat *uint64, n uintptr) {
+ if _BigEndian != 0 {
+ xadd64(sysStat, int64(n))
+ return
+ }
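+ // xadduintptr only updates the low uintptr-sized word of the 64-bit stat,
+ // so a result smaller than n means that word wrapped around (overflow).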
+ if val := xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
+ print("runtime: stat overflow: val ", val, ", n ", n, "\n")
+ exit(2)
+ }
+}
+
+// Atomically decreases a given *system* memory stat. Same comments as
+// mSysStatInc apply.
+//go:nosplit
+func mSysStatDec(sysStat *uint64, n uintptr) {
+ if _BigEndian != 0 {
+ xadd64(sysStat, -int64(n))
+ return
+ }
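+ // uintptr(-int64(n)) is n negated in two's complement, so the add below
+ // subtracts n modulo 2^ptrSize; val+n < n means the old value was smaller
+ // than n, i.e. the stat underflowed.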
+ if val := xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
+ print("runtime: stat underflow: val ", val, ", n ", n, "\n")
+ exit(2)
+ }
+}
//
//go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer, fnarg uintptr) {
- var dummy uint64
- stack := sysAlloc(stacksize, &dummy)
+ stack := sysAlloc(stacksize, &memstats.stacks_sys)
if stack == nil {
write(2, unsafe.Pointer(&failallocatestack[0]), int32(len(failallocatestack)))
exit(1)
// Version of newosproc that doesn't require a valid G.
//go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
- var dummy uint64
- stack := sysAlloc(stacksize, &dummy)
+ stack := sysAlloc(stacksize, &memstats.stacks_sys)
if stack == nil {
write(2, unsafe.Pointer(&failallocatestack[0]), int32(len(failallocatestack)))
exit(1)