import (
"internal/cpu"
+ "internal/goarch"
"runtime/internal/sys"
"unsafe"
)
const (
- c0 = uintptr((8-sys.PtrSize)/4*2860486313 + (sys.PtrSize-4)/4*33054211828000289)
- c1 = uintptr((8-sys.PtrSize)/4*3267000013 + (sys.PtrSize-4)/4*23344194077549503)
+ c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
+ c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
)
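Aside (illustration only, not part of the patch): the c0/c1 expressions use (8-PtrSize)/4 and (PtrSize-4)/4 as 0/1 selectors, so each constant collapses to the 32-bit value when PtrSize is 4 and to the 64-bit value when PtrSize is 8. A minimal standalone sketch of the trick, with a hypothetical ptrSize constant standing in for goarch.PtrSize:

	package main

	import "fmt"

	const ptrSize = 8 // stand-in for goarch.PtrSize on a 64-bit target

	// With ptrSize == 8 the first term is 0 and the second is 1, so c0
	// evaluates to the 64-bit constant; with ptrSize == 4 it is the reverse.
	const c0 = uintptr((8-ptrSize)/4*2860486313 + (ptrSize-4)/4*33054211828000289)

	func main() {
		fmt.Println(c0) // 33054211828000289
	}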
func memhash0(p unsafe.Pointer, h uintptr) uintptr {
return interhash(noescape(unsafe.Pointer(&i)), seed)
}
-const hashRandomBytes = sys.PtrSize / 4 * 64
+const hashRandomBytes = goarch.PtrSize / 4 * 64
// used in asm_{386,amd64,arm64}.s to seed the hash function
var aeskeysched [hashRandomBytes]byte
initAlgAES()
return
}
- getRandomData((*[len(hashkey) * sys.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
+ getRandomData((*[len(hashkey) * goarch.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
hashkey[0] |= 1 // make sure these numbers are odd
hashkey[1] |= 1
hashkey[2] |= 1
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
if inheap(uintptr(unsafe.Pointer(it))) {
panic(errorString(msg))
}
- p = *(*unsafe.Pointer)(add(p, sys.PtrSize))
+ p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
if !cgoIsGoPointer(p) {
return
}
}
hbits := heapBitsForAddr(base)
n := span.elemsize
- for i = uintptr(0); i < n; i += sys.PtrSize {
+ for i = uintptr(0); i < n; i += goarch.PtrSize {
if !hbits.morePointers() {
// No more possible pointers.
break
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
// src must be in the regular heap.
hbits := heapBitsForAddr(uintptr(src))
- for i := uintptr(0); i < off+size; i += sys.PtrSize {
+ for i := uintptr(0); i < off+size; i += goarch.PtrSize {
bits := hbits.bits()
if i >= off && bits&bitPointer != 0 {
v := *(*unsafe.Pointer)(add(src, i))
//go:nosplit
//go:nowritebarrier
func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
- skipMask := off / sys.PtrSize / 8
- skipBytes := skipMask * sys.PtrSize * 8
+ skipMask := off / goarch.PtrSize / 8
+ skipBytes := skipMask * goarch.PtrSize * 8
ptrmask := addb(gcbits, skipMask)
src = add(src, skipBytes)
off -= skipBytes
size += off
var bits uint32
- for i := uintptr(0); i < size; i += sys.PtrSize {
- if i&(sys.PtrSize*8-1) == 0 {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
+ if i&(goarch.PtrSize*8-1) == 0 {
bits = uint32(*ptrmask)
ptrmask = addb(ptrmask, 1)
} else {
bits >>= 1
}
if off > 0 {
- off -= sys.PtrSize
+ off -= goarch.PtrSize
} else {
if bits&1 != 0 {
v := *(*unsafe.Pointer)(add(src, i))
// license that can be found in the LICENSE file.
//go:build amd64 && linux && !goexperiment.regabiargs
-// +build amd64,linux
-// +build !goexperiment.regabiargs
+// +build amd64,linux,!goexperiment.regabiargs
package runtime
// license that can be found in the LICENSE file.
//go:build amd64 && linux && goexperiment.regabiargs
-// +build amd64,linux
-// +build goexperiment.regabiargs
+// +build amd64,linux,goexperiment.regabiargs
package runtime
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
return false
}
// Push current PC on the stack.
- rsp := ctxt.rsp() - sys.PtrSize
+ rsp := ctxt.rsp() - goarch.PtrSize
*(*uint64)(unsafe.Pointer(uintptr(rsp))) = ctxt.rip()
ctxt.set_rsp(rsp)
// Write the argument frame size.
storeRegArgs(ctxt.regs(), h.regArgs)
}
// Push return PC.
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
ctxt.set_rsp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = ctxt.rip()
// Set PC to call and context register.
case 2:
// Function panicked. Copy panic out.
sp := ctxt.rsp()
- memmove(unsafe.Pointer(&h.panic), unsafe.Pointer(uintptr(sp)), 2*sys.PtrSize)
+ memmove(unsafe.Pointer(&h.panic), unsafe.Pointer(uintptr(sp)), 2*goarch.PtrSize)
case 8:
// Call isn't safe. Get the reason.
sp := ctxt.rsp()
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
})
}
-const PtrSize = sys.PtrSize
+const PtrSize = goarch.PtrSize
var ForceGCPeriod = &forcegcperiod
import (
+ "internal/goarch"
"runtime/internal/sys"
"unsafe"
)
for i := uintptr(0); i < uintptr(cbv.n); i++ {
if cbv.ptrbit(i) == 1 {
dumpint(fieldKindPtr)
- dumpint(uint64(offset + i*sys.PtrSize))
+ dumpint(uint64(offset + i*goarch.PtrSize))
}
}
}
dumpbv(&child.args, child.argoff)
} else {
// conservative - everything might be a pointer
- for off := child.argoff; off < child.argoff+child.arglen; off += sys.PtrSize {
+ for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
// Dump fields in the local vars section
if stkmap == nil {
// No locals information, dump everything.
- for off := child.arglen; off < s.varp-s.sp; off += sys.PtrSize {
+ for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
} else if stkmap.n < 0 {
// Locals size information, dump just the locals.
size := uintptr(-stkmap.n)
- for off := s.varp - size - s.sp; off < s.varp-s.sp; off += sys.PtrSize {
+ for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
} else if stkmap.n > 0 {
// Locals bitmap information, scan just the pointers in
// locals.
- dumpbv(&bv, s.varp-uintptr(bv.n)*sys.PtrSize-s.sp)
+ dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
}
dumpint(fieldKindEol)
} else {
dumpbool(true) // big-endian ptrs
}
- dumpint(sys.PtrSize)
+ dumpint(goarch.PtrSize)
var arenaStart, arenaEnd uintptr
for i1 := range mheap_.arenas {
if mheap_.arenas[i1] == nil {
func makeheapobjbv(p uintptr, size uintptr) bitvector {
// Extend the temp buffer if necessary.
- nptr := size / sys.PtrSize
+ nptr := size / goarch.PtrSize
if uintptr(len(tmpbuf)) < nptr/8+1 {
if tmpbuf != nil {
sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
"internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
}
// Entry doesn't exist yet. Make a new entry & add it.
- m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
+ m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*goarch.PtrSize, 0, &memstats.other_sys))
m.inter = inter
m._type = typ
// The hash is used in type switches. However, compiler statically generates itab's
mask := t.size - 1
h := itabHashFunc(inter, typ) & mask
for i := uintptr(1); ; i++ {
- p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize))
+ p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
// Use atomic read here so if we see m != nil, we also see
// the initializations of the fields of m.
// m := *p
// t2 = new(itabTableType) + some additional entries
// We lie and tell malloc we want pointer-free memory because
// all the pointed-to values are not in the heap.
- t2 := (*itabTableType)(mallocgc((2+2*t.size)*sys.PtrSize, nil, true))
+ t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
t2.size = t.size * 2
// Copy over entries.
mask := t.size - 1
h := itabHashFunc(m.inter, m._type) & mask
for i := uintptr(1); ; i++ {
- p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize))
+ p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
m2 := *p
if m2 == m {
// A given itab may be used in more than one module
// so no other locks/atomics needed.
t := itabTable
for i := uintptr(0); i < t.size; i++ {
- m := *(**itab)(add(unsafe.Pointer(&t.entries), i*sys.PtrSize))
+ m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
if m != nil {
fn(m)
}
package math
-import "runtime/internal/sys"
+import "internal/goarch"
const MaxUintptr = ^uintptr(0)
// MulUintptr returns a * b and whether the multiplication overflowed.
// On supported platforms this is an intrinsic lowered by the compiler.
func MulUintptr(a, b uintptr) (uintptr, bool) {
- if a|b < 1<<(4*sys.PtrSize) || a == 0 {
+ if a|b < 1<<(4*goarch.PtrSize) || a == 0 {
return a * b, false
}
overflow := b > MaxUintptr/a
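Aside (illustration only, not part of the patch): the a|b < 1<<(4*PtrSize) fast path works because when both operands fit in half a word their product cannot overflow a full word, so the division-based check is only needed for large operands. A sketch of the same logic, hard-coded for a 64-bit uintptr:

	package main

	import "fmt"

	const maxUintptr = ^uintptr(0)

	// mulUintptr mirrors MulUintptr above with PtrSize fixed at 8.
	func mulUintptr(a, b uintptr) (uintptr, bool) {
		if a|b < 1<<32 || a == 0 {
			return a * b, false
		}
		return a * b, b > maxUintptr/a
	}

	func main() {
		fmt.Println(mulUintptr(1<<20, 1<<20)) // 1099511627776 false
		fmt.Println(mulUintptr(1<<40, 1<<40)) // 0 true (the true product needs 80 bits)
	}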
"runtime/internal/atomic"
"runtime/internal/math"
"runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
// windows/32 | 4KB | 3
// windows/64 | 8KB | 2
// plan9 | 4KB | 3
- _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
+ _NumStackOrders = 4 - goarch.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
// heapAddrBits is the number of bits in a heap address. On
// amd64, addresses are sign-extended beyond heapAddrBits. On
logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm + (2+20)*sys.GoosIos*sys.GoarchArm64
// heapArenaBitmapBytes is the size of each heap arena's bitmap.
- heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
+ heapArenaBitmapBytes = heapArenaBytes / (goarch.PtrSize * 8 / 2)
pagesPerArena = heapArenaBytes / pageSize
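Aside (illustration only, not part of the patch): heapArenaBitmapBytes reserves two bitmap bits per pointer-sized heap word. A quick arithmetic check, assuming the 64 MiB arenas and 8 KiB pages used on 64-bit Linux:

	package main

	import "fmt"

	func main() {
		const heapArenaBytes = 64 << 20 // assumed 64-bit linux arena size
		const ptrSize = 8               // goarch.PtrSize
		const pageSize = 8192
		fmt.Println(heapArenaBytes / (ptrSize * 8 / 2)) // 2097152 bytes of bitmap (2 MiB)
		fmt.Println(heapArenaBytes / pageSize)          // 8192 pages per arena
	}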
lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
// Create initial arena growth hints.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// On a 64-bit machine, we pick the following hints
// because:
//
l2 := h.arenas[ri.l1()]
if l2 == nil {
// Allocate an L2 arena map.
- l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
+ l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), goarch.PtrSize, nil))
if l2 == nil {
throw("out of memory allocating heap arena map")
}
throw("arena already initialized")
}
var r *heapArena
- r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
- r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
throw("out of memory allocating heap arena metadata")
}
// Add the arena to the arenas list.
if len(h.allArenas) == cap(h.allArenas) {
- size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
+ size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
if size == 0 {
size = physPageSize
}
- newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
+ newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
if newArray == nil {
throw("out of memory allocating allArenas")
}
oldSlice := h.allArenas
- *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
+ *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
copy(h.allArenas, oldSlice)
// Do not free the old backing array because
// there may be concurrent readers. Since we
// Align tiny pointer for required (conservative) alignment.
if size&7 == 0 {
off = alignUp(off, 8)
- } else if sys.PtrSize == 4 && size == 12 {
+ } else if goarch.PtrSize == 4 && size == 12 {
// Conservatively align 12-byte objects to 8 bytes on 32-bit
// systems so that objects whose first field is a 64-bit
-// value is aligned to 8 bytes and does not cause a fault on
+// value are aligned to 8 bytes and do not cause a fault on
break
}
}
- persistent.off = alignUp(sys.PtrSize, align)
+ persistent.off = alignUp(goarch.PtrSize, align)
}
p := persistent.base.add(persistent.off)
persistent.off += size
"internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/math"
- "runtime/internal/sys"
"unsafe"
)
sameSizeGrow = 8 // the current map growth is to a new map of the same size
// sentinel bucket ID for iterator checks
- noCheck = 1<<(8*sys.PtrSize) - 1
+ noCheck = 1<<(8*goarch.PtrSize) - 1
)
// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
// Masking the shift amount allows overflow checks to be elided.
- return uintptr(1) << (b & (sys.PtrSize*8 - 1))
+ return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}
// bucketMask returns 1<<b - 1, optimized for code generation.
// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
- top := uint8(hash >> (sys.PtrSize*8 - 8))
+ top := uint8(hash >> (goarch.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
}
func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
+ return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize))
}
func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
+ *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize)) = ovf
}
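Aside (illustration only, not part of the patch): the bucketsize-PtrSize arithmetic above relies on the overflow pointer being the last pointer-sized word of a bucket. A hypothetical bucket layout (not the compiler-generated bmap) showing that invariant:

	package main

	import (
		"fmt"
		"unsafe"
	)

	const bucketCnt = 8

	// bucket is a made-up map bucket: tophash bytes, then keys, then
	// pointer-sized elems, with the overflow pointer in the final word.
	type bucket struct {
		tophash  [bucketCnt]uint8
		keys     [bucketCnt]string
		elems    [bucketCnt]uintptr
		overflow unsafe.Pointer
	}

	func main() {
		var b bucket
		last := unsafe.Sizeof(b) - unsafe.Sizeof(uintptr(0))
		fmt.Println(unsafe.Offsetof(b.overflow) == last) // true
	}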
func (b *bmap) keys() unsafe.Pointer {
return
}
- if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
+ if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
}
it.t = t
if t.key.equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
- if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
+ if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(goarch.PtrSize)) ||
t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
throw("key size wrong")
}
- if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(sys.PtrSize)) ||
+ if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(goarch.PtrSize)) ||
t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
throw("elem size wrong")
}
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
// Only clear key if there are pointers in it.
// This can only happen if pointers are 32 bit
// wide as 64 bit pointers do not fit into a 32 bit key.
- if sys.PtrSize == 4 && t.key.ptrdata != 0 {
+ if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
// The key must be a pointer as we checked pointers are
// 32 bits wide and the key is 32 bits wide also.
*(*unsafe.Pointer)(k) = nil
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
- if sys.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
+ if goarch.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
}
// Only clear key if there are pointers in it.
if t.key.ptrdata != 0 {
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
*(*unsafe.Pointer)(k) = nil
} else {
-// There are three ways to squeeze at one ore more 32 bit pointers into 64 bits.
+// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
// Copy key.
if t.key.ptrdata != 0 && writeBarrier.enabled {
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
keymaybe = i
}
if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
}
}
}
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
keymaybe = i
}
if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
}
}
}
}
continue
}
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
if k.len != key.len {
continue
}
}
insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
// store new key at insert position
*((*stringStruct)(insertk)) = *key
h.count++
done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
top := tophash(hash)
search:
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
// Clear key's pointer.
k.str = nil
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
if t.elem.ptrdata != 0 {
memclrHasPointers(e, t.elem.size)
} else {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*2*sys.PtrSize)
+ x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*2*sys.PtrSize)
+ y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*2*sys.PtrSize)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
+ e := add(k, bucketCnt*2*goarch.PtrSize)
+ for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.elemsize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
+ dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
- dst.k = add(dst.k, 2*sys.PtrSize)
+ dst.k = add(dst.k, 2*goarch.PtrSize)
dst.e = add(dst.e, uintptr(t.elemsize))
}
}
+ "internal/goarch"
"math"
"reflect"
"runtime"
- "runtime/internal/sys"
"sort"
"strconv"
"strings"
"sync"
// The structure of hmap is defined in runtime/map.go
// and in cmd/compile/internal/gc/reflect.go and must be in sync.
// The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
- var hmapSize = uintptr(8 + 5*sys.PtrSize)
+ var hmapSize = uintptr(8 + 5*goarch.PtrSize)
if runtime.RuntimeHmapSize != hmapSize {
t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
}
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
-// off must be a multiple of sys.PtrSize.
+// off must be a multiple of goarch.PtrSize.
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
- if writeBarrier.needed && typ.ptrdata > off && size >= sys.PtrSize {
- if off&(sys.PtrSize-1) != 0 {
+ if writeBarrier.needed && typ.ptrdata > off && size >= goarch.PtrSize {
+ if off&(goarch.PtrSize-1) != 0 {
panic("reflect: internal error: misaligned offset")
}
- pwsize := alignDown(size, sys.PtrSize)
+ pwsize := alignDown(size, goarch.PtrSize)
if poff := typ.ptrdata - off; pwsize > poff {
pwsize = poff
}
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
- if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize {
+ if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= goarch.PtrSize {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
}
memmove(dst, src, size)
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
// we expect to crash in the caller.
return
}
- h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
- h.shift = uint32((addr / sys.PtrSize) & 3)
+ h.bitp = &ha.bitmap[(addr/(goarch.PtrSize*4))%heapArenaBitmapBytes]
+ h.shift = uint32((addr / goarch.PtrSize) & 3)
h.arena = uint32(arena)
h.last = &ha.bitmap[len(ha.bitmap)-1]
return
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
- if (dst|src|size)&(sys.PtrSize-1) != 0 {
+ if (dst|src|size)&(goarch.PtrSize-1) != 0 {
throw("bulkBarrierPreWrite: unaligned arguments")
}
if !writeBarrier.needed {
buf := &getg().m.p.ptr().wbBuf
h := heapBitsForAddr(dst)
if src == 0 {
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
if !buf.putFast(*dstx, 0) {
h = h.next()
}
} else {
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
srcx := (*uintptr)(unsafe.Pointer(src + i))
// created and zeroed with malloc.
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
- if (dst|src|size)&(sys.PtrSize-1) != 0 {
+ if (dst|src|size)&(goarch.PtrSize-1) != 0 {
throw("bulkBarrierPreWrite: unaligned arguments")
}
if !writeBarrier.needed {
}
buf := &getg().m.p.ptr().wbBuf
h := heapBitsForAddr(dst)
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
srcx := (*uintptr)(unsafe.Pointer(src + i))
if !buf.putFast(0, *srcx) {
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
- word := maskOffset / sys.PtrSize
+ word := maskOffset / goarch.PtrSize
bits = addb(bits, word/8)
mask := uint8(1) << (word % 8)
buf := &getg().m.p.ptr().wbBuf
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if mask == 0 {
bits = addb(bits, 1)
if *bits == 0 {
// Skip 8 words.
- i += 7 * sys.PtrSize
+ i += 7 * goarch.PtrSize
continue
}
mask = 1
ptrmask := typ.gcdata
buf := &getg().m.p.ptr().wbBuf
var bits uint32
- for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
- if i&(sys.PtrSize*8-1) == 0 {
+ for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
+ if i&(goarch.PtrSize*8-1) == 0 {
bits = uint32(*ptrmask)
ptrmask = addb(ptrmask, 1)
} else {
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
// Clear bits corresponding to objects.
- nw := (s.npages << _PageShift) / sys.PtrSize
+ nw := (s.npages << _PageShift) / goarch.PtrSize
if nw%wordsPerBitmapByte != 0 {
throw("initSpan: unaligned length")
}
if h.shift != 0 {
throw("initSpan: unaligned base")
}
- isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
+ isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
for nw > 0 {
hNext, anw := h.forwardOrBoundary(nw)
nbyte := anw / wordsPerBitmapByte
-// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
+// The checks for size == goarch.PtrSize and size == 2*goarch.PtrSize can therefore
// assume that dataSize == size without checking it explicitly.
- if sys.PtrSize == 8 && size == sys.PtrSize {
+ if goarch.PtrSize == 8 && size == goarch.PtrSize {
// It's one word and it has pointers, it must be a pointer.
// Since all allocated one-word objects are pointers
// (non-pointers are aggregated into tinySize allocations),
// objects are at least 4 words long and that their bitmaps start either at the beginning
// of a bitmap byte, or half-way in (h.shift of 0 and 2 respectively).
- if size == 2*sys.PtrSize {
- if typ.size == sys.PtrSize {
+ if size == 2*goarch.PtrSize {
+ if typ.size == goarch.PtrSize {
// We're allocating a block big enough to hold two pointers.
// On 64-bit, that means the actual object must be two pointers,
// or else we'd have used the one-pointer-sized block.
// just the smallest block available. Distinguish by checking dataSize.
// (In general the number of instances of typ being allocated is
// dataSize/typ.size.)
- if sys.PtrSize == 4 && dataSize == sys.PtrSize {
+ if goarch.PtrSize == 4 && dataSize == goarch.PtrSize {
// 1 pointer object. On 32-bit machines clear the bit for the
// unused second word.
*h.bitp &^= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
-// Otherwise typ.size must be 2*sys.PtrSize,
+// Otherwise typ.size must be 2*goarch.PtrSize,
// and typ.kind&kindGCProg == 0.
if doubleCheck {
- if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
+ if typ.size != 2*goarch.PtrSize || typ.kind&kindGCProg != 0 {
print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
throw("heapBitsSetType")
}
}
b := uint32(*ptrmask)
hb := b & 3
- hb |= bitScanAll & ((bitScan << (typ.ptrdata / sys.PtrSize)) - 1)
+ hb |= bitScanAll & ((bitScan << (typ.ptrdata / goarch.PtrSize)) - 1)
// Clear the bits for this object so we can set the
// appropriate ones.
*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
*h.bitp |= uint8(hb << h.shift)
return
- } else if size == 3*sys.PtrSize {
+ } else if size == 3*goarch.PtrSize {
b := uint8(*ptrmask)
if doubleCheck {
if b == 0 {
println("runtime: invalid type ", typ.string())
throw("heapBitsSetType: called with non-pointer type")
}
- if sys.PtrSize != 8 {
+ if goarch.PtrSize != 8 {
throw("heapBitsSetType: unexpected 3 pointer wide size class on 32 bit")
}
if typ.kind&kindGCProg != 0 {
throw("heapBitsSetType: unexpected GC prog for 3 pointer wide size class")
}
- if typ.size == 2*sys.PtrSize {
+ if typ.size == 2*goarch.PtrSize {
print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, "\n")
throw("heapBitsSetType: inconsistent object sizes")
}
}
- if typ.size == sys.PtrSize {
+ if typ.size == goarch.PtrSize {
// The type contains a pointer otherwise heapBitsSetType wouldn't have been called.
// Since the type is only 1 pointer wide and contains a pointer, its gcdata must be exactly 1.
if doubleCheck && *typ.gcdata != 1 {
// Filling in bits for an array of typ.
// Set up for repetition of ptrmask during main loop.
// Note that ptrmask describes only a prefix of
- const maxBits = sys.PtrSize*8 - 7
- if typ.ptrdata/sys.PtrSize <= maxBits {
+ const maxBits = goarch.PtrSize*8 - 7
+ if typ.ptrdata/goarch.PtrSize <= maxBits {
// Entire ptrmask fits in uintptr with room for a byte fragment.
// Load into pbits and never read from ptrmask again.
// This is especially important when the ptrmask has
// Accumulate ptrmask into b.
// ptrmask is sized to describe only typ.ptrdata, but we record
// it as describing typ.size bytes, since all the high bits are zero.
- nb = typ.ptrdata / sys.PtrSize
+ nb = typ.ptrdata / goarch.PtrSize
for i := uintptr(0); i < nb; i += 8 {
b |= uintptr(*p) << i
p = add1(p)
}
- nb = typ.size / sys.PtrSize
+ nb = typ.size / goarch.PtrSize
// Replicate ptrmask to fill entire pbits uintptr.
// Doubling and truncating is fewer steps than
pbits = b
endnb = nb
if nb+nb <= maxBits {
- for endnb <= sys.PtrSize*8 {
+ for endnb <= goarch.PtrSize*8 {
pbits |= pbits << endnb
endnb += endnb
}
endp = nil
} else {
// Ptrmask is larger. Read it multiple times.
- n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
+ n := (typ.ptrdata/goarch.PtrSize+7)/8 - 1
endp = addb(ptrmask, n)
- endnb = typ.size/sys.PtrSize - n*8
+ endnb = typ.size/goarch.PtrSize - n*8
}
}
if p != nil {
if typ.size == dataSize {
// Single entry: can stop once we reach the non-pointer data.
- nw = typ.ptrdata / sys.PtrSize
+ nw = typ.ptrdata / goarch.PtrSize
} else {
// Repeated instances of typ in an array.
// Have to process first N-1 entries in full, but can stop
// once we reach the non-pointer data in the final entry.
- nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
+ nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / goarch.PtrSize
}
if nw == 0 {
// No pointers! Caller was supposed to check.
}
// Change nw from counting possibly-pointer words to total words in allocation.
- nw = size / sys.PtrSize
+ nw = size / goarch.PtrSize
// Write whole bitmap bytes.
// The first is hb, the rest are zero.
h := heapBitsForAddr(x)
// cnw is the number of heap words, or bit pairs
// remaining (like nw above).
- cnw := size / sys.PtrSize
+ cnw := size / goarch.PtrSize
src := (*uint8)(unsafe.Pointer(x))
// We know the first and last byte of the bitmap are
// not the same, but it's still possible for small
if doubleCheck {
// x+size may not point to the heap, so back up one
// word and then advance it the way we do above.
- end := heapBitsForAddr(x + size - sys.PtrSize)
+ end := heapBitsForAddr(x + size - goarch.PtrSize)
if outOfPlace {
// In out-of-place copying, we just advance
// using next.
// Double-check that bits to be written were written correctly.
// Does not check that other bits were not written, unfortunately.
h := heapBitsForAddr(x)
- nptr := typ.ptrdata / sys.PtrSize
- ndata := typ.size / sys.PtrSize
+ nptr := typ.ptrdata / goarch.PtrSize
+ ndata := typ.size / goarch.PtrSize
count := dataSize / typ.size
- totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
- for i := uintptr(0); i < size/sys.PtrSize; i++ {
+ totalptr := ((count-1)*typ.size + typ.ptrdata) / goarch.PtrSize
+ for i := uintptr(0); i < size/goarch.PtrSize; i++ {
j := i % ndata
var have, want uint8
have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
- println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
+ println("at word", i, "offset", i*goarch.PtrSize, "have", hex(have), "want", hex(want))
if typ.kind&kindGCProg != 0 {
println("GC program:")
dumpGCProg(addb(typ.gcdata, 4))
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
- if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
+ if goarch.PtrSize == 8 && allocSize%(4*goarch.PtrSize) != 0 {
// Alignment will be wrong.
throw("heapBitsSetTypeGCProg: small allocation")
}
var totalBits uintptr
if elemSize == dataSize {
totalBits = runGCProg(prog, nil, h.bitp, 2)
- if totalBits*sys.PtrSize != progSize {
+ if totalBits*goarch.PtrSize != progSize {
println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
throw("heapBitsSetTypeGCProg: unexpected bit count")
}
// repeats that first element to fill the array.
var trailer [40]byte // 3 varints (max 10 each) + some bytes
i := 0
- if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
+ if n := elemSize/goarch.PtrSize - progSize/goarch.PtrSize; n > 0 {
// literal(0)
trailer[i] = 0x01
i++
// repeat(elemSize/ptrSize, count-1)
trailer[i] = 0x80
i++
- n := elemSize / sys.PtrSize
+ n := elemSize / goarch.PtrSize
for ; n >= 0x80; n >>= 7 {
trailer[i] = byte(n | 0x80)
i++
// last element. This will cause the code below to
// memclr the dead section of the final array element,
// so that scanobject can stop early in the final element.
- totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
+ totalBits = (elemSize*(count-1) + progSize) / goarch.PtrSize
}
endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
- endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
+ endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/goarch.PtrSize/wordsPerBitmapByte))
memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
}
// size the size of the region described by prog, in bytes.
-// The resulting bitvector will have no more than size/sys.PtrSize bits.
+// The resulting bitvector will have no more than size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
- n := (size/sys.PtrSize + 7) / 8
+ n := (size/goarch.PtrSize + 7) / 8
x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
x[len(x)-1] = 0xa1 // overflow check sentinel
n = runGCProg(prog, nil, &x[0], 1)
// the pattern to a bit buffer holding at most 7 bits (a partial byte)
// it will not overflow.
src := dst
- const maxBits = sys.PtrSize*8 - 7
+ const maxBits = goarch.PtrSize*8 - 7
if n <= maxBits {
// Start with bits in output buffer.
pattern := bits
nb := npattern
if nb+nb <= maxBits {
// Double pattern until the whole uintptr is filled.
- for nb <= sys.PtrSize*8 {
+ for nb <= goarch.PtrSize*8 {
b |= b << nb
nb += nb
}
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
// Each word of ptrdata needs one bit in the bitmap.
- bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
+ bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
// Compute the number of pages needed for bitmapBytes.
pages := divRoundUp(bitmapBytes, pageSize)
s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
func reflect_gcbits(x interface{}) []byte {
ret := getgcmask(x)
typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
- nptr := typ.ptrdata / sys.PtrSize
+ nptr := typ.ptrdata / goarch.PtrSize
for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
ret = ret[:len(ret)-1]
}
if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
bitmap := datap.gcdatamask.bytedata
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - datap.data) / sys.PtrSize
- mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - datap.data) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
return
}
if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
bitmap := datap.gcbssmask.bytedata
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - datap.bss) / sys.PtrSize
- mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
return
}
if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
hbits := heapBitsForAddr(base)
n := s.elemsize
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
if hbits.isPointer() {
- mask[i/sys.PtrSize] = 1
+ mask[i/goarch.PtrSize] = 1
}
if !hbits.morePointers() {
- mask = mask[:i/sys.PtrSize]
+ mask = mask[:i/goarch.PtrSize]
break
}
hbits = hbits.next()
if locals.n == 0 {
return
}
- size := uintptr(locals.n) * sys.PtrSize
+ size := uintptr(locals.n) * goarch.PtrSize
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
- mask[i/sys.PtrSize] = locals.ptrbit(off)
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - frame.varp + size) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = locals.ptrbit(off)
}
}
return
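Aside (illustration only, not part of the patch): the data, bss, and stack branches of getgcmask above share the same addressing scheme, one mask bit per pointer-sized word, packed eight to a byte. A small sketch of that indexing with a hypothetical helper:

	package main

	import "fmt"

	// bitmapBit returns the mask bit for word index w of a bitmap that
	// stores one bit per pointer-sized word, eight words per byte,
	// matching the (off/8, off%8) arithmetic above.
	func bitmapBit(bitmap []byte, w uintptr) byte {
		return (bitmap[w/8] >> (w % 8)) & 1
	}

	func main() {
		bitmap := []byte{0b00000101, 0b10000000}
		fmt.Println(bitmapBit(bitmap, 0), bitmapBit(bitmap, 2), bitmapBit(bitmap, 15)) // 1 1 1
		fmt.Println(bitmapBit(bitmap, 1), bitmapBit(bitmap, 8))                        // 0 0
	}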
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
// allocation.
//
//go:notinheap
-type checkmarksMap [heapArenaBytes / sys.PtrSize / 8]uint8
+type checkmarksMap [heapArenaBytes / goarch.PtrSize / 8]uint8
// If useCheckmark is true, marking of an object uses the checkmark
// bits instead of the standard mark bits.
import (
"internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
next *finblock
cnt uint32
_ int32
- fin [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
+ fin [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}
var finlock mutex // protects the following variables
var fing *g // goroutine that runs finalizers
var finq *finblock // list of finalizers that are to be executed
var finc *finblock // cache of free blocks
-var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
+var finptrmask [_FinBlockSize / goarch.PtrSize / 8]byte
var fingwait bool
var fingwake bool
var allfin *finblock // list of all blocks
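Aside (illustration only, not part of the patch): the fin array length above is just the space left in a block after the header divided by the finalizer size, and finptrmask needs one bit per pointer-sized word of a block. A rough 64-bit check, assuming _FinBlockSize = 10*1024 as defined in malloc.go:

	package main

	import "fmt"

	func main() {
		const finBlockSize = 10 * 1024 // assumed _FinBlockSize
		const ptrSize = 8
		const finalizerSize = 5 * ptrSize // fn, arg, nret, fint, ot
		fmt.Println((finBlockSize - 2*ptrSize - 2*4) / finalizerSize) // 255 finalizers per block
		fmt.Println(finBlockSize / ptrSize / 8)                       // 160 bytes of finptrmask
	}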
if finptrmask[0] == 0 {
// Build pointer mask for Finalizer array in block.
// Check assumptions made in finalizer1 array above.
- if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
+ if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
unsafe.Offsetof(finalizer{}.fn) != 0 ||
- unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
- unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
- unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
- unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
+ unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
throw("finalizer out of sync")
}
for i := range finptrmask {
for _, t := range ft.out() {
nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
}
- nret = alignUp(nret, sys.PtrSize)
+ nret = alignUp(nret, goarch.PtrSize)
// make sure we have a finalizer goroutine
createfing()
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
- if rootBlockBytes%(8*sys.PtrSize) != 0 {
+ if rootBlockBytes%(8*goarch.PtrSize) != 0 {
// This is necessary to pick byte offsets in ptrmask0.
throw("rootBlockBytes must be a multiple of 8*ptrSize")
}
return
}
b := b0 + off
- ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
+ ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
n := uintptr(rootBlockBytes)
if off+n > n0 {
n = n0 - off
scanobject(p, gcw)
// The special itself is a root.
- scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
+ scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
}
unlock(&s.speciallock)
}
// register that gets moved back and forth between the
// register and sched.ctxt without a write barrier.
if gp.sched.ctxt != nil {
- scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
// Scan the stack. Accumulate a list of stack objects.
if d.fn != nil {
// Scan the func value, which could be a stack allocated closure.
// See issue 30453.
- scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
if d.link != nil {
// The link field of a stack-allocated defer record might point
// to a heap-allocated defer record. Keep that heap record live.
- scanblock(uintptr(unsafe.Pointer(&d.link)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
// Retain defers records themselves.
// Defer records might not be reachable from the G through regular heap
// tracing because the defer linked list might weave between the stack and the heap.
if d.heap {
- scanblock(uintptr(unsafe.Pointer(&d)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
}
if gp._panic != nil {
// Scan local variables if stack frame has been allocated.
if locals.n > 0 {
- size := uintptr(locals.n) * sys.PtrSize
+ size := uintptr(locals.n) * goarch.PtrSize
scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
}
// Scan arguments.
if args.n > 0 {
- scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
+ scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
}
// Add all stack objects to the stack object list.
for i := uintptr(0); i < n; {
// Find bits for the next word.
- bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
+ bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
if bits == 0 {
- i += sys.PtrSize * 8
+ i += goarch.PtrSize * 8
continue
}
for j := 0; j < 8 && i < n; j++ {
}
}
bits >>= 1
- i += sys.PtrSize
+ i += goarch.PtrSize
}
}
}
}
var i uintptr
- for i = 0; i < n; i, hbits = i+sys.PtrSize, hbits.next() {
+ for i = 0; i < n; i, hbits = i+goarch.PtrSize, hbits.next() {
// Load bits once. See CL 22712 and issue 16973 for discussion.
bits := hbits.bits()
if bits&bitScan == 0 {
print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
hexdumpWords(b, b+n, func(p uintptr) byte {
if ptrmask != nil {
- word := (p - b) / sys.PtrSize
+ word := (p - b) / goarch.PtrSize
bits := *addb(ptrmask, word/8)
if (bits>>(word%8))&1 == 0 {
return '$'
printunlock()
}
- for i := uintptr(0); i < n; i += sys.PtrSize {
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
if ptrmask != nil {
- word := i / sys.PtrSize
+ word := i / goarch.PtrSize
bits := *addb(ptrmask, word/8)
if bits == 0 {
// Skip 8 words (the loop increment will do the 8th)
// seen this word of ptrmask, so i
// must be 8-word-aligned, but check
// our reasoning just in case.
- if i%(sys.PtrSize*8) != 0 {
+ if i%(goarch.PtrSize*8) != 0 {
throw("misaligned mask")
}
- i += sys.PtrSize*8 - sys.PtrSize
+ i += goarch.PtrSize*8 - goarch.PtrSize
continue
}
if (bits>>(word%8))&1 == 0 {
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
// obj should be start of allocation, and so must be at least pointer-aligned.
- if obj&(sys.PtrSize-1) != 0 {
+ if obj&(goarch.PtrSize-1) != 0 {
throw("greyobject: obj not pointer-aligned")
}
mbits := span.markBitsForIndex(objIndex)
// We're printing something from a stack frame. We
-// don't know how big it is, so just show up to an
+// don't know how big it is, so just show up to and
// including off.
- size = off + sys.PtrSize
+ size = off + goarch.PtrSize
}
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
// For big objects, just print the beginning (because
// that usually hints at the object's type) and the
// fields around off.
- if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
+ if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
skipped = true
continue
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
//go:notinheap
type stackWorkBuf struct {
stackWorkBufHdr
- obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / sys.PtrSize]uintptr
+ obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
}
// Header declaration must come after the buf declaration above, because of issue #14620.
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
type workbuf struct {
workbufhdr
// account for the above fields
- obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / sys.PtrSize]uintptr
+ obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
}
// workbuf factory routines. These funcs are used to manage the
import (
"internal/cpu"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
assertLockHeld(&h.lock)
if len(h.allspans) >= cap(h.allspans) {
- n := 64 * 1024 / sys.PtrSize
+ n := 64 * 1024 / goarch.PtrSize
if n < cap(h.allspans)*3/2 {
n = cap(h.allspans) * 3 / 2
}
var new []*mspan
sp := (*slice)(unsafe.Pointer(&new))
- sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
+ sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
if sp.array == nil {
throw("runtime: cannot allocate memory")
}
scanobject(base, gcw)
// Mark the finalizer itself, since the
// special isn't part of the GC'd heap.
- scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
+ scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
releasem(mp)
}
return true
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
ranges.len = 0
ranges.cap = 16
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, sysStat))
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
a.sysStat = sysStat
a.totalBytes = 0
}
ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
ranges.len = len(oldRanges) + 1
ranges.cap = cap(oldRanges) * 2
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, a.sysStat))
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
// Copy in the old array, but make space for the new range.
copy(a.ranges[:i], oldRanges[:i])
ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
ranges.len = 0
ranges.cap = cap(a.ranges)
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, b.sysStat))
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
}
b.ranges = b.ranges[:len(a.ranges)]
b.totalBytes = a.totalBytes
import (
"internal/cpu"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
retry:
if top < spineLen {
spine := atomic.Loadp(unsafe.Pointer(&b.spine))
- blockp := add(spine, sys.PtrSize*top)
+ blockp := add(spine, goarch.PtrSize*top)
block = (*spanSetBlock)(atomic.Loadp(blockp))
} else {
// Add a new block to the spine, potentially growing
if newCap == 0 {
newCap = spanSetInitSpineCap
}
- newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
+ newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
if b.spineCap != 0 {
// Blocks are allocated off-heap, so
// no write barriers.
- memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
+ memmove(newSpine, b.spine, b.spineCap*goarch.PtrSize)
}
// Spine is allocated off-heap, so no write barrier.
atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
block = spanSetBlockPool.alloc()
// Add it to the spine.
- blockp := add(b.spine, sys.PtrSize*top)
+ blockp := add(b.spine, goarch.PtrSize*top)
// Blocks are allocated off-heap, so no write barrier.
atomic.StorepNoWB(blockp, unsafe.Pointer(block))
atomic.Storeuintptr(&b.spineLen, spineLen+1)
// grows monotonically and we've already verified it, we'll definitely
// be reading from a valid block.
spine := atomic.Loadp(unsafe.Pointer(&b.spine))
- blockp := add(spine, sys.PtrSize*uintptr(top))
+ blockp := add(spine, goarch.PtrSize*uintptr(top))
// Given that the spine length is correct, we know we will never
// see a nil block here, since the length is always updated after
// since it may be pushed into again. In order to avoid leaking
// memory since we're going to reset the head and tail, clean
// up such a block now, if it exists.
- blockp := (**spanSetBlock)(add(b.spine, sys.PtrSize*uintptr(top)))
+ blockp := (**spanSetBlock)(add(b.spine, goarch.PtrSize*uintptr(top)))
block := *blockp
if block != nil {
// Sanity check the popped value.
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
// Add a uint32 to ensure this struct is a multiple of 8 bytes in size.
// Only necessary on 32-bit platforms.
- _ [(sys.PtrSize / 4) % 2]uint32
+ _ [(goarch.PtrSize / 4) % 2]uint32
}
// merge adds in the deltas from b into a.
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
p := (*[2]uintptr)(unsafe.Pointer(b.next))
p[0] = old
p[1] = new
- b.next += 2 * sys.PtrSize
+ b.next += 2 * goarch.PtrSize
return b.next != b.end
}
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
if usesLR {
c.setlr(pc)
} else {
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = pc
c.setsp(sp)
}
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
// skip NULL separator
n++
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
}
maskSize := int(maxcpus+7) / 8
- if maskSize < sys.PtrSize {
- maskSize = sys.PtrSize
+ if maskSize < goarch.PtrSize {
+ maskSize = goarch.PtrSize
}
if maskSize > len(mask) {
maskSize = len(mask)
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
if sysauxv(auxv[:]) != 0 {
return
}
import (
"internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
param := tforkt{
tf_tcb: unsafe.Pointer(&mp.tls[0]),
tf_tid: nil, // minit will record tid
- tf_stack: uintptr(stk) - sys.PtrSize,
+ tf_stack: uintptr(stk) - goarch.PtrSize,
}
var oset sigset
import (
"internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
case "386", "amd64":
// Make it look like the thread called targetPC.
sp := c.sp()
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = newpc
c.set_sp(sp)
c.set_ip(targetPC)
import (
"internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
f = findfunc(abi.FuncPCABIInternal(asyncPreempt2))
total += funcMaxSPDelta(f)
// Add some overhead for return PCs, etc.
- asyncPreemptStack = uintptr(total) + 8*sys.PtrSize
+ asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize
if asyncPreemptStack > _StackLimit {
// We need more than the nosplit limit. This isn't
// unsafe, but it may limit asynchronous preemption.
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
var markbuf [1]byte
markbuf[0] = ' '
minhexdigits = int(unsafe.Sizeof(uintptr(0)) * 2)
- for i := uintptr(0); p+i < end; i += sys.PtrSize {
+ for i := uintptr(0); p+i < end; i += goarch.PtrSize {
if i%16 == 0 {
if i != 0 {
println()
"internal/cpu"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
// they look nicer in the stack overflow failure message.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
maxstacksize = 1000000000
} else {
maxstacksize = 250000000
// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
- return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
+ return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}
// forEachG calls fn on every G from allgs.
gp := malg(4096)
gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
gp.sched.sp = gp.stack.hi
- gp.sched.sp -= 4 * sys.PtrSize // extra space in case of reads slightly beyond frame
+ gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
gp.sched.lr = 0
gp.sched.g = guintptr(unsafe.Pointer(gp))
gp.syscallpc = gp.sched.pc
throw("newproc1: new g is not Gdead")
}
- totalSize := uintptr(4*sys.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
+ totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
totalSize = alignUp(totalSize, sys.StackAlign)
sp := newg.stack.hi - totalSize
spArg := sp
t.state = 1 // initialization in progress
for i := uintptr(0); i < t.ndeps; i++ {
- p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
+ p := add(unsafe.Pointer(t), (3+i)*goarch.PtrSize)
t2 := *(**initTask)(p)
doInit(t2)
}
before = inittrace
}
- firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*sys.PtrSize)
+ firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*goarch.PtrSize)
for i := uintptr(0); i < t.nfns; i++ {
- p := add(firstFunc, i*sys.PtrSize)
+ p := add(firstFunc, i*goarch.PtrSize)
f := *(*func())(unsafe.Pointer(&p))
f()
}
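Aside, not part of the diff: a sketch of the count-then-pointer-sized-entries walk that doInit performs, on a hand-built toy layout. The real initTask stores raw code pointers; this toy stores ordinary func values, so each entry is a plain pointer-sized load.
package main

import (
	"fmt"
	"unsafe"
)

type toyTask struct {
	nfns uintptr
	fns  [3]func() // in the real initTask the entries follow the header in memory
}

func run(t *toyTask) {
	first := unsafe.Add(unsafe.Pointer(t), unsafe.Offsetof(t.fns))
	for i := uintptr(0); i < t.nfns; i++ {
		p := unsafe.Add(first, i*unsafe.Sizeof(uintptr(0))) // step one word at a time
		(*(*func())(p))()                                   // load the i'th entry and call it
	}
}

func main() {
	t := &toyTask{nfns: 3}
	for i := range t.fns {
		i := i
		t.fns[i] = func() { fmt.Println("init step", i) }
	}
	run(t)
}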
import (
"internal/bytealg"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
- return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
+ return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}
func args(c int32, v **byte) {
if unsafe.Sizeof(j) != 8 {
throw("bad j")
}
- if unsafe.Sizeof(k) != sys.PtrSize {
+ if unsafe.Sizeof(k) != goarch.PtrSize {
throw("bad k")
}
- if unsafe.Sizeof(l) != sys.PtrSize {
+ if unsafe.Sizeof(l) != goarch.PtrSize {
throw("bad l")
}
if unsafe.Sizeof(x1) != 1 {
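Aside, not part of the diff: the same size checks expressed with unsafe.Sizeof, which is exactly what goarch.PtrSize abbreviates inside the runtime.
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var k unsafe.Pointer
	var l *int
	ptrSize := unsafe.Sizeof(uintptr(0))
	fmt.Println(unsafe.Sizeof(k) == ptrSize) // true
	fmt.Println(unsafe.Sizeof(l) == ptrSize) // true
}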
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
// tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
// like Windows.
tlsSlots = 6
- tlsSize = tlsSlots * sys.PtrSize
+ tlsSize = tlsSlots * goarch.PtrSize
)
type m struct {
w = 16
}
h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
- for i := 0; i < sys.PtrSize && n < len(r); i++ {
+ for i := 0; i < goarch.PtrSize && n < len(r); i++ {
r[n] = byte(h)
n++
h >>= 8
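Aside, not part of the diff: spreading one hash word across up to PtrSize output bytes by shifting eight bits at a time, as extendRandom does; the hash value below is a made-up stand-in for memhash.
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	r := make([]byte, 16)
	n := 0
	h := uintptr(0x89abcdef) // stand-in for memhash(...)
	for i := 0; i < int(unsafe.Sizeof(uintptr(0))) && n < len(r); i++ {
		r[n] = byte(h) // take the low byte
		n++
		h >>= 8 // expose the next byte
	}
	fmt.Printf("% x\n", r[:n])
}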
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Make it look like we called target at resumePC.
sp := uintptr(c.esp())
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = resumePC
c.set_esp(uint32(sp))
c.set_eip(uint32(targetPC))
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Make it look like we called target at resumePC.
sp := uintptr(c.rsp())
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = resumePC
c.set_rsp(uint64(sp))
c.set_rip(uint64(targetPC))
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_esp(x uint32) { c.regs().esp = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint32) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_rsp(x uint64) { c.regs().rsp = x }
func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint32) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_r28(x uint64) { c.regs().regs[28] = x }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
import (
"internal/abi"
+ "internal/goarch"
"runtime/internal/sys"
"unsafe"
)
func (c *sigctxt) set_pc(x uint64) { c.regs().psw_addr = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
func dumpregs(c *sigctxt) {
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
// functions are correctly handled. This smashes
// the stack frame but we're not going back there
// anyway.
- sp := c.sp() - sys.PtrSize
+ sp := c.sp() - goarch.PtrSize
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
// functions are correctly handled. This smashes
// the stack frame but we're not going back there
// anyway.
- sp := c.sp() - sys.PtrSize
+ sp := c.sp() - goarch.PtrSize
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra()
// push the call. The function being pushed is responsible
// for restoring the LR and setting the SP back.
// This extra slot is known to gentraceback.
- sp := c.sp() - sys.PtrSize
+ sp := c.sp() - goarch.PtrSize
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra()
// Set up PC and LR to pretend the function being signaled
"internal/abi"
"runtime/internal/math"
"runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
capmem = roundupsize(uintptr(newcap))
overflow = uintptr(newcap) > maxAlloc
newcap = int(capmem)
- case et.size == sys.PtrSize:
- lenmem = uintptr(old.len) * sys.PtrSize
- newlenmem = uintptr(cap) * sys.PtrSize
- capmem = roundupsize(uintptr(newcap) * sys.PtrSize)
- overflow = uintptr(newcap) > maxAlloc/sys.PtrSize
- newcap = int(capmem / sys.PtrSize)
+ case et.size == goarch.PtrSize:
+ lenmem = uintptr(old.len) * goarch.PtrSize
+ newlenmem = uintptr(cap) * goarch.PtrSize
+ capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
+ overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
+ newcap = int(capmem / goarch.PtrSize)
case isPowerOfTwo(et.size):
var shift uintptr
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// Mask shift for better code generation.
shift = uintptr(sys.Ctz64(uint64(et.size))) & 63
} else {
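Aside, not part of the diff: the power-of-two fast path in growslice turns multiply and divide by the element size into shifts; in this sketch math/bits.TrailingZeros64 plays the role of sys.Ctz64 and the element size is made up.
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const elemSize = 16 // a power-of-two element size
	newcap := uint64(25)

	shift := uint(bits.TrailingZeros64(elemSize)) & 63
	capmem := newcap << shift // newcap * elemSize
	back := capmem >> shift   // capmem / elemSize

	fmt.Println(capmem, back) // 400 25
}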
"internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
// to each stack below the usual guard area for OS-specific
// purposes like signal handling. Used on Windows, Plan 9,
// and iOS because they do not use a separate stack.
- _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosIos*sys.GoarchArm64*1024
+ _StackSystem = sys.GoosWindows*512*goarch.PtrSize + sys.GoosPlan9*512 + sys.GoosIos*sys.GoarchArm64*1024
// The minimum size of stack used by Go code
_StackMin = 2048
)
const (
- uintptrMask = 1<<(8*sys.PtrSize) - 1
+ uintptrMask = 1<<(8*goarch.PtrSize) - 1
// The values below can be stored to g.stackguard0 to force
// the next stack check to fail.
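Aside, not part of the diff: the same all-ones mask written outside the runtime; unsafe.Sizeof is a compile-time constant, so the expression mirrors 1<<(8*goarch.PtrSize) - 1.
package main

import (
	"fmt"
	"unsafe"
)

const uintptrMask = 1<<(8*unsafe.Sizeof(uintptr(0))) - 1

func main() {
	fmt.Println(uintptr(uintptrMask) == ^uintptr(0)) // true on both 32- and 64-bit
}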
for i := uintptr(0); i < num; i += 8 {
if stackDebug >= 4 {
for j := uintptr(0); j < 8; j++ {
- print(" ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
+ print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
}
}
b := *(addb(bv.bytedata, i/8))
for b != 0 {
j := uintptr(sys.Ctz8(b))
b &= b - 1
- pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
+ pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
retry:
p := *pp
if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
// Adjust local variables if stack frame has been allocated.
if locals.n > 0 {
- size := uintptr(locals.n) * sys.PtrSize
+ size := uintptr(locals.n) * goarch.PtrSize
adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
}
// Adjust saved base pointer if there is one.
// TODO what about arm64 frame pointer adjustment?
- if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.PtrSize {
+ if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
if stackDebug >= 3 {
print(" saved bp\n")
}
s = materializeGCProg(ptrdata, gcdata)
gcdata = (*byte)(unsafe.Pointer(s.startAddr))
}
- for i := uintptr(0); i < ptrdata; i += sys.PtrSize {
- if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
+ for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
+ if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
adjustpointer(adjinfo, unsafe.Pointer(p+i))
}
}
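Aside, not part of the diff: the per-word pointer bitmap test used above, with one bit per pointer-sized word and eight words per bitmap byte, run over a hand-written bitmap.
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	wordSize := unsafe.Sizeof(uintptr(0))      // goarch.PtrSize inside the runtime
	bitmap := []byte{0b0000_1001, 0b0000_0010} // words 0, 3 and 9 hold pointers
	ptrdata := 10 * wordSize

	for i := uintptr(0); i < ptrdata; i += wordSize {
		if bitmap[i/(8*wordSize)]>>(i/wordSize&7)&1 != 0 {
			fmt.Println("pointer at word", i/wordSize)
		}
	}
}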
sp := gp.sched.sp
if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
// The call to morestack cost a word.
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
}
if stackDebug >= 1 || sp < gp.stack.lo {
print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
// In this case, arglen specifies how much of the args section is actually live.
// (It could be either all the args + results, or just the args.)
args = *frame.argmap
- n := int32(frame.arglen / sys.PtrSize)
+ n := int32(frame.arglen / goarch.PtrSize)
if n < args.n {
args.n = n // Don't use more of the arguments than arglen.
}
p := funcdata(f, _FUNCDATA_StackObjects)
if p != nil {
n := *(*uintptr)(p)
- p = add(p, sys.PtrSize)
+ p = add(p, goarch.PtrSize)
*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
// Note: the noescape above is needed to keep
// getStackMap from "leaking param content:
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
func moduledataverify1(datap *moduledata) {
// Check that the pclntab's format is valid.
hdr := datap.pcHeader
- if hdr.magic != 0xfffffffa || hdr.pad1 != 0 || hdr.pad2 != 0 || hdr.minLC != sys.PCQuantum || hdr.ptrSize != sys.PtrSize {
+ if hdr.magic != 0xfffffffa || hdr.pad1 != 0 || hdr.pad2 != 0 || hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize {
print("runtime: function symbol table header:", hex(hdr.magic), hex(hdr.pad1), hex(hdr.pad2), hex(hdr.minLC), hex(hdr.ptrSize))
if datap.pluginpath != "" {
print(", plugin:", datap.pluginpath)
- // For now, align to sys.PtrSize and reduce mod the number of entries.
+ // For now, align to goarch.PtrSize and reduce mod the number of entries.
// In practice, this appears to be fairly randomly and evenly distributed.
func pcvalueCacheKey(targetpc uintptr) uintptr {
- return (targetpc / sys.PtrSize) % uintptr(len(pcvalueCache{}.entries))
+ return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
}
// Returns the PCData value, and the PC where this value starts.
func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
x, _ := pcvalue(f, f.pcsp, targetpc, cache, true)
- if x&(sys.PtrSize-1) != 0 {
+ if x&(goarch.PtrSize-1) != 0 {
print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
}
return x
return nil
}
p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
- if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
+ if goarch.PtrSize == 8 && uintptr(p)&4 != 0 {
if uintptr(unsafe.Pointer(f._func))&4 != 0 {
println("runtime: misaligned func", f._func)
}
p = add(p, 4)
}
- return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
+ return *(*unsafe.Pointer)(add(p, uintptr(i)*goarch.PtrSize))
}
// step advances to the next pc, value pair in the encoded table.
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
for i, x := range tlsbase {
if x == magic {
- *tlsg = uintptr(i * sys.PtrSize)
+ *tlsg = uintptr(i * goarch.PtrSize)
g0_pthread_setspecific(k, 0)
return
}
import (
+ "internal/goarch"
"runtime/internal/sys"
"unsafe"
)
// and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
sp := buf.sp
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
buf.sp = sp
buf.pc = uintptr(fn)
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
// and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
sp := buf.sp
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
buf.sp = sp
buf.pc = uintptr(fn)
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
}
func (p *abiDesc) assignArg(t *_type) {
- if t.size > sys.PtrSize {
+ if t.size > goarch.PtrSize {
// We don't support this right now. In
// stdcall/cdecl, 64-bit ints and doubles are
// passed as two words (little endian); and
// cdecl, stdcall, fastcall, and arm pad arguments to word size.
// TODO(rsc): On arm and arm64 do we need to skip the caller's saved LR?
- p.srcStackSize += sys.PtrSize
+ p.srcStackSize += goarch.PtrSize
}
// tryRegAssignArg tries to register-assign a value of type t.
return p.assignReg(t.size, offset)
case kindInt64, kindUint64:
// Only register-assign if the registers are big enough.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
return p.assignReg(t.size, offset)
}
case kindArray:
return abi.FuncPCABI0(callbackasm) + uintptr(i*entrySize)
}
-const callbackMaxFrame = 64 * sys.PtrSize
+const callbackMaxFrame = 64 * goarch.PtrSize
// compileCallback converts a Go function fn into a C function pointer
// that can be passed to Windows APIs.
}
// The Go ABI aligns the result to the word size. src is
// already aligned.
- abiMap.dstStackSize = alignUp(abiMap.dstStackSize, sys.PtrSize)
+ abiMap.dstStackSize = alignUp(abiMap.dstStackSize, goarch.PtrSize)
abiMap.retOffset = abiMap.dstStackSize
if len(ft.out()) != 1 {
panic("compileCallback: expected function with one uintptr-sized result")
}
- if ft.out()[0].size != sys.PtrSize {
+ if ft.out()[0].size != goarch.PtrSize {
panic("compileCallback: expected function with one uintptr-sized result")
}
if k := ft.out()[0].kind & kindMask; k == kindFloat32 || k == kindFloat64 {
// Make room for the uintptr-sized result.
// If there are argument registers, the return value will
// be passed in the first register.
- abiMap.dstStackSize += sys.PtrSize
+ abiMap.dstStackSize += goarch.PtrSize
}
// TODO(mknyszek): Remove dstSpill from this calculation when we no longer have
// caller reserved spill space.
- frameSize := alignUp(abiMap.dstStackSize, sys.PtrSize)
+ frameSize := alignUp(abiMap.dstStackSize, goarch.PtrSize)
frameSize += abiMap.dstSpill
if frameSize > callbackMaxFrame {
panic("compileCallback: function argument frame too large")
// TODO(mknyszek): Remove this when we no longer have
// caller reserved spill space.
- frameSize := alignUp(c.abiMap.dstStackSize, sys.PtrSize)
+ frameSize := alignUp(c.abiMap.dstStackSize, goarch.PtrSize)
frameSize += c.abiMap.dstSpill
// Even though this is copying back results, we can pass a nil
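Aside, not part of the diff: a sketch of the word-alignment rounding used for dstStackSize and frameSize above, assuming the usual (n + a - 1) &^ (a - 1) form for a power-of-two alignment.
package main

import (
	"fmt"
	"unsafe"
)

// alignUp rounds n up to a multiple of a, where a must be a power of two.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	ptrSize := unsafe.Sizeof(uintptr(0)) // goarch.PtrSize inside the runtime
	fmt.Println(alignUp(13, ptrSize))    // 16 with either 4- or 8-byte words
}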
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
- return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
+ return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
}
// allFrames returns all of the Frames corresponding to pcs.
//go:notinheap
type traceAllocBlock struct {
next traceAllocBlockPtr
- data [64<<10 - sys.PtrSize]byte
+ data [64<<10 - goarch.PtrSize]byte
}
// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
- n = alignUp(n, sys.PtrSize)
+ n = alignUp(n, goarch.PtrSize)
if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
if n > uintptr(len(a.head.ptr().data)) {
throw("trace: alloc too large")
"internal/bytealg"
"runtime/internal/atomic"
"runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
frame.lr = 0
} else {
frame.pc = uintptr(*(*uintptr)(unsafe.Pointer(frame.sp)))
- frame.sp += sys.PtrSize
+ frame.sp += goarch.PtrSize
}
}
frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &cache))
if !usesLR {
// On x86, call instruction pushes return PC before entering new function.
- frame.fp += sys.PtrSize
+ frame.fp += goarch.PtrSize
}
}
var flr funcInfo
}
} else {
if frame.lr == 0 {
- lrPtr = frame.fp - sys.PtrSize
+ lrPtr = frame.fp - goarch.PtrSize
frame.lr = uintptr(*(*uintptr)(unsafe.Pointer(lrPtr)))
}
}
frame.varp = frame.fp
if !usesLR {
// On x86, call instruction pushes return PC before entering new function.
- frame.varp -= sys.PtrSize
+ frame.varp -= goarch.PtrSize
}
// For architectures with frame pointers, if there's
// And it happens to end up mimicking the x86 layout.
// Other architectures may make different decisions.
if frame.varp > frame.sp && framepointer_enabled {
- frame.varp -= sys.PtrSize
+ frame.varp -= goarch.PtrSize
}
// Derive size of arguments.
// Figure out whether the return values are valid.
// Reflect will update this value after it copies
// in the return values.
- retValid = *(*bool)(unsafe.Pointer(arg0 + 4*sys.PtrSize))
+ retValid = *(*bool)(unsafe.Pointer(arg0 + 4*goarch.PtrSize))
}
if mv.fn != f.entry {
print("runtime: confused by ", funcname(f), "\n")
throw("reflect mismatch")
}
bv := mv.stack
- arglen = uintptr(bv.n * sys.PtrSize)
+ arglen = uintptr(bv.n * goarch.PtrSize)
if !retValid {
- arglen = uintptr(mv.argLen) &^ (sys.PtrSize - 1)
+ arglen = uintptr(mv.argLen) &^ (goarch.PtrSize - 1)
}
argmap = bv
}
// for debugging purposes. If the address bad is included in the
// hexdumped range, it will mark it as well.
func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
- const expand = 32 * sys.PtrSize
- const maxExpand = 256 * sys.PtrSize
+ const expand = 32 * goarch.PtrSize
+ const maxExpand = 256 * goarch.PtrSize
// Start around frame.sp.
lo, hi := frame.sp, frame.sp
// Expand to include frame.fp.