sys here is runtime/internal/sys.
Replace uses of sys.CacheLineSize for padding with
cpu.CacheLinePad or cpu.CacheLinePadSize.
Replace other uses of sys.CacheLineSize with cpu.CacheLineSize.
Remove the now-unused sys.CacheLineSize.
Updates #25203
Change-Id: I1daf410fe8f6c0493471c2ceccb9ca0a5a75ed8f
Reviewed-on: https://go-review.googlesource.com/126601
Run-TryBot: Martin Möhrmann <moehrmann@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
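For context, the padding idiom this change standardizes on is embedding a
cpu.CacheLinePad field between hot struct fields. A minimal sketch follows;
since internal/cpu is not importable outside the standard library, the pad
type and its 64-byte amd64 size are mirrored locally, and the counters type
is purely illustrative:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    // Stand-in for cpu.CacheLinePadSize; 64 assumes GOARCH=amd64.
    const cacheLinePadSize = 64

    // Mirrors cpu.CacheLinePad: embedded in structs whose fields are
    // updated concurrently, so each hot field gets its own cache line.
    type cacheLinePad struct{ _ [cacheLinePadSize]byte }

    // counters is a hypothetical example: the pad keeps writers of hits
    // and misses on different CPUs from invalidating each other's line.
    type counters struct {
    	hits   uint64
    	_      cacheLinePad
    	misses uint64
    }

    func main() {
    	var c counters
    	fmt.Println(unsafe.Sizeof(c)) // 80 on amd64: 8 + 64 + 8
    }

Distinct from padding, alignment call sites such as persistentalloc keep
passing a plain size value, now the variable cpu.CacheLineSize, which leaves
room for runtime detection of the real cache line size later.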
"io": {"errors", "sync", "sync/atomic"},
"runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys", "internal/cpu", "internal/bytealg"},
"runtime/internal/sys": {},
- "runtime/internal/atomic": {"unsafe", "runtime/internal/sys"},
+ "runtime/internal/atomic": {"unsafe", "internal/cpu"},
"internal/race": {"runtime", "unsafe"},
"sync": {"internal/race", "runtime", "sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
// CacheLineSize is the CPU's assumed cache line size.
// There is currently no runtime detection of the real cache line size,
// so we use the per-GOARCH constant CacheLinePadSize as an approximation.
-var CacheLineSize = CacheLinePadSize
+var CacheLineSize uintptr = CacheLinePadSize
var X86 x86
package atomic
import (
- "runtime/internal/sys"
+ "internal/cpu"
"unsafe"
)
var locktab [57]struct {
l spinlock
- pad [sys.CacheLineSize - unsafe.Sizeof(spinlock{})]byte
+ pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}
func addrLock(addr *uint64) *spinlock {
package atomic
import (
- "runtime/internal/sys"
+ "internal/cpu"
"unsafe"
)
// TODO implement lock striping
var lock struct {
state uint32
- pad [sys.CacheLineSize - 4]byte
+ pad [cpu.CacheLinePadSize - 4]byte
}
//go:noescape
const (
ArchFamily = I386
BigEndian = false
- CacheLineSize = 64
DefaultPhysPageSize = GoosNacl*65536 + (1-GoosNacl)*4096 // 4k normally; 64k on NaCl
PCQuantum = 1
Int64Align = 4
const (
ArchFamily = AMD64
BigEndian = false
- CacheLineSize = 64
DefaultPhysPageSize = 4096
PCQuantum = 1
Int64Align = 8
const (
ArchFamily = AMD64
BigEndian = false
- CacheLineSize = 64
DefaultPhysPageSize = 65536*GoosNacl + 4096*(1-GoosNacl)
PCQuantum = 1
Int64Align = 8
const (
ArchFamily = ARM
BigEndian = false
- CacheLineSize = 32
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 4
const (
ArchFamily = ARM64
BigEndian = false
- CacheLineSize = 64
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 8
const (
ArchFamily = MIPS
BigEndian = true
- CacheLineSize = 32
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 4
const (
ArchFamily = MIPS64
BigEndian = true
- CacheLineSize = 32
DefaultPhysPageSize = 16384
PCQuantum = 4
Int64Align = 8
const (
ArchFamily = MIPS64
BigEndian = false
- CacheLineSize = 32
DefaultPhysPageSize = 16384
PCQuantum = 4
Int64Align = 8
const (
ArchFamily = MIPS
BigEndian = false
- CacheLineSize = 32
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 4
const (
ArchFamily = PPC64
BigEndian = true
- CacheLineSize = 128
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 8
const (
ArchFamily = PPC64
BigEndian = false
- CacheLineSize = 128
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 8
const (
ArchFamily = S390X
BigEndian = true
- CacheLineSize = 256
DefaultPhysPageSize = 4096
PCQuantum = 2
Int64Align = 8
const (
ArchFamily = WASM
BigEndian = false
- CacheLineSize = 64
DefaultPhysPageSize = 65536
PCQuantum = 1
Int64Align = 8
package runtime
import (
+ "internal/cpu"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
// If this is zero, no fractional workers are needed.
fractionalUtilizationGoal float64
- _ [sys.CacheLineSize]byte
+ _ cpu.CacheLinePad
}
// startCycle resets the GC controller's state and computes estimates
const gcOverAssistWork = 64 << 10
var work struct {
- full lfstack // lock-free list of full blocks workbuf
- empty lfstack // lock-free list of empty blocks workbuf
- pad0 [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
+ full lfstack // lock-free list of full blocks workbuf
+ empty lfstack // lock-free list of empty blocks workbuf
+ pad0 cpu.CacheLinePad // prevents false-sharing between full/empty and nproc/nwait
wbufSpans struct {
lock mutex
package runtime
import (
+ "internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
if newCap == 0 {
newCap = gcSweepBufInitSpineCap
}
- newSpine := persistentalloc(newCap*sys.PtrSize, sys.CacheLineSize, &memstats.gc_sys)
+ newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gc_sys)
if b.spineCap != 0 {
// Blocks are allocated off-heap, so
// no write barriers.
}
// Allocate a new block and add it to the spine.
- block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), sys.CacheLineSize, &memstats.gc_sys))
+ block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), cpu.CacheLineSize, &memstats.gc_sys))
blockp := add(b.spine, sys.PtrSize*top)
// Blocks are allocated off-heap, so no write barrier.
atomic.StorepNoWB(blockp, unsafe.Pointer(block))
package runtime
import (
+ "internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
// central free lists for small size classes.
// the padding makes sure that the MCentrals are
- // spaced CacheLineSize bytes apart, so that each MCentral.lock
+ // spaced CacheLinePadSize bytes apart, so that each MCentral.lock
// gets its own cache line.
// central is indexed by spanClass.
central [numSpanClasses]struct {
mcentral mcentral
- pad [sys.CacheLineSize - unsafe.Sizeof(mcentral{})%sys.CacheLineSize]byte
+ pad [cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize]byte
}
spanalloc fixalloc // allocator for span*
package runtime
import (
+ "internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point
- pad [sys.CacheLineSize]byte
+ pad cpu.CacheLinePad
}
type schedt struct {
package runtime
import (
+ "internal/cpu"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
var semtable [semTabSize]struct {
root semaRoot
- pad [sys.CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
+ pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte
}
//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
package runtime
import (
- "runtime/internal/sys"
+ "internal/cpu"
"unsafe"
)
// The padding should eliminate false sharing
// between timersBucket values.
- pad [sys.CacheLineSize - unsafe.Sizeof(timersBucket{})%sys.CacheLineSize]byte
+ pad [cpu.CacheLinePadSize - unsafe.Sizeof(timersBucket{})%cpu.CacheLinePadSize]byte
}
func (t *timer) assignBucket() *timersBucket {
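The array-element padding above (mheap.central, timersBucket) subtracts the
struct size modulo cpu.CacheLinePadSize rather than the raw size, because the
wrapped struct may be larger than one cache line; the raw form, as used for
the small spinlock and semaRoot cases, would then yield a negative array
length and fail to compile. A sketch of the arithmetic, again assuming a
local 64-byte stand-in and a hypothetical bucket type:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    const cacheLinePadSize = 64 // stand-in for cpu.CacheLinePadSize on amd64

    type bucket struct {
    	vals [9]uint64 // 72 bytes: deliberately wider than one cache line
    }

    // padded rounds each array element up to a cache line boundary.
    // The modulo keeps the pad length small and non-negative even though
    // bucket itself exceeds 64 bytes. (When the size is already a multiple
    // of 64, this form still adds one full line of pad, as the real
    // runtime code does.)
    type padded struct {
    	b   bucket
    	pad [cacheLinePadSize - unsafe.Sizeof(bucket{})%cacheLinePadSize]byte
    }

    func main() {
    	fmt.Println(unsafe.Sizeof(padded{})) // 128 on amd64: 72 rounded up to two lines
    }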