// at process startup. Changes to operating system CPU allocation after
// process startup are not reflected.
func NumCPU() int {
-	return int(ncpu)
+	// numCPUStartup is determined once during runtime initialization
+	// (osinit); changes to the OS CPU allocation after process startup
+	// are not reflected here.
+	return int(numCPUStartup)
}
// NumCgoCall returns the number of cgo calls made by the current process.
dumpint(uint64(arenaEnd))
dumpstr(goarch.GOARCH)
dumpstr(buildVersion)
- dumpint(uint64(ncpu))
+ dumpint(uint64(numCPUStartup))
}
func itab_callback(tab *itab) {
// On uniprocessors, no point spinning.
// On multiprocessors, spin for mutexActiveSpinCount attempts.
spin := 0
- if ncpu > 1 {
+ if numCPUStartup > 1 {
spin = mutexActiveSpinCount
}
systemstack(gcResetMarkState)
work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
- if work.stwprocs > ncpu {
- // This is used to compute CPU time of the STW phases,
- // so it can't be more than ncpu, even if GOMAXPROCS is.
- work.stwprocs = ncpu
+ if work.stwprocs > numCPUStartup {
+	// This is used to compute CPU time of the STW phases, so it
+	// can't be more than the CPU count, even if GOMAXPROCS is larger.
+ work.stwprocs = numCPUStartup
}
work.heap0 = gcController.heapLive.Load()
work.pauseNS = 0
// before calling minit on m0.
asmcgocall(unsafe.Pointer(abi.FuncPCABI0(miniterrno)), unsafe.Pointer(&libc____errno))
- ncpu = getncpu()
+ numCPUStartup = getCPUCount()
if physPageSize == 0 {
physPageSize = getPageSize()
}
// before calling minit on m0.
miniterrno()
- ncpu = int32(sysconf(__SC_NPROCESSORS_ONLN))
+ numCPUStartup = getCPUCount()
physPageSize = sysconf(__SC_PAGE_SIZE)
}
+// getCPUCount returns the number of processors currently online,
+// as reported by sysconf(__SC_NPROCESSORS_ONLN).
+func getCPUCount() int32 {
+	return int32(sysconf(__SC_NPROCESSORS_ONLN))
+}
+
// newosproc0 is a version of newosproc that can be called before the runtime
// is initialized.
//
// pthread_create delayed until end of goenvs so that we
// can look at the environment first.
- ncpu = getncpu()
+ numCPUStartup = getCPUCount()
physPageSize = getPageSize()
osinit_hack()
_HW_PAGESIZE = 7
)
-func getncpu() int32 {
+func getCPUCount() int32 {
// Use sysctl to fetch hw.ncpu.
mib := [2]uint32{_CTL_HW, _HW_NCPU}
out := uint32(0)
var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}
-func getncpu() int32 {
+func getCPUCount() int32 {
mib := [2]uint32{_CTL_HW, _HW_NCPU}
out := uint32(0)
nout := unsafe.Sizeof(out)
}
func osinit() {
- ncpu = getncpu()
+ numCPUStartup = getCPUCount()
if physPageSize == 0 {
physPageSize = getPageSize()
}
func cpuset_getaffinity(level int, which int, id int64, size int, mask *byte) int32
//go:systemstack
-func getncpu() int32 {
+func getCPUCount() int32 {
// Use a large buffer for the CPU mask. We're on the system
// stack, so this is fine, and we can't allocate memory for a
// dynamically-sized buffer at this point.
}
func osinit() {
- ncpu = getncpu()
+ numCPUStartup = getCPUCount()
if physPageSize == 0 {
physPageSize = getPageSize()
}
exit(1)
}
- // osinit not called yet, so ncpu not set: must use getncpu directly.
- if getncpu() > 1 && goarm < 7 {
+ // osinit not called yet, so numCPUStartup not set: must use
+ // getCPUCount directly.
+ if getCPUCount() > 1 && goarm < 7 {
print("runtime: this system has multiple CPUs and must use\n")
print("atomic synchronization instructions. Recompile using GOARM=7.\n")
exit(1)
return capval
}
-func getncpu() int32 {
+func getCPUCount() int32 {
n := int32(sysconf(__SC_NPROCESSORS_ONLN))
if n < 1 {
return 1
*(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
}
-func getproccount() int32 {
+func getCPUCount() int32 {
// This buffer is huge (8 kB) but we are on the system stack
// and there should be plenty of space (64 kB).
// Also this is a leaf, so we're not holding up the memory for long.
}
func osinit() {
- ncpu = getproccount()
+ numCPUStartup = getCPUCount()
physHugePageSize = getHugePageSize()
osArchInit()
vgetrandomInit()
return out, true
}
-func getncpu() int32 {
+func getCPUCount() int32 {
if n, ok := sysctlInt([]uint32{_CTL_HW, _HW_NCPUONLINE}); ok {
return int32(n)
}
}
func osinit() {
- ncpu = getncpu()
+ numCPUStartup = getCPUCount()
if physPageSize == 0 {
physPageSize = getPageSize()
}
func checkgoarm() {
// TODO(minux): FP checks like in os_linux_arm.go.
- // osinit not called yet, so ncpu not set: must use getncpu directly.
- if getncpu() > 1 && goarm < 7 {
+ // osinit not called yet, so numCPUStartup not set: must use
+ // getCPUCount directly.
+ if getCPUCount() > 1 && goarm < 7 {
print("runtime: this system has multiple CPUs and must use\n")
print("atomic synchronization instructions. Recompile using GOARM=7.\n")
exit(1)
package runtime
-func getncpu() int32 {
+func getCPUCount() int32 {
n := int32(sysconf(__SC_NPROCESSORS_ONLN))
if n < 1 {
return 1
return sysctlUint64(mib)
}
-func getncpu() int32 {
+func getCPUCount() int32 {
// Try hw.ncpuonline first because hw.ncpu would report a number twice as
// high as the actual CPUs running on OpenBSD 6.4 with hyperthreading
// disabled (hw.smt=0). See https://golang.org/issue/30127
}
func osinit() {
- ncpu = getncpu()
+ numCPUStartup = getCPUCount()
physPageSize = getPageSize()
}
func checkgoarm() {
// TODO(minux): FP checks like in os_linux_arm.go.
- // osinit not called yet, so ncpu not set: must use getncpu directly.
- if getncpu() > 1 && goarm < 7 {
+ // osinit not called yet, so numCPUStartup not set: must use
+ // getCPUCount directly.
+ if getCPUCount() > 1 && goarm < 7 {
print("runtime: this system has multiple CPUs and must use\n")
print("atomic synchronization instructions. Recompile using GOARM=7.\n")
exit(1)
var sysstat = []byte("/dev/sysstat\x00")
-func getproccount() int32 {
+func getCPUCount() int32 {
var buf [2048]byte
fd := open(&sysstat[0], _OREAD|_OCEXEC, 0)
if fd < 0 {
func osinit() {
physPageSize = getPageSize()
initBloc()
- ncpu = getproccount()
+ numCPUStartup = getCPUCount()
getg().m.procid = getpid()
fd := open(&bintimeDev[0], _OREAD|_OCEXEC, 0)
physPageSize = 64 * 1024
initBloc()
blocMax = uintptr(currentMemory()) * physPageSize // record the initial linear memory size
- ncpu = 1
+ numCPUStartup = getCPUCount()
getg().m.procid = 2
}
+// getCPUCount always reports a single CPU on this platform.
+func getCPUCount() int32 {
+	return 1
+}
+
const _SIGSEGV = 0xb
func sigpanic() {
uintptr(unsafe.Pointer(¶ms)), uintptr(unsafe.Pointer(&handle)))
}
-func getproccount() int32 {
+func getCPUCount() int32 {
var mask, sysmask uintptr
ret := stdcall3(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
if ret != 0 {
initSysDirectory()
initLongPathSupport()
- ncpu = getproccount()
+ numCPUStartup = getCPUCount()
physPageSize = getPageSize()
lock(&sched.lock)
sched.lastpoll.Store(nanotime())
- procs := ncpu
+ procs := numCPUStartup
if n, ok := strconv.Atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
procs = n
}
// GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
// As opposed to runtime mutex we don't do passive spinning here,
// because there can be work on global runq or on other Ps.
- if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
+ if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
return false
}
if p := getg().m.p.ptr(); !runqempty(p) {
}
var (
-	allm *m
-	gomaxprocs int32
-	ncpu int32
-	forcegc forcegcstate
-	sched schedt
-	newprocs int32
+	allm *m
+	gomaxprocs int32
+	// numCPUStartup is the number of CPUs determined during process
+	// startup (set in osinit via getCPUCount); changes to operating
+	// system CPU allocation after startup are not reflected.
+	numCPUStartup int32
+	forcegc forcegcstate
+	sched schedt
+	newprocs int32
)
var (
func vgetrandomGetState() uintptr {
lock(&vgetrandomAlloc.statesLock)
if len(vgetrandomAlloc.states) == 0 {
- num := uintptr(ncpu) // Just a reasonable size hint to start.
+ num := uintptr(numCPUStartup) // Just a reasonable size hint to start.
stateSizeCacheAligned := (vgetrandomAlloc.stateSize + cpu.CacheLineSize - 1) &^ (cpu.CacheLineSize - 1)
allocSize := (num*stateSizeCacheAligned + physPageSize - 1) &^ (physPageSize - 1)
num = (physPageSize / stateSizeCacheAligned) * (allocSize / physPageSize)