From: Carlo Alberto Ferraris
Date: Sat, 26 Oct 2019 07:26:59 +0000 (+0900)
Subject: runtime: consistently seed fastrand state across archs
X-Git-Tag: go1.14beta1~223
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=97d0505334c71a8d7a1e7431c1e1515c93b59e2b;p=gostls13.git

runtime: consistently seed fastrand state across archs

Some, but not all, architectures mix in OS-provided random seeds when
initializing the fastrand state. The others have TODOs saying we need
to do the same. Lift that logic up in the architecture-independent
part, and use memhash to mix the seed instead of a simple addition.

Previously, dumping the fastrand state at initialization would yield
something like the following on linux-amd64, where the values in the
first column do not change between runs (as thread IDs are sequential
and always start at 0), and the values in the second column, while
changing every run, are pretty correlated:

first run:

0x0 0x44d82f1c
0x5f356495 0x44f339de
0xbe6ac92a 0x44f91cd8
0x1da02dbf 0x44fd91bc
0x7cd59254 0x44fee8a4
0xdc0af6e9 0x4547a1e0
0x3b405b7e 0x474c76fc
0x9a75c013 0x475309dc
0xf9ab24a8 0x4bffd075

second run:

0x0 0xa63fc3eb
0x5f356495 0xa6648dc2
0xbe6ac92a 0xa66c1c59
0x1da02dbf 0xa671bce8
0x7cd59254 0xa70e8287
0xdc0af6e9 0xa7129d2e
0x3b405b7e 0xa7379e2d
0x9a75c013 0xa7e4c64c
0xf9ab24a8 0xa7ecce07

With this change, we get initial states that appear to be much more
unpredictable, both within the same run as well as between runs:

0x11bddad7 0x97241c63
0x553dacc6 0x2bcd8523
0x62c01085 0x16413d92
0x6f40e9e6 0x7a138de6
0xa4898053 0x70d816f0
0x5ca5b433 0x188a395b
0x62778ca9 0xd462c3b5
0xd6e160e4 0xac9b4bd
0xb9571d65 0x597a981d

Change-Id: Ib22c530157d74200df0083f830e0408fd4aaea58
Reviewed-on: https://go-review.googlesource.com/c/go/+/203439
Run-TryBot: Keith Randall
TryBot-Result: Gobot Gobot
Reviewed-by: Keith Randall
---

diff --git a/src/runtime/os_darwin_arm.go b/src/runtime/os_darwin_arm.go
index ee1bd174f1..2703e3cff8 100644
--- a/src/runtime/os_darwin_arm.go
+++ b/src/runtime/os_darwin_arm.go
@@ -19,6 +19,5 @@ func checkgoarm() {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
diff --git a/src/runtime/os_darwin_arm64.go b/src/runtime/os_darwin_arm64.go
index 8de132d8e2..b808150de0 100644
--- a/src/runtime/os_darwin_arm64.go
+++ b/src/runtime/os_darwin_arm64.go
@@ -8,6 +8,5 @@ package runtime
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
diff --git a/src/runtime/os_freebsd_arm.go b/src/runtime/os_freebsd_arm.go
index 3edd381302..3feaa5e225 100644
--- a/src/runtime/os_freebsd_arm.go
+++ b/src/runtime/os_freebsd_arm.go
@@ -44,6 +44,5 @@ func archauxv(tag, val uintptr) {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
diff --git a/src/runtime/os_freebsd_arm64.go b/src/runtime/os_freebsd_arm64.go
index 800bd2fa6e..51ebf9d478 100644
--- a/src/runtime/os_freebsd_arm64.go
+++ b/src/runtime/os_freebsd_arm64.go
@@ -151,6 +151,5 @@ func extractBits(data uint64, start, end uint) uint {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
diff --git a/src/runtime/os_js.go b/src/runtime/os_js.go
index 3738c9b237..ff0ee3aa6b 100644
--- a/src/runtime/os_js.go
+++ b/src/runtime/os_js.go
@@ -131,7 +131,6 @@ func os_sigpipe() {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
 
diff --git a/src/runtime/os_linux_arm.go b/src/runtime/os_linux_arm.go
index 207b0e4d4d..5f89c30f7a 100644
--- a/src/runtime/os_linux_arm.go
+++ b/src/runtime/os_linux_arm.go
@@ -11,8 +11,6 @@ const (
 	_HWCAP_VFPv3 = 1 << 13 // introduced in 2.6.30
 )
 
-var randomNumber uint32
-
 func checkgoarm() {
 	// On Android, /proc/self/auxv might be unreadable and hwcap won't
 	// reflect the CPU capabilities. Assume that every Android arm device
@@ -34,13 +32,6 @@ func checkgoarm() {
 
 func archauxv(tag, val uintptr) {
 	switch tag {
-	case _AT_RANDOM:
-		// sysargs filled in startupRandomData, but that
-		// pointer may not be word aligned, so we must treat
-		// it as a byte array.
-		randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 |
-			uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24
-
 	case _AT_HWCAP:
 		cpu.HWCap = uint(val)
 	case _AT_HWCAP2:
@@ -52,6 +43,5 @@ func archauxv(tag, val uintptr) {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand.
-	return nanotime() + int64(randomNumber)
+	return nanotime()
 }
diff --git a/src/runtime/os_linux_arm64.go b/src/runtime/os_linux_arm64.go
index 2d6f68bdd9..b51bc88820 100644
--- a/src/runtime/os_linux_arm64.go
+++ b/src/runtime/os_linux_arm64.go
@@ -8,17 +8,8 @@ package runtime
 
 import "internal/cpu"
 
-var randomNumber uint32
-
 func archauxv(tag, val uintptr) {
 	switch tag {
-	case _AT_RANDOM:
-		// sysargs filled in startupRandomData, but that
-		// pointer may not be word aligned, so we must treat
-		// it as a byte array.
-		randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 |
-			uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24
-
 	case _AT_HWCAP:
 		// arm64 doesn't have a 'cpuid' instruction equivalent and relies on
 		// HWCAP/HWCAP2 bits for hardware capabilities.
@@ -40,6 +31,5 @@ func archauxv(tag, val uintptr) {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand.
-	return nanotime() + int64(randomNumber)
+	return nanotime()
 }
diff --git a/src/runtime/os_linux_mips64x.go b/src/runtime/os_linux_mips64x.go
index 0d7b84dcee..59d2a8f2c6 100644
--- a/src/runtime/os_linux_mips64x.go
+++ b/src/runtime/os_linux_mips64x.go
@@ -7,25 +7,14 @@
 
 package runtime
 
-var randomNumber uint32
-
 func archauxv(tag, val uintptr) {
-	switch tag {
-	case _AT_RANDOM:
-		// sysargs filled in startupRandomData, but that
-		// pointer may not be word aligned, so we must treat
-		// it as a byte array.
-		randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 |
-			uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24
-	}
 }
 
 //go:nosplit
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand.
-	return nanotime() + int64(randomNumber)
+	return nanotime()
 }
 
 const (
diff --git a/src/runtime/os_linux_mipsx.go b/src/runtime/os_linux_mipsx.go
index e0548ecc79..ccdc3a7fe5 100644
--- a/src/runtime/os_linux_mipsx.go
+++ b/src/runtime/os_linux_mipsx.go
@@ -7,25 +7,14 @@
 
 package runtime
 
-var randomNumber uint32
-
 func archauxv(tag, val uintptr) {
-	switch tag {
-	case _AT_RANDOM:
-		// sysargs filled in startupRandomData, but that
-		// pointer may not be word aligned, so we must treat
-		// it as a byte array.
-		randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 |
-			uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24
-	}
 }
 
 //go:nosplit
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand1.
-	return nanotime() + int64(randomNumber)
+	return nanotime()
 }
 
 const (
diff --git a/src/runtime/os_netbsd_arm.go b/src/runtime/os_netbsd_arm.go
index 95603da643..b5ec23e45b 100644
--- a/src/runtime/os_netbsd_arm.go
+++ b/src/runtime/os_netbsd_arm.go
@@ -30,6 +30,5 @@ func checkgoarm() {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
diff --git a/src/runtime/os_netbsd_arm64.go b/src/runtime/os_netbsd_arm64.go
index fd81eb7557..8d21b0a430 100644
--- a/src/runtime/os_netbsd_arm64.go
+++ b/src/runtime/os_netbsd_arm64.go
@@ -19,6 +19,5 @@ func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintp
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
diff --git a/src/runtime/os_openbsd_arm.go b/src/runtime/os_openbsd_arm.go
index be2e1e9959..0a2409676c 100644
--- a/src/runtime/os_openbsd_arm.go
+++ b/src/runtime/os_openbsd_arm.go
@@ -19,6 +19,5 @@ func checkgoarm() {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
diff --git a/src/runtime/os_openbsd_arm64.go b/src/runtime/os_openbsd_arm64.go
index f15a95b653..d559a2a3e5 100644
--- a/src/runtime/os_openbsd_arm64.go
+++ b/src/runtime/os_openbsd_arm64.go
@@ -12,7 +12,6 @@ import (
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
 
diff --git a/src/runtime/os_plan9_arm.go b/src/runtime/os_plan9_arm.go
index fdce1e7a35..f165a34151 100644
--- a/src/runtime/os_plan9_arm.go
+++ b/src/runtime/os_plan9_arm.go
@@ -12,6 +12,5 @@ func checkgoarm() {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 56e9530ab6..3252173c0a 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -543,6 +543,7 @@ func schedinit() {
 	moduledataverify()
 	stackinit()
 	mallocinit()
+	fastrandinit() // must run before mcommoninit
 	mcommoninit(_g_.m)
 	cpuinit()       // must run before alginit
 	alginit()       // maps must not be used before this call
@@ -620,8 +621,8 @@ func mcommoninit(mp *m) {
 	sched.mnext++
 	checkmcount()
 
-	mp.fastrand[0] = 1597334677 * uint32(mp.id)
-	mp.fastrand[1] = uint32(cputicks())
+	mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed))
+	mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
 	if mp.fastrand[0]|mp.fastrand[1] == 0 {
 		mp.fastrand[1] = 1
 	}
@@ -646,6 +647,13 @@ func mcommoninit(mp *m) {
 	}
 }
 
+var fastrandseed uintptr
+
+func fastrandinit() {
+	s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
+	getRandomData(s)
+}
+
 // Mark gp ready to run.
 func ready(gp *g, traceskip int, next bool) {
 	if trace.enabled {
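
For reference, a rough standalone sketch of the scheme above (illustration only, not
the runtime code): one OS-provided seed is read per process, and each M's two-word
fastrand state is derived by hashing its id and cputicks with that seed. The names
mix64 and readSeed below are stand-ins for the runtime's int64Hash/memhash and
getRandomData, and crypto/rand and time.Now stand in for the runtime's entropy and
cputicks sources.

// seedsketch illustrates per-M fastrand seeding via a seeded hash of (id, ticks).
package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"time"
)

// mix64 is a cheap keyed integer mixer (SplitMix64 finalizer), standing in
// for the runtime's memhash-based int64Hash.
func mix64(x, seed uint64) uint64 {
	x += seed + 0x9e3779b97f4a7c15
	x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9
	x = (x ^ (x >> 27)) * 0x94d049bb133111eb
	return x ^ (x >> 31)
}

// readSeed plays the role of fastrandinit/getRandomData: one OS-provided
// random value for the whole process.
func readSeed() uint64 {
	var b [8]byte
	if _, err := rand.Read(b[:]); err != nil {
		panic(err)
	}
	return binary.LittleEndian.Uint64(b[:])
}

func main() {
	seed := readSeed()
	for id := int64(0); id < 4; id++ {
		ticks := time.Now().UnixNano() // stand-in for cputicks()
		s0 := uint32(mix64(uint64(id), seed))
		s1 := uint32(mix64(uint64(ticks), ^seed))
		if s0|s1 == 0 { // the xorshift state must never be all zero
			s1 = 1
		}
		fmt.Printf("m%d fastrand state: %#08x %#08x\n", id, s0, s1)
	}
}

Keying the per-M values through a seeded hash is what breaks the cross-run
correlation shown in the dumps above: sequential ids no longer map to fixed or
merely shifted states, while the zero check still protects the xorshift generator
behind fastrand.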