From ce5cb037d171273f1a5294723234be5495c9d336 Mon Sep 17 00:00:00 2001
From: Keith Randall <khr@golang.org>
Date: Tue, 6 Jan 2015 09:06:44 -0800
Subject: [PATCH] runtime: use some startup randomness in the fallback hashes

Fold in some startup randomness to make the hash vary across different
runs. This helps prevent attackers from choosing keys that all map to
the same bucket.

Also, reorganize the hash a bit. Move the *m1 multiply to after the xor
of the current hash and the message. For hash quality it doesn't really
matter, but for DDOS resistance it helps a lot (any processing done to
the message before it is merged with the random seed is useless, as it
is easily inverted by an attacker).

Update #9365

Change-Id: Ib19968168e1bbc541d1d28be2701bb83e53f1e24
Reviewed-on: https://go-review.googlesource.com/2344
Reviewed-by: Ian Lance Taylor <iant@golang.org>
---
 src/runtime/alg.go    | 20 +++++++-----
 src/runtime/hash32.go | 66 +++++++++++++++++++---------------
 src/runtime/hash64.go | 71 +++++++++++++++++++------------------------
 3 files changed, 75 insertions(+), 82 deletions(-)

diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index 15e3abe368..6713d298da 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -285,20 +285,21 @@ func memclrBytes(b []byte) {
 	memclr(s.array, uintptr(s.len))
 }
 
-// used in asm_{386,amd64}.s
 const hashRandomBytes = ptrSize / 4 * 64
 
+// used in asm_{386,amd64}.s to seed the hash function
 var aeskeysched [hashRandomBytes]byte
 
-func init() {
-	if GOOS == "nacl" {
-		return
-	}
+// used in hash{32,64}.go to seed the hash function
+var hashkey [4]uintptr
 
+func init() {
 	// Install aes hash algorithm if we have the instructions we need
-	if (cpuid_ecx&(1<<25)) != 0 && // aes (aesenc)
-		(cpuid_ecx&(1<<9)) != 0 && // sse3 (pshufb)
-		(cpuid_ecx&(1<<19)) != 0 { // sse4.1 (pinsr{d,q})
+	if (GOARCH == "386" || GOARCH == "amd64") &&
+		GOOS != "nacl" &&
+		cpuid_ecx&(1<<25) != 0 && // aes (aesenc)
+		cpuid_ecx&(1<<9) != 0 && // sse3 (pshufb)
+		cpuid_ecx&(1<<19) != 0 { // sse4.1 (pinsr{d,q})
 		useAeshash = true
 		algarray[alg_MEM].hash = aeshash
 		algarray[alg_MEM8].hash = aeshash
@@ -309,5 +310,8 @@ func init() {
 		algarray[alg_STRING].hash = aeshashstr
 		// Initialize with random data so hash collisions will be hard to engineer.
 		getRandomData(aeskeysched[:])
+		return
 	}
+	getRandomData((*[len(hashkey) * ptrSize]byte)(unsafe.Pointer(&hashkey))[:])
+	hashkey[0] |= 1 // make sure this number is odd
 }
diff --git a/src/runtime/hash32.go b/src/runtime/hash32.go
index 7fada1518b..363b4ae28c 100644
--- a/src/runtime/hash32.go
+++ b/src/runtime/hash32.go
@@ -24,54 +24,53 @@ func memhash(p unsafe.Pointer, s, seed uintptr) uintptr {
 	if GOARCH == "386" && GOOS != "nacl" && useAeshash {
 		return aeshash(p, s, seed)
 	}
-	h := uint32(seed + s)
+	h := uint32(seed + s*hashkey[0])
 tail:
 	switch {
 	case s == 0:
 	case s < 4:
-		w := uint32(*(*byte)(p))
-		w += uint32(*(*byte)(add(p, s>>1))) << 8
-		w += uint32(*(*byte)(add(p, s-1))) << 16
-		h ^= w * m1
+		h ^= uint32(*(*byte)(p))
+		h ^= uint32(*(*byte)(add(p, s>>1))) << 8
+		h ^= uint32(*(*byte)(add(p, s-1))) << 16
+		h = rotl_15(h*m1) * m2
 	case s == 4:
-		h ^= readUnaligned32(p) * m1
+		h ^= readUnaligned32(p)
+		h = rotl_15(h*m1) * m2
 	case s <= 8:
-		h ^= readUnaligned32(p) * m1
-		h = rotl_15(h) * m2
-		h = rotl_11(h)
-		h ^= readUnaligned32(add(p, s-4)) * m1
+		h ^= readUnaligned32(p)
+		h = rotl_15(h*m1) * m2
+		h ^= readUnaligned32(add(p, s-4))
+		h = rotl_15(h*m1) * m2
 	case s <= 16:
-		h ^= readUnaligned32(p) * m1
-		h = rotl_15(h) * m2
-		h = rotl_11(h)
-		h ^= readUnaligned32(add(p, 4)) * m1
-		h = rotl_15(h) * m2
-		h = rotl_11(h)
-		h ^= readUnaligned32(add(p, s-8)) * m1
-		h = rotl_15(h) * m2
-		h = rotl_11(h)
-		h ^= readUnaligned32(add(p, s-4)) * m1
+		h ^= readUnaligned32(p)
+		h = rotl_15(h*m1) * m2
+		h ^= readUnaligned32(add(p, 4))
+		h = rotl_15(h*m1) * m2
+		h ^= readUnaligned32(add(p, s-8))
+		h = rotl_15(h*m1) * m2
+		h ^= readUnaligned32(add(p, s-4))
+		h = rotl_15(h*m1) * m2
 	default:
 		v1 := h
-		v2 := h + m1
-		v3 := h + m2
-		v4 := h + m3
+		v2 := uint32(hashkey[1])
+		v3 := uint32(hashkey[2])
+		v4 := uint32(hashkey[3])
 		for s >= 16 {
-			v1 ^= readUnaligned32(p) * m1
-			v1 = rotl_15(v1) * m2
+			v1 ^= readUnaligned32(p)
+			v1 = rotl_15(v1*m1) * m2
 			p = add(p, 4)
-			v2 ^= readUnaligned32(p) * m1
-			v2 = rotl_15(v2) * m2
+			v2 ^= readUnaligned32(p)
+			v2 = rotl_15(v2*m2) * m3
 			p = add(p, 4)
-			v3 ^= readUnaligned32(p) * m1
-			v3 = rotl_15(v3) * m2
+			v3 ^= readUnaligned32(p)
+			v3 = rotl_15(v3*m3) * m4
 			p = add(p, 4)
-			v4 ^= readUnaligned32(p) * m1
-			v4 = rotl_15(v4) * m2
+			v4 ^= readUnaligned32(p)
+			v4 = rotl_15(v4*m4) * m1
 			p = add(p, 4)
 			s -= 16
 		}
-		h = rotl_11(v1)*m1 + rotl_11(v2)*m2 + rotl_11(v3)*m3 + rotl_11(v4)*m4
+		h = v1 ^ v2 ^ v3 ^ v4
 		goto tail
 	}
 	h ^= h >> 17
@@ -88,6 +87,3 @@ tail:
 func rotl_15(x uint32) uint32 {
 	return (x << 15) | (x >> (32 - 15))
 }
-func rotl_11(x uint32) uint32 {
-	return (x << 11) | (x >> (32 - 11))
-}
diff --git a/src/runtime/hash64.go b/src/runtime/hash64.go
index fc7eef45a4..4a52d98996 100644
--- a/src/runtime/hash64.go
+++ b/src/runtime/hash64.go
@@ -24,61 +24,57 @@ func memhash(p unsafe.Pointer, s, seed uintptr) uintptr {
 	if GOARCH == "amd64" && GOOS != "nacl" && useAeshash {
 		return aeshash(p, s, seed)
 	}
-	h := uint64(seed + s)
+	h := uint64(seed + s*hashkey[0])
 tail:
 	switch {
 	case s == 0:
 	case s < 4:
-		w := uint64(*(*byte)(p))
-		w += uint64(*(*byte)(add(p, s>>1))) << 8
-		w += uint64(*(*byte)(add(p, s-1))) << 16
-		h ^= w * m1
+		h ^= uint64(*(*byte)(p))
+		h ^= uint64(*(*byte)(add(p, s>>1))) << 8
+		h ^= uint64(*(*byte)(add(p, s-1))) << 16
+		h = rotl_31(h*m1) * m2
 	case s <= 8:
-		w := uint64(readUnaligned32(p))
-		w += uint64(readUnaligned32(add(p, s-4))) << 32
-		h ^= w * m1
+		h ^= uint64(readUnaligned32(p))
+		h ^= uint64(readUnaligned32(add(p, s-4))) << 32
+		h = rotl_31(h*m1) * m2
 	case s <= 16:
-		h ^= readUnaligned64(p) * m1
-		h = rotl_31(h) * m2
-		h = rotl_27(h)
-		h ^= readUnaligned64(add(p, s-8)) * m1
+		h ^= readUnaligned64(p)
+		h = rotl_31(h*m1) * m2
+		h ^= readUnaligned64(add(p, s-8))
+		h = rotl_31(h*m1) * m2
 	case s <= 32:
-		h ^= readUnaligned64(p) * m1
-		h = rotl_31(h) * m2
-		h = rotl_27(h)
-		h ^= readUnaligned64(add(p, 8)) * m1
-		h = rotl_31(h) * m2
-		h = rotl_27(h)
-		h ^= readUnaligned64(add(p, s-16)) * m1
-		h = rotl_31(h) * m2
-		h = rotl_27(h)
-		h ^= readUnaligned64(add(p, s-8)) * m1
+		h ^= readUnaligned64(p)
+		h = rotl_31(h*m1) * m2
+		h ^= readUnaligned64(add(p, 8))
+		h = rotl_31(h*m1) * m2
+		h ^= readUnaligned64(add(p, s-16))
+		h = rotl_31(h*m1) * m2
+		h ^= readUnaligned64(add(p, s-8))
+		h = rotl_31(h*m1) * m2
 	default:
 		v1 := h
-		v2 := h + m1
-		v3 := h + m2
-		v4 := h + m3
+		v2 := uint64(hashkey[1])
+		v3 := uint64(hashkey[2])
+		v4 := uint64(hashkey[3])
 		for s >= 32 {
-			v1 ^= readUnaligned64(p) * m1
-			v1 = rotl_31(v1) * m2
+			v1 ^= readUnaligned64(p)
+			v1 = rotl_31(v1*m1) * m2
 			p = add(p, 8)
-			v2 ^= readUnaligned64(p) * m1
-			v2 = rotl_31(v2) * m2
+			v2 ^= readUnaligned64(p)
+			v2 = rotl_31(v2*m2) * m3
 			p = add(p, 8)
-			v3 ^= readUnaligned64(p) * m1
-			v3 = rotl_31(v3) * m2
+			v3 ^= readUnaligned64(p)
+			v3 = rotl_31(v3*m3) * m4
 			p = add(p, 8)
-			v4 ^= readUnaligned64(p) * m1
-			v4 = rotl_31(v4) * m2
+			v4 ^= readUnaligned64(p)
+			v4 = rotl_31(v4*m4) * m1
 			p = add(p, 8)
 			s -= 32
 		}
-		h = rotl_27(v1)*m1 + rotl_27(v2)*m2 + rotl_27(v3)*m3 + rotl_27(v4)*m4
+		h = v1 ^ v2 ^ v3 ^ v4
 		goto tail
 	}
-	h ^= h >> 33
-	h *= m2
 	h ^= h >> 29
 	h *= m3
 	h ^= h >> 32
@@ -91,6 +87,3 @@ tail:
 func rotl_31(x uint64) uint64 {
 	return (x << 31) | (x >> (64 - 31))
 }
-func rotl_27(x uint64) uint64 {
-	return (x << 27) | (x >> (64 - 27))
-}
-- 
2.50.0
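
The core idea of the patch can be seen outside the runtime's plumbing in a
standalone sketch. The program below is illustrative only: the names sketchKey
and sketchHash are hypothetical, the multiplier constants are examples rather
than the runtime's m1..m4, and the tail handling is simplified. What it mirrors
is the two changes above: a per-process random key folded into the state before
any message bytes are touched, and each message word xor'ed into that keyed
state before the multiply, matching the h = rotl_31(h*m1) * m2 pattern.

	package main

	import (
		"crypto/rand"
		"encoding/binary"
		"fmt"
		"math/bits"
	)

	// Example multipliers for the sketch; the runtime defines its own m1..m4.
	const (
		m1 = 0xa0761d6478bd642f
		m2 = 0xe7037ed1a0b428db
		m3 = 0x8ebc6af09c88c6e3
	)

	// sketchKey plays the role of the runtime's hashkey: random at process start.
	var sketchKey [2]uint64

	func init() {
		var b [16]byte
		if _, err := rand.Read(b[:]); err != nil {
			panic(err)
		}
		sketchKey[0] = binary.LittleEndian.Uint64(b[0:8]) | 1 // length multiplier must be odd
		sketchKey[1] = binary.LittleEndian.Uint64(b[8:16])
	}

	// sketchHash hashes p in 8-byte words, zero-padding the tail for brevity
	// (the real memhash handles short tails with overlapping reads instead).
	func sketchHash(p []byte, seed uint64) uint64 {
		// Fold the random key in before any message processing.
		h := seed + uint64(len(p))*sketchKey[0] + sketchKey[1]
		for len(p) > 0 {
			var w [8]byte
			n := copy(w[:], p)
			p = p[n:]
			h ^= binary.LittleEndian.Uint64(w[:]) // merge message into keyed state first...
			h = bits.RotateLeft64(h*m1, 31) * m2  // ...then multiply and rotate
		}
		// Finalizer, mirroring the patch's shortened xorshift/multiply tail.
		h ^= h >> 29
		h *= m3
		h ^= h >> 32
		return h
	}

	func main() {
		fmt.Printf("%#x\n", sketchHash([]byte("hello"), 0))
		fmt.Printf("%#x\n", sketchHash([]byte("hello"), 1)) // different seed, different hash
	}

Because the first operation applied to each message word is an xor with state
already derived from the random key, an attacker who does not know the key has
no fixed pre-mixing transformation to invert; that is the DDOS-resistance
argument the commit message makes for moving the *m1 multiply after the xor.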