From 2aa7c6c5488d3ef5d3eeb53488f74d39facd9301 Mon Sep 17 00:00:00 2001
From: Keith Randall
Date: Mon, 6 Jan 2020 11:23:08 -0800
Subject: [PATCH] cmd/compile: don't allow NaNs in floating-point constant ops

We store 32-bit floating point constants in a 64-bit field, by
converting that 32-bit float to a 64-bit float to store it, and
converting it back to use it.

That works for *almost* all floating-point constants. The exception is
signaling NaNs. The round trip described above means we can't represent
a 32-bit signaling NaN, because conversions strip the signaling bit.

To fix this issue, just forbid NaNs as floating-point constants in SSA
form. This shouldn't affect any real-world code, as people seldom
constant-propagate NaNs (except in test code).

Additionally, NaNs are somewhat underspecified (which of the many NaNs
do you get when dividing 0/0?), so when cross-compiling there's a
danger of using the compiler machine's NaN regime for some math, and
the target machine's NaN regime for other math. Better to use the
target machine's NaN regime always.

This has been a bug since 1.10, and there's an easy workaround (declare
a global variable containing the signaling NaN pattern, and use that as
the argument to math.Float32frombits), so we'll fix it in 1.15.

Fixes #36400
Update #36399

Change-Id: Icf155e743281560eda2eed953d19a829552ccfda
Reviewed-on: https://go-review.googlesource.com/c/go/+/213477
Run-TryBot: Keith Randall
TryBot-Result: Gobot Gobot
Reviewed-by: Josh Bleecher Snyder
---
 src/cmd/compile/internal/gc/float_test.go     | 60 +++++++++++++++++++
 src/cmd/compile/internal/ssa/check.go         | 10 +++-
 src/cmd/compile/internal/ssa/gen/PPC64.rules  |  2 +-
 src/cmd/compile/internal/ssa/gen/Wasm.rules   | 21 +++----
 .../compile/internal/ssa/gen/generic.rules    | 14 ++---
 .../compile/internal/ssa/gen/genericOps.go    |  7 ++-
 src/cmd/compile/internal/ssa/rewrite.go       |  6 ++
 src/cmd/compile/internal/ssa/rewritePPC64.go  |  4 ++
 src/cmd/compile/internal/ssa/rewriteWasm.go   | 41 +++++++++++++
 .../compile/internal/ssa/rewritegeneric.go    | 28 +++++++--
 test/codegen/math.go                          | 33 +++++++++-
 11 files changed, 200 insertions(+), 26 deletions(-)

diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go
index c5c604003a..94da49ee41 100644
--- a/src/cmd/compile/internal/gc/float_test.go
+++ b/src/cmd/compile/internal/gc/float_test.go
@@ -483,6 +483,66 @@ func TestFloat32StoreToLoadConstantFold(t *testing.T) {
 	}
 }
 
+// Signaling NaN values as constants.
+const (
+	snan32bits uint32 = 0x7f800001
+	snan64bits uint64 = 0x7ff0000000000001
+)
+
+// Signaling NaNs as variables.
+var snan32bitsVar uint32 = snan32bits
+var snan64bitsVar uint64 = snan64bits
+
+func TestFloatSignalingNaN(t *testing.T) {
+	// Make sure we generate a signaling NaN from a constant properly.
+	// See issue 36400.
+	f32 := math.Float32frombits(snan32bits)
+	g32 := math.Float32frombits(snan32bitsVar)
+	x32 := math.Float32bits(f32)
+	y32 := math.Float32bits(g32)
+	if x32 != y32 {
+		t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32)
+	}
+
+	f64 := math.Float64frombits(snan64bits)
+	g64 := math.Float64frombits(snan64bitsVar)
+	x64 := math.Float64bits(f64)
+	y64 := math.Float64bits(g64)
+	if x64 != y64 {
+		t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64)
+	}
+}
+
+func TestFloatSignalingNaNConversion(t *testing.T) {
+	// Test to make sure when we convert a signaling NaN, it converts to a quiet NaN.
+	// See issue 36399.
+	s32 := math.Float32frombits(snan32bitsVar)
+	q64 := float64(s32)
+	if math.Float64bits(q64)>>51&1 == 0 {
+		t.Errorf("got signaling NaN, want quiet NaN")
+	}
+	s64 := math.Float64frombits(snan64bitsVar)
+	q32 := float32(s64)
+	if math.Float32bits(q32)>>22&1 == 0 {
+		t.Errorf("got signaling NaN, want quiet NaN")
+	}
+}
+
+func TestFloatSignalingNaNConversionConst(t *testing.T) {
+	// Test to make sure when we convert a signaling NaN, it converts to a quiet NaN.
+	// See issue 36399 and 36400.
+	s32 := math.Float32frombits(snan32bits)
+	q64 := float64(s32)
+	if math.Float64bits(q64)>>51&1 == 0 {
+		t.Errorf("got signaling NaN, want quiet NaN")
+	}
+	s64 := math.Float64frombits(snan64bits)
+	q32 := float32(s64)
+	if math.Float32bits(q32)>>22&1 == 0 {
+		t.Errorf("got signaling NaN, want quiet NaN")
+	}
+}
+
 var sinkFloat float64
 
 func BenchmarkMul2(b *testing.B) {
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index 4e258fe82b..ecce581f4b 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -141,15 +141,23 @@ func checkFunc(f *Func) {
 					f.Fatalf("bad int32 AuxInt value for %v", v)
 				}
 				canHaveAuxInt = true
-			case auxInt64, auxFloat64:
+			case auxInt64:
 				canHaveAuxInt = true
 			case auxInt128:
 				// AuxInt must be zero, so leave canHaveAuxInt set to false.
 			case auxFloat32:
 				canHaveAuxInt = true
+				if math.IsNaN(v.AuxFloat()) {
+					f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+				}
 				if !isExactFloat32(v.AuxFloat()) {
 					f.Fatalf("value %v has an AuxInt value that is not an exact float32", v)
 				}
+			case auxFloat64:
+				canHaveAuxInt = true
+				if math.IsNaN(v.AuxFloat()) {
+					f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+				}
 			case auxString, auxSym, auxTyp, auxArchSpecific:
 				canHaveAux = true
 			case auxSymOff, auxSymValAndOff, auxTypSize:
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index e03712b118..ed95620db4 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -78,7 +78,7 @@
 
 // Constant folding
 (FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
-(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
+(FSQRT (FMOVDconst [x])) && auxTo64F(x) >= 0 -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
 (FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
 (FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
 (FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules
index 1080f0d820..68d26f8268 100644
--- a/src/cmd/compile/internal/ssa/gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules
@@ -372,7 +372,7 @@
 (I64Or (I64Const [x]) (I64Const [y])) -> (I64Const [x | y])
 (I64Xor (I64Const [x]) (I64Const [y])) -> (I64Const [x ^ y])
 (F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))])
-(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
+(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(auxTo64F(x) * auxTo64F(y)) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
 (I64Eq (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [1])
 (I64Eq (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [0])
 (I64Ne (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [0])
@@ -382,15 +382,16 @@
 (I64ShrU (I64Const [x]) (I64Const [y])) -> (I64Const [int64(uint64(x) >> uint64(y))])
 (I64ShrS (I64Const [x]) (I64Const [y])) -> (I64Const [x >> uint64(y)])
 
-(I64Add (I64Const [x]) y) -> (I64Add y (I64Const [x]))
-(I64Mul (I64Const [x]) y) -> (I64Mul y (I64Const [x]))
-(I64And (I64Const [x]) y) -> (I64And y (I64Const [x]))
-(I64Or (I64Const [x]) y) -> (I64Or y (I64Const [x]))
-(I64Xor (I64Const [x]) y) -> (I64Xor y (I64Const [x]))
-(F64Add (F64Const [x]) y) -> (F64Add y (F64Const [x]))
-(F64Mul (F64Const [x]) y) -> (F64Mul y (F64Const [x]))
-(I64Eq (I64Const [x]) y) -> (I64Eq y (I64Const [x]))
-(I64Ne (I64Const [x]) y) -> (I64Ne y (I64Const [x]))
+// TODO: declare these operations as commutative and get rid of these rules?
+(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Add y (I64Const [x]))
+(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Mul y (I64Const [x]))
+(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64And y (I64Const [x]))
+(I64Or (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Or y (I64Const [x]))
+(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Xor y (I64Const [x]))
+(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Add y (F64Const [x]))
+(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Mul y (F64Const [x]))
+(I64Eq (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Eq y (I64Const [x]))
+(I64Ne (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Ne y (I64Const [x]))
 
 (I64Eq x (I64Const [0])) -> (I64Eqz x)
 (I64Ne x (I64Const [0])) -> (I64Eqz (I64Eqz x))
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index 1382cdc259..8e51b6b657 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -118,8 +118,8 @@
 (Mul16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c*d))])
 (Mul32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c*d))])
 (Mul64 (Const64 [c]) (Const64 [d])) -> (Const64 [c*d])
-(Mul32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
-(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
+(Mul32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
+(Mul64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) * auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
 
 (And8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c&d))])
 (And16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c&d))])
@@ -144,8 +144,8 @@
 (Div16u (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(uint16(c)/uint16(d)))])
 (Div32u (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(uint32(c)/uint32(d)))])
 (Div64u (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c)/uint64(d))])
-(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
-(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
+(Div32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
+(Div64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) / auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
 
 (Select0 (Div128u (Const64 [0]) lo y)) -> (Div64u lo y)
 (Select1 (Div128u (Const64 [0]) lo y)) -> (Mod64u lo y)
@@ -588,8 +588,8 @@
 	-> x
 
 // Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
-(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x])
-(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
+(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) -> (Const64F [x])
+(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
 (Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) -> (Const64 [x])
 (Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(auxTo32F(x))))])
 
@@ -1858,7 +1858,7 @@
 (Div32F x (Const32F [c])) && reciprocalExact32(auxTo32F(c)) -> (Mul32F x (Const32F [auxFrom32F(1/auxTo32F(c))]))
 (Div64F x (Const64F [c])) && reciprocalExact64(auxTo64F(c)) -> (Mul64F x (Const64F [auxFrom64F(1/auxTo64F(c))]))
 
-(Sqrt (Const64F [c])) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
+(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(auxTo64F(c))) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
 
 // recognize runtime.newobject and don't Zero/Nilcheck it
 (Zero (Load (OffPtr [c] (SP)) mem) mem)
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index b638d98887..cc72c76c2d 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -339,7 +339,12 @@ var genericOps = []opData{
 	{name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits
 	// Note: ConstX are sign-extended even when the type of the value is unsigned.
 	// For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
-	{name: "Const64", aux: "Int64"}, // value is auxint
+	{name: "Const64", aux: "Int64"},    // value is auxint
+	// Note: for both Const32F and Const64F, we disallow encoding NaNs.
+	// Signaling NaNs are tricky because if you do anything with them, they become quiet.
+	// Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN.
+	// See issue 36399 and 36400.
+	// Encodings of +inf, -inf, and -0 are fine.
 	{name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32
 	{name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
 	{name: "ConstInterface"},           // nil interface
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index ef24dad747..61c5a6bff8 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -487,11 +487,17 @@ func DivisionNeedsFixUp(v *Value) bool {
 
 // auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
 func auxFrom64F(f float64) int64 {
+	if f != f {
+		panic("can't encode a NaN in AuxInt field")
+	}
 	return int64(math.Float64bits(f))
 }
 
 // auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
 func auxFrom32F(f float32) int64 {
+	if f != f {
+		panic("can't encode a NaN in AuxInt field")
+	}
 	return int64(math.Float64bits(extend32Fto64F(f)))
 }
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 9f62e0d3ba..6009b74588 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -5828,12 +5828,16 @@ func rewriteValuePPC64_OpPPC64FNEG(v *Value) bool {
 func rewriteValuePPC64_OpPPC64FSQRT(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (FSQRT (FMOVDconst [x]))
+	// cond: auxTo64F(x) >= 0
 	// result: (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
 	for {
 		if v_0.Op != OpPPC64FMOVDconst {
 			break
 		}
 		x := v_0.AuxInt
+		if !(auxTo64F(x) >= 0) {
+			break
+		}
 		v.reset(OpPPC64FMOVDconst)
 		v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(x)))
 		return true
diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go
index 9e236a6e0e..d3fc82b86b 100644
--- a/src/cmd/compile/internal/ssa/rewriteWasm.go
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -3,6 +3,7 @@
 
 package ssa
 
+import "math"
 import "cmd/internal/objabi"
 import "cmd/compile/internal/types"
 
@@ -3993,6 +3994,7 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool {
 		return true
 	}
 	// match: (F64Add (F64Const [x]) y)
+	// cond: y.Op != OpWasmF64Const
 	// result: (F64Add y (F64Const [x]))
 	for {
 		if v_0.Op != OpWasmF64Const {
@@ -4000,6 +4002,9 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmF64Const) {
+			break
+		}
 		v.reset(OpWasmF64Add)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
@@ -4015,6 +4020,7 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (F64Mul (F64Const [x]) (F64Const [y]))
+	// cond: !math.IsNaN(auxTo64F(x) * auxTo64F(y))
 	// result: (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
 	for {
 		if v_0.Op != OpWasmF64Const {
@@ -4025,11 +4031,15 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
 			break
 		}
 		y := v_1.AuxInt
+		if !(!math.IsNaN(auxTo64F(x) * auxTo64F(y))) {
+			break
+		}
 		v.reset(OpWasmF64Const)
 		v.AuxInt = auxFrom64F(auxTo64F(x) * auxTo64F(y))
 		return true
 	}
 	// match: (F64Mul (F64Const [x]) y)
+	// cond: y.Op != OpWasmF64Const
 	// result: (F64Mul y (F64Const [x]))
 	for {
 		if v_0.Op != OpWasmF64Const {
 			break
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmF64Const) {
+			break
+		}
 		v.reset(OpWasmF64Mul)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
@@ -4067,6 +4080,7 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool {
 		return true
 	}
 	// match: (I64Add (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Add y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4074,6 +4088,9 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Add)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4153,6 +4170,7 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool {
 		return true
 	}
 	// match: (I64And (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64And y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4160,6 +4178,9 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64And)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4213,6 +4234,7 @@ func rewriteValueWasm_OpWasmI64Eq(v *Value) bool {
 		return true
 	}
 	// match: (I64Eq (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Eq y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4220,6 +4242,9 @@ func rewriteValueWasm_OpWasmI64Eq(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Eq)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4533,6 +4558,7 @@ func rewriteValueWasm_OpWasmI64Mul(v *Value) bool {
 		return true
 	}
 	// match: (I64Mul (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Mul y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4540,6 +4566,9 @@ func rewriteValueWasm_OpWasmI64Mul(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Mul)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4593,6 +4622,7 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool {
 		return true
 	}
 	// match: (I64Ne (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Ne y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4600,6 +4630,9 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Ne)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4643,6 +4676,7 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool {
 		return true
 	}
 	// match: (I64Or (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Or y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4650,6 +4684,9 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Or)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4852,6 +4889,7 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool {
 		return true
 	}
 	// match: (I64Xor (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Xor y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4859,6 +4897,9 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Xor)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index a4a2506d8e..1a40d3a699 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -3579,6 +3579,7 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Div32F (Const32F [c]) (Const32F [d]))
+	// cond: !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))
 	// result: (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
 	for {
 		if v_0.Op != OpConst32F {
@@ -3589,6 +3590,9 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool {
 			break
 		}
 		d := v_1.AuxInt
+		if !(!math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))) {
+			break
+		}
 		v.reset(OpConst32F)
 		v.AuxInt = auxFrom32F(auxTo32F(c) / auxTo32F(d))
 		return true
@@ -4052,6 +4056,7 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Div64F (Const64F [c]) (Const64F [d]))
+	// cond: !math.IsNaN(auxTo64F(c) / auxTo64F(d))
 	// result: (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
 	for {
 		if v_0.Op != OpConst64F {
@@ -4062,6 +4067,9 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool {
 			break
 		}
 		d := v_1.AuxInt
+		if !(!math.IsNaN(auxTo64F(c) / auxTo64F(d))) {
+			break
+		}
 		v.reset(OpConst64F)
 		v.AuxInt = auxFrom64F(auxTo64F(c) / auxTo64F(d))
 		return true
@@ -9564,7 +9572,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 		return true
 	}
 	// match: (Load <t1> p1 (Store {t2} p2 (Const64 [x]) _))
-	// cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1)
+	// cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))
 	// result: (Const64F [x])
 	for {
 		t1 := v.Type
@@ -9580,7 +9588,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 			break
 		}
 		x := v_1_1.AuxInt
-		if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1)) {
+		if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) {
 			break
 		}
 		v.reset(OpConst64F)
@@ -9588,7 +9596,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 		return true
 	}
 	// match: (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _))
-	// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1)
+	// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))
 	// result: (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
 	for {
 		t1 := v.Type
@@ -9604,7 +9612,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 			break
 		}
 		x := v_1_1.AuxInt
-		if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1)) {
+		if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) {
 			break
 		}
 		v.reset(OpConst32F)
@@ -13529,6 +13537,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (Mul32F (Const32F [c]) (Const32F [d]))
+	// cond: !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))
 	// result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13540,6 +13549,9 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool {
 				continue
 			}
 			d := v_1.AuxInt
+			if !(!math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))) {
+				continue
+			}
 			v.reset(OpConst32F)
 			v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d))
 			return true
@@ -13779,6 +13791,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (Mul64F (Const64F [c]) (Const64F [d]))
+	// cond: !math.IsNaN(auxTo64F(c) * auxTo64F(d))
 	// result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13790,6 +13803,9 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool {
 				continue
 			}
 			d := v_1.AuxInt
+			if !(!math.IsNaN(auxTo64F(c) * auxTo64F(d))) {
+				continue
+			}
 			v.reset(OpConst64F)
 			v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d))
 			return true
@@ -19663,12 +19679,16 @@ func rewriteValuegeneric_OpSlicemask(v *Value) bool {
 func rewriteValuegeneric_OpSqrt(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Sqrt (Const64F [c]))
+	// cond: !math.IsNaN(math.Sqrt(auxTo64F(c)))
 	// result: (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
 	for {
 		if v_0.Op != OpConst64F {
 			break
 		}
 		c := v_0.AuxInt
+		if !(!math.IsNaN(math.Sqrt(auxTo64F(c)))) {
+			break
+		}
 		v.reset(OpConst64F)
 		v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(c)))
 		return true
diff --git a/test/codegen/math.go b/test/codegen/math.go
index 80e5d60d96..1ebfda0405 100644
--- a/test/codegen/math.go
+++ b/test/codegen/math.go
@@ -151,13 +151,13 @@ func toFloat32(u32 uint32) float32 {
 func constantCheck64() bool {
 	// amd64:"MOVB\t[$]0",-"FCMP",-"MOVB\t[$]1"
 	// s390x:"MOV(B|BZ|D)\t[$]0,",-"FCMPU",-"MOV(B|BZ|D)\t[$]1,"
-	return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63)) || math.NaN() == math.NaN()
+	return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63))
 }
 
 func constantCheck32() bool {
 	// amd64:"MOVB\t[$]1",-"FCMP",-"MOVB\t[$]0"
 	// s390x:"MOV(B|BZ|D)\t[$]1,",-"FCMPU",-"MOV(B|BZ|D)\t[$]0,"
-	return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31)) && float32(math.NaN()) != float32(math.NaN())
+	return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31))
 }
 
 // Test that integer constants are converted to floating point constants
@@ -186,3 +186,32 @@ func constantConvertInt32(x uint32) uint32 {
 	}
 	return x
 }
+
+func nanGenerate64() float64 {
+	// Test to make sure we don't generate a NaN while constant propagating.
+	// See issue 36400.
+	zero := 0.0
+	// amd64:-"DIVSD"
+	inf := 1 / zero // +inf. We can constant propagate this one.
+	negone := -1.0
+
+	// amd64:"DIVSD"
+	z0 := zero / zero
+	// amd64:"MULSD"
+	z1 := zero * inf
+	// amd64:"SQRTSD"
+	z2 := math.Sqrt(negone)
+	return z0 + z1 + z2
+}
+
+func nanGenerate32() float32 {
+	zero := float32(0.0)
+	// amd64:-"DIVSS"
+	inf := 1 / zero // +inf. We can constant propagate this one.
+
+	// amd64:"DIVSS"
+	z0 := zero / zero
+	// amd64:"MULSS"
+	z1 := zero * inf
+	return z0 + z1
+}
-- 
2.50.0
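
The quieting round trip described in the commit message can be observed
from ordinary Go code. Below is a minimal sketch, not part of the patch,
assuming typical IEEE 754 hardware (e.g. x86), where widening a signaling
NaN to float64 sets the quiet bit. Routing the bit pattern through a
variable, like snan32bitsVar in float_test.go, keeps the conversions at
run time rather than compile time.

package main

import (
	"fmt"
	"math"
)

// 0x7f800001: exponent all ones, quiet bit (bit 22) clear, so a signaling NaN.
var snanBits uint32 = 0x7f800001

func main() {
	s := math.Float32frombits(snanBits) // reinterprets bits; no float conversion yet
	d := float64(s)                     // widening quiets the NaN on typical hardware
	b := float32(d)                     // narrowing keeps it quiet
	fmt.Printf("before: %#08x\n", snanBits)
	fmt.Printf("after:  %#08x\n", math.Float32bits(b)) // quiet bit (bit 22) now set
}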
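
The workaround the commit message mentions, sketched for reference (the
package and function names here are illustrative, not from the patch):
keep the bit pattern in a package-level variable so math.Float32frombits
cannot be constant-folded, which lets the signaling bit reach run time
intact on the affected releases (Go 1.10 through 1.14).

package snan

import "math"

// A variable, not a constant, so the compiler cannot constant-propagate
// the bit pattern through the Float32frombits call below.
var snan32 uint32 = 0x7f800001

// SignalingNaN32 returns a float32 signaling NaN built at run time.
func SignalingNaN32() float32 {
	return math.Float32frombits(snan32)
}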
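
For readers unfamiliar with the 64-bit field the commit message refers
to, here is a simplified sketch of the AuxInt encoding that the
auxFrom32F/auxTo32F helpers in rewrite.go implement (the real versions
go through extend32Fto64F and, after this patch, panic on NaN input): a
float32 constant is widened exactly to float64 and its raw bits are kept
in the op's int64 AuxInt.

package main

import (
	"fmt"
	"math"
)

// Simplified stand-ins for the compiler's AuxInt float32 helpers.
func auxFrom32F(f float32) int64 { return int64(math.Float64bits(float64(f))) }
func auxTo32F(i int64) float32   { return float32(math.Float64frombits(uint64(i))) }

func main() {
	f := float32(0.1)
	enc := auxFrom32F(f)            // widen exactly, store the float64 bit pattern
	fmt.Println(auxTo32F(enc) == f) // true: the round trip is exact for any non-NaN float32
}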