From cd9fd640db419ec81026945eb4f22bfe5ff5a27f Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 3 Mar 2020 17:56:20 +0000 Subject: [PATCH] cmd/compile: don't allow NaNs in floating-point constant ops Trying this CL again, with a fixed test that allows platforms to disagree on the exact behavior of converting NaNs. We store 32-bit floating point constants in a 64-bit field, by converting that 32-bit float to 64-bit float to store it, and convert it back to use it. That works for *almost* all floating-point constants. The exception is signaling NaNs. The round trip described above means we can't represent a 32-bit signaling NaN, because conversions strip the signaling bit. To fix this issue, just forbid NaNs as floating-point constants in SSA form. This shouldn't affect any real-world code, as people seldom constant-propagate NaNs (except in test code). Additionally, NaNs are somewhat underspecified (which of the many NaNs do you get when dividing 0/0?), so when cross-compiling there's a danger of using the compiler machine's NaN regime for some math, and the target machine's NaN regime for other math. Better to use the target machine's NaN regime always. Update #36400 Change-Id: Idf203b688a15abceabbd66ba290d4e9f63619ecb Reviewed-on: https://go-review.googlesource.com/c/go/+/221790 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/gc/float_test.go | 58 +++++++++++++++++++ src/cmd/compile/internal/ssa/check.go | 10 +++- src/cmd/compile/internal/ssa/gen/PPC64.rules | 2 +- src/cmd/compile/internal/ssa/gen/Wasm.rules | 21 +++---- .../compile/internal/ssa/gen/generic.rules | 14 ++--- .../compile/internal/ssa/gen/genericOps.go | 7 ++- src/cmd/compile/internal/ssa/rewrite.go | 6 ++ src/cmd/compile/internal/ssa/rewritePPC64.go | 4 ++ src/cmd/compile/internal/ssa/rewriteWasm.go | 41 +++++++++++++ .../compile/internal/ssa/rewritegeneric.go | 28 +++++++-- test/codegen/math.go | 33 ++++++++++- 11 files changed, 198 insertions(+), 26 deletions(-) diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go index c5c604003a..6ae363be22 100644 --- a/src/cmd/compile/internal/gc/float_test.go +++ b/src/cmd/compile/internal/gc/float_test.go @@ -483,6 +483,64 @@ func TestFloat32StoreToLoadConstantFold(t *testing.T) { } } +// Signaling NaN values as constants. +const ( + snan32bits uint32 = 0x7f800001 + snan64bits uint64 = 0x7ff0000000000001 +) + +// Signaling NaNs as variables. +var snan32bitsVar uint32 = snan32bits +var snan64bitsVar uint64 = snan64bits + +func TestFloatSignalingNaN(t *testing.T) { + // Make sure we generate a signaling NaN from a constant properly. + // See issue 36400. + f32 := math.Float32frombits(snan32bits) + g32 := math.Float32frombits(snan32bitsVar) + x32 := math.Float32bits(f32) + y32 := math.Float32bits(g32) + if x32 != y32 { + t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32) + } + + f64 := math.Float64frombits(snan64bits) + g64 := math.Float64frombits(snan64bitsVar) + x64 := math.Float64bits(f64) + y64 := math.Float64bits(g64) + if x64 != y64 { + t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64) + } +} + +func TestFloatSignalingNaNConversion(t *testing.T) { + // Test to make sure when we convert a signaling NaN, we get a NaN. + // (Ideally we want a quiet NaN, but some platforms don't agree.) + // See issue 36399. 
+ s32 := math.Float32frombits(snan32bitsVar) + if s32 == s32 { + t.Errorf("converting a NaN did not result in a NaN") + } + s64 := math.Float64frombits(snan64bitsVar) + if s64 == s64 { + t.Errorf("converting a NaN did not result in a NaN") + } +} + +func TestFloatSignalingNaNConversionConst(t *testing.T) { + // Test to make sure when we convert a signaling NaN, it converts to a NaN. + // (Ideally we want a quiet NaN, but some platforms don't agree.) + // See issue 36399 and 36400. + s32 := math.Float32frombits(snan32bits) + if s32 == s32 { + t.Errorf("converting a NaN did not result in a NaN") + } + s64 := math.Float64frombits(snan64bits) + if s64 == s64 { + t.Errorf("converting a NaN did not result in a NaN") + } +} + var sinkFloat float64 func BenchmarkMul2(b *testing.B) { diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index a6746805f7..4c694a03ac 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -141,15 +141,23 @@ func checkFunc(f *Func) { f.Fatalf("bad int32 AuxInt value for %v", v) } canHaveAuxInt = true - case auxInt64, auxFloat64, auxARM64BitField: + case auxInt64, auxARM64BitField: canHaveAuxInt = true case auxInt128: // AuxInt must be zero, so leave canHaveAuxInt set to false. case auxFloat32: canHaveAuxInt = true + if math.IsNaN(v.AuxFloat()) { + f.Fatalf("value %v has an AuxInt that encodes a NaN", v) + } if !isExactFloat32(v.AuxFloat()) { f.Fatalf("value %v has an AuxInt value that is not an exact float32", v) } + case auxFloat64: + canHaveAuxInt = true + if math.IsNaN(v.AuxFloat()) { + f.Fatalf("value %v has an AuxInt that encodes a NaN", v) + } case auxString, auxSym, auxTyp, auxArchSpecific: canHaveAux = true case auxSymOff, auxSymValAndOff, auxTypSize: diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index f2b2b9b898..c53ec0fde1 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -80,7 +80,7 @@ // Constant folding (FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))]) -(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))]) +(FSQRT (FMOVDconst [x])) && auxTo64F(x) >= 0 -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))]) (FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))]) (FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))]) (FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))]) diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules index cdcbc28c30..bf2b904baf 100644 --- a/src/cmd/compile/internal/ssa/gen/Wasm.rules +++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules @@ -357,7 +357,7 @@ (I64Or (I64Const [x]) (I64Const [y])) -> (I64Const [x | y]) (I64Xor (I64Const [x]) (I64Const [y])) -> (I64Const [x ^ y]) (F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))]) -(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))]) +(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(auxTo64F(x) * auxTo64F(y)) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))]) (I64Eq (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [1]) (I64Eq (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [0]) (I64Ne (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [0]) @@ -367,15 +367,16 @@ (I64ShrU (I64Const [x]) (I64Const [y])) -> (I64Const [int64(uint64(x) 
>> uint64(y))]) (I64ShrS (I64Const [x]) (I64Const [y])) -> (I64Const [x >> uint64(y)]) -(I64Add (I64Const [x]) y) -> (I64Add y (I64Const [x])) -(I64Mul (I64Const [x]) y) -> (I64Mul y (I64Const [x])) -(I64And (I64Const [x]) y) -> (I64And y (I64Const [x])) -(I64Or (I64Const [x]) y) -> (I64Or y (I64Const [x])) -(I64Xor (I64Const [x]) y) -> (I64Xor y (I64Const [x])) -(F64Add (F64Const [x]) y) -> (F64Add y (F64Const [x])) -(F64Mul (F64Const [x]) y) -> (F64Mul y (F64Const [x])) -(I64Eq (I64Const [x]) y) -> (I64Eq y (I64Const [x])) -(I64Ne (I64Const [x]) y) -> (I64Ne y (I64Const [x])) +// TODO: declare these operations as commutative and get rid of these rules? +(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Add y (I64Const [x])) +(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Mul y (I64Const [x])) +(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64And y (I64Const [x])) +(I64Or (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Or y (I64Const [x])) +(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Xor y (I64Const [x])) +(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Add y (F64Const [x])) +(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Mul y (F64Const [x])) +(I64Eq (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Eq y (I64Const [x])) +(I64Ne (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Ne y (I64Const [x])) (I64Eq x (I64Const [0])) -> (I64Eqz x) (I64Ne x (I64Const [0])) -> (I64Eqz (I64Eqz x)) diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index bc16f5a7af..8ec22d86e7 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -119,8 +119,8 @@ (Mul16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c*d))]) (Mul32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c*d))]) (Mul64 (Const64 [c]) (Const64 [d])) -> (Const64 [c*d]) -(Mul32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))]) -(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))]) +(Mul32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))]) +(Mul64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) * auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))]) (And8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c&d))]) (And16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c&d))]) @@ -145,8 +145,8 @@ (Div16u (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(uint16(c)/uint16(d)))]) (Div32u (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(uint32(c)/uint32(d)))]) (Div64u (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c)/uint64(d))]) -(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))]) -(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))]) +(Div32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))]) +(Div64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) / auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))]) (Select0 (Div128u (Const64 [0]) lo y)) -> (Div64u lo y) (Select1 (Div128u (Const64 [0]) lo y)) -> (Mod64u lo y) @@ -623,8 +623,8 @@ -> x // Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits -(Load p1 (Store {t2} 
p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x]) -(Load p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))]) + (Load p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) -> (Const64F [x]) + (Load p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))]) (Load p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) -> (Const64 [x]) (Load p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(auxTo32F(x))))]) @@ -1893,7 +1893,7 @@ (Div32F x (Const32F [c])) && reciprocalExact32(auxTo32F(c)) -> (Mul32F x (Const32F [auxFrom32F(1/auxTo32F(c))])) (Div64F x (Const64F [c])) && reciprocalExact64(auxTo64F(c)) -> (Mul64F x (Const64F [auxFrom64F(1/auxTo64F(c))])) -(Sqrt (Const64F [c])) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))]) +(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(auxTo64F(c))) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))]) // recognize runtime.newobject and don't Zero/Nilcheck it (Zero (Load (OffPtr [c] (SP)) mem) mem) diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index 54c6968c5b..b7e91a1f20 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -323,7 +323,12 @@ var genericOps = []opData{ {name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits // Note: ConstX are sign-extended even when the type of the value is unsigned. // For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa. - {name: "Const64", aux: "Int64"}, // value is auxint + {name: "Const64", aux: "Int64"}, // value is auxint + // Note: for both Const32F and Const64F, we disallow encoding NaNs. + // Signaling NaNs are tricky because if you do anything with them, they become quiet. + // Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN. + // See issue 36399 and 36400. + // Encodings of +inf, -inf, and -0 are fine. {name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32 {name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint)) {name: "ConstInterface"}, // nil interface diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index fcbb76cf34..238e243096 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -487,11 +487,17 @@ func DivisionNeedsFixUp(v *Value) bool { // auxFrom64F encodes a float64 value so it can be stored in an AuxInt. func auxFrom64F(f float64) int64 { + if f != f { + panic("can't encode a NaN in AuxInt field") + } return int64(math.Float64bits(f)) } // auxFrom32F encodes a float32 value so it can be stored in an AuxInt. 
func auxFrom32F(f float32) int64 { + if f != f { + panic("can't encode a NaN in AuxInt field") + } return int64(math.Float64bits(extend32Fto64F(f))) } diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index fe15e71a3e..0094ba1b74 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -5191,12 +5191,16 @@ func rewriteValuePPC64_OpPPC64FNEG(v *Value) bool { func rewriteValuePPC64_OpPPC64FSQRT(v *Value) bool { v_0 := v.Args[0] // match: (FSQRT (FMOVDconst [x])) + // cond: auxTo64F(x) >= 0 // result: (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))]) for { if v_0.Op != OpPPC64FMOVDconst { break } x := v_0.AuxInt + if !(auxTo64F(x) >= 0) { + break + } v.reset(OpPPC64FMOVDconst) v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(x))) return true diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index 2c7add4996..be1b51e7aa 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -3,6 +3,7 @@ package ssa +import "math" import "cmd/internal/objabi" import "cmd/compile/internal/types" @@ -3467,6 +3468,7 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool { return true } // match: (F64Add (F64Const [x]) y) + // cond: y.Op != OpWasmF64Const // result: (F64Add y (F64Const [x])) for { if v_0.Op != OpWasmF64Const { @@ -3474,6 +3476,9 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmF64Const) { + break + } v.reset(OpWasmF64Add) v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64) v0.AuxInt = x @@ -3488,6 +3493,7 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (F64Mul (F64Const [x]) (F64Const [y])) + // cond: !math.IsNaN(auxTo64F(x) * auxTo64F(y)) // result: (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))]) for { if v_0.Op != OpWasmF64Const { @@ -3498,11 +3504,15 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool { break } y := v_1.AuxInt + if !(!math.IsNaN(auxTo64F(x) * auxTo64F(y))) { + break + } v.reset(OpWasmF64Const) v.AuxInt = auxFrom64F(auxTo64F(x) * auxTo64F(y)) return true } // match: (F64Mul (F64Const [x]) y) + // cond: y.Op != OpWasmF64Const // result: (F64Mul y (F64Const [x])) for { if v_0.Op != OpWasmF64Const { @@ -3510,6 +3520,9 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmF64Const) { + break + } v.reset(OpWasmF64Mul) v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64) v0.AuxInt = x @@ -3539,6 +3552,7 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool { return true } // match: (I64Add (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Add y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -3546,6 +3560,9 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Add) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x @@ -3622,6 +3639,7 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool { return true } // match: (I64And (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64And y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -3629,6 +3647,9 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64And) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x @@ -3681,6 +3702,7 
@@ func rewriteValueWasm_OpWasmI64Eq(v *Value) bool { return true } // match: (I64Eq (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Eq y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -3688,6 +3710,9 @@ func rewriteValueWasm_OpWasmI64Eq(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Eq) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x @@ -3993,6 +4018,7 @@ func rewriteValueWasm_OpWasmI64Mul(v *Value) bool { return true } // match: (I64Mul (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Mul y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -4000,6 +4026,9 @@ func rewriteValueWasm_OpWasmI64Mul(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Mul) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x @@ -4052,6 +4081,7 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool { return true } // match: (I64Ne (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Ne y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -4059,6 +4089,9 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Ne) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x @@ -4101,6 +4134,7 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool { return true } // match: (I64Or (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Or y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -4108,6 +4142,9 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Or) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x @@ -4301,6 +4338,7 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool { return true } // match: (I64Xor (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const // result: (I64Xor y (I64Const [x])) for { if v_0.Op != OpWasmI64Const { @@ -4308,6 +4346,9 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool { } x := v_0.AuxInt y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } v.reset(OpWasmI64Xor) v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) v0.AuxInt = x diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 9e743838ab..13873b2ac8 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -4734,6 +4734,7 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Div32F (Const32F [c]) (Const32F [d])) + // cond: !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) // result: (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))]) for { if v_0.Op != OpConst32F { @@ -4744,6 +4745,9 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool { break } d := v_1.AuxInt + if !(!math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))) { + break + } v.reset(OpConst32F) v.AuxInt = auxFrom32F(auxTo32F(c) / auxTo32F(d)) return true @@ -5171,6 +5175,7 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Div64F (Const64F [c]) (Const64F [d])) + // cond: !math.IsNaN(auxTo64F(c) / auxTo64F(d)) // result: (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))]) for { if v_0.Op != OpConst64F { @@ -5181,6 +5186,9 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool { break } d := v_1.AuxInt + if !(!math.IsNaN(auxTo64F(c) / auxTo64F(d))) { + break + } 
v.reset(OpConst64F) v.AuxInt = auxFrom64F(auxTo64F(c) / auxTo64F(d)) return true @@ -10240,7 +10248,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { return true } // match: (Load p1 (Store {t2} p2 (Const64 [x]) _)) - // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) // result: (Const64F [x]) for { t1 := v.Type @@ -10256,7 +10264,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } x := v_1_1.AuxInt - if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1)) { + if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) { break } v.reset(OpConst64F) @@ -10264,7 +10272,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { return true } // match: (Load p1 (Store {t2} p2 (Const32 [x]) _)) - // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) // result: (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))]) for { t1 := v.Type @@ -10280,7 +10288,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } x := v_1_1.AuxInt - if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1)) { + if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) { break } v.reset(OpConst32F) @@ -13970,6 +13978,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (Mul32F (Const32F [c]) (Const32F [d])) + // cond: !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d))) // result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -13981,6 +13990,9 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { continue } d := v_1.AuxInt + if !(!math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))) { + continue + } v.reset(OpConst32F) v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d)) return true @@ -14210,6 +14222,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (Mul64F (Const64F [c]) (Const64F [d])) + // cond: !math.IsNaN(auxTo64F(c) * auxTo64F(d)) // result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -14221,6 +14234,9 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool { continue } d := v_1.AuxInt + if !(!math.IsNaN(auxTo64F(c) * auxTo64F(d))) { + continue + } v.reset(OpConst64F) v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d)) return true @@ -20966,12 +20982,16 @@ func rewriteValuegeneric_OpSlicemask(v *Value) bool { func rewriteValuegeneric_OpSqrt(v *Value) bool { v_0 := v.Args[0] // match: (Sqrt (Const64F [c])) + // cond: !math.IsNaN(math.Sqrt(auxTo64F(c))) // result: (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))]) for { if v_0.Op != OpConst64F { break } c := v_0.AuxInt + if !(!math.IsNaN(math.Sqrt(auxTo64F(c)))) { + break + } v.reset(OpConst64F) v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(c))) return true diff --git a/test/codegen/math.go b/test/codegen/math.go index 80e5d60d96..1ebfda0405 100644 --- a/test/codegen/math.go +++ b/test/codegen/math.go @@ -151,13 +151,13 @@ func toFloat32(u32 uint32) float32 { func constantCheck64() bool { // amd64:"MOVB\t[$]0",-"FCMP",-"MOVB\t[$]1" // s390x:"MOV(B|BZ|D)\t[$]0,",-"FCMPU",-"MOV(B|BZ|D)\t[$]1," - return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63)) 
|| math.NaN() == math.NaN() + return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63)) } func constantCheck32() bool { // amd64:"MOVB\t[$]1",-"FCMP",-"MOVB\t[$]0" // s390x:"MOV(B|BZ|D)\t[$]1,",-"FCMPU",-"MOV(B|BZ|D)\t[$]0," - return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31)) && float32(math.NaN()) != float32(math.NaN()) + return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31)) } // Test that integer constants are converted to floating point constants @@ -186,3 +186,32 @@ func constantConvertInt32(x uint32) uint32 { } return x } + +func nanGenerate64() float64 { + // Test to make sure we don't generate a NaN while constant propagating. + // See issue 36400. + zero := 0.0 + // amd64:-"DIVSD" + inf := 1 / zero // +inf. We can constant propagate this one. + negone := -1.0 + + // amd64:"DIVSD" + z0 := zero / zero + // amd64:"MULSD" + z1 := zero * inf + // amd64:"SQRTSD" + z2 := math.Sqrt(negone) + return z0 + z1 + z2 +} + +func nanGenerate32() float32 { + zero := float32(0.0) + // amd64:-"DIVSS" + inf := 1 / zero // +inf. We can constant propagate this one. + + // amd64:"DIVSS" + z0 := zero / zero + // amd64:"MULSS" + z1 := zero * inf + return z0 + z1 +} -- 2.50.0
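
For reference, the lossy round trip described in the commit message can be observed with an ordinary Go program; this is an illustration added for this writeup, not part of the CL. On common hardware the float32 -> float64 -> float32 conversion sets the quiet bit, so the bits come back as 0x7fc00001 rather than the original 0x7f800001. The exact resulting bit pattern is platform dependent, which is why the new tests check only that the result is still a NaN rather than comparing exact bits.

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// Exponent all ones, quiet bit clear, nonzero mantissa:
		// a signaling NaN in IEEE 754 binary32.
		f32 := math.Float32frombits(0x7f800001)

		f64 := float64(f32)  // widen, as the compiler did to store a float32 constant
		back := float32(f64) // narrow again when the constant is used

		fmt.Printf("before: %#x\n", math.Float32bits(f32))
		fmt.Printf("after:  %#x\n", math.Float32bits(back))
	}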
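
A note on why the Wasm canonicalization rules gained the y.Op != OpWasm*Const conditions: once constant folding is allowed to decline (the F64Mul folding rule above no longer fires when the product is a NaN), a rule such as

	(F64Mul (F64Const [x]) y) -> (F64Mul y (F64Const [x]))

could match a pair of constants that the folding rule refused and then keep swapping the two operands forever. Restricting the rule to non-constant y breaks that rewrite loop; the TODO added in the file points at the longer-term fix of declaring these operations commutative instead.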
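
Likewise, the codegen test draws a deliberate line. 1/zero is still folded (hence amd64:-"DIVSD") because +inf, -inf, and -0 remain encodable in the AuxInt field, per the new comment in genericOps.go; only NaN encodings are forbidden. By contrast, zero/zero, zero*inf, and math.Sqrt(negone) would each produce a NaN at compile time, so they must now reach the back end as real DIVSD/MULSD/SQRTSD instructions and generate their NaNs at run time, under the target machine's NaN regime.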