ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVXS, ssa.OpRISCV64FMVDX, ssa.OpRISCV64FMVXD,
ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
+ ssa.OpRISCV64FCLASSS, ssa.OpRISCV64FCLASSD,
ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW, ssa.OpRISCV64CLZ, ssa.OpRISCV64CLZW, ssa.OpRISCV64CTZ, ssa.OpRISCV64CTZW,
ssa.OpRISCV64REV8, ssa.OpRISCV64CPOP, ssa.OpRISCV64CPOPW:
p := s.Prog(v.Op.Asm())
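// FCLASSS and FCLASSD slot into the generic one-register-in,
// one-register-out case: a single Prog carrying the op's assembler
// mnemonic. A sketch of the shared operand setup that follows this line
// in the surrounding function body:
//	p.From.Type = obj.TYPE_REG
//	p.From.Reg = v.Args[0].Reg()
//	p.To.Type = obj.TYPE_REG
//	p.To.Reg = v.Reg()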
(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
+// Test for -∞ (bit 0) using the 64-bit classify instruction.
+(FLTD x (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))]))) => (ANDI [1] (FCLASSD x))
+(FLED (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))])) x) => (SNEZ (ANDI <typ.Int64> [0xff &^ 1] (FCLASSD x)))
+(FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))]))) => (ANDI [1] (FCLASSD x))
+(FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))]))) => (SEQZ (ANDI <typ.Int64> [1] (FCLASSD x)))
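+// For example, x == math.Inf(-1) initially lowers to
+// (FEQD x (FMVDX (MOVDconst [...]))), which the FEQD rule above turns into
+// (ANDI [1] (FCLASSD x)): a classify plus a mask, with no FP constant
+// materialisation or comparison.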
+
+// Test for +∞ (bit 7) using the 64-bit classify instruction.
+(FLTD (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))])) x) => (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
+(FLED x (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))]))) => (SNEZ (ANDI <typ.Int64> [0xff &^ (1<<7)] (FCLASSD x)))
+(FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) => (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
+(FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) => (SEQZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
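+// Likewise x > math.MaxFloat64 becomes
+// (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x))), where SNEZ normalises the
+// masked class bit to a boolean.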
+
//
// Optimisations for rva22u64 and above.
//
{name: "FLED", argLength: 2, reg: fp2gp, asm: "FLED"}, // arg0 <= arg1
{name: "LoweredFMIND", argLength: 2, reg: fp21, resultNotInArgs: true, asm: "FMIND", commutative: true, typ: "Float64"}, // min(arg0, arg1)
{name: "LoweredFMAXD", argLength: 2, reg: fp21, resultNotInArgs: true, asm: "FMAXD", commutative: true, typ: "Float64"}, // max(arg0, arg1)
+
+ // Floating point classify (in the F and D extensions).
+ //
+	// The FCLASS instructions always set exactly one bit in the output
+	// register; all other bits are cleared.
+ //
+ // Bit | Class
+ // ====+=============================
+ // 0 | -∞
+ // 1 | a negative normal number
+ // 2 | a negative subnormal number
+ // 3 | -0
+ // 4 | +0
+ // 5 | a positive subnormal number
+ // 6 | a positive normal number
+ // 7 | +∞
+ // 8 | qNaN
+ // 9 | sNaN
+ // ====+=============================
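+	//
+	// For example, FCLASS of -0.0 yields 1<<3 = 8, and FCLASS of a quiet
+	// NaN yields 1<<8 = 256, so a test for any set of classes is a single
+	// AND against the union of the corresponding bits.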
+ {name: "FCLASSS", argLength: 1, reg: fpgp, asm: "FCLASSS", typ: "Int64"}, // classify float32
+ {name: "FCLASSD", argLength: 1, reg: fpgp, asm: "FCLASSD", typ: "Int64"}, // classify float64
}
RISCV64blocks := []blockData{
OpRISCV64FLED
OpRISCV64LoweredFMIND
OpRISCV64LoweredFMAXD
+ OpRISCV64FCLASSS
+ OpRISCV64FCLASSD
OpS390XFADDS
OpS390XFADD
},
},
},
+ {
+ name: "FCLASSS",
+ argLen: 1,
+ asm: riscv.AFCLASSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCLASSD",
+ argLen: 1,
+ asm: riscv.AFCLASSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
{
name: "FADDS",
return rewriteValueRISCV64_OpRISCV64FADDD(v)
case OpRISCV64FADDS:
return rewriteValueRISCV64_OpRISCV64FADDS(v)
+ case OpRISCV64FEQD:
+ return rewriteValueRISCV64_OpRISCV64FEQD(v)
+ case OpRISCV64FLED:
+ return rewriteValueRISCV64_OpRISCV64FLED(v)
+ case OpRISCV64FLTD:
+ return rewriteValueRISCV64_OpRISCV64FLTD(v)
case OpRISCV64FMADDD:
return rewriteValueRISCV64_OpRISCV64FMADDD(v)
case OpRISCV64FMADDS:
	return rewriteValueRISCV64_OpRISCV64FMADDS(v)
case OpRISCV64FMSUBD:
	return rewriteValueRISCV64_OpRISCV64FMSUBD(v)
case OpRISCV64FMSUBS:
return rewriteValueRISCV64_OpRISCV64FMSUBS(v)
+ case OpRISCV64FNED:
+ return rewriteValueRISCV64_OpRISCV64FNED(v)
case OpRISCV64FNMADDD:
return rewriteValueRISCV64_OpRISCV64FNMADDD(v)
case OpRISCV64FNMADDS:
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FEQD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))])))
+ // result: (ANDI [1] (FCLASSD x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(-1))) {
+ continue
+ }
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))])))
+ // result: (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(1))) {
+ continue
+ }
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1 << 7)
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FLED(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FLED (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))])) x)
+ // result: (SNEZ (ANDI <typ.Int64> [0xff &^ 1] (FCLASSD x)))
+ for {
+ if v_0.Op != OpRISCV64FMVDX {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != int64(math.Float64bits(-math.MaxFloat64)) {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff &^ 1)
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (FLED x (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))])))
+ // result: (SNEZ (ANDI <typ.Int64> [0xff &^ (1<<7)] (FCLASSD x)))
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.MaxFloat64)) {
+ break
+ }
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff &^ (1 << 7))
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FLTD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FLTD x (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))])))
+ // result: (ANDI [1] (FCLASSD x))
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(-math.MaxFloat64)) {
+ break
+ }
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (FLTD (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))])) x)
+ // result: (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
+ for {
+ if v_0.Op != OpRISCV64FMVDX {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != int64(math.Float64bits(math.MaxFloat64)) {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1 << 7)
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FMADDD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FNED(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))])))
+ // result: (SEQZ (ANDI <typ.Int64> [1] (FCLASSD x)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(-1))) {
+ continue
+ }
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))])))
+ // result: (SEQZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(1))) {
+ continue
+ }
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1 << 7)
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FNMADDD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
}
}
+//go:noinline
+func isPosInf(x float64) bool {
+ return math.IsInf(x, 1)
+}
+
+//go:noinline
+func isPosInfEq(x float64) bool {
+ return x == math.Inf(1)
+}
+
+//go:noinline
+func isPosInfCmp(x float64) bool {
+ return x > math.MaxFloat64
+}
+
+//go:noinline
+func isNotPosInf(x float64) bool {
+ return !math.IsInf(x, 1)
+}
+
+//go:noinline
+func isNotPosInfEq(x float64) bool {
+ return x != math.Inf(1)
+}
+
+//go:noinline
+func isNotPosInfCmp(x float64) bool {
+ return x <= math.MaxFloat64
+}
+
+//go:noinline
+func isNegInf(x float64) bool {
+ return math.IsInf(x, -1)
+}
+
+//go:noinline
+func isNegInfEq(x float64) bool {
+ return x == math.Inf(-1)
+}
+
+//go:noinline
+func isNegInfCmp(x float64) bool {
+ return x < -math.MaxFloat64
+}
+
+//go:noinline
+func isNotNegInf(x float64) bool {
+ return !math.IsInf(x, -1)
+}
+
+//go:noinline
+func isNotNegInfEq(x float64) bool {
+ return x != math.Inf(-1)
+}
+
+//go:noinline
+func isNotNegInfCmp(x float64) bool {
+ return x >= -math.MaxFloat64
+}
+
+func TestInf(t *testing.T) {
+ tests := []struct {
+ value float64
+ isPosInf bool
+ isNegInf bool
+ isNaN bool
+ }{
+ {value: math.Inf(1), isPosInf: true},
+ {value: math.MaxFloat64},
+ {value: math.Inf(-1), isNegInf: true},
+ {value: -math.MaxFloat64},
+ {value: math.NaN(), isNaN: true},
+ }
+
+ check := func(name string, f func(x float64) bool, value float64, want bool) {
+ got := f(value)
+ if got != want {
+ t.Errorf("%v(%g): want %v, got %v", name, value, want, got)
+ }
+ }
+
+ for _, test := range tests {
+ check("isPosInf", isPosInf, test.value, test.isPosInf)
+ check("isPosInfEq", isPosInfEq, test.value, test.isPosInf)
+ check("isPosInfCmp", isPosInfCmp, test.value, test.isPosInf)
+
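+		// Ordered comparisons are false when either operand is NaN, so
+		// the expected result for the negated Cmp variants also excludes
+		// NaN.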
+ check("isNotPosInf", isNotPosInf, test.value, !test.isPosInf)
+ check("isNotPosInfEq", isNotPosInfEq, test.value, !test.isPosInf)
+ check("isNotPosInfCmp", isNotPosInfCmp, test.value, !test.isPosInf && !test.isNaN)
+
+ check("isNegInf", isNegInf, test.value, test.isNegInf)
+ check("isNegInfEq", isNegInfEq, test.value, test.isNegInf)
+ check("isNegInfCmp", isNegInfCmp, test.value, test.isNegInf)
+
+ check("isNotNegInf", isNotNegInf, test.value, !test.isNegInf)
+ check("isNotNegInfEq", isNotNegInfEq, test.value, !test.isNegInf)
+ check("isNotNegInfCmp", isNotNegInfCmp, test.value, !test.isNegInf && !test.isNaN)
+ }
+}
+
var sinkFloat float64
func BenchmarkMul2(b *testing.B) {
return math.FMA(x, -y, -z)
}
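+// The riscv64:"FCLASSD" annotations below are codegen test directives:
+// the codegen harness compiles this file and checks that the riscv64
+// assembly emitted for each function matches the quoted pattern.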
+func isPosInf(x float64) bool {
+ // riscv64:"FCLASSD"
+ return math.IsInf(x, 1)
+}
+
+func isPosInfEq(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x == math.Inf(1)
+}
+
+func isPosInfCmp(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x > math.MaxFloat64
+}
+
+func isNotPosInf(x float64) bool {
+ // riscv64:"FCLASSD"
+ return !math.IsInf(x, 1)
+}
+
+func isNotPosInfEq(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x != math.Inf(1)
+}
+
+func isNotPosInfCmp(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x <= math.MaxFloat64
+}
+
+func isNegInf(x float64) bool {
+ // riscv64:"FCLASSD"
+ return math.IsInf(x, -1)
+}
+
+func isNegInfEq(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x == math.Inf(-1)
+}
+
+func isNegInfCmp(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x < -math.MaxFloat64
+}
+
+func isNotNegInf(x float64) bool {
+ // riscv64:"FCLASSD"
+ return !math.IsInf(x, -1)
+}
+
+func isNotNegInfEq(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x != math.Inf(-1)
+}
+
+func isNotNegInfCmp(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x >= -math.MaxFloat64
+}
+
func fromFloat64(f64 float64) uint64 {
// amd64:"MOVQ\tX.*, [^X].*"
// arm64:"FMOVD\tF.*, R.*"