From da6556968f3545aa6c4e5a3c3aef151bb9b1d439 Mon Sep 17 00:00:00 2001 From: Wayne Zuo Date: Thu, 11 Aug 2022 00:07:40 +0800 Subject: [PATCH] cmd/compile: simplify bounded shift on riscv64 The prove pass will mark some shifts bounded, and then we can use that information to generate better code on riscv64. Change-Id: Ia22f43d0598453c9417adac7017db28d7240948b Reviewed-on: https://go-review.googlesource.com/c/go/+/422616 TryBot-Result: Gopher Robot Reviewed-by: Joel Sing Run-TryBot: Wayne Zuo Reviewed-by: Keith Randall Reviewed-by: Keith Randall Auto-Submit: Keith Randall Run-TryBot: Keith Randall Reviewed-by: Cherry Mui --- .../compile/internal/ssa/gen/RISCV64.rules | 111 ++- .../compile/internal/ssa/rewriteRISCV64.go | 920 +++++++++++++++++- test/codegen/shift.go | 34 +- 3 files changed, 996 insertions(+), 69 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules index 11506e837e..82a9c83284 100644 --- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules +++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules @@ -172,41 +172,51 @@ // If y < 64, this is the value we want. Otherwise, we want zero. // // So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise. -(Lsh8x8 x y) => (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) -(Lsh8x16 x y) => (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) -(Lsh8x32 x y) => (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) -(Lsh8x64 x y) => (AND (SLL x y) (Neg8 (SLTIU [64] y))) -(Lsh16x8 x y) => (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) -(Lsh16x16 x y) => (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) -(Lsh16x32 x y) => (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) -(Lsh16x64 x y) => (AND (SLL x y) (Neg16 (SLTIU [64] y))) -(Lsh32x8 x y) => (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) -(Lsh32x16 x y) => (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) -(Lsh32x32 x y) => (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) -(Lsh32x64 x y) => (AND (SLL x y) (Neg32 (SLTIU [64] y))) -(Lsh64x8 x y) => (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) -(Lsh64x16 x y) => (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) -(Lsh64x32 x y) => (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) -(Lsh64x64 x y) => (AND (SLL x y) (Neg64 (SLTIU [64] y))) +(Lsh8x8 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh8x16 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh8x32 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh8x64 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg8 (SLTIU [64] y))) +(Lsh16x8 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh16x16 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh16x32 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh16x64 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg16 (SLTIU [64] y))) +(Lsh32x8 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh32x16 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh32x32 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh32x64 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg32 (SLTIU [64] y))) +(Lsh64x8 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) 
+(Lsh64x16 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh64x32 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh64x64 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg64 (SLTIU [64] y))) + +(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y) +(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y) +(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y) +(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y) // SRL only considers the bottom 6 bits of y. If y > 64, the result should // always be 0. See Lsh above for a detailed description. -(Rsh8Ux8 x y) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) -(Rsh8Ux16 x y) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) -(Rsh8Ux32 x y) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) -(Rsh8Ux64 x y) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))) -(Rsh16Ux8 x y) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) -(Rsh16Ux16 x y) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) -(Rsh16Ux32 x y) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) -(Rsh16Ux64 x y) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] y))) -(Rsh32Ux8 x y) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) -(Rsh32Ux16 x y) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) -(Rsh32Ux32 x y) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) -(Rsh32Ux64 x y) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] y))) -(Rsh64Ux8 x y) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) -(Rsh64Ux16 x y) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) -(Rsh64Ux32 x y) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) -(Rsh64Ux64 x y) => (AND (SRL x y) (Neg64 (SLTIU [64] y))) +(Rsh8Ux8 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh8Ux16 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh8Ux32 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh8Ux64 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))) +(Rsh16Ux8 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh16Ux16 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh16Ux32 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh16Ux64 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] y))) +(Rsh32Ux8 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh32Ux16 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh32Ux32 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh32Ux64 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] y))) +(Rsh64Ux8 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh64Ux16 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh64Ux32 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh64Ux64 x y) && 
!shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] y))) + +(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y) +(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y) +(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt32to64 x) y) +(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y) // SRA only considers the bottom 6 bits of y. If y > 64, the result should // be either 0 or -1 based on the sign bit. @@ -218,22 +228,27 @@ // // We don't need to sign-extend the OR result, as it will be at minimum 8 bits, // more than the 6 bits SRA cares about. -(Rsh8x8 x y) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) -(Rsh8x16 x y) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) -(Rsh8x32 x y) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) -(Rsh8x64 x y) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) -(Rsh16x8 x y) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) -(Rsh16x16 x y) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) -(Rsh16x32 x y) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) -(Rsh16x64 x y) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) -(Rsh32x8 x y) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) -(Rsh32x16 x y) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) -(Rsh32x32 x y) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) -(Rsh32x64 x y) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) -(Rsh64x8 x y) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) -(Rsh64x16 x y) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) -(Rsh64x32 x y) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) -(Rsh64x64 x y) => (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh8x8 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh8x16 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh8x32 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh8x64 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh16x8 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh16x16 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh16x32 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh16x64 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh32x8 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh32x16 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh32x32 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh32x64 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh64x8 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh64x16 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh64x32 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] 
(ZeroExt32to64 y))))) +(Rsh64x64 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) + +(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y) +(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y) +(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt32to64 x) y) +(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y) // Rotates. (RotateLeft8 x (MOVDconst [c])) => (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index 70eca6c513..a46664fc90 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -1607,11 +1607,15 @@ func rewriteValueRISCV64_OpLsh16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1625,6 +1629,20 @@ func rewriteValueRISCV64_OpLsh16x16(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh16x32(v *Value) bool { v_1 := v.Args[1] @@ -1632,11 +1650,15 @@ func rewriteValueRISCV64_OpLsh16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1650,17 +1672,35 @@ func rewriteValueRISCV64_OpLsh16x32(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh16x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg16 (SLTIU [64] y))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1672,6 +1712,20 @@ func rewriteValueRISCV64_OpLsh16x64(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh16x8(v *Value) bool { v_1 := v.Args[1] @@ -1679,11 +1733,15 @@ func rewriteValueRISCV64_OpLsh16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1697,6 +1755,20 @@ func rewriteValueRISCV64_OpLsh16x8(v *Value) bool { v.AddArg2(v0, v1) return 
true } + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh32x16(v *Value) bool { v_1 := v.Args[1] @@ -1704,11 +1776,15 @@ func rewriteValueRISCV64_OpLsh32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1722,6 +1798,20 @@ func rewriteValueRISCV64_OpLsh32x16(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh32x32(v *Value) bool { v_1 := v.Args[1] @@ -1729,11 +1819,15 @@ func rewriteValueRISCV64_OpLsh32x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1747,17 +1841,35 @@ func rewriteValueRISCV64_OpLsh32x32(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg32 (SLTIU [64] y))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1769,6 +1881,20 @@ func rewriteValueRISCV64_OpLsh32x64(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh32x8(v *Value) bool { v_1 := v.Args[1] @@ -1776,11 +1902,15 @@ func rewriteValueRISCV64_OpLsh32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1794,6 +1924,20 @@ func rewriteValueRISCV64_OpLsh32x8(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh64x16(v *Value) bool { v_1 := v.Args[1] @@ -1801,11 +1945,15 @@ func rewriteValueRISCV64_OpLsh64x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x16 x y) + // cond: 
!shiftIsBounded(v) // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1819,6 +1967,20 @@ func rewriteValueRISCV64_OpLsh64x16(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh64x32(v *Value) bool { v_1 := v.Args[1] @@ -1826,11 +1988,15 @@ func rewriteValueRISCV64_OpLsh64x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1844,17 +2010,35 @@ func rewriteValueRISCV64_OpLsh64x32(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg64 (SLTIU [64] y))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1866,6 +2050,20 @@ func rewriteValueRISCV64_OpLsh64x64(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh64x8(v *Value) bool { v_1 := v.Args[1] @@ -1873,11 +2071,15 @@ func rewriteValueRISCV64_OpLsh64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1891,6 +2093,20 @@ func rewriteValueRISCV64_OpLsh64x8(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh8x16(v *Value) bool { v_1 := v.Args[1] @@ -1898,11 +2114,15 @@ func rewriteValueRISCV64_OpLsh8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1916,6 +2136,20 @@ func rewriteValueRISCV64_OpLsh8x16(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if 
!(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh8x32(v *Value) bool { v_1 := v.Args[1] @@ -1923,11 +2157,15 @@ func rewriteValueRISCV64_OpLsh8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1941,17 +2179,35 @@ func rewriteValueRISCV64_OpLsh8x32(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg8 (SLTIU [64] y))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1963,6 +2219,20 @@ func rewriteValueRISCV64_OpLsh8x64(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpLsh8x8(v *Value) bool { v_1 := v.Args[1] @@ -1970,11 +2240,15 @@ func rewriteValueRISCV64_OpLsh8x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) v0.AddArg2(x, y) @@ -1988,6 +2262,20 @@ func rewriteValueRISCV64_OpLsh8x8(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpMod16(v *Value) bool { v_1 := v.Args[1] @@ -5418,11 +5706,15 @@ func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux16 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -5438,6 +5730,22 @@ func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool { v_1 := v.Args[1] @@ -5445,11 +5753,15 @@ func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux32 x y) + // 
cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -5465,6 +5777,22 @@ func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh16Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh16Ux64(v *Value) bool { v_1 := v.Args[1] @@ -5472,11 +5800,15 @@ func rewriteValueRISCV64_OpRsh16Ux64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux64 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] y))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -5490,6 +5822,22 @@ func rewriteValueRISCV64_OpRsh16Ux64(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh16Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool { v_1 := v.Args[1] @@ -5497,11 +5845,15 @@ func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -5517,6 +5869,22 @@ func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh16x16(v *Value) bool { v_1 := v.Args[1] @@ -5524,11 +5892,15 @@ func rewriteValueRISCV64_OpRsh16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) @@ -5546,6 +5918,22 @@ func rewriteValueRISCV64_OpRsh16x16(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh16x32(v 
*Value) bool { v_1 := v.Args[1] @@ -5553,11 +5941,15 @@ func rewriteValueRISCV64_OpRsh16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) @@ -5575,6 +5967,22 @@ func rewriteValueRISCV64_OpRsh16x32(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh16x64(v *Value) bool { v_1 := v.Args[1] @@ -5582,11 +5990,15 @@ func rewriteValueRISCV64_OpRsh16x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) @@ -5602,18 +6014,38 @@ func rewriteValueRISCV64_OpRsh16x64(v *Value) bool { v.AddArg2(v0, v1) return true } -} -func rewriteValueRISCV64_OpRsh16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] + // match: (Rsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) @@ -5631,6 +6063,22 @@ func rewriteValueRISCV64_OpRsh16x8(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool { v_1 := v.Args[1] @@ -5638,11 +6086,15 @@ func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux16 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -5658,6 +6110,22 @@ func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt32to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := 
b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool { v_1 := v.Args[1] @@ -5665,11 +6133,15 @@ func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux32 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -5685,6 +6157,22 @@ func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt32to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool { v_1 := v.Args[1] @@ -5692,11 +6180,15 @@ func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] y))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -5710,6 +6202,22 @@ func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh32Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt32to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool { v_1 := v.Args[1] @@ -5717,11 +6225,15 @@ func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -5737,6 +6249,22 @@ func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt32to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh32x16(v *Value) bool { v_1 := v.Args[1] @@ -5744,11 +6272,15 @@ func rewriteValueRISCV64_OpRsh32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -5766,6 +6298,22 @@ func rewriteValueRISCV64_OpRsh32x16(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: 
(Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh32x32(v *Value) bool { v_1 := v.Args[1] @@ -5773,11 +6321,15 @@ func rewriteValueRISCV64_OpRsh32x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x32 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -5795,6 +6347,22 @@ func rewriteValueRISCV64_OpRsh32x32(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh32x64(v *Value) bool { v_1 := v.Args[1] @@ -5802,11 +6370,15 @@ func rewriteValueRISCV64_OpRsh32x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x64 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -5822,6 +6394,22 @@ func rewriteValueRISCV64_OpRsh32x64(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh32x8(v *Value) bool { v_1 := v.Args[1] @@ -5829,11 +6417,15 @@ func rewriteValueRISCV64_OpRsh32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -5851,6 +6443,22 @@ func rewriteValueRISCV64_OpRsh32x8(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool { v_1 := v.Args[1] @@ -5858,11 +6466,15 @@ func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux16 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v0.AddArg2(x, y) @@ -5876,6 +6488,20 @@ func rewriteValueRISCV64_OpRsh64Ux16(v *Value) 
bool { v.AddArg2(v0, v1) return true } + // match: (Rsh64Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool { v_1 := v.Args[1] @@ -5883,11 +6509,15 @@ func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux32 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v0.AddArg2(x, y) @@ -5901,17 +6531,35 @@ func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh64Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh64Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux64 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL x y) (Neg64 (SLTIU [64] y))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v0.AddArg2(x, y) @@ -5923,6 +6571,20 @@ func rewriteValueRISCV64_OpRsh64Ux64(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh64Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool { v_1 := v.Args[1] @@ -5930,11 +6592,15 @@ func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux8 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v0.AddArg2(x, y) @@ -5948,6 +6614,20 @@ func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh64Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh64x16(v *Value) bool { v_1 := v.Args[1] @@ -5955,11 +6635,15 @@ func rewriteValueRISCV64_OpRsh64x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x16 x y) + // cond: !shiftIsBounded(v) // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) @@ -5975,6 +6659,20 @@ func rewriteValueRISCV64_OpRsh64x16(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Rsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh64x32(v *Value) bool { v_1 := v.Args[1] @@ -5982,11 +6680,15 @@ func rewriteValueRISCV64_OpRsh64x32(v *Value) bool { b := v.Block typ := 
&b.Func.Config.Types // match: (Rsh64x32 x y) + // cond: !shiftIsBounded(v) // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) @@ -6002,17 +6704,35 @@ func rewriteValueRISCV64_OpRsh64x32(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Rsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (Rsh64x64 x y) + // cond: !shiftIsBounded(v) // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) @@ -6026,6 +6746,20 @@ func rewriteValueRISCV64_OpRsh64x64(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh64x8(v *Value) bool { v_1 := v.Args[1] @@ -6033,11 +6767,15 @@ func rewriteValueRISCV64_OpRsh64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x8 x y) + // cond: !shiftIsBounded(v) // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) @@ -6053,6 +6791,20 @@ func rewriteValueRISCV64_OpRsh64x8(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Rsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SRA x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.AddArg2(x, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool { v_1 := v.Args[1] @@ -6060,11 +6812,15 @@ func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -6080,6 +6836,22 @@ func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool { v_1 := v.Args[1] @@ -6087,11 +6859,15 @@ func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, 
OpZeroExt8to64, typ.UInt64) @@ -6107,6 +6883,22 @@ func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh8Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh8Ux64(v *Value) bool { v_1 := v.Args[1] @@ -6114,11 +6906,15 @@ func rewriteValueRISCV64_OpRsh8Ux64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -6132,6 +6928,22 @@ func rewriteValueRISCV64_OpRsh8Ux64(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh8Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool { v_1 := v.Args[1] @@ -6139,11 +6951,15 @@ func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 x y) + // cond: !shiftIsBounded(v) // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64AND) v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -6159,6 +6975,22 @@ func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool { v.AddArg2(v0, v2) return true } + // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh8x16(v *Value) bool { v_1 := v.Args[1] @@ -6166,11 +6998,15 @@ func rewriteValueRISCV64_OpRsh8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) @@ -6188,6 +7024,22 @@ func rewriteValueRISCV64_OpRsh8x16(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh8x32(v *Value) bool { v_1 := v.Args[1] @@ -6195,11 +7047,15 @@ func rewriteValueRISCV64_OpRsh8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) for { t := v.Type x := v_0 
y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) @@ -6217,6 +7073,22 @@ func rewriteValueRISCV64_OpRsh8x32(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh8x64(v *Value) bool { v_1 := v.Args[1] @@ -6224,11 +7096,15 @@ func rewriteValueRISCV64_OpRsh8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) @@ -6244,6 +7120,22 @@ func rewriteValueRISCV64_OpRsh8x64(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpRsh8x8(v *Value) bool { v_1 := v.Args[1] @@ -6251,11 +7143,15 @@ func rewriteValueRISCV64_OpRsh8x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 x y) + // cond: !shiftIsBounded(v) // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) for { t := v.Type x := v_0 y := v_1 + if !(!shiftIsBounded(v)) { + break + } v.reset(OpRISCV64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) @@ -6273,6 +7169,22 @@ func rewriteValueRISCV64_OpRsh8x8(v *Value) bool { v.AddArg2(v0, v1) return true } + // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false } func rewriteValueRISCV64_OpSelect0(v *Value) bool { v_0 := v.Args[0] diff --git a/test/codegen/shift.go b/test/codegen/shift.go index 66a5c62391..293924a3db 100644 --- a/test/codegen/shift.go +++ b/test/codegen/shift.go @@ -80,7 +80,7 @@ func rshConst64x32(v int64) int64 { func lshMask64x64(v int64, s uint64) int64 { // ppc64:"ANDCC",-"ORN",-"ISEL" // ppc64le:"ANDCC",-"ORN",-"ISEL" - // riscv64:"SLL","AND","SLTIU" + // riscv64:"SLL",-"AND\t",-"SLTIU" // s390x:-"RISBGZ",-"AND",-"LOCGR" return v << (s & 63) } @@ -88,7 +88,7 @@ func lshMask64x64(v int64, s uint64) int64 { func rshMask64Ux64(v uint64, s uint64) uint64 { // ppc64:"ANDCC",-"ORN",-"ISEL" // ppc64le:"ANDCC",-"ORN",-"ISEL" - // riscv64:"SRL","AND","SLTIU" + // riscv64:"SRL",-"AND\t",-"SLTIU" // s390x:-"RISBGZ",-"AND",-"LOCGR" return v >> (s & 63) } @@ -96,7 +96,7 @@ func rshMask64Ux64(v uint64, s uint64) uint64 { func rshMask64x64(v int64, s uint64) int64 { // ppc64:"ANDCC",-"ORN",-"ISEL" // ppc64le:"ANDCC",-ORN",-"ISEL" - // riscv64:"SRA","OR","SLTIU" + // riscv64:"SRA",-"OR",-"SLTIU" // s390x:-"RISBGZ",-"AND",-"LOCGR" return v >> (s & 63) } @@ -128,7 +128,7 @@ func rshMask32x64(v int32, s uint64) int32 { func lshMask64x32(v int64, s 
uint32) int64 { // ppc64:"ANDCC",-"ORN" // ppc64le:"ANDCC",-"ORN" - // riscv64:"SLL","AND","SLTIU" + // riscv64:"SLL",-"AND\t",-"SLTIU" // s390x:-"RISBGZ",-"AND",-"LOCGR" return v << (s & 63) } @@ -136,7 +136,7 @@ func lshMask64x32(v int64, s uint32) int64 { func rshMask64Ux32(v uint64, s uint32) uint64 { // ppc64:"ANDCC",-"ORN" // ppc64le:"ANDCC",-"ORN" - // riscv64:"SRL","AND","SLTIU" + // riscv64:"SRL",-"AND\t",-"SLTIU" // s390x:-"RISBGZ",-"AND",-"LOCGR" return v >> (s & 63) } @@ -144,7 +144,7 @@ func rshMask64Ux32(v uint64, s uint32) uint64 { func rshMask64x32(v int64, s uint32) int64 { // ppc64:"ANDCC",-"ORN",-"ISEL" // ppc64le:"ANDCC",-"ORN",-"ISEL" - // riscv64:"SRA","OR","SLTIU" + // riscv64:"SRA",-"OR",-"SLTIU" // s390x:-"RISBGZ",-"AND",-"LOCGR" return v >> (s & 63) } @@ -207,7 +207,7 @@ func lshSignedMasked(v8 int8, v16 int16, v32 int32, v64 int64, x int) { func lshGuarded64(v int64, s uint) int64 { if s < 64 { - // riscv64:"SLL","AND","SLTIU" + // riscv64:"SLL",-"AND",-"SLTIU" // s390x:-"RISBGZ",-"AND",-"LOCGR" // wasm:-"Select",-".*LtU" return v << s @@ -217,7 +217,7 @@ func lshGuarded64(v int64, s uint) int64 { func rshGuarded64U(v uint64, s uint) uint64 { if s < 64 { - // riscv64:"SRL","AND","SLTIU" + // riscv64:"SRL",-"AND",-"SLTIU" // s390x:-"RISBGZ",-"AND",-"LOCGR" // wasm:-"Select",-".*LtU" return v >> s @@ -227,7 +227,7 @@ func rshGuarded64U(v uint64, s uint) uint64 { func rshGuarded64(v int64, s uint) int64 { if s < 64 { - // riscv64:"SRA","OR","SLTIU" + // riscv64:"SRA",-"OR",-"SLTIU" // s390x:-"RISBGZ",-"AND",-"LOCGR" // wasm:-"Select",-".*LtU" return v >> s @@ -357,17 +357,17 @@ func checkShiftAndMask32(v []uint32) { } func checkMergedShifts32(a [256]uint32, b [256]uint64, u uint32, v uint32) { - //ppc64le: -"CLRLSLDI", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]29, R[0-9]+" - //ppc64: -"CLRLSLDI", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]29, R[0-9]+" + // ppc64le: -"CLRLSLDI", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]29, R[0-9]+" + // ppc64: -"CLRLSLDI", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]29, R[0-9]+" a[0] = a[uint8(v>>24)] - //ppc64le: -"CLRLSLDI", "RLWNM\t[$]11, R[0-9]+, [$]21, [$]28, R[0-9]+" - //ppc64: -"CLRLSLDI", "RLWNM\t[$]11, R[0-9]+, [$]21, [$]28, R[0-9]+" + // ppc64le: -"CLRLSLDI", "RLWNM\t[$]11, R[0-9]+, [$]21, [$]28, R[0-9]+" + // ppc64: -"CLRLSLDI", "RLWNM\t[$]11, R[0-9]+, [$]21, [$]28, R[0-9]+" b[0] = b[uint8(v>>24)] - //ppc64le: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+" - //ppc64: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+" + // ppc64le: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+" + // ppc64: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+" b[1] = b[(v>>20)&0xFF] - //ppc64le: -"SLD", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]28, R[0-9]+" - //ppc64: -"SLD", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]28, R[0-9]+" + // ppc64le: -"SLD", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]28, R[0-9]+" + // ppc64: -"SLD", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]28, R[0-9]+" b[2] = b[v>>25] } -- 2.50.0
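
A note for readers skimming the generated rewrite rules above: the practical effect of this change is easiest to see from the Go side. When the shift amount is provably less than the operand width (for example because it has been masked with & 63 or guarded by an explicit bounds check), the prove pass marks the SSA shift as bounded, and with this patch the riscv64 backend emits a bare SLL/SRL/SRA instead of the SLTIU/AND (or SLTIU/ADDI/OR) clamping sequence. Below is a minimal sketch that mirrors the patterns exercised in test/codegen/shift.go; the function names are illustrative and not part of the patch.

package main

import "fmt"

// lshMasked64 masks the shift amount into 0..63, so prove can mark the
// shift as bounded; on riscv64 this should now compile to a single SLL
// with no SLTIU/AND clamping.
func lshMasked64(v int64, s uint64) int64 {
	return v << (s & 63)
}

// rshGuarded64 reaches the shift only when s < 64, which also lets prove
// mark it as bounded; the signed shift should then become a bare SRA
// rather than SRA over an OR/ADDI/SLTIU clamp.
func rshGuarded64(v int64, s uint) int64 {
	if s < 64 {
		return v >> s
	}
	panic("shift amount too large")
}

func main() {
	fmt.Println(lshMasked64(1, 70))    // 70&63 == 6, so prints 64
	fmt.Println(rshGuarded64(-128, 3)) // arithmetic shift keeps the sign: prints -16
}

One way to confirm the effect (a workflow assumption, not part of this patch) is to cross-compile with GOARCH=riscv64 go build -gcflags=-S and check that the shift functions no longer contain the SLTIU/AND/OR clamping instructions, which is what the updated asmcheck annotations in test/codegen/shift.go assert.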