// SRL only considers the bottom 6 bits of y, similarly SRLW only considers the
// bottom 5 bits of y. Ensure that the result is always zero if the shift exceeds
// the maximum value. See Lsh above for a detailed description.
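+//
+// A sketch of how the mask works, taking Rsh8Ux64 as an example: SLTIU [64] y
+// evaluates to 1 when y < 64 and to 0 otherwise, so Neg8 of it is an all-ones
+// mask for in-range shifts and zero for out-of-range ones. ANDing the mask
+// with the SRL result either passes the shifted value through unchanged or
+// forces the result to zero.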
-(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
-(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
-(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
-(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
-(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
-(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [32] y)))
-(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
-
-(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
-(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
-(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt32to64 x) y)
-(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)
+(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
+(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
+(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
+(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] y)))
+(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)
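+
+// Note that the 32-bit rules now use SRLW directly: it only reads the low 32
+// bits of x (and the low 5 bits of y), so the explicit ZeroExt32to64 of x is
+// unnecessary.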
// SRA only considers the bottom 6 bits of y, similarly SRAW only considers the
// bottom 5 bits. If y is greater than the maximum value (either 63 or 31
// depending on the instruction), the result of the shift should be either 0
// or -1 based on the sign bit of x. We implement this by ORing y with
// (SLTIU [bound] y) - 1, which leaves y unchanged when it is in range and
// turns the shift amount into all ones (the maximum shift) when it is not.
//
// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
// more than the 5 or 6 bits SRAW and SRA care about.
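+//
+// For example, with SRA and y = 70: SLTIU [64] 70 = 0, ADDI [-1] then yields
+// -1, and OR 70 -1 = -1, whose bottom 6 bits encode a shift by 63, filling
+// the result with copies of the sign bit of x. With y = 3 the ADDI yields 0
+// and the OR leaves y untouched.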
-(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
-(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
-(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
-(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
-(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-
-(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
-(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
-(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt32to64 x) y)
-(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)
+(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
+(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
+(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
+(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
+(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)
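+
+// As with SRLW above, SRAW operates directly on the low 32 bits of x, so the
+// SignExt32to64 of x is no longer required.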
// Rotates.
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
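+// For instance, with c = 3 this emits (x << 3) | (x >> 5), since -3 & 7 = 5;
+// masking with 7 also makes rotation counts outside 0-7 wrap around correctly.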
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
// Avoid unnecessary zero and sign extension when right shifting.
-(SRL <t> (MOVWUreg x) y) => (SRLW <t> x y)
-(SRLI <t> [x] (MOVWUreg y)) => (SRLIW <t> [int64(x&31)] y)
-(SRA <t> (MOVWreg x) y) => (SRAW <t> x y)
-(SRAI <t> [x] (MOVWreg y)) => (SRAIW <t> [int64(x&31)] y)
+(SRAI <t> [x] (MOVWreg y)) && x >= 0 && x <= 31 => (SRAIW <t> [int64(x)] y)
+(SRLI <t> [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW <t> [int64(x)] y)
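+// Note that these rules now require the shift amount to be in range: the old
+// form masked the immediate with 31, which produced the wrong result for
+// constant shifts of 32 or more. Out-of-range constant shifts are handled by
+// the rules below.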
+
+// Replace right shifts that exceed the size of the signed type.
+(SRAI <t> [x] (MOVBreg y)) && x >= 8 => (SRAI [63] (SLLI <t> [56] y))
+(SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI [63] (SLLI <t> [48] y))
+(SRAI <t> [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y)
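+
+// For example, an int8 shifted right by 10 leaves only copies of its sign
+// bit, so (SRAI [10] (MOVBreg y)) becomes (SRAI [63] (SLLI [56] y)): the
+// left shift moves the sign bit of the byte into bit 63 and the arithmetic
+// shift by 63 then spreads it across the whole register.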
+
+// Eliminate right shifts that exceed the size of the unsigned type.
+(SRLI <t> [x] (MOVBUreg y)) && x >= 8 => (MOVDconst <t> [0])
+(SRLI <t> [x] (MOVHUreg y)) && x >= 16 => (MOVDconst <t> [0])
+(SRLI <t> [x] (MOVWUreg y)) && x >= 32 => (MOVDconst <t> [0])
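+
+// For example, a zero-extended uint8 has no bits set above bit 7, so shifting
+// it right by 8 or more always produces zero and the expression folds to a
+// constant.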
// Fold constant into immediate instructions where possible.
(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (SRA <t> (MOVWreg x) y)
- // result: (SRAW <t> x y)
- for {
- t := v.Type
- if v_0.Op != OpRISCV64MOVWreg {
- break
- }
- x := v_0.Args[0]
- y := v_1
- v.reset(OpRISCV64SRAW)
- v.Type = t
- v.AddArg2(x, y)
- return true
- }
// match: (SRA x (MOVDconst [val]))
// result: (SRAI [int64(val&63)] x)
for {
}
func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
// match: (SRAI <t> [x] (MOVWreg y))
- // result: (SRAIW <t> [int64(x&31)] y)
+ // cond: x >= 0 && x <= 31
+ // result: (SRAIW <t> [int64(x)] y)
for {
t := v.Type
x := auxIntToInt64(v.AuxInt)
break
}
y := v_0.Args[0]
+ if !(x >= 0 && x <= 31) {
+ break
+ }
v.reset(OpRISCV64SRAIW)
v.Type = t
- v.AuxInt = int64ToAuxInt(int64(x & 31))
+ v.AuxInt = int64ToAuxInt(int64(x))
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRAI <t> [x] (MOVBreg y))
+ // cond: x >= 8
+ // result: (SRAI [63] (SLLI <t> [56] y))
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVBreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 8) {
+ break
+ }
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = int64ToAuxInt(56)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRAI <t> [x] (MOVHreg y))
+ // cond: x >= 16
+ // result: (SRAI [63] (SLLI <t> [48] y))
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVHreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 16) {
+ break
+ }
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = int64ToAuxInt(48)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRAI <t> [x] (MOVWreg y))
+ // cond: x >= 32
+ // result: (SRAIW [31] y)
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 32) {
+ break
+ }
+ v.reset(OpRISCV64SRAIW)
+ v.AuxInt = int64ToAuxInt(31)
v.AddArg(y)
return true
}
func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (SRL <t> (MOVWUreg x) y)
- // result: (SRLW <t> x y)
- for {
- t := v.Type
- if v_0.Op != OpRISCV64MOVWUreg {
- break
- }
- x := v_0.Args[0]
- y := v_1
- v.reset(OpRISCV64SRLW)
- v.Type = t
- v.AddArg2(x, y)
- return true
- }
// match: (SRL x (MOVDconst [val]))
// result: (SRLI [int64(val&63)] x)
for {
func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool {
v_0 := v.Args[0]
// match: (SRLI <t> [x] (MOVWUreg y))
- // result: (SRLIW <t> [int64(x&31)] y)
+ // cond: x >= 0 && x <= 31
+ // result: (SRLIW <t> [int64(x)] y)
for {
t := v.Type
x := auxIntToInt64(v.AuxInt)
break
}
y := v_0.Args[0]
+ if !(x >= 0 && x <= 31) {
+ break
+ }
v.reset(OpRISCV64SRLIW)
v.Type = t
- v.AuxInt = int64ToAuxInt(int64(x & 31))
+ v.AuxInt = int64ToAuxInt(int64(x))
v.AddArg(y)
return true
}
+ // match: (SRLI <t> [x] (MOVBUreg y))
+ // cond: x >= 8
+ // result: (MOVDconst <t> [0])
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ if !(x >= 8) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLI <t> [x] (MOVHUreg y))
+ // cond: x >= 16
+ // result: (MOVDconst <t> [0])
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ if !(x >= 16) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLI <t> [x] (MOVWUreg y))
+ // cond: x >= 32
+ // result: (MOVDconst <t> [0])
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ if !(x >= 32) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
// match: (SRLI [x] (MOVDconst [y]))
// result: (MOVDconst [int64(uint64(y) >> uint32(x))])
for {
typ := &b.Func.Config.Types
// match: (Rsh32Ux16 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
for {
t := v.Type
x := v_0
break
}
v.reset(OpRISCV64AND)
- v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1.AddArg(x)
- v0.AddArg2(v1, y)
- v2 := b.NewValue0(v.Pos, OpNeg32, t)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = int64ToAuxInt(32)
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v.AddArg2(v0, v2)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux16 x y)
// cond: shiftIsBounded(v)
- // result: (SRL (ZeroExt32to64 x) y)
+ // result: (SRLW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
return true
}
return false
typ := &b.Func.Config.Types
// match: (Rsh32Ux32 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
for {
t := v.Type
x := v_0
break
}
v.reset(OpRISCV64AND)
- v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1.AddArg(x)
- v0.AddArg2(v1, y)
- v2 := b.NewValue0(v.Pos, OpNeg32, t)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = int64ToAuxInt(32)
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v.AddArg2(v0, v2)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux32 x y)
// cond: shiftIsBounded(v)
- // result: (SRL (ZeroExt32to64 x) y)
+ // result: (SRLW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
return true
}
return false
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- typ := &b.Func.Config.Types
// match: (Rsh32Ux64 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [32] y)))
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] y)))
for {
t := v.Type
x := v_0
break
}
v.reset(OpRISCV64AND)
- v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1.AddArg(x)
- v0.AddArg2(v1, y)
- v2 := b.NewValue0(v.Pos, OpNeg32, t)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = int64ToAuxInt(32)
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg2(v0, v2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux64 x y)
// cond: shiftIsBounded(v)
- // result: (SRL (ZeroExt32to64 x) y)
+ // result: (SRLW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
return true
}
return false
typ := &b.Func.Config.Types
// match: (Rsh32Ux8 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
for {
t := v.Type
x := v_0
break
}
v.reset(OpRISCV64AND)
- v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1.AddArg(x)
- v0.AddArg2(v1, y)
- v2 := b.NewValue0(v.Pos, OpNeg32, t)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = int64ToAuxInt(32)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v.AddArg2(v0, v2)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
return true
}
// match: (Rsh32Ux8 x y)
// cond: shiftIsBounded(v)
- // result: (SRL (ZeroExt32to64 x) y)
+ // result: (SRLW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
return true
}
return false
typ := &b.Func.Config.Types
// match: (Rsh32x16 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
for {
t := v.Type
x := v_0
if !(!shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
+ v.reset(OpRISCV64SRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
- v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = int64ToAuxInt(-1)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = int64ToAuxInt(32)
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v1.AddArg2(y, v2)
- v.AddArg2(v0, v1)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x16 x y)
// cond: shiftIsBounded(v)
- // result: (SRA (SignExt32to64 x) y)
+ // result: (SRAW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
return true
}
return false
typ := &b.Func.Config.Types
// match: (Rsh32x32 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
for {
t := v.Type
x := v_0
if !(!shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
+ v.reset(OpRISCV64SRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
- v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = int64ToAuxInt(-1)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = int64ToAuxInt(32)
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v1.AddArg2(y, v2)
- v.AddArg2(v0, v1)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x32 x y)
// cond: shiftIsBounded(v)
- // result: (SRA (SignExt32to64 x) y)
+ // result: (SRAW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
return true
}
return false
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- typ := &b.Func.Config.Types
// match: (Rsh32x64 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
for {
t := v.Type
x := v_0
if !(!shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
+ v.reset(OpRISCV64SRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
- v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = int64ToAuxInt(-1)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = int64ToAuxInt(32)
- v3.AddArg(y)
- v2.AddArg(v3)
- v1.AddArg2(y, v2)
- v.AddArg2(v0, v1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x64 x y)
// cond: shiftIsBounded(v)
- // result: (SRA (SignExt32to64 x) y)
+ // result: (SRAW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
return true
}
return false
typ := &b.Func.Config.Types
// match: (Rsh32x8 <t> x y)
// cond: !shiftIsBounded(v)
- // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
for {
t := v.Type
x := v_0
if !(!shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
+ v.reset(OpRISCV64SRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
- v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = int64ToAuxInt(-1)
- v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = int64ToAuxInt(32)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
- v4.AddArg(y)
- v3.AddArg(v4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
v2.AddArg(v3)
- v1.AddArg2(y, v2)
- v.AddArg2(v0, v1)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
return true
}
// match: (Rsh32x8 x y)
// cond: shiftIsBounded(v)
- // result: (SRA (SignExt32to64 x) y)
+ // result: (SRAW x y)
for {
x := v_0
y := v_1
if !(shiftIsBounded(v)) {
break
}
- v.reset(OpRISCV64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
- v0.AddArg(x)
- v.AddArg2(v0, y)
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
return true
}
return false