// SLL only considers the bottom 6 bits of y, so (SLL x y) computes x << (y & 63).
//
// If y < 64, this is the value we want. Otherwise, we want zero.
//
// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
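//
// For example, with y = 70: SLTIU 64, 70 yields 0, Neg turns that into 0, and
// the AND forces the result to 0. With y = 3: SLTIU 64, 3 yields 1, Neg turns
// that into 0xffffffffffffffff, and the AND passes x << 3 through unchanged.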
-(Lsh8x8 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh8x16 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh8x32 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh8x64 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
-(Lsh16x8 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh16x16 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh16x32 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh16x64 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
-(Lsh32x8 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh32x16 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh32x32 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh32x64 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
-(Lsh64x8 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh64x16 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh64x32 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh64x64 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+(Lsh8x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh8x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh8x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Lsh16x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Lsh32x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Lsh64x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
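// In the bounded forms above, shiftIsBounded(v) means the compiler has
// already proved that the shift amount is less than the width of the shifted
// value, for instance from a mask or from a dominating comparison:
//
//	return v << (s & 63) // masked shift: always bounded
//
//	if s < 64 {
//		return v << s // guarded shift: the prove pass can mark this bounded
//	}
//
// Since a bounded amount can never reach 64, the SLTIU/AND clamp is
// unnecessary and a bare SLL suffices.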
// SRL only considers the bottom 6 bits of y. If y >= 64, the result should
// always be 0. See Lsh above for a detailed description.
-(Rsh8Ux8 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh8Ux16 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh8Ux32 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh8Ux64 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
-(Rsh16Ux8 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh16Ux16 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh16Ux32 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh16Ux64 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
-(Rsh32Ux8 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh32Ux16 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh32Ux32 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh32Ux64 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
-(Rsh64Ux8 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh64Ux16 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh64Ux32 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh64Ux64 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt32to64 x) y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)
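// Note that even when the shift is bounded, the sub-word operands still need
// zero-extension: SRL shifts the full 64-bit register, so the bits above the
// operand's width must already be zero for the result to be correct.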
// SRA only considers the bottom 6 bits of y. If y >= 64, the result should
// be either 0 or -1 based on the sign bit of x.
//
// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
// more than the 6 bits SRA cares about.
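//
// For example, with y = 70: SLTIU 64, 70 yields 0, ADDI -1 turns that into -1,
// and OR y, -1 = -1, whose bottom 6 bits are 63. Shifting right by 63 smears
// the sign bit across the whole result, giving 0 or -1 as required. With
// y = 3: SLTIU yields 1, ADDI -1 turns that into 0, and OR y, 0 leaves y
// unchanged.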
-(Rsh8x8 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh8x16 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh8x32 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh8x64 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh16x8 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh16x16 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh16x32 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh16x64 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh32x8 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh32x16 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh32x32 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh32x64 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh64x8 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh64x16 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh64x32 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh64x64 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt32to64 x) y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)
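// As with SRL, the bounded forms still sign-extend the sub-word operands,
// since SRA operates on the full 64-bit register and relies on the sign bit
// being replicated through the upper bits.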
// Rotates.
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
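//
// For example, rotating an 8-bit value left by c = 3 becomes
// (x << (3&7)) | (x >> (-3&7)) = (x << 3) | (x >> 5).

// Each rule above is compiled by rulegen into a matcher function in
// rewriteRISCV64.go like the ones that follow: the !shiftIsBounded case keeps
// the masking expansion, a second match handles the bounded case with a bare
// shift, and the function returns false if neither rule applies.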
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh16x32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh16x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
// match: (Lsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh16x8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh32x16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh32x32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh32x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
// match: (Lsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh32x8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh64x16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh64x32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh64x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
// match: (Lsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh64x8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh8x16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh8x32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh8x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
// match: (Lsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpLsh8x8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Lsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh16Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh16Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh16Ux64(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh16Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh16Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh16x16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh16x32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh16x64(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
-}
-func rewriteValueRISCV64_OpRsh16x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh32Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt32to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh32Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt32to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh32Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt32to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh32Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt32to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh32x16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt32to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh32x32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt32to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh32x64(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt32to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh32x8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt32to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh64Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh64Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh64Ux64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
// match: (Rsh64Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh64Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh64x16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v.AddArg2(x, v0)
return true
}
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh64x32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v.AddArg2(x, v0)
return true
}
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh64x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
// match: (Rsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v.AddArg2(x, v0)
return true
}
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh64x8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v.AddArg2(x, v0)
return true
}
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh8Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh8Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh8Ux64(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh8Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh8Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64AND)
v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh8x16(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh8x32(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh8x64(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpRsh8x8(v *Value) bool {
v_1 := v.Args[1]
b := v.Block
typ := &b.Func.Config.Types
// match: (Rsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpRISCV64SRA)
v.Type = t
v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v.AddArg2(v0, v1)
return true
}
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
}
func rewriteValueRISCV64_OpSelect0(v *Value) bool {
v_0 := v.Args[0]
func lshMask64x64(v int64, s uint64) int64 {
// ppc64:"ANDCC",-"ORN",-"ISEL"
// ppc64le:"ANDCC",-"ORN",-"ISEL"
- // riscv64:"SLL","AND","SLTIU"
+ // riscv64:"SLL",-"AND\t",-"SLTIU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
return v << (s & 63)
}
func rshMask64Ux64(v uint64, s uint64) uint64 {
// ppc64:"ANDCC",-"ORN",-"ISEL"
// ppc64le:"ANDCC",-"ORN",-"ISEL"
- // riscv64:"SRL","AND","SLTIU"
+ // riscv64:"SRL",-"AND\t",-"SLTIU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
return v >> (s & 63)
}
func rshMask64x64(v int64, s uint64) int64 {
// ppc64:"ANDCC",-"ORN",-"ISEL"
// ppc64le:"ANDCC",-ORN",-"ISEL"
- // riscv64:"SRA","OR","SLTIU"
+ // riscv64:"SRA",-"OR",-"SLTIU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
return v >> (s & 63)
}
func lshMask64x32(v int64, s uint32) int64 {
// ppc64:"ANDCC",-"ORN"
// ppc64le:"ANDCC",-"ORN"
- // riscv64:"SLL","AND","SLTIU"
+ // riscv64:"SLL",-"AND\t",-"SLTIU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
return v << (s & 63)
}
func rshMask64Ux32(v uint64, s uint32) uint64 {
// ppc64:"ANDCC",-"ORN"
// ppc64le:"ANDCC",-"ORN"
- // riscv64:"SRL","AND","SLTIU"
+ // riscv64:"SRL",-"AND\t",-"SLTIU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
return v >> (s & 63)
}
func rshMask64x32(v int64, s uint32) int64 {
// ppc64:"ANDCC",-"ORN",-"ISEL"
// ppc64le:"ANDCC",-"ORN",-"ISEL"
- // riscv64:"SRA","OR","SLTIU"
+ // riscv64:"SRA",-"OR",-"SLTIU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
return v >> (s & 63)
}
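
// In all six mask variants above, the shift amount is pre-masked with s&63,
// so the shift is known to be bounded and the riscv64 checks now assert that
// the SLTIU-based clamping sequence is absent rather than present.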
func lshGuarded64(v int64, s uint) int64 {
if s < 64 {
- // riscv64:"SLL","AND","SLTIU"
+ // riscv64:"SLL",-"AND",-"SLTIU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// wasm:-"Select",-".*LtU"
		return v << s
	}
	panic("shift too large")
}
func rshGuarded64U(v uint64, s uint) uint64 {
if s < 64 {
- // riscv64:"SRL","AND","SLTIU"
+ // riscv64:"SRL",-"AND",-"SLTIU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// wasm:-"Select",-".*LtU"
		return v >> s
	}
	panic("shift too large")
}
func rshGuarded64(v int64, s uint) int64 {
if s < 64 {
- // riscv64:"SRA","OR","SLTIU"
+ // riscv64:"SRA",-"OR",-"SLTIU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// wasm:-"Select",-".*LtU"
		return v >> s
	}
	panic("shift too large")
}
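
// The guarded variants have no mask; instead the prove pass can use the
// dominating s < 64 comparison to mark these shifts bounded, which is what
// the updated riscv64 checks rely on.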
func checkMergedShifts32(a [256]uint32, b [256]uint64, u uint32, v uint32) {
- //ppc64le: -"CLRLSLDI", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]29, R[0-9]+"
- //ppc64: -"CLRLSLDI", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]29, R[0-9]+"
+ // ppc64le: -"CLRLSLDI", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]29, R[0-9]+"
+ // ppc64: -"CLRLSLDI", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]29, R[0-9]+"
a[0] = a[uint8(v>>24)]
- //ppc64le: -"CLRLSLDI", "RLWNM\t[$]11, R[0-9]+, [$]21, [$]28, R[0-9]+"
- //ppc64: -"CLRLSLDI", "RLWNM\t[$]11, R[0-9]+, [$]21, [$]28, R[0-9]+"
+ // ppc64le: -"CLRLSLDI", "RLWNM\t[$]11, R[0-9]+, [$]21, [$]28, R[0-9]+"
+ // ppc64: -"CLRLSLDI", "RLWNM\t[$]11, R[0-9]+, [$]21, [$]28, R[0-9]+"
b[0] = b[uint8(v>>24)]
- //ppc64le: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+"
- //ppc64: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+"
+ // ppc64le: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+"
+ // ppc64: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+"
b[1] = b[(v>>20)&0xFF]
- //ppc64le: -"SLD", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]28, R[0-9]+"
- //ppc64: -"SLD", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]28, R[0-9]+"
+ // ppc64le: -"SLD", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]28, R[0-9]+"
+ // ppc64: -"SLD", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]28, R[0-9]+"
b[2] = b[v>>25]
}