// shifts
// The hardware shift instructions use only the low bits of the shift
// amount (the low 6 bits for the 64-bit SLLV/SRLV/SRAV, the low 5 bits
// for the 32-bit SLL/SRL/SRA), so we compare the amount against the
// width (64 or 32) to preserve Go semantics for large shifts.
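//
// When the shift amount cannot be proven in range, the lowering computes,
// in effect (a sketch of the semantics, not literal code):
//
//	result := uint64(0)
//	if y < 64 { // SGTU (MOVVconst [64]) y
//		result = x << y // SLLV; MASKEQZ keeps it only when SGTU is 1
//	}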
-(Lsh64x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
-(Lsh64x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
-(Lsh64x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
-(Lsh64x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
-
-(Lsh32x64 <t> x y) => (MASKEQZ (SLL <t> x y) (SGTU (MOVVconst <typ.UInt64> [32]) y))
-(Lsh32x32 <t> x y) => (MASKEQZ (SLL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
-(Lsh32x16 <t> x y) => (MASKEQZ (SLL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
-(Lsh32x8 <t> x y) => (MASKEQZ (SLL <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64 y)))
-
-(Lsh16x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
-(Lsh16x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
-(Lsh16x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
-(Lsh16x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
-
-(Lsh8x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
-(Lsh8x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
-(Lsh8x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
-(Lsh8x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
-
-(Rsh64Ux64 <t> x y) => (MASKEQZ (SRLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
-(Rsh64Ux32 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
-(Rsh64Ux16 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
-(Rsh64Ux8 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
-
-(Rsh32Ux64 <t> x y) => (MASKEQZ (SRL <t> x y) (SGTU (MOVVconst <typ.UInt64> [32]) y))
-(Rsh32Ux32 <t> x y) => (MASKEQZ (SRL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
-(Rsh32Ux16 <t> x y) => (MASKEQZ (SRL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
-(Rsh32Ux8 <t> x y) => (MASKEQZ (SRL <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64 y)))
-
-(Rsh16Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
-(Rsh16Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
-(Rsh16Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
-(Rsh16Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
-
-(Rsh8Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
-(Rsh8Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
-(Rsh8Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
-(Rsh8Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
-
-(Rsh64x64 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
-(Rsh64x32 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh64x16 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh64x8 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
-
-(Rsh32x64 <t> x y) => (SRA x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [31]))) y))
-(Rsh32x32 <t> x y) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt32to64 y)))
-(Rsh32x16 <t> x y) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt16to64 y)))
-(Rsh32x8 <t> x y) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt8to64 y)))
-
-(Rsh16x64 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
-(Rsh16x32 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh16x16 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh16x8 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
-
-(Rsh8x64 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
-(Rsh8x32 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh8x16 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh8x8 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+// left shift
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)
+
+(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x y) (SGTU (MOVVconst <typ.UInt64> [32]) y))
+(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Lsh8x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
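+
+// A sketch of when the bounded rules apply (assuming the prove pass has
+// marked the shift; the function below is illustrative only):
+//
+//	func shl(x uint64, s uint) uint64 {
+//		if s < 64 {
+//			return x << s // shiftIsBounded(v): a single SLLV, no MASKEQZ
+//		}
+//		return 0
+//	}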
+
+// unsigned right shift
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV (ZeroExt16to64 x) y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV (ZeroExt8to64 x) y)
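+
+// Note that the bounded sub-word rules still zero-extend x: SRLV shifts
+// the full 64-bit register, so the bits above a 16- or 8-bit value must
+// already be zero for the logical shift to bring in zeros.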
+
+(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x y) (SGTU (MOVVconst <typ.UInt64> [32]) y))
+(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+// signed right shift
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV (SignExt16to64 x) y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV (SignExt8to64 x) y)
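+
+// Likewise, the bounded sub-word rules sign-extend x first, so the bits
+// SRAV shifts down from above a 16- or 8-bit value are copies of its
+// sign bit.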
+
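+// For the unbounded cases, masking the result to 0 (as MASKEQZ does for
+// the shifts above) would be wrong: Go defines x >> s for s >= width as
+// shifting in sign bits. (SGTU y 63) is 1 when y > 63; NEGV turns that
+// into all ones, and ORing it into y forces the low 6 bits (the only
+// bits SRAV reads) to 63, saturating the shift and yielding pure sign
+// fill. The 32-bit SRA rules use 31 and the low 5 bits in the same way.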
+(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [31]))) y))
+(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt32to64 y)))
+(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt16to64 y)))
+(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt8to64 y)))
+
+(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
// rotates
(ROTR x (MOVVconst [c])) => (ROTRconst x [c&31])
(ROTRV x (MOVVconst [c])) => (ROTRVconst x [c&63])
+// SLLV/SRLV/SRAV consider only the bottom 6 bits of y; similarly,
+// SLL/SRL/SRA consider only the bottom 5 bits of y.
+(SLL x (ANDconst [31] y)) => (SLL x y)
+(SRL x (ANDconst [31] y)) => (SRL x y)
+(SRA x (ANDconst [31] y)) => (SRA x y)
+(SLLV x (ANDconst [63] y)) => (SLLV x y)
+(SRLV x (ANDconst [63] y)) => (SRLV x y)
+(SRAV x (ANDconst [63] y)) => (SRAV x y)
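+
+// A sketch of what the rules above enable (illustrative only): masking
+// the shift amount the way the hardware already does lets both the
+// bounds check and the AND disappear:
+//
+//	func shl(x uint64, s uint) uint64 {
+//		return x << (s & 63) // bounded, so SLLV; the ANDconst [63] is then dropped
+//	}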
+
// Avoid unnecessary zero and sign extension when right shifting.
(SRLVconst [rc] (MOVWUreg y)) && rc >= 0 && rc <= 31 => (SRLconst [int64(rc)] y)
(SRAVconst [rc] (MOVWreg y)) && rc >= 0 && rc <= 31 => (SRAconst [int64(rc)] y)
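//
// For example, (SRLVconst [3] (MOVWUreg y)) depends only on the low 32
// bits of y, so the 32-bit SRL can be used and the zero extension dropped.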
v.AddArg(x)
return true
}
+ // match: (SLL x (ANDconst [31] y))
+ // result: (SLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64ANDconst || auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpLOONG64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64SLLV(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (SLLV x (ANDconst [63] y))
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64SLLVconst(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (SRA x (ANDconst [31] y))
+ // result: (SRA x y)
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64ANDconst || auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpLOONG64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64SRAV(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (SRAV x (ANDconst [63] y))
+ // result: (SRAV x y)
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpLOONG64SRAV)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64SRAVconst(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (SRL x (ANDconst [31] y))
+ // result: (SRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64ANDconst || auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpLOONG64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64SRLV(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (SRLV x (ANDconst [63] y))
+ // result: (SRLV x y)
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpLOONG64SRLV)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64SRLVconst(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh16x32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh16x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh16x8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh32x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh32x32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh32x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLL <t> x y) (SGTU (MOVVconst <typ.UInt64> [32]) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh32x8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLL <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh64x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh64x32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh64x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh64x8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh8x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh8x32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh8x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpLsh8x8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SLLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Lsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpMod16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh16Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v3)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh16Ux32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh16Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v3)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh16Ux64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh16Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> (ZeroExt16to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh16Ux8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh16Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v3)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh16x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh16x32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh16x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh16x8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh32Ux16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh32Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh32Ux32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh32Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh32Ux64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh32Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRL <t> x y) (SGTU (MOVVconst <typ.UInt64> [32]) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRL, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh32Ux8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh32Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRL <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRL, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh32x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRA)
v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
v.AddArg2(x, v0)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh32x32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRA)
v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
v.AddArg2(x, v0)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh32x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [31]))) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRA)
v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
v.AddArg2(x, v0)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh32x8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRA)
v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
v.AddArg2(x, v0)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh64Ux16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh64Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh64Ux32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh64Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh64Ux64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh64Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v0.AddArg2(x, y)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh64Ux8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh64Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh64x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
v.AddArg2(x, v0)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh64x32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
v.AddArg2(x, v0)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh64x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
v.AddArg2(x, v0)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh64x8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (Rsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
v.AddArg2(x, v0)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh8Ux16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh8Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v3)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh8Ux32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh8Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v3)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh8Ux64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh8Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> (ZeroExt8to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v2)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh8Ux8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLV (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRLV)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh8Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64MASKEQZ)
v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v.AddArg2(v0, v3)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh8x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh8x32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh8x64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpRsh8x8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAV (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
// match: (Rsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
// result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v_0
y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpLOONG64SRAV)
v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueLOONG64_OpSelect0(v *Value) bool {
v_0 := v.Args[0]