// Lowering shifts
// Unsigned shifts need to return 0 if the shift amount is >= the width of the shifted value.
// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
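// (Rough sketch of how the rules below realize that formula: CMPxconst y [width] sets the
// carry flag iff y < width (unsigned), SBBxcarrymask turns that flag into all ones (carry
// set) or 0 (carry clear), and the final AND applies the mask, giving the plain shift
// result for in-range amounts and 0 otherwise.)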
-(Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
-(Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
-(Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
-(Lsh64x8 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
-
-(Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
-(Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-(Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-(Lsh32x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-
-(Lsh16x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
-(Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-(Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-(Lsh16x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-
-(Lsh8x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
-(Lsh8x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-(Lsh8x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-(Lsh8x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-
-(Rsh64Ux64 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
-(Rsh64Ux32 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
-(Rsh64Ux16 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
-(Rsh64Ux8 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
-
-(Rsh32Ux64 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
-(Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-(Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-(Rsh32Ux8 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-
-(Rsh16Ux64 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
-(Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
-(Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
-(Rsh16Ux8 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
-
-(Rsh8Ux64 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
-(Rsh8Ux32 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
-(Rsh8Ux16 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
-(Rsh8Ux8 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+(Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
+(Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Lsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SHLQ x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SHLL x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SHLL x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SHLL x y)
+
+(Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
+(Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16])))
+(Rsh8Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8])))
+
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SHRQ x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SHRL x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SHRW x y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SHRB x y)
// Signed right shift needs to return 0/-1 if the shift amount is >= the width of the shifted value.
// We implement this by setting the shift amount to -1 (all ones) if the shift amount is >= width.
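// (Rough sketch: NOT (SBBcarrymask (CMPconst y [width])) is 0 when y < width and all ones
// otherwise, so the OR leaves in-range amounts unchanged and forces out-of-range ones to
// all ones. The hardware SAR only looks at the low bits of the amount, so an all-ones
// amount shifts by at least width-1, filling the result with copies of the sign bit,
// i.e. 0 or -1.)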
-(Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
-(Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
-(Rsh64x16 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
-(Rsh64x8 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
-
-(Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
-(Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
-(Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
-(Rsh32x8 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
-
-(Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
-(Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
-(Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
-(Rsh16x8 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
-
-(Rsh8x64 <t> x y) -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
-(Rsh8x32 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
-(Rsh8x16 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
-(Rsh8x8 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+(Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64])))))
+(Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32])))))
+(Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16])))))
+(Rsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8])))))
+
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SARQ x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SARL x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SARW x y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SARB x y)
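// (Example: for Go source like x >> (s & 63), the compiler can mark the shift as bounded,
// since the masked amount is provably less than 64; the shiftIsBounded rules above can
// then lower it to a single SARQ instead of the SARQ/ORQ/NOTQ/SBBQcarrymask sequence.)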
// Lowering comparisons
(Less(64|32|16|8) x y) -> (SETL (CMP(Q|L|W|B) x y))
// For shifts, AxB means the shifted value has A bits and the shift amount has B bits.
// Shift amounts are considered unsigned.
- {name: "Lsh8x8", argLength: 2}, // arg0 << arg1
- {name: "Lsh8x16", argLength: 2},
- {name: "Lsh8x32", argLength: 2},
- {name: "Lsh8x64", argLength: 2},
- {name: "Lsh16x8", argLength: 2},
- {name: "Lsh16x16", argLength: 2},
- {name: "Lsh16x32", argLength: 2},
- {name: "Lsh16x64", argLength: 2},
- {name: "Lsh32x8", argLength: 2},
- {name: "Lsh32x16", argLength: 2},
- {name: "Lsh32x32", argLength: 2},
- {name: "Lsh32x64", argLength: 2},
- {name: "Lsh64x8", argLength: 2},
- {name: "Lsh64x16", argLength: 2},
- {name: "Lsh64x32", argLength: 2},
- {name: "Lsh64x64", argLength: 2},
-
- {name: "Rsh8x8", argLength: 2}, // arg0 >> arg1, signed
- {name: "Rsh8x16", argLength: 2},
- {name: "Rsh8x32", argLength: 2},
- {name: "Rsh8x64", argLength: 2},
- {name: "Rsh16x8", argLength: 2},
- {name: "Rsh16x16", argLength: 2},
- {name: "Rsh16x32", argLength: 2},
- {name: "Rsh16x64", argLength: 2},
- {name: "Rsh32x8", argLength: 2},
- {name: "Rsh32x16", argLength: 2},
- {name: "Rsh32x32", argLength: 2},
- {name: "Rsh32x64", argLength: 2},
- {name: "Rsh64x8", argLength: 2},
- {name: "Rsh64x16", argLength: 2},
- {name: "Rsh64x32", argLength: 2},
- {name: "Rsh64x64", argLength: 2},
-
- {name: "Rsh8Ux8", argLength: 2}, // arg0 >> arg1, unsigned
- {name: "Rsh8Ux16", argLength: 2},
- {name: "Rsh8Ux32", argLength: 2},
- {name: "Rsh8Ux64", argLength: 2},
- {name: "Rsh16Ux8", argLength: 2},
- {name: "Rsh16Ux16", argLength: 2},
- {name: "Rsh16Ux32", argLength: 2},
- {name: "Rsh16Ux64", argLength: 2},
- {name: "Rsh32Ux8", argLength: 2},
- {name: "Rsh32Ux16", argLength: 2},
- {name: "Rsh32Ux32", argLength: 2},
- {name: "Rsh32Ux64", argLength: 2},
- {name: "Rsh64Ux8", argLength: 2},
- {name: "Rsh64Ux16", argLength: 2},
- {name: "Rsh64Ux32", argLength: 2},
- {name: "Rsh64Ux64", argLength: 2},
+ // If arg1 is known to be less than the number of bits in arg0,
+ // then aux may be set to true.
+ // This enables better code generation on some platforms.
+ {name: "Lsh8x8", argLength: 2, aux: "Bool"}, // arg0 << arg1
+ {name: "Lsh8x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh8x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh8x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x64", argLength: 2, aux: "Bool"},
+
+ {name: "Rsh8x8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, signed
+ {name: "Rsh8x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh8x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh8x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x64", argLength: 2, aux: "Bool"},
+
+ {name: "Rsh8Ux8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, unsigned
+ {name: "Rsh8Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh8Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh8Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux64", argLength: 2, aux: "Bool"},
// 2-input comparisons
{name: "Eq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
b := v.Block
_ = b
// match: (Lsh16x16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
b := v.Block
b := v.Block
_ = b
// match: (Rsh16Ux16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARW)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARW)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARW)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARW)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARL)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARL)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARL)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARL)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDQ)
v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARQ)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARQ)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARQ)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARQ)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
v0.AddArg(x)
v.AddArg(v1)
return true
}
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x16 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARB)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x32 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARB)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x64 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARB)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x8 <t> x y)
- // cond:
+ // cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
for {
t := v.Type
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
+ if !(!shiftIsBounded(v)) {
+ break
+ }
v.reset(OpAMD64SARB)
v.Type = t
v.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
b := v.Block