(Add64 x y) -> (ADDQ x y)
(AddPtr x y) -> (ADDQ x y)
(Add32 x y) -> (ADDL x y)
-(Add16 x y) -> (ADDW x y)
-(Add8 x y) -> (ADDB x y)
+(Add16 x y) -> (ADDL x y)
+(Add8 x y) -> (ADDL x y)
(Add32F x y) -> (ADDSS x y)
(Add64F x y) -> (ADDSD x y)
(Sub64 x y) -> (SUBQ x y)
(SubPtr x y) -> (SUBQ x y)
(Sub32 x y) -> (SUBL x y)
-(Sub16 x y) -> (SUBW x y)
-(Sub8 x y) -> (SUBB x y)
+(Sub16 x y) -> (SUBL x y)
+(Sub8 x y) -> (SUBL x y)
(Sub32F x y) -> (SUBSS x y)
(Sub64F x y) -> (SUBSD x y)
(Mul64 x y) -> (MULQ x y)
(Mul32 x y) -> (MULL x y)
-(Mul16 x y) -> (MULW x y)
-(Mul8 x y) -> (MULB x y)
+(Mul16 x y) -> (MULL x y)
+(Mul8 x y) -> (MULL x y)
(Mul32F x y) -> (MULSS x y)
(Mul64F x y) -> (MULSD x y)
(And64 x y) -> (ANDQ x y)
(And32 x y) -> (ANDL x y)
-(And16 x y) -> (ANDW x y)
-(And8 x y) -> (ANDB x y)
+(And16 x y) -> (ANDL x y)
+(And8 x y) -> (ANDL x y)
(Or64 x y) -> (ORQ x y)
(Or32 x y) -> (ORL x y)
-(Or16 x y) -> (ORW x y)
-(Or8 x y) -> (ORB x y)
+(Or16 x y) -> (ORL x y)
+(Or8 x y) -> (ORL x y)
(Xor64 x y) -> (XORQ x y)
(Xor32 x y) -> (XORL x y)
-(Xor16 x y) -> (XORW x y)
-(Xor8 x y) -> (XORB x y)
+(Xor16 x y) -> (XORL x y)
+(Xor8 x y) -> (XORL x y)
(Neg64 x) -> (NEGQ x)
(Neg32 x) -> (NEGL x)
-(Neg16 x) -> (NEGW x)
-(Neg8 x) -> (NEGB x)
+(Neg16 x) -> (NEGL x)
+(Neg8 x) -> (NEGL x)
(Neg32F x) -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
(Neg64F x) -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
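// A minimal Go sketch (not part of the rules) of the trick the two rules above
// rely on: XORing in the IEEE-754 sign bit, i.e. the bit pattern of
// math.Copysign(0, -1) that f2i passes to the PXOR, flips the sign of the operand.
package sketch

import "math"

func negate64(x float64) float64 {
	signBit := math.Float64bits(math.Copysign(0, -1)) // 0x8000000000000000
	return math.Float64frombits(math.Float64bits(x) ^ signBit)
}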
(Com64 x) -> (NOTQ x)
(Com32 x) -> (NOTL x)
-(Com16 x) -> (NOTW x)
-(Com8 x) -> (NOTB x)
+(Com16 x) -> (NOTL x)
+(Com8 x) -> (NOTL x)
// The CMPQconst [0] below is redundant because BSFQ already sets Z on a zero input, but how do we remove it?
(Ctz64 <t> x) -> (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
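// A minimal Go sketch (not part of the rules) of what the Ctz64 lowering computes:
// BSFQ leaves its result undefined when the input is zero, so the CMOVQEQconst
// falls back to the constant 64 whenever (CMPQconst x [0]) sets the Z flag.
package sketch

func ctz64(x uint64) int {
	if x == 0 {
		return 64 // CMOVQEQconst selects the constant when Z is set
	}
	n := 0
	for x&1 == 0 { // BSFQ: index of the lowest set bit
		x >>= 1
		n++
	}
	return n
}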
(Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh32x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-(Lsh16x64 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
-(Lsh16x32 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
-(Lsh16x16 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
-(Lsh16x8 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+(Lsh16x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+(Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+(Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+(Lsh16x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-(Lsh8x64 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
-(Lsh8x32 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
-(Lsh8x16 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
-(Lsh8x8 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+(Lsh8x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+(Lsh8x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+(Lsh8x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+(Lsh8x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
(Lrot64 <t> x [c]) -> (ROLQconst <t> [c&63] x)
(Lrot32 <t> x [c]) -> (ROLLconst <t> [c&31] x)
(Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Rsh32Ux8 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-(Rsh16Ux64 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
-(Rsh16Ux32 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
-(Rsh16Ux16 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
-(Rsh16Ux8 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+(Rsh16Ux64 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+(Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+(Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+(Rsh16Ux8 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
-(Rsh8Ux64 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
-(Rsh8Ux32 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
-(Rsh8Ux16 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
-(Rsh8Ux8 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+(Rsh8Ux64 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+(Rsh8Ux32 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+(Rsh8Ux16 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+(Rsh8Ux8 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
// Signed right shift needs to return 0/-1 if the shift amount is >= the width of the shifted value.
// We implement this by replacing the shift count with -1 (all ones) when the count is >= the width.
// Note: for small shift widths we generate 32 bits of mask even when we don't need it all.
(Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
(Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
-(Rsh64x16 <t> x y) -> (SARQ <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
-(Rsh64x8 <t> x y) -> (SARQ <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
+(Rsh64x16 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
+(Rsh64x8 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
(Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
(Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
-(Rsh32x16 <t> x y) -> (SARL <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
-(Rsh32x8 <t> x y) -> (SARL <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+(Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+(Rsh32x8 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
(Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
(Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
-(Rsh16x16 <t> x y) -> (SARW <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
-(Rsh16x8 <t> x y) -> (SARW <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+(Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+(Rsh16x8 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
(Rsh8x64 <t> x y) -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
(Rsh8x32 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
-(Rsh8x16 <t> x y) -> (SARB <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
-(Rsh8x8 <t> x y) -> (SARB <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+(Rsh8x16 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+(Rsh8x8 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
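// A minimal Go sketch (not part of the rules) of the OR/NOT/SBBcarrymask trick
// described above, shown for the 64-bit case: a count below 64 passes through
// unchanged, while a count of 64 or more becomes all ones, and SARQ (which uses
// only the low 6 bits of its count) then shifts by 63, yielding 0 or -1
// according to the sign of x.
package sketch

func rsh64x64(x int64, y uint64) int64 {
	var mask uint64
	if y < 64 { // CMPQconst y [64] sets carry, so SBBQcarrymask yields all ones
		mask = ^uint64(0)
	}
	count := y | ^mask       // ORQ y (NOTQ mask): y when y < 64, all ones otherwise
	return x >> (count & 63) // SARQ consumes only the low 6 bits of the count
}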
(Less64 x y) -> (SETL (CMPQ x y))
(Less32 x y) -> (SETL (CMPL x y))
(Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 ->
(REPMOVSQ dst src (MOVQconst [size/8]) mem)
-(Not x) -> (XORBconst [1] x)
+(Not x) -> (XORLconst [1] x)
(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
-(Const8 [val]) -> (MOVBconst [val])
-(Const16 [val]) -> (MOVWconst [val])
+(Const8 [val]) -> (MOVLconst [val])
+(Const16 [val]) -> (MOVLconst [val])
(Const32 [val]) -> (MOVLconst [val])
(Const64 [val]) -> (MOVQconst [val])
(Const32F [val]) -> (MOVSSconst [val])
(Const64F [val]) -> (MOVSDconst [val])
(ConstNil) -> (MOVQconst [0])
-(ConstBool [b]) -> (MOVBconst [b])
+(ConstBool [b]) -> (MOVLconst [b])
(Addr {sym} base) -> (LEAQ {sym} base)
(ADDQ (MOVQconst [c]) x) && is32Bit(c) -> (ADDQconst [c] x)
(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
(ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x)
-(ADDW x (MOVWconst [c])) -> (ADDWconst [c] x)
-(ADDW (MOVWconst [c]) x) -> (ADDWconst [c] x)
-(ADDB x (MOVBconst [c])) -> (ADDBconst [c] x)
-(ADDB (MOVBconst [c]) x) -> (ADDBconst [c] x)
(SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c])
(SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst <v.Type> x [c]))
(SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
(SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))
-(SUBW x (MOVWconst [c])) -> (SUBWconst x [c])
-(SUBW (MOVWconst [c]) x) -> (NEGW (SUBWconst <v.Type> x [c]))
-(SUBB x (MOVBconst [c])) -> (SUBBconst x [c])
-(SUBB (MOVBconst [c]) x) -> (NEGB (SUBBconst <v.Type> x [c]))
(MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x)
(MULQ (MOVQconst [c]) x) && is32Bit(c) -> (MULQconst [c] x)
(MULL x (MOVLconst [c])) -> (MULLconst [c] x)
(MULL (MOVLconst [c]) x) -> (MULLconst [c] x)
-(MULW x (MOVWconst [c])) -> (MULWconst [c] x)
-(MULW (MOVWconst [c]) x) -> (MULWconst [c] x)
-(MULB x (MOVBconst [c])) -> (MULBconst [c] x)
-(MULB (MOVBconst [c]) x) -> (MULBconst [c] x)
(ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x)
(ANDQ (MOVQconst [c]) x) && is32Bit(c) -> (ANDQconst [c] x)
(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
(ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x)
-(ANDW x (MOVLconst [c])) -> (ANDWconst [c] x)
-(ANDW (MOVLconst [c]) x) -> (ANDWconst [c] x)
-(ANDW x (MOVWconst [c])) -> (ANDWconst [c] x)
-(ANDW (MOVWconst [c]) x) -> (ANDWconst [c] x)
-(ANDB x (MOVLconst [c])) -> (ANDBconst [c] x)
-(ANDB (MOVLconst [c]) x) -> (ANDBconst [c] x)
-(ANDB x (MOVBconst [c])) -> (ANDBconst [c] x)
-(ANDB (MOVBconst [c]) x) -> (ANDBconst [c] x)
-
-(ANDBconst [c] (ANDBconst [d] x)) -> (ANDBconst [c & d] x)
-(ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x)
+
(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
(ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x)
(ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x)
(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
(ORL (MOVLconst [c]) x) -> (ORLconst [c] x)
-(ORW x (MOVWconst [c])) -> (ORWconst [c] x)
-(ORW (MOVWconst [c]) x) -> (ORWconst [c] x)
-(ORB x (MOVBconst [c])) -> (ORBconst [c] x)
-(ORB (MOVBconst [c]) x) -> (ORBconst [c] x)
(XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
(XORQ (MOVQconst [c]) x) && is32Bit(c) -> (XORQconst [c] x)
(XORL x (MOVLconst [c])) -> (XORLconst [c] x)
(XORL (MOVLconst [c]) x) -> (XORLconst [c] x)
-(XORW x (MOVWconst [c])) -> (XORWconst [c] x)
-(XORW (MOVWconst [c]) x) -> (XORWconst [c] x)
-(XORB x (MOVBconst [c])) -> (XORBconst [c] x)
-(XORB (MOVBconst [c]) x) -> (XORBconst [c] x)
(SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x)
(SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x)
-(SHLQ x (MOVWconst [c])) -> (SHLQconst [c&63] x)
-(SHLQ x (MOVBconst [c])) -> (SHLQconst [c&63] x)
(SHLL x (MOVQconst [c])) -> (SHLLconst [c&31] x)
(SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
-(SHLL x (MOVWconst [c])) -> (SHLLconst [c&31] x)
-(SHLL x (MOVBconst [c])) -> (SHLLconst [c&31] x)
-
-(SHLW x (MOVQconst [c])) -> (SHLWconst [c&31] x)
-(SHLW x (MOVLconst [c])) -> (SHLWconst [c&31] x)
-(SHLW x (MOVWconst [c])) -> (SHLWconst [c&31] x)
-(SHLW x (MOVBconst [c])) -> (SHLWconst [c&31] x)
-
-(SHLB x (MOVQconst [c])) -> (SHLBconst [c&31] x)
-(SHLB x (MOVLconst [c])) -> (SHLBconst [c&31] x)
-(SHLB x (MOVWconst [c])) -> (SHLBconst [c&31] x)
-(SHLB x (MOVBconst [c])) -> (SHLBconst [c&31] x)
(SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x)
(SHRQ x (MOVLconst [c])) -> (SHRQconst [c&63] x)
-(SHRQ x (MOVWconst [c])) -> (SHRQconst [c&63] x)
-(SHRQ x (MOVBconst [c])) -> (SHRQconst [c&63] x)
(SHRL x (MOVQconst [c])) -> (SHRLconst [c&31] x)
(SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
-(SHRL x (MOVWconst [c])) -> (SHRLconst [c&31] x)
-(SHRL x (MOVBconst [c])) -> (SHRLconst [c&31] x)
(SHRW x (MOVQconst [c])) -> (SHRWconst [c&31] x)
(SHRW x (MOVLconst [c])) -> (SHRWconst [c&31] x)
-(SHRW x (MOVWconst [c])) -> (SHRWconst [c&31] x)
-(SHRW x (MOVBconst [c])) -> (SHRWconst [c&31] x)
(SHRB x (MOVQconst [c])) -> (SHRBconst [c&31] x)
(SHRB x (MOVLconst [c])) -> (SHRBconst [c&31] x)
-(SHRB x (MOVWconst [c])) -> (SHRBconst [c&31] x)
-(SHRB x (MOVBconst [c])) -> (SHRBconst [c&31] x)
(SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x)
(SARQ x (MOVLconst [c])) -> (SARQconst [c&63] x)
-(SARQ x (MOVWconst [c])) -> (SARQconst [c&63] x)
-(SARQ x (MOVBconst [c])) -> (SARQconst [c&63] x)
(SARL x (MOVQconst [c])) -> (SARLconst [c&31] x)
(SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)
-(SARL x (MOVWconst [c])) -> (SARLconst [c&31] x)
-(SARL x (MOVBconst [c])) -> (SARLconst [c&31] x)
(SARW x (MOVQconst [c])) -> (SARWconst [c&31] x)
(SARW x (MOVLconst [c])) -> (SARWconst [c&31] x)
-(SARW x (MOVWconst [c])) -> (SARWconst [c&31] x)
-(SARW x (MOVBconst [c])) -> (SARWconst [c&31] x)
(SARB x (MOVQconst [c])) -> (SARBconst [c&31] x)
(SARB x (MOVLconst [c])) -> (SARBconst [c&31] x)
-(SARB x (MOVWconst [c])) -> (SARBconst [c&31] x)
-(SARB x (MOVBconst [c])) -> (SARBconst [c&31] x)
-(SARB x (ANDBconst [31] y)) -> (SARB x y)
-(SARW x (ANDWconst [31] y)) -> (SARW x y)
(SARL x (ANDLconst [31] y)) -> (SARL x y)
(SARQ x (ANDQconst [63] y)) -> (SARQ x y)
-(SHLB x (ANDBconst [31] y)) -> (SHLB x y)
-(SHLW x (ANDWconst [31] y)) -> (SHLW x y)
(SHLL x (ANDLconst [31] y)) -> (SHLL x y)
(SHLQ x (ANDQconst [63] y)) -> (SHLQ x y)
-(SHRB x (ANDBconst [31] y)) -> (SHRB x y)
-(SHRW x (ANDWconst [31] y)) -> (SHRW x y)
(SHRL x (ANDLconst [31] y)) -> (SHRL x y)
(SHRQ x (ANDQconst [63] y)) -> (SHRQ x y)
// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
-// (SHLW x (MOVWconst [24])), but just in case.
+// (SHRW x (MOVLconst [24])), but just in case.
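// A minimal Go sketch (not part of the rules) of the point made in the comment
// above: the hardware SHRW honors all five low bits of its count, so masking the
// constant with &31 rather than &15 keeps the rewrite faithful even for an
// oversized count such as 24, which shifts a 16-bit value to zero.
package sketch

func shrw(x uint16, c uint64) uint16 {
	return x >> (c & 31) // c&31 == 24 yields 0, matching what SHRWconst [24] produces
}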
(CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c])
(CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c]))
(CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
(CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
-(CMPW x (MOVWconst [c])) -> (CMPWconst x [c])
-(CMPW (MOVWconst [c]) x) -> (InvertFlags (CMPWconst x [c]))
-(CMPB x (MOVBconst [c])) -> (CMPBconst x [c])
-(CMPB (MOVBconst [c]) x) -> (InvertFlags (CMPBconst x [c]))
+(CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))])
+(CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))]))
+(CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))])
+(CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))]))
// Using MOVBQZX instead of ANDQ is cheaper.
(ANDQconst [0xFF] x) -> (MOVBQZX x)
(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
// Fold extensions and ANDs together.
-(MOVBQZX (ANDBconst [c] x)) -> (ANDQconst [c & 0xff] x)
-(MOVWQZX (ANDWconst [c] x)) -> (ANDQconst [c & 0xffff] x)
-(MOVLQZX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)
-(MOVBQSX (ANDBconst [c] x)) && c & 0x80 == 0 -> (ANDQconst [c & 0x7f] x)
-(MOVWQSX (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDQconst [c & 0x7fff] x)
-(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)
+(MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
+(MOVWQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
+(MOVLQZX (ANDLconst [c] x)) -> (ANDLconst [c] x)
+(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
+(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
+(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDLconst [c & 0x7fffffff] x)
// Don't extend before storing
(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
(MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVWconst [c]) mem) && validOff(off) ->
+(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
-(MOVBstore [off] {sym} ptr (MOVBconst [c]) mem) && validOff(off) ->
+(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
// Fold address offsets into constant stores.
(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
-(CMPWconst (MOVWconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
-(CMPWconst (MOVWconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
-(CMPWconst (MOVWconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
-(CMPWconst (MOVWconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
-(CMPWconst (MOVWconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
-(CMPBconst (MOVBconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
-(CMPBconst (MOVBconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
-(CMPBconst (MOVBconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
-(CMPBconst (MOVBconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
-(CMPBconst (MOVBconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
// Other known comparisons.
(CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT)
(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
-(CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
-(CMPBconst (ANDBconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
// TODO: DIVxU also.
// Absorb flag constants into SBB ops.
(UGE (FlagGT_UGT) yes no) -> (First nil yes no)
// Absorb flag constants into SETxx ops.
-(SETEQ (FlagEQ)) -> (MOVBconst [1])
-(SETEQ (FlagLT_ULT)) -> (MOVBconst [0])
-(SETEQ (FlagLT_UGT)) -> (MOVBconst [0])
-(SETEQ (FlagGT_ULT)) -> (MOVBconst [0])
-(SETEQ (FlagGT_UGT)) -> (MOVBconst [0])
-
-(SETNE (FlagEQ)) -> (MOVBconst [0])
-(SETNE (FlagLT_ULT)) -> (MOVBconst [1])
-(SETNE (FlagLT_UGT)) -> (MOVBconst [1])
-(SETNE (FlagGT_ULT)) -> (MOVBconst [1])
-(SETNE (FlagGT_UGT)) -> (MOVBconst [1])
-
-(SETL (FlagEQ)) -> (MOVBconst [0])
-(SETL (FlagLT_ULT)) -> (MOVBconst [1])
-(SETL (FlagLT_UGT)) -> (MOVBconst [1])
-(SETL (FlagGT_ULT)) -> (MOVBconst [0])
-(SETL (FlagGT_UGT)) -> (MOVBconst [0])
-
-(SETLE (FlagEQ)) -> (MOVBconst [1])
-(SETLE (FlagLT_ULT)) -> (MOVBconst [1])
-(SETLE (FlagLT_UGT)) -> (MOVBconst [1])
-(SETLE (FlagGT_ULT)) -> (MOVBconst [0])
-(SETLE (FlagGT_UGT)) -> (MOVBconst [0])
-
-(SETG (FlagEQ)) -> (MOVBconst [0])
-(SETG (FlagLT_ULT)) -> (MOVBconst [0])
-(SETG (FlagLT_UGT)) -> (MOVBconst [0])
-(SETG (FlagGT_ULT)) -> (MOVBconst [1])
-(SETG (FlagGT_UGT)) -> (MOVBconst [1])
-
-(SETGE (FlagEQ)) -> (MOVBconst [1])
-(SETGE (FlagLT_ULT)) -> (MOVBconst [0])
-(SETGE (FlagLT_UGT)) -> (MOVBconst [0])
-(SETGE (FlagGT_ULT)) -> (MOVBconst [1])
-(SETGE (FlagGT_UGT)) -> (MOVBconst [1])
-
-(SETB (FlagEQ)) -> (MOVBconst [0])
-(SETB (FlagLT_ULT)) -> (MOVBconst [1])
-(SETB (FlagLT_UGT)) -> (MOVBconst [0])
-(SETB (FlagGT_ULT)) -> (MOVBconst [1])
-(SETB (FlagGT_UGT)) -> (MOVBconst [0])
-
-(SETBE (FlagEQ)) -> (MOVBconst [1])
-(SETBE (FlagLT_ULT)) -> (MOVBconst [1])
-(SETBE (FlagLT_UGT)) -> (MOVBconst [0])
-(SETBE (FlagGT_ULT)) -> (MOVBconst [1])
-(SETBE (FlagGT_UGT)) -> (MOVBconst [0])
-
-(SETA (FlagEQ)) -> (MOVBconst [0])
-(SETA (FlagLT_ULT)) -> (MOVBconst [0])
-(SETA (FlagLT_UGT)) -> (MOVBconst [1])
-(SETA (FlagGT_ULT)) -> (MOVBconst [0])
-(SETA (FlagGT_UGT)) -> (MOVBconst [1])
-
-(SETAE (FlagEQ)) -> (MOVBconst [1])
-(SETAE (FlagLT_ULT)) -> (MOVBconst [0])
-(SETAE (FlagLT_UGT)) -> (MOVBconst [1])
-(SETAE (FlagGT_ULT)) -> (MOVBconst [0])
-(SETAE (FlagGT_UGT)) -> (MOVBconst [1])
+(SETEQ (FlagEQ)) -> (MOVLconst [1])
+(SETEQ (FlagLT_ULT)) -> (MOVLconst [0])
+(SETEQ (FlagLT_UGT)) -> (MOVLconst [0])
+(SETEQ (FlagGT_ULT)) -> (MOVLconst [0])
+(SETEQ (FlagGT_UGT)) -> (MOVLconst [0])
+
+(SETNE (FlagEQ)) -> (MOVLconst [0])
+(SETNE (FlagLT_ULT)) -> (MOVLconst [1])
+(SETNE (FlagLT_UGT)) -> (MOVLconst [1])
+(SETNE (FlagGT_ULT)) -> (MOVLconst [1])
+(SETNE (FlagGT_UGT)) -> (MOVLconst [1])
+
+(SETL (FlagEQ)) -> (MOVLconst [0])
+(SETL (FlagLT_ULT)) -> (MOVLconst [1])
+(SETL (FlagLT_UGT)) -> (MOVLconst [1])
+(SETL (FlagGT_ULT)) -> (MOVLconst [0])
+(SETL (FlagGT_UGT)) -> (MOVLconst [0])
+
+(SETLE (FlagEQ)) -> (MOVLconst [1])
+(SETLE (FlagLT_ULT)) -> (MOVLconst [1])
+(SETLE (FlagLT_UGT)) -> (MOVLconst [1])
+(SETLE (FlagGT_ULT)) -> (MOVLconst [0])
+(SETLE (FlagGT_UGT)) -> (MOVLconst [0])
+
+(SETG (FlagEQ)) -> (MOVLconst [0])
+(SETG (FlagLT_ULT)) -> (MOVLconst [0])
+(SETG (FlagLT_UGT)) -> (MOVLconst [0])
+(SETG (FlagGT_ULT)) -> (MOVLconst [1])
+(SETG (FlagGT_UGT)) -> (MOVLconst [1])
+
+(SETGE (FlagEQ)) -> (MOVLconst [1])
+(SETGE (FlagLT_ULT)) -> (MOVLconst [0])
+(SETGE (FlagLT_UGT)) -> (MOVLconst [0])
+(SETGE (FlagGT_ULT)) -> (MOVLconst [1])
+(SETGE (FlagGT_UGT)) -> (MOVLconst [1])
+
+(SETB (FlagEQ)) -> (MOVLconst [0])
+(SETB (FlagLT_ULT)) -> (MOVLconst [1])
+(SETB (FlagLT_UGT)) -> (MOVLconst [0])
+(SETB (FlagGT_ULT)) -> (MOVLconst [1])
+(SETB (FlagGT_UGT)) -> (MOVLconst [0])
+
+(SETBE (FlagEQ)) -> (MOVLconst [1])
+(SETBE (FlagLT_ULT)) -> (MOVLconst [1])
+(SETBE (FlagLT_UGT)) -> (MOVLconst [0])
+(SETBE (FlagGT_ULT)) -> (MOVLconst [1])
+(SETBE (FlagGT_UGT)) -> (MOVLconst [0])
+
+(SETA (FlagEQ)) -> (MOVLconst [0])
+(SETA (FlagLT_ULT)) -> (MOVLconst [0])
+(SETA (FlagLT_UGT)) -> (MOVLconst [1])
+(SETA (FlagGT_ULT)) -> (MOVLconst [0])
+(SETA (FlagGT_UGT)) -> (MOVLconst [1])
+
+(SETAE (FlagEQ)) -> (MOVLconst [1])
+(SETAE (FlagLT_ULT)) -> (MOVLconst [0])
+(SETAE (FlagLT_UGT)) -> (MOVLconst [1])
+(SETAE (FlagGT_ULT)) -> (MOVLconst [0])
+(SETAE (FlagGT_UGT)) -> (MOVLconst [1])
// Remove redundant *const ops
(ADDQconst [0] x) -> x
(ADDLconst [c] x) && int32(c)==0 -> x
-(ADDWconst [c] x) && int16(c)==0 -> x
-(ADDBconst [c] x) && int8(c)==0 -> x
(SUBQconst [0] x) -> x
(SUBLconst [c] x) && int32(c) == 0 -> x
-(SUBWconst [c] x) && int16(c) == 0 -> x
-(SUBBconst [c] x) && int8(c) == 0 -> x
(ANDQconst [0] _) -> (MOVQconst [0])
(ANDLconst [c] _) && int32(c)==0 -> (MOVLconst [0])
-(ANDWconst [c] _) && int16(c)==0 -> (MOVWconst [0])
-(ANDBconst [c] _) && int8(c)==0 -> (MOVBconst [0])
(ANDQconst [-1] x) -> x
(ANDLconst [c] x) && int32(c)==-1 -> x
-(ANDWconst [c] x) && int16(c)==-1 -> x
-(ANDBconst [c] x) && int8(c)==-1 -> x
(ORQconst [0] x) -> x
(ORLconst [c] x) && int32(c)==0 -> x
-(ORWconst [c] x) && int16(c)==0 -> x
-(ORBconst [c] x) && int8(c)==0 -> x
(ORQconst [-1] _) -> (MOVQconst [-1])
(ORLconst [c] _) && int32(c)==-1 -> (MOVLconst [-1])
-(ORWconst [c] _) && int16(c)==-1 -> (MOVWconst [-1])
-(ORBconst [c] _) && int8(c)==-1 -> (MOVBconst [-1])
(XORQconst [0] x) -> x
(XORLconst [c] x) && int32(c)==0 -> x
-(XORWconst [c] x) && int16(c)==0 -> x
-(XORBconst [c] x) && int8(c)==0 -> x
+// TODO: since we got rid of the W/B versions, we might miss
+// things like (ANDLconst [0x100] x) which were formerly
+// (ANDBconst [0] x). Probably doesn't happen very often.
+// If we cared, we might do:
+// (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
// Convert constant subtracts to constant adds
(SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x)
(SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
-(SUBWconst [c] x) -> (ADDWconst [int64(int16(-c))] x)
-(SUBBconst [c] x) -> (ADDBconst [int64(int8(-c))] x)
// generic constant folding
// TODO: more of this
(ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
(ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
-(ADDWconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int16(c+d))])
-(ADDBconst [c] (MOVBconst [d])) -> (MOVBconst [int64(int8(c+d))])
(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
(ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
-(ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [int64(int16(c+d))] x)
-(ADDBconst [c] (ADDBconst [d] x)) -> (ADDBconst [int64(int8(c+d))] x)
(SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c])
(SUBLconst (MOVLconst [d]) [c]) -> (MOVLconst [int64(int32(d-c))])
-(SUBWconst (MOVWconst [d]) [c]) -> (MOVWconst [int64(int16(d-c))])
-(SUBBconst (MOVBconst [d]) [c]) -> (MOVBconst [int64(int8(d-c))])
(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
(SUBLconst (SUBLconst x [d]) [c]) -> (ADDLconst [int64(int32(-c-d))] x)
-(SUBWconst (SUBWconst x [d]) [c]) -> (ADDWconst [int64(int16(-c-d))] x)
-(SUBBconst (SUBBconst x [d]) [c]) -> (ADDBconst [int64(int8(-c-d))] x)
(SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARLconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARWconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARBconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(NEGQ (MOVQconst [c])) -> (MOVQconst [-c])
(NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
-(NEGW (MOVWconst [c])) -> (MOVWconst [int64(int16(-c))])
-(NEGB (MOVBconst [c])) -> (MOVBconst [int64(int8(-c))])
(MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
(MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
-(MULWconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int16(c*d))])
-(MULBconst [c] (MOVBconst [d])) -> (MOVBconst [int64(int8(c*d))])
(ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
(ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
-(ANDWconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
-(ANDBconst [c] (MOVBconst [d])) -> (MOVBconst [c&d])
(ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d])
(ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
-(ORWconst [c] (MOVWconst [d])) -> (MOVWconst [c|d])
-(ORBconst [c] (MOVBconst [d])) -> (MOVBconst [c|d])
(XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d])
(XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
-(XORWconst [c] (MOVWconst [d])) -> (MOVWconst [c^d])
-(XORBconst [c] (MOVBconst [d])) -> (MOVBconst [c^d])
(NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
(NOTL (MOVLconst [c])) -> (MOVLconst [^c])
-(NOTW (MOVWconst [c])) -> (MOVWconst [^c])
-(NOTB (MOVBconst [c])) -> (MOVBconst [^c])
// generic simplifications
// TODO: more of this
(ADDQ x (NEGQ y)) -> (SUBQ x y)
(ADDL x (NEGL y)) -> (SUBL x y)
-(ADDW x (NEGW y)) -> (SUBW x y)
-(ADDB x (NEGB y)) -> (SUBB x y)
(SUBQ x x) -> (MOVQconst [0])
(SUBL x x) -> (MOVLconst [0])
-(SUBW x x) -> (MOVWconst [0])
-(SUBB x x) -> (MOVBconst [0])
(ANDQ x x) -> x
(ANDL x x) -> x
-(ANDW x x) -> x
-(ANDB x x) -> x
(ORQ x x) -> x
(ORL x x) -> x
-(ORW x x) -> x
-(ORB x x) -> x
(XORQ x x) -> (MOVQconst [0])
(XORL x x) -> (MOVLconst [0])
-(XORW x x) -> (MOVWconst [0])
-(XORB x x) -> (MOVBconst [0])
// checking AND against 0.
(CMPQconst (ANDQ x y) [0]) -> (TESTQ x y)
(CMPLconst (ANDL x y) [0]) -> (TESTL x y)
-(CMPWconst (ANDW x y) [0]) -> (TESTW x y)
-(CMPBconst (ANDB x y) [0]) -> (TESTB x y)
+(CMPWconst (ANDL x y) [0]) -> (TESTW x y)
+(CMPBconst (ANDL x y) [0]) -> (TESTB x y)
(CMPQconst (ANDQconst [c] x) [0]) -> (TESTQconst [c] x)
(CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
-(CMPWconst (ANDWconst [c] x) [0]) -> (TESTWconst [c] x)
-(CMPBconst (ANDBconst [c] x) [0]) -> (TESTBconst [c] x)
+(CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
+(CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)
// TEST %reg,%reg is shorter than CMP
(CMPQconst x [0]) -> (TESTQ x x)
// Combining byte loads into larger (unaligned) loads.
// There are many ways these combinations could occur. This is
// designed to match the way encoding/binary.LittleEndian does it.
-(ORW x0:(MOVBload [i] {s} p mem)
- s0:(SHLWconst [8] x1:(MOVBload [i+1] {s} p mem)))
+(ORL x0:(MOVBload [i] {s} p mem)
+ s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
&& x0.Uses == 1
&& x1.Uses == 1
&& s0.Uses == 1
&& clobber(o5)
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem)
-(ORW x0:(MOVBloadidx1 [i] {s} p idx mem)
- s0:(SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
+(ORL x0:(MOVBloadidx1 [i] {s} p idx mem)
+ s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
&& x0.Uses == 1
&& x1.Uses == 1
&& s0.Uses == 1
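// A minimal Go sketch (not part of the rules) of the source pattern the
// load-combining rules target: little-endian decoding in encoding/binary is
// written as byte loads joined by shifts and ORs, and the rules above fuse that
// shape into a single wider load.
package sketch

func readUint16(b []byte) uint16 {
	// Two MOVBloads, a SHLLconst [8], and an ORL: the shape these rules match.
	return uint16(b[0]) | uint16(b[1])<<8
}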
var _ = math.MinInt8 // in case not otherwise used
func rewriteValueAMD64(v *Value, config *Config) bool {
switch v.Op {
- case OpAMD64ADDB:
- return rewriteValueAMD64_OpAMD64ADDB(v, config)
- case OpAMD64ADDBconst:
- return rewriteValueAMD64_OpAMD64ADDBconst(v, config)
case OpAMD64ADDL:
return rewriteValueAMD64_OpAMD64ADDL(v, config)
case OpAMD64ADDLconst:
return rewriteValueAMD64_OpAMD64ADDQ(v, config)
case OpAMD64ADDQconst:
return rewriteValueAMD64_OpAMD64ADDQconst(v, config)
- case OpAMD64ADDW:
- return rewriteValueAMD64_OpAMD64ADDW(v, config)
- case OpAMD64ADDWconst:
- return rewriteValueAMD64_OpAMD64ADDWconst(v, config)
- case OpAMD64ANDB:
- return rewriteValueAMD64_OpAMD64ANDB(v, config)
- case OpAMD64ANDBconst:
- return rewriteValueAMD64_OpAMD64ANDBconst(v, config)
case OpAMD64ANDL:
return rewriteValueAMD64_OpAMD64ANDL(v, config)
case OpAMD64ANDLconst:
return rewriteValueAMD64_OpAMD64ANDQ(v, config)
case OpAMD64ANDQconst:
return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
- case OpAMD64ANDW:
- return rewriteValueAMD64_OpAMD64ANDW(v, config)
- case OpAMD64ANDWconst:
- return rewriteValueAMD64_OpAMD64ANDWconst(v, config)
case OpAdd16:
return rewriteValueAMD64_OpAdd16(v, config)
case OpAdd32:
return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
case OpAMD64MOVWstoreidx2:
return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
- case OpAMD64MULB:
- return rewriteValueAMD64_OpAMD64MULB(v, config)
- case OpAMD64MULBconst:
- return rewriteValueAMD64_OpAMD64MULBconst(v, config)
case OpAMD64MULL:
return rewriteValueAMD64_OpAMD64MULL(v, config)
case OpAMD64MULLconst:
return rewriteValueAMD64_OpAMD64MULQ(v, config)
case OpAMD64MULQconst:
return rewriteValueAMD64_OpAMD64MULQconst(v, config)
- case OpAMD64MULW:
- return rewriteValueAMD64_OpAMD64MULW(v, config)
- case OpAMD64MULWconst:
- return rewriteValueAMD64_OpAMD64MULWconst(v, config)
case OpMod16:
return rewriteValueAMD64_OpMod16(v, config)
case OpMod16u:
return rewriteValueAMD64_OpMul64F(v, config)
case OpMul8:
return rewriteValueAMD64_OpMul8(v, config)
- case OpAMD64NEGB:
- return rewriteValueAMD64_OpAMD64NEGB(v, config)
case OpAMD64NEGL:
return rewriteValueAMD64_OpAMD64NEGL(v, config)
case OpAMD64NEGQ:
return rewriteValueAMD64_OpAMD64NEGQ(v, config)
- case OpAMD64NEGW:
- return rewriteValueAMD64_OpAMD64NEGW(v, config)
- case OpAMD64NOTB:
- return rewriteValueAMD64_OpAMD64NOTB(v, config)
case OpAMD64NOTL:
return rewriteValueAMD64_OpAMD64NOTL(v, config)
case OpAMD64NOTQ:
return rewriteValueAMD64_OpAMD64NOTQ(v, config)
- case OpAMD64NOTW:
- return rewriteValueAMD64_OpAMD64NOTW(v, config)
case OpNeg16:
return rewriteValueAMD64_OpNeg16(v, config)
case OpNeg32:
return rewriteValueAMD64_OpNilCheck(v, config)
case OpNot:
return rewriteValueAMD64_OpNot(v, config)
- case OpAMD64ORB:
- return rewriteValueAMD64_OpAMD64ORB(v, config)
- case OpAMD64ORBconst:
- return rewriteValueAMD64_OpAMD64ORBconst(v, config)
case OpAMD64ORL:
return rewriteValueAMD64_OpAMD64ORL(v, config)
case OpAMD64ORLconst:
return rewriteValueAMD64_OpAMD64ORQ(v, config)
case OpAMD64ORQconst:
return rewriteValueAMD64_OpAMD64ORQconst(v, config)
- case OpAMD64ORW:
- return rewriteValueAMD64_OpAMD64ORW(v, config)
- case OpAMD64ORWconst:
- return rewriteValueAMD64_OpAMD64ORWconst(v, config)
case OpOffPtr:
return rewriteValueAMD64_OpOffPtr(v, config)
case OpOr16:
return rewriteValueAMD64_OpAMD64SETLE(v, config)
case OpAMD64SETNE:
return rewriteValueAMD64_OpAMD64SETNE(v, config)
- case OpAMD64SHLB:
- return rewriteValueAMD64_OpAMD64SHLB(v, config)
case OpAMD64SHLL:
return rewriteValueAMD64_OpAMD64SHLL(v, config)
case OpAMD64SHLQ:
return rewriteValueAMD64_OpAMD64SHLQ(v, config)
- case OpAMD64SHLW:
- return rewriteValueAMD64_OpAMD64SHLW(v, config)
case OpAMD64SHRB:
return rewriteValueAMD64_OpAMD64SHRB(v, config)
case OpAMD64SHRL:
return rewriteValueAMD64_OpAMD64SHRQ(v, config)
case OpAMD64SHRW:
return rewriteValueAMD64_OpAMD64SHRW(v, config)
- case OpAMD64SUBB:
- return rewriteValueAMD64_OpAMD64SUBB(v, config)
- case OpAMD64SUBBconst:
- return rewriteValueAMD64_OpAMD64SUBBconst(v, config)
case OpAMD64SUBL:
return rewriteValueAMD64_OpAMD64SUBL(v, config)
case OpAMD64SUBLconst:
return rewriteValueAMD64_OpAMD64SUBQ(v, config)
case OpAMD64SUBQconst:
return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
- case OpAMD64SUBW:
- return rewriteValueAMD64_OpAMD64SUBW(v, config)
- case OpAMD64SUBWconst:
- return rewriteValueAMD64_OpAMD64SUBWconst(v, config)
case OpSignExt16to32:
return rewriteValueAMD64_OpSignExt16to32(v, config)
case OpSignExt16to64:
return rewriteValueAMD64_OpTrunc64to32(v, config)
case OpTrunc64to8:
return rewriteValueAMD64_OpTrunc64to8(v, config)
- case OpAMD64XORB:
- return rewriteValueAMD64_OpAMD64XORB(v, config)
- case OpAMD64XORBconst:
- return rewriteValueAMD64_OpAMD64XORBconst(v, config)
case OpAMD64XORL:
return rewriteValueAMD64_OpAMD64XORL(v, config)
case OpAMD64XORLconst:
return rewriteValueAMD64_OpAMD64XORQ(v, config)
case OpAMD64XORQconst:
return rewriteValueAMD64_OpAMD64XORQconst(v, config)
- case OpAMD64XORW:
- return rewriteValueAMD64_OpAMD64XORW(v, config)
- case OpAMD64XORWconst:
- return rewriteValueAMD64_OpAMD64XORWconst(v, config)
case OpXor16:
return rewriteValueAMD64_OpXor16(v, config)
case OpXor32:
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ADDB x (MOVBconst [c]))
- // cond:
- // result: (ADDBconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64ADDBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ADDB (MOVBconst [c]) x)
- // cond:
- // result: (ADDBconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64ADDBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ADDB x (NEGB y))
- // cond:
- // result: (SUBB x y)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64NEGB {
- break
- }
- y := v_1.Args[0]
- v.reset(OpAMD64SUBB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64ADDBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ADDBconst [c] x)
- // cond: int8(c)==0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int8(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (ADDBconst [c] (MOVBconst [d]))
- // cond:
- // result: (MOVBconst [int64(int8(c+d))])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = int64(int8(c + d))
- return true
- }
- // match: (ADDBconst [c] (ADDBconst [d] x))
- // cond:
- // result: (ADDBconst [int64(int8(c+d))] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDBconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64ADDBconst)
- v.AuxInt = int64(int8(c + d))
- v.AddArg(x)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
b := v.Block
_ = b
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ADDW x (MOVWconst [c]))
- // cond:
- // result: (ADDWconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64ADDWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ADDW (MOVWconst [c]) x)
- // cond:
- // result: (ADDWconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64ADDWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ADDW x (NEGW y))
- // cond:
- // result: (SUBW x y)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64NEGW {
- break
- }
- y := v_1.Args[0]
- v.reset(OpAMD64SUBW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64ADDWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ADDWconst [c] x)
- // cond: int16(c)==0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int16(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (ADDWconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [int64(int16(c+d))])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = int64(int16(c + d))
- return true
- }
- // match: (ADDWconst [c] (ADDWconst [d] x))
- // cond:
- // result: (ADDWconst [int64(int16(c+d))] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDWconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64ADDWconst)
- v.AuxInt = int64(int16(c + d))
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64ANDB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ANDB x (MOVLconst [c]))
- // cond:
- // result: (ANDBconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64ANDBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ANDB (MOVLconst [c]) x)
- // cond:
- // result: (ANDBconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64ANDBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ANDB x (MOVBconst [c]))
- // cond:
- // result: (ANDBconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64ANDBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ANDB (MOVBconst [c]) x)
- // cond:
- // result: (ANDBconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64ANDBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ANDB x x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64ANDBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ANDBconst [c] (ANDBconst [d] x))
- // cond:
- // result: (ANDBconst [c & d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDBconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64ANDBconst)
- v.AuxInt = c & d
- v.AddArg(x)
- return true
- }
- // match: (ANDBconst [c] _)
- // cond: int8(c)==0
- // result: (MOVBconst [0])
- for {
- c := v.AuxInt
- if !(int8(c) == 0) {
- break
- }
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = 0
- return true
- }
- // match: (ANDBconst [c] x)
- // cond: int8(c)==-1
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int8(c) == -1) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (ANDBconst [c] (MOVBconst [d]))
- // cond:
- // result: (MOVBconst [c&d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = c & d
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool {
b := v.Block
_ = b
}
return false
}
-func rewriteValueAMD64_OpAMD64ANDW(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (ANDW x (MOVLconst [c]))
+ // match: (Add16 x y)
// cond:
- // result: (ANDWconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64ANDWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ANDW (MOVLconst [c]) x)
- // cond:
- // result: (ANDWconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64ANDWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ANDW x (MOVWconst [c]))
- // cond:
- // result: (ANDWconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64ANDWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ANDW (MOVWconst [c]) x)
- // cond:
- // result: (ANDWconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64ANDWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ANDW x x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64ANDWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ANDWconst [c] (ANDWconst [d] x))
- // cond:
- // result: (ANDWconst [c & d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDWconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64ANDWconst)
- v.AuxInt = c & d
- v.AddArg(x)
- return true
- }
- // match: (ANDWconst [c] _)
- // cond: int16(c)==0
- // result: (MOVWconst [0])
- for {
- c := v.AuxInt
- if !(int16(c) == 0) {
- break
- }
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = 0
- return true
- }
- // match: (ANDWconst [c] x)
- // cond: int16(c)==-1
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int16(c) == -1) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (ANDWconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [c&d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = c & d
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add16 x y)
- // cond:
- // result: (ADDW x y)
+ // result: (ADDL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ADDW)
+ v.reset(OpAMD64ADDL)
v.AddArg(x)
v.AddArg(y)
return true
_ = b
// match: (Add8 x y)
// cond:
- // result: (ADDB x y)
+ // result: (ADDL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ADDB)
+ v.reset(OpAMD64ADDL)
v.AddArg(x)
v.AddArg(y)
return true
_ = b
// match: (And16 x y)
// cond:
- // result: (ANDW x y)
+ // result: (ANDL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDW)
+ v.reset(OpAMD64ANDL)
v.AddArg(x)
v.AddArg(y)
return true
_ = b
// match: (And8 x y)
// cond:
- // result: (ANDB x y)
+ // result: (ANDL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDB)
+ v.reset(OpAMD64ANDL)
v.AddArg(x)
v.AddArg(y)
return true
func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (CMPB x (MOVBconst [c]))
+ // match: (CMPB x (MOVLconst [c]))
// cond:
- // result: (CMPBconst x [c])
+ // result: (CMPBconst x [int64(int8(c))])
for {
x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
+ if v_1.Op != OpAMD64MOVLconst {
break
}
c := v_1.AuxInt
v.reset(OpAMD64CMPBconst)
v.AddArg(x)
- v.AuxInt = c
+ v.AuxInt = int64(int8(c))
return true
}
- // match: (CMPB (MOVBconst [c]) x)
+ // match: (CMPB (MOVLconst [c]) x)
// cond:
- // result: (InvertFlags (CMPBconst x [c]))
+ // result: (InvertFlags (CMPBconst x [int64(int8(c))]))
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
c := v_0.AuxInt
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
v0.AddArg(x)
- v0.AuxInt = c
+ v0.AuxInt = int64(int8(c))
v.AddArg(v0)
return true
}
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (CMPBconst (MOVBconst [x]) [y])
+ // match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)==int8(y)
// result: (FlagEQ)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
v.reset(OpAMD64FlagEQ)
return true
}
- // match: (CMPBconst (MOVBconst [x]) [y])
+ // match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
// result: (FlagLT_ULT)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMPBconst (MOVBconst [x]) [y])
+ // match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
// result: (FlagLT_UGT)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
v.reset(OpAMD64FlagLT_UGT)
return true
}
- // match: (CMPBconst (MOVBconst [x]) [y])
+ // match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
// result: (FlagGT_ULT)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
v.reset(OpAMD64FlagGT_ULT)
return true
}
- // match: (CMPBconst (MOVBconst [x]) [y])
+ // match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
// result: (FlagGT_UGT)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
v.reset(OpAMD64FlagGT_UGT)
return true
}
- // match: (CMPBconst (ANDBconst _ [m]) [n])
+ // match: (CMPBconst (ANDLconst _ [m]) [n])
// cond: 0 <= int8(m) && int8(m) < int8(n)
// result: (FlagLT_ULT)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDBconst {
+ if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMPBconst (ANDB x y) [0])
+ // match: (CMPBconst (ANDL x y) [0])
// cond:
// result: (TESTB x y)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDB {
+ if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
v.AddArg(y)
return true
}
- // match: (CMPBconst (ANDBconst [c] x) [0])
+ // match: (CMPBconst (ANDLconst [c] x) [0])
// cond:
- // result: (TESTBconst [c] x)
+ // result: (TESTBconst [int64(int8(c))] x)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDBconst {
+ if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
break
}
v.reset(OpAMD64TESTBconst)
- v.AuxInt = c
+ v.AuxInt = int64(int8(c))
v.AddArg(x)
return true
}
func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (CMPW x (MOVWconst [c]))
+ // match: (CMPW x (MOVLconst [c]))
// cond:
- // result: (CMPWconst x [c])
+ // result: (CMPWconst x [int64(int16(c))])
for {
x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
+ if v_1.Op != OpAMD64MOVLconst {
break
}
c := v_1.AuxInt
v.reset(OpAMD64CMPWconst)
v.AddArg(x)
- v.AuxInt = c
+ v.AuxInt = int64(int16(c))
return true
}
- // match: (CMPW (MOVWconst [c]) x)
+ // match: (CMPW (MOVLconst [c]) x)
// cond:
- // result: (InvertFlags (CMPWconst x [c]))
+ // result: (InvertFlags (CMPWconst x [int64(int16(c))]))
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
c := v_0.AuxInt
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
v0.AddArg(x)
- v0.AuxInt = c
+ v0.AuxInt = int64(int16(c))
v.AddArg(v0)
return true
}
func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (CMPWconst (MOVWconst [x]) [y])
+ // match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)==int16(y)
// result: (FlagEQ)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
v.reset(OpAMD64FlagEQ)
return true
}
- // match: (CMPWconst (MOVWconst [x]) [y])
+ // match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
// result: (FlagLT_ULT)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMPWconst (MOVWconst [x]) [y])
+ // match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
v.reset(OpAMD64FlagLT_UGT)
return true
}
- // match: (CMPWconst (MOVWconst [x]) [y])
+ // match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
v.reset(OpAMD64FlagGT_ULT)
return true
}
- // match: (CMPWconst (MOVWconst [x]) [y])
+ // match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
v.reset(OpAMD64FlagGT_UGT)
return true
}
- // match: (CMPWconst (ANDWconst _ [m]) [n])
+ // match: (CMPWconst (ANDLconst _ [m]) [n])
// cond: 0 <= int16(m) && int16(m) < int16(n)
// result: (FlagLT_ULT)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDWconst {
+ if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMPWconst (ANDW x y) [0])
+ // match: (CMPWconst (ANDL x y) [0])
// cond:
// result: (TESTW x y)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDW {
+ if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
v.AddArg(y)
return true
}
- // match: (CMPWconst (ANDWconst [c] x) [0])
+ // match: (CMPWconst (ANDLconst [c] x) [0])
// cond:
- // result: (TESTWconst [c] x)
+ // result: (TESTWconst [int64(int16(c))] x)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDWconst {
+ if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
break
}
v.reset(OpAMD64TESTWconst)
- v.AuxInt = c
+ v.AuxInt = int64(int16(c))
v.AddArg(x)
return true
}
_ = b
// match: (Com16 x)
// cond:
- // result: (NOTW x)
+ // result: (NOTL x)
for {
x := v.Args[0]
- v.reset(OpAMD64NOTW)
+ v.reset(OpAMD64NOTL)
v.AddArg(x)
return true
}
_ = b
// match: (Com8 x)
// cond:
- // result: (NOTB x)
+ // result: (NOTL x)
for {
x := v.Args[0]
- v.reset(OpAMD64NOTB)
+ v.reset(OpAMD64NOTL)
v.AddArg(x)
return true
}
_ = b
// match: (Const16 [val])
// cond:
- // result: (MOVWconst [val])
+ // result: (MOVLconst [val])
for {
val := v.AuxInt
- v.reset(OpAMD64MOVWconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = val
return true
}
_ = b
// match: (Const8 [val])
// cond:
- // result: (MOVBconst [val])
+ // result: (MOVLconst [val])
for {
val := v.AuxInt
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = val
return true
}
_ = b
// match: (ConstBool [b])
// cond:
- // result: (MOVBconst [b])
+ // result: (MOVLconst [b])
for {
b := v.AuxInt
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = b
return true
}
_ = b
// match: (Lsh16x16 <t> x y)
// cond:
- // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDW)
- v0 := b.NewValue0(v.Line, OpAMD64SHLW, t)
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
v2.AddArg(y)
- v2.AuxInt = 16
+ v2.AuxInt = 32
v1.AddArg(v2)
v.AddArg(v1)
return true
_ = b
// match: (Lsh16x32 <t> x y)
// cond:
- // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDW)
- v0 := b.NewValue0(v.Line, OpAMD64SHLW, t)
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
v2.AddArg(y)
- v2.AuxInt = 16
+ v2.AuxInt = 32
v1.AddArg(v2)
v.AddArg(v1)
return true
_ = b
// match: (Lsh16x64 <t> x y)
// cond:
- // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDW)
- v0 := b.NewValue0(v.Line, OpAMD64SHLW, t)
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
v2.AddArg(y)
- v2.AuxInt = 16
+ v2.AuxInt = 32
v1.AddArg(v2)
v.AddArg(v1)
return true
_ = b
// match: (Lsh16x8 <t> x y)
// cond:
- // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDW)
- v0 := b.NewValue0(v.Line, OpAMD64SHLW, t)
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
v2.AddArg(y)
- v2.AuxInt = 16
+ v2.AuxInt = 32
v1.AddArg(v2)
v.AddArg(v1)
return true
_ = b
// match: (Lsh8x16 <t> x y)
// cond:
- // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDB)
- v0 := b.NewValue0(v.Line, OpAMD64SHLB, t)
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
v2.AddArg(y)
- v2.AuxInt = 8
+ v2.AuxInt = 32
v1.AddArg(v2)
v.AddArg(v1)
return true
_ = b
// match: (Lsh8x32 <t> x y)
// cond:
- // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDB)
- v0 := b.NewValue0(v.Line, OpAMD64SHLB, t)
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
v2.AddArg(y)
- v2.AuxInt = 8
+ v2.AuxInt = 32
v1.AddArg(v2)
v.AddArg(v1)
return true
_ = b
// match: (Lsh8x64 <t> x y)
// cond:
- // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDB)
- v0 := b.NewValue0(v.Line, OpAMD64SHLB, t)
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
v2.AddArg(y)
- v2.AuxInt = 8
+ v2.AuxInt = 32
v1.AddArg(v2)
v.AddArg(v1)
return true
_ = b
// match: (Lsh8x8 <t> x y)
// cond:
- // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDB)
- v0 := b.NewValue0(v.Line, OpAMD64SHLB, t)
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
v2.AddArg(y)
- v2.AuxInt = 8
+ v2.AuxInt = 32
v1.AddArg(v2)
v.AddArg(v1)
return true
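A note on the Lsh16/Lsh8 hunks above, not part of the generated file: the mask now compares the shift count against 32 rather than 16 or 8 because only the low 16 (or 8) bits of the result are ever consumed. SHLL already produces zeros in those bits for any count from the operand width up to 31, so the SBBLcarrymask only needs to force the result to zero once the count reaches 32, where the hardware shift count wraps. An illustrative Go model of the lowered Lsh16 sequence (the function name is made up):

// lsh16Model mirrors the lowered sequence for Lsh16xN above:
// SHLL, then ANDL with SBBLcarrymask(CMP y, 32).
func lsh16Model(x uint16, y uint32) uint16 {
	shifted := uint32(x) << (y & 31) // SHLL: hardware uses the count mod 32
	var mask uint32                  // SBBLcarrymask: all ones while y < 32, else zero
	if y < 32 {
		mask = 0xffffffff
	}
	return uint16(shifted & mask) // ANDL; callers only read the low 16 bits
}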
v0.AddArg(mem)
return true
}
- // match: (MOVBQSX (ANDBconst [c] x))
+ // match: (MOVBQSX (ANDLconst [c] x))
// cond: c & 0x80 == 0
- // result: (ANDQconst [c & 0x7f] x)
+ // result: (ANDLconst [c & 0x7f] x)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDBconst {
+ if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
if !(c&0x80 == 0) {
break
}
- v.reset(OpAMD64ANDQconst)
+ v.reset(OpAMD64ANDLconst)
v.AuxInt = c & 0x7f
v.AddArg(x)
return true
v0.AddArg(mem)
return true
}
- // match: (MOVBQZX (ANDBconst [c] x))
+ // match: (MOVBQZX (ANDLconst [c] x))
// cond:
- // result: (ANDQconst [c & 0xff] x)
+ // result: (ANDLconst [c & 0xff] x)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDBconst {
+ if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- v.reset(OpAMD64ANDQconst)
+ v.reset(OpAMD64ANDLconst)
v.AuxInt = c & 0xff
v.AddArg(x)
return true
v.AddArg(mem)
return true
}
- // match: (MOVBstore [off] {sym} ptr (MOVBconst [c]) mem)
+ // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
// cond: validOff(off)
// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
for {
sym := v.Aux
ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
+ if v_1.Op != OpAMD64MOVLconst {
break
}
c := v_1.AuxInt
}
// match: (MOVLQSX (ANDLconst [c] x))
// cond: c & 0x80000000 == 0
- // result: (ANDQconst [c & 0x7fffffff] x)
+ // result: (ANDLconst [c & 0x7fffffff] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
if !(c&0x80000000 == 0) {
break
}
- v.reset(OpAMD64ANDQconst)
+ v.reset(OpAMD64ANDLconst)
v.AuxInt = c & 0x7fffffff
v.AddArg(x)
return true
return true
}
// match: (MOVLQZX (ANDLconst [c] x))
- // cond: c & 0x80000000 == 0
- // result: (ANDQconst [c & 0x7fffffff] x)
+ // cond:
+ // result: (ANDLconst [c] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
}
c := v_0.AuxInt
x := v_0.Args[0]
- if !(c&0x80000000 == 0) {
- break
- }
- v.reset(OpAMD64ANDQconst)
- v.AuxInt = c & 0x7fffffff
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = c
v.AddArg(x)
return true
}
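The dropped condition in the MOVLQZX rule above leans on an amd64 property: a 32-bit ALU op already zeroes bits 32..63 of its destination, so zero-extending the result of ANDLconst is a no-op for every c, not just the non-negative ones. A small Go model of that equivalence, illustrative only and not part of the rewrite file:

// movlqzxOfAndl models MOVLQZX (ANDLconst [c] x) -> (ANDLconst [c] x):
// the 32-bit AND already leaves the upper 32 bits zero, so the explicit
// zero-extension can be dropped with no condition on c.
func movlqzxOfAndl(x uint64, c uint32) bool {
	andl := uint64(uint32(x) & c)   // ANDLconst: 32-bit AND, upper half zeroed by hardware
	movlqzx := uint64(uint32(andl)) // MOVLQZX: zero-extend the low 32 bits
	return movlqzx == andl          // always true, which is what justifies the rewrite
}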
v0.AddArg(mem)
return true
}
- // match: (MOVWQSX (ANDWconst [c] x))
+ // match: (MOVWQSX (ANDLconst [c] x))
// cond: c & 0x8000 == 0
- // result: (ANDQconst [c & 0x7fff] x)
+ // result: (ANDLconst [c & 0x7fff] x)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDWconst {
+ if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
if !(c&0x8000 == 0) {
break
}
- v.reset(OpAMD64ANDQconst)
+ v.reset(OpAMD64ANDLconst)
v.AuxInt = c & 0x7fff
v.AddArg(x)
return true
v0.AddArg(mem)
return true
}
- // match: (MOVWQZX (ANDWconst [c] x))
+ // match: (MOVWQZX (ANDLconst [c] x))
// cond:
- // result: (ANDQconst [c & 0xffff] x)
+ // result: (ANDLconst [c & 0xffff] x)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDWconst {
+ if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- v.reset(OpAMD64ANDQconst)
+ v.reset(OpAMD64ANDLconst)
v.AuxInt = c & 0xffff
v.AddArg(x)
return true
v.AddArg(mem)
return true
}
- // match: (MOVWstore [off] {sym} ptr (MOVWconst [c]) mem)
+ // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
// cond: validOff(off)
// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
for {
sym := v.Aux
ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
+ if v_1.Op != OpAMD64MOVLconst {
break
}
c := v_1.AuxInt
}
return false
}
-func rewriteValueAMD64_OpAMD64MULB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MULB x (MOVBconst [c]))
- // cond:
- // result: (MULBconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64MULBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (MULB (MOVBconst [c]) x)
- // cond:
- // result: (MULBconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64MULBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64MULBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MULBconst [c] (MOVBconst [d]))
- // cond:
- // result: (MOVBconst [int64(int8(c*d))])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = int64(int8(c * d))
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool {
b := v.Block
_ = b
}
return false
}
-func rewriteValueAMD64_OpAMD64MULW(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MULW x (MOVWconst [c]))
+ // match: (Mod16 x y)
// cond:
- // result: (MULWconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64MULWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (MULW (MOVWconst [c]) x)
- // cond:
- // result: (MULWconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64MULWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64MULWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MULWconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [int64(int16(c*d))])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = int64(int16(c * d))
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16 x y)
- // cond:
- // result: (MODW x y)
+ // result: (MODW x y)
for {
x := v.Args[0]
y := v.Args[1]
_ = b
// match: (Mul16 x y)
// cond:
- // result: (MULW x y)
+ // result: (MULL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64MULW)
+ v.reset(OpAMD64MULL)
v.AddArg(x)
v.AddArg(y)
return true
_ = b
// match: (Mul8 x y)
// cond:
- // result: (MULB x y)
+ // result: (MULL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64MULB)
+ v.reset(OpAMD64MULL)
v.AddArg(x)
v.AddArg(y)
return true
}
return false
}
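Mul8 and Mul16 now lower to the 32-bit multiply, and the dedicated MULB/MULW rewrites above are gone. That is safe because truncation commutes with multiplication: the low 8 or 16 bits of a 32-bit product equal the 8- or 16-bit product. A short Go model, purely illustrative:

// mul8Model shows why lowering Mul8 to MULL preserves the result:
// only the low 8 bits of the product are observed, and those are the
// same whether the multiply is done in 8 or 32 bits.
func mul8Model(x, y int8) int8 {
	wide := int32(x) * int32(y) // MULL on the widened operands
	return int8(wide)           // truncation back to 8 bits
}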
-func rewriteValueAMD64_OpAMD64NEGB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NEGB (MOVBconst [c]))
- // cond:
- // result: (MOVBconst [int64(int8(-c))])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- c := v_0.AuxInt
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = int64(int8(-c))
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
b := v.Block
_ = b
}
return false
}
-func rewriteValueAMD64_OpAMD64NEGW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NEGW (MOVWconst [c]))
- // cond:
- // result: (MOVWconst [int64(int16(-c))])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- c := v_0.AuxInt
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = int64(int16(-c))
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64NOTB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NOTB (MOVBconst [c]))
- // cond:
- // result: (MOVBconst [^c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- c := v_0.AuxInt
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = ^c
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool {
b := v.Block
_ = b
}
return false
}
-func rewriteValueAMD64_OpAMD64NOTW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NOTW (MOVWconst [c]))
- // cond:
- // result: (MOVWconst [^c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- c := v_0.AuxInt
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = ^c
- return true
- }
- return false
-}
func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg16 x)
// cond:
- // result: (NEGW x)
+ // result: (NEGL x)
for {
x := v.Args[0]
- v.reset(OpAMD64NEGW)
+ v.reset(OpAMD64NEGL)
v.AddArg(x)
return true
}
_ = b
// match: (Neg8 x)
// cond:
- // result: (NEGB x)
+ // result: (NEGL x)
for {
x := v.Args[0]
- v.reset(OpAMD64NEGB)
+ v.reset(OpAMD64NEGL)
v.AddArg(x)
return true
}
_ = b
// match: (Not x)
// cond:
- // result: (XORBconst [1] x)
+ // result: (XORLconst [1] x)
for {
x := v.Args[0]
- v.reset(OpAMD64XORBconst)
+ v.reset(OpAMD64XORLconst)
v.AuxInt = 1
v.AddArg(x)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64ORB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (ORB x (MOVBconst [c]))
+ // match: (ORL x (MOVLconst [c]))
// cond:
- // result: (ORBconst [c] x)
+ // result: (ORLconst [c] x)
for {
x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
+ if v_1.Op != OpAMD64MOVLconst {
break
}
c := v_1.AuxInt
- v.reset(OpAMD64ORBconst)
+ v.reset(OpAMD64ORLconst)
v.AuxInt = c
v.AddArg(x)
return true
}
- // match: (ORB (MOVBconst [c]) x)
+ // match: (ORL (MOVLconst [c]) x)
// cond:
- // result: (ORBconst [c] x)
+ // result: (ORLconst [c] x)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- v.reset(OpAMD64ORBconst)
+ v.reset(OpAMD64ORLconst)
v.AuxInt = c
v.AddArg(x)
return true
}
- // match: (ORB x x)
+ // match: (ORL x x)
// cond:
// result: x
for {
v.AddArg(x)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORBconst [c] x)
- // cond: int8(c)==0
- // result: x
+ // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int8(c) == 0) {
+ x0 := v.Args[0]
+ if x0.Op != OpAMD64MOVBload {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (ORBconst [c] _)
- // cond: int8(c)==-1
- // result: (MOVBconst [-1])
- for {
- c := v.AuxInt
- if !(int8(c) == -1) {
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ mem := x0.Args[1]
+ s0 := v.Args[1]
+ if s0.Op != OpAMD64SHLLconst {
break
}
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = -1
- return true
- }
- // match: (ORBconst [c] (MOVBconst [d]))
- // cond:
- // result: (MOVBconst [c|d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
+ if s0.AuxInt != 8 {
break
}
- d := v_0.AuxInt
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = c | d
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORL x (MOVLconst [c]))
- // cond:
- // result: (ORLconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ x1 := s0.Args[0]
+ if x1.Op != OpAMD64MOVBload {
break
}
- c := v_1.AuxInt
- v.reset(OpAMD64ORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ORL (MOVLconst [c]) x)
- // cond:
- // result: (ORLconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ if x1.AuxInt != i+1 {
break
}
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64ORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ORL x x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- if x != v.Args[1] {
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if mem != x1.Args[1] {
+ break
+ }
+ if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
break
}
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.AddArg(v0)
+ v0.AuxInt = i
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(mem)
return true
}
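The two-byte load-combining rule that used to live under ORW now matches on ORL, since Or16 lowers to ORL. The shape it recognizes is the usual little-endian byte reassembly; a standalone Go sketch of the equivalence, illustrative and not part of this file:

// orlLoadMergeModel mirrors the rule above: OR-ing a byte load with the
// next byte shifted left by 8 is the same as one little-endian 16-bit load,
// so the pair can be replaced by MOVWload.
func orlLoadMergeModel(mem []byte, i int) uint16 {
	lo := uint32(mem[i])        // MOVBload [i]
	hi := uint32(mem[i+1]) << 8 // SHLLconst [8] (MOVBload [i+1])
	return uint16(lo | hi)      // ORL; equals MOVWload [i] on a little-endian machine
}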
// match: (ORL o0:(ORL o1:(ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
v0.AddArg(mem)
return true
}
- // match: (ORL o0:(ORL o1:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
- // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
- // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
+ // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
+ // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
for {
- o0 := v.Args[0]
- if o0.Op != OpAMD64ORL {
- break
- }
- o1 := o0.Args[0]
- if o1.Op != OpAMD64ORL {
- break
- }
- x0 := o1.Args[0]
+ x0 := v.Args[0]
if x0.Op != OpAMD64MOVBloadidx1 {
break
}
p := x0.Args[0]
idx := x0.Args[1]
mem := x0.Args[2]
- s0 := o1.Args[1]
+ s0 := v.Args[1]
if s0.Op != OpAMD64SHLLconst {
break
}
if mem != x1.Args[2] {
break
}
- s1 := o0.Args[1]
- if s1.Op != OpAMD64SHLLconst {
- break
- }
- if s1.AuxInt != 16 {
- break
- }
- x2 := s1.Args[0]
- if x2.Op != OpAMD64MOVBloadidx1 {
+ if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
break
}
- if x2.AuxInt != i+2 {
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = i
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(idx)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (ORL o0:(ORL o1:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
+ for {
+ o0 := v.Args[0]
+ if o0.Op != OpAMD64ORL {
break
}
- if x2.Aux != s {
+ o1 := o0.Args[0]
+ if o1.Op != OpAMD64ORL {
break
}
- if p != x2.Args[0] {
+ x0 := o1.Args[0]
+ if x0.Op != OpAMD64MOVBloadidx1 {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ idx := x0.Args[1]
+ mem := x0.Args[2]
+ s0 := o1.Args[1]
+ if s0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpAMD64MOVBloadidx1 {
+ break
+ }
+ if x1.AuxInt != i+1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if idx != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ s1 := o0.Args[1]
+ if s1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if s1.AuxInt != 16 {
+ break
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpAMD64MOVBloadidx1 {
+ break
+ }
+ if x2.AuxInt != i+2 {
+ break
+ }
+ if x2.Aux != s {
+ break
+ }
+ if p != x2.Args[0] {
break
}
if idx != x2.Args[1] {
}
return false
}
-func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORW x (MOVWconst [c]))
- // cond:
- // result: (ORWconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64ORWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ORW (MOVWconst [c]) x)
- // cond:
- // result: (ORWconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64ORWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ORW x x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (ORW x0:(MOVBload [i] {s} p mem) s0:(SHLWconst [8] x1:(MOVBload [i+1] {s} p mem)))
- // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
- // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
- for {
- x0 := v.Args[0]
- if x0.Op != OpAMD64MOVBload {
- break
- }
- i := x0.AuxInt
- s := x0.Aux
- p := x0.Args[0]
- mem := x0.Args[1]
- s0 := v.Args[1]
- if s0.Op != OpAMD64SHLWconst {
- break
- }
- if s0.AuxInt != 8 {
- break
- }
- x1 := s0.Args[0]
- if x1.Op != OpAMD64MOVBload {
- break
- }
- if x1.AuxInt != i+1 {
- break
- }
- if x1.Aux != s {
- break
- }
- if p != x1.Args[0] {
- break
- }
- if mem != x1.Args[1] {
- break
- }
- if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
- break
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = i
- v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(mem)
- return true
- }
- // match: (ORW x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
- // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
- // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
- for {
- x0 := v.Args[0]
- if x0.Op != OpAMD64MOVBloadidx1 {
- break
- }
- i := x0.AuxInt
- s := x0.Aux
- p := x0.Args[0]
- idx := x0.Args[1]
- mem := x0.Args[2]
- s0 := v.Args[1]
- if s0.Op != OpAMD64SHLWconst {
- break
- }
- if s0.AuxInt != 8 {
- break
- }
- x1 := s0.Args[0]
- if x1.Op != OpAMD64MOVBloadidx1 {
- break
- }
- if x1.AuxInt != i+1 {
- break
- }
- if x1.Aux != s {
- break
- }
- if p != x1.Args[0] {
- break
- }
- if idx != x1.Args[1] {
- break
- }
- if mem != x1.Args[2] {
- break
- }
- if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
- break
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = i
- v0.Aux = s
- v0.AddArg(p)
- v0.AddArg(idx)
- v0.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64ORWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORWconst [c] x)
- // cond: int16(c)==0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int16(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (ORWconst [c] _)
- // cond: int16(c)==-1
- // result: (MOVWconst [-1])
- for {
- c := v.AuxInt
- if !(int16(c) == -1) {
- break
- }
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = -1
- return true
- }
- // match: (ORWconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [c|d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = c | d
- return true
- }
- return false
-}
func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
_ = b
// match: (Or16 x y)
// cond:
- // result: (ORW x y)
+ // result: (ORL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ORW)
+ v.reset(OpAMD64ORL)
v.AddArg(x)
v.AddArg(y)
return true
_ = b
// match: (Or8 x y)
// cond:
- // result: (ORB x y)
+ // result: (ORL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ORB)
+ v.reset(OpAMD64ORL)
v.AddArg(x)
v.AddArg(y)
return true
_ = b
// match: (Rsh16Ux16 <t> x y)
// cond:
- // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDW)
+ v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
v0.AddArg(x)
v0.AddArg(y)
_ = b
// match: (Rsh16Ux32 <t> x y)
// cond:
- // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDW)
+ v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
v0.AddArg(x)
v0.AddArg(y)
_ = b
// match: (Rsh16Ux64 <t> x y)
// cond:
- // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDW)
+ v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
v0.AddArg(x)
v0.AddArg(y)
_ = b
// match: (Rsh16Ux8 <t> x y)
// cond:
- // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDW)
+ v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
v0.AddArg(x)
v0.AddArg(y)
_ = b
// match: (Rsh16x16 <t> x y)
// cond:
- // result: (SARW <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
for {
t := v.Type
x := v.Args[0]
v.reset(OpAMD64SARW)
v.Type = t
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
_ = b
// match: (Rsh16x8 <t> x y)
// cond:
- // result: (SARW <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
for {
t := v.Type
x := v.Args[0]
v.reset(OpAMD64SARW)
v.Type = t
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
_ = b
// match: (Rsh32x16 <t> x y)
// cond:
- // result: (SARL <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
for {
t := v.Type
x := v.Args[0]
v.reset(OpAMD64SARL)
v.Type = t
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
_ = b
// match: (Rsh32x8 <t> x y)
// cond:
- // result: (SARL <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
for {
t := v.Type
x := v.Args[0]
v.reset(OpAMD64SARL)
v.Type = t
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
_ = b
// match: (Rsh64x16 <t> x y)
// cond:
- // result: (SARQ <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
for {
t := v.Type
x := v.Args[0]
v.reset(OpAMD64SARQ)
v.Type = t
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
_ = b
// match: (Rsh64x8 <t> x y)
// cond:
- // result: (SARQ <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
for {
t := v.Type
x := v.Args[0]
v.reset(OpAMD64SARQ)
v.Type = t
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
_ = b
// match: (Rsh8Ux16 <t> x y)
// cond:
- // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDB)
+ v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
v0.AddArg(x)
v0.AddArg(y)
_ = b
// match: (Rsh8Ux32 <t> x y)
// cond:
- // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDB)
+ v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
v0.AddArg(x)
v0.AddArg(y)
_ = b
// match: (Rsh8Ux64 <t> x y)
// cond:
- // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDB)
+ v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
v0.AddArg(x)
v0.AddArg(y)
_ = b
// match: (Rsh8Ux8 <t> x y)
// cond:
- // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64ANDB)
+ v.reset(OpAMD64ANDL)
v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
v0.AddArg(x)
v0.AddArg(y)
_ = b
// match: (Rsh8x16 <t> x y)
// cond:
- // result: (SARB <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
for {
t := v.Type
x := v.Args[0]
v.reset(OpAMD64SARB)
v.Type = t
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
_ = b
// match: (Rsh8x8 <t> x y)
// cond:
- // result: (SARB <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
for {
t := v.Type
x := v.Args[0]
v.reset(OpAMD64SARB)
v.Type = t
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
v0.AddArg(y)
v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
v.AddArg(x)
return true
}
- // match: (SARB x (MOVWconst [c]))
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SARBconst [c] (MOVQconst [d]))
// cond:
- // result: (SARBconst [c&31] x)
+ // result: (MOVQconst [d>>uint64(c)])
for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SARBconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SARB x (MOVBconst [c]))
- // cond:
- // result: (SARBconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SARBconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SARB x (ANDBconst [31] y))
- // cond:
- // result: (SARB x y)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDBconst {
- break
- }
- if v_1.AuxInt != 31 {
- break
- }
- y := v_1.Args[0]
- v.reset(OpAMD64SARB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SARBconst [c] (MOVQconst [d]))
- // cond:
- // result: (MOVQconst [d>>uint64(c)])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
break
}
d := v_0.AuxInt
v.AddArg(x)
return true
}
- // match: (SARL x (MOVWconst [c]))
- // cond:
- // result: (SARLconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SARLconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SARL x (MOVBconst [c]))
- // cond:
- // result: (SARLconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SARLconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
// match: (SARL x (ANDLconst [31] y))
// cond:
// result: (SARL x y)
v.AddArg(x)
return true
}
- // match: (SARQ x (MOVWconst [c]))
- // cond:
- // result: (SARQconst [c&63] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SARQconst)
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
- // match: (SARQ x (MOVBconst [c]))
- // cond:
- // result: (SARQconst [c&63] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SARQconst)
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
// match: (SARQ x (ANDQconst [63] y))
// cond:
// result: (SARQ x y)
v.AddArg(x)
return true
}
- // match: (SARW x (MOVWconst [c]))
- // cond:
- // result: (SARWconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SARWconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SARW x (MOVBconst [c]))
- // cond:
- // result: (SARWconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SARWconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SARW x (ANDWconst [31] y))
- // cond:
- // result: (SARW x y)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDWconst {
- break
- }
- if v_1.AuxInt != 31 {
- break
- }
- y := v_1.Args[0]
- v.reset(OpAMD64SARW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool {
}
// match: (SETA (FlagEQ))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETA (FlagLT_ULT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETA (FlagLT_UGT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETA (FlagGT_ULT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETA (FlagGT_UGT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
}
// match: (SETAE (FlagEQ))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETAE (FlagLT_ULT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETAE (FlagLT_UGT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETAE (FlagGT_ULT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETAE (FlagGT_UGT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
}
// match: (SETB (FlagEQ))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETB (FlagLT_ULT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETB (FlagLT_UGT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETB (FlagGT_ULT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETB (FlagGT_UGT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
}
// match: (SETBE (FlagEQ))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETBE (FlagLT_ULT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETBE (FlagLT_UGT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETBE (FlagGT_ULT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETBE (FlagGT_UGT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
}
// match: (SETEQ (FlagEQ))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETEQ (FlagLT_ULT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETEQ (FlagLT_UGT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETEQ (FlagGT_ULT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETEQ (FlagGT_UGT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
}
// match: (SETG (FlagEQ))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETG (FlagLT_ULT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETG (FlagLT_UGT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETG (FlagGT_ULT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETG (FlagGT_UGT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
}
// match: (SETGE (FlagEQ))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETGE (FlagLT_ULT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETGE (FlagLT_UGT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETGE (FlagGT_ULT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETGE (FlagGT_UGT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
}
// match: (SETL (FlagEQ))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETL (FlagLT_ULT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETL (FlagLT_UGT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETL (FlagGT_ULT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETL (FlagGT_UGT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
}
// match: (SETLE (FlagEQ))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETLE (FlagLT_ULT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETLE (FlagLT_UGT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETLE (FlagGT_ULT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETLE (FlagGT_UGT))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
}
// match: (SETNE (FlagEQ))
// cond:
- // result: (MOVBconst [0])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
// match: (SETNE (FlagLT_ULT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETNE (FlagLT_UGT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETNE (FlagGT_ULT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
// match: (SETNE (FlagGT_UGT))
// cond:
- // result: (MOVBconst [1])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64MOVBconst)
+ v.reset(OpAMD64MOVLconst)
v.AuxInt = 1
return true
}
return false
}
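All of the SETcc-of-known-flag rules above now materialize their 0 or 1 with MOVLconst instead of MOVBconst, in line with the byte- and word-sized constant ops being retired elsewhere in this change. Nothing observable changes, because the folded value is only ever read through its low byte. A tiny Go model, illustrative only:

// setccConstModel shows that widening the constant op is harmless:
// the folded SETcc value is 0 or 1, and consumers read only the low byte.
func setccConstModel(flagSaysTrue bool) uint8 {
	var c uint32 // MOVLconst $0 or MOVLconst $1
	if flagSaysTrue {
		c = 1
	}
	return uint8(c) // same byte a MOVBconst would have produced
}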
-func rewriteValueAMD64_OpAMD64SHLB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (SHLB x (MOVQconst [c]))
+ // match: (SHLL x (MOVQconst [c]))
// cond:
- // result: (SHLBconst [c&31] x)
+ // result: (SHLLconst [c&31] x)
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
c := v_1.AuxInt
- v.reset(OpAMD64SHLBconst)
+ v.reset(OpAMD64SHLLconst)
v.AuxInt = c & 31
v.AddArg(x)
return true
}
- // match: (SHLB x (MOVLconst [c]))
+ // match: (SHLL x (MOVLconst [c]))
// cond:
- // result: (SHLBconst [c&31] x)
+ // result: (SHLLconst [c&31] x)
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
c := v_1.AuxInt
- v.reset(OpAMD64SHLBconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLB x (MOVWconst [c]))
- // cond:
- // result: (SHLBconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLBconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLB x (MOVBconst [c]))
- // cond:
- // result: (SHLBconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLBconst)
+ v.reset(OpAMD64SHLLconst)
v.AuxInt = c & 31
v.AddArg(x)
return true
}
- // match: (SHLB x (ANDBconst [31] y))
+ // match: (SHLL x (ANDLconst [31] y))
// cond:
- // result: (SHLB x y)
+ // result: (SHLL x y)
for {
x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDBconst {
+ if v_1.Op != OpAMD64ANDLconst {
break
}
if v_1.AuxInt != 31 {
break
}
y := v_1.Args[0]
- v.reset(OpAMD64SHLB)
+ v.reset(OpAMD64SHLL)
v.AddArg(x)
v.AddArg(y)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (SHLL x (MOVQconst [c]))
+ // match: (SHLQ x (MOVQconst [c]))
// cond:
- // result: (SHLLconst [c&31] x)
+ // result: (SHLQconst [c&63] x)
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
c := v_1.AuxInt
- v.reset(OpAMD64SHLLconst)
- v.AuxInt = c & 31
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = c & 63
v.AddArg(x)
return true
}
- // match: (SHLL x (MOVLconst [c]))
- // cond:
- // result: (SHLLconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLLconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLL x (MOVWconst [c]))
- // cond:
- // result: (SHLLconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLLconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLL x (MOVBconst [c]))
- // cond:
- // result: (SHLLconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLLconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLL x (ANDLconst [31] y))
- // cond:
- // result: (SHLL x y)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDLconst {
- break
- }
- if v_1.AuxInt != 31 {
- break
- }
- y := v_1.Args[0]
- v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SHLQ x (MOVQconst [c]))
- // cond:
- // result: (SHLQconst [c&63] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLQconst)
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
- // match: (SHLQ x (MOVLconst [c]))
+ // match: (SHLQ x (MOVLconst [c]))
// cond:
// result: (SHLQconst [c&63] x)
for {
v.AddArg(x)
return true
}
- // match: (SHLQ x (MOVWconst [c]))
- // cond:
- // result: (SHLQconst [c&63] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLQconst)
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
- // match: (SHLQ x (MOVBconst [c]))
- // cond:
- // result: (SHLQconst [c&63] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLQconst)
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
// match: (SHLQ x (ANDQconst [63] y))
// cond:
// result: (SHLQ x y)
}
return false
}
-func rewriteValueAMD64_OpAMD64SHLW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SHLW x (MOVQconst [c]))
- // cond:
- // result: (SHLWconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLWconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLW x (MOVLconst [c]))
- // cond:
- // result: (SHLWconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLWconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLW x (MOVWconst [c]))
- // cond:
- // result: (SHLWconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLWconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLW x (MOVBconst [c]))
- // cond:
- // result: (SHLWconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHLWconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLW x (ANDWconst [31] y))
- // cond:
- // result: (SHLW x y)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDWconst {
- break
- }
- if v_1.AuxInt != 31 {
- break
- }
- y := v_1.Args[0]
- v.reset(OpAMD64SHLW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
b := v.Block
_ = b
v.AddArg(x)
return true
}
- // match: (SHRB x (MOVWconst [c]))
- // cond:
- // result: (SHRBconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRBconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHRB x (MOVBconst [c]))
- // cond:
- // result: (SHRBconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRBconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHRB x (ANDBconst [31] y))
- // cond:
- // result: (SHRB x y)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDBconst {
- break
- }
- if v_1.AuxInt != 31 {
- break
- }
- y := v_1.Args[0]
- v.reset(OpAMD64SHRB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
- // match: (SHRL x (MOVWconst [c]))
- // cond:
- // result: (SHRLconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRLconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHRL x (MOVBconst [c]))
- // cond:
- // result: (SHRLconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRLconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
// match: (SHRL x (ANDLconst [31] y))
// cond:
// result: (SHRL x y)
// result: (SHRQconst [c&63] x)
for {
x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRQconst)
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
- // match: (SHRQ x (MOVWconst [c]))
- // cond:
- // result: (SHRQconst [c&63] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRQconst)
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
- // match: (SHRQ x (MOVBconst [c]))
- // cond:
- // result: (SHRQconst [c&63] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRQconst)
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
- // match: (SHRQ x (ANDQconst [63] y))
- // cond:
- // result: (SHRQ x y)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDQconst {
- break
- }
- if v_1.AuxInt != 63 {
- break
- }
- y := v_1.Args[0]
- v.reset(OpAMD64SHRQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SHRW x (MOVQconst [c]))
- // cond:
- // result: (SHRWconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRWconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHRW x (MOVLconst [c]))
- // cond:
- // result: (SHRWconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRWconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHRW x (MOVWconst [c]))
- // cond:
- // result: (SHRWconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRWconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHRW x (MOVBconst [c]))
- // cond:
- // result: (SHRWconst [c&31] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SHRWconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHRW x (ANDWconst [31] y))
- // cond:
- // result: (SHRW x y)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDWconst {
- break
- }
- if v_1.AuxInt != 31 {
- break
- }
- y := v_1.Args[0]
- v.reset(OpAMD64SHRW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBB x (MOVBconst [c]))
- // cond:
- // result: (SUBBconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SUBBconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (SUBB (MOVBconst [c]) x)
- // cond:
- // result: (NEGB (SUBBconst <v.Type> x [c]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64NEGB)
- v0 := b.NewValue0(v.Line, OpAMD64SUBBconst, v.Type)
- v0.AddArg(x)
- v0.AuxInt = c
- v.AddArg(v0)
- return true
- }
- // match: (SUBB x x)
- // cond:
- // result: (MOVBconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBBconst [c] x)
- // cond: int8(c) == 0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int8(c) == 0) {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
+ c := v_1.AuxInt
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = c & 63
v.AddArg(x)
return true
}
- // match: (SUBBconst [c] x)
+ // match: (SHRQ x (ANDQconst [63] y))
// cond:
- // result: (ADDBconst [int64(int8(-c))] x)
+ // result: (SHRQ x y)
for {
- c := v.AuxInt
x := v.Args[0]
- v.reset(OpAMD64ADDBconst)
- v.AuxInt = int64(int8(-c))
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ if v_1.AuxInt != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SHRQ)
v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (SUBBconst (MOVBconst [d]) [c])
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SHRW x (MOVQconst [c]))
// cond:
- // result: (MOVBconst [int64(int8(d-c))])
+ // result: (SHRWconst [c&31] x)
for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
- c := v.AuxInt
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = int64(int8(d - c))
+ c := v_1.AuxInt
+ v.reset(OpAMD64SHRWconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
return true
}
- // match: (SUBBconst (SUBBconst x [d]) [c])
+ // match: (SHRW x (MOVLconst [c]))
// cond:
- // result: (ADDBconst [int64(int8(-c-d))] x)
+ // result: (SHRWconst [c&31] x)
for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SUBBconst {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- x := v_0.Args[0]
- d := v_0.AuxInt
- c := v.AuxInt
- v.reset(OpAMD64ADDBconst)
- v.AuxInt = int64(int8(-c - d))
+ c := v_1.AuxInt
+ v.reset(OpAMD64SHRWconst)
+ v.AuxInt = c & 31
v.AddArg(x)
return true
}
}
return false
}
-func rewriteValueAMD64_OpAMD64SUBW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBW x (MOVWconst [c]))
- // cond:
- // result: (SUBWconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SUBWconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (SUBW (MOVWconst [c]) x)
- // cond:
- // result: (NEGW (SUBWconst <v.Type> x [c]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64NEGW)
- v0 := b.NewValue0(v.Line, OpAMD64SUBWconst, v.Type)
- v0.AddArg(x)
- v0.AuxInt = c
- v.AddArg(v0)
- return true
- }
- // match: (SUBW x x)
- // cond:
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBWconst [c] x)
- // cond: int16(c) == 0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int16(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (SUBWconst [c] x)
- // cond:
- // result: (ADDWconst [int64(int16(-c))] x)
- for {
- c := v.AuxInt
- x := v.Args[0]
- v.reset(OpAMD64ADDWconst)
- v.AuxInt = int64(int16(-c))
- v.AddArg(x)
- return true
- }
- // match: (SUBWconst (MOVWconst [d]) [c])
- // cond:
- // result: (MOVWconst [int64(int16(d-c))])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- d := v_0.AuxInt
- c := v.AuxInt
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = int64(int16(d - c))
- return true
- }
- // match: (SUBWconst (SUBWconst x [d]) [c])
- // cond:
- // result: (ADDWconst [int64(int16(-c-d))] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SUBWconst {
- break
- }
- x := v_0.Args[0]
- d := v_0.AuxInt
- c := v.AuxInt
- v.reset(OpAMD64ADDWconst)
- v.AuxInt = int64(int16(-c - d))
- v.AddArg(x)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
_ = b
// match: (Sub16 x y)
// cond:
- // result: (SUBW x y)
+ // result: (SUBL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64SUBW)
+ v.reset(OpAMD64SUBL)
v.AddArg(x)
v.AddArg(y)
return true
_ = b
// match: (Sub8 x y)
// cond:
- // result: (SUBB x y)
+ // result: (SUBL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64SUBB)
+ v.reset(OpAMD64SUBL)
v.AddArg(x)
v.AddArg(y)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64XORB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORB x (MOVBconst [c]))
- // cond:
- // result: (XORBconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64XORBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORB (MOVBconst [c]) x)
- // cond:
- // result: (XORBconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64XORBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORB x x)
- // cond:
- // result: (MOVBconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORBconst [c] x)
- // cond: int8(c)==0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int8(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORBconst [c] (MOVBconst [d]))
- // cond:
- // result: (MOVBconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVBconst)
- v.AuxInt = c ^ d
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
b := v.Block
_ = b
}
return false
}
-func rewriteValueAMD64_OpAMD64XORW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORW x (MOVWconst [c]))
- // cond:
- // result: (XORWconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64XORWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORW (MOVWconst [c]) x)
- // cond:
- // result: (XORWconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64XORWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORW x x)
- // cond:
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORWconst [c] x)
- // cond: int16(c)==0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int16(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORWconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVWconst)
- v.AuxInt = c ^ d
- return true
- }
- return false
-}
func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Xor16 x y)
// cond:
- // result: (XORW x y)
+ // result: (XORL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64XORW)
+ v.reset(OpAMD64XORL)
v.AddArg(x)
v.AddArg(y)
return true
_ = b
// match: (Xor8 x y)
// cond:
- // result: (XORB x y)
+ // result: (XORL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpAMD64XORB)
+ v.reset(OpAMD64XORL)
v.AddArg(x)
v.AddArg(y)
return true