// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-(Add(Ptr|32|16|8) ...) -> (ADD ...)
-(Add(32|64)F ...) -> (ADD(F|D) ...)
-(Add32carry ...) -> (ADDS ...)
-(Add32withcarry ...) -> (ADC ...)
-
-(Sub(Ptr|32|16|8) ...) -> (SUB ...)
-(Sub(32|64)F ...) -> (SUB(F|D) ...)
-(Sub32carry ...) -> (SUBS ...)
-(Sub32withcarry ...) -> (SBC ...)
-
-(Mul(32|16|8) ...) -> (MUL ...)
-(Mul(32|64)F ...) -> (MUL(F|D) ...)
-(Hmul(32|32u) ...) -> (HMU(L|LU) ...)
-(Mul32uhilo ...) -> (MULLU ...)
-
-(Div32 x y) ->
+(Add(Ptr|32|16|8) ...) => (ADD ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+(Add32carry ...) => (ADDS ...)
+(Add32withcarry ...) => (ADC ...)
+
+(Sub(Ptr|32|16|8) ...) => (SUB ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+(Sub32carry ...) => (SUBS ...)
+(Sub32withcarry ...) => (SBC ...)
+
+(Mul(32|16|8) ...) => (MUL ...)
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+(Hmul(32|32u) ...) => (HMU(L|LU) ...)
+(Mul32uhilo ...) => (MULLU ...)
+
+(Div32 x y) =>
(SUB (XOR <typ.UInt32> // negate the result if one operand is negative
(Select0 <typ.UInt32> (CALLudiv
(SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) // negate x if negative
(SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) // negate y if negative
(Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y)))
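// Not a rule: a rough Go sketch (illustrative names only) of the arithmetic the
// Div32 lowering encodes. Signed division goes through the unsigned runtime
// divide (CALLudiv) on |x| and |y|, and the quotient is negated when the
// operand signs differ, which is what Signmask (XOR x y) detects.
//
//	func div32ViaUdiv(x, y int32) int32 {
//		mx := x >> 31               // Signmask x: -1 if x < 0, else 0
//		my := y >> 31               // Signmask y
//		ux := uint32((x ^ mx) - mx) // conditional negate: |x|
//		uy := uint32((y ^ my) - my) // conditional negate: |y|
//		q := int32(ux / uy)         // Select0 (CALLudiv ux uy)
//		m := (x ^ y) >> 31          // -1 iff exactly one operand is negative
//		return (q ^ m) - m          // conditionally negate the result
//	}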
-(Div32u x y) -> (Select0 <typ.UInt32> (CALLudiv x y))
-(Div16 x y) -> (Div32 (SignExt16to32 x) (SignExt16to32 y))
-(Div16u x y) -> (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
-(Div8 x y) -> (Div32 (SignExt8to32 x) (SignExt8to32 y))
-(Div8u x y) -> (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
-(Div(32|64)F ...) -> (DIV(F|D) ...)
-
-(Mod32 x y) ->
+(Div32u x y) => (Select0 <typ.UInt32> (CALLudiv x y))
+(Div16 x y) => (Div32 (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (Div32 (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod32 x y) =>
(SUB (XOR <typ.UInt32> // negate the result if x is negative
(Select1 <typ.UInt32> (CALLudiv
(SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) // negate x if negative
(SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) // negate y if negative
(Signmask x)) (Signmask x))
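// Not a rule: Mod32 has the same shape, but the remainder takes the sign of
// the dividend x (Go semantics), hence the final Signmask x. Sketch:
//
//	func mod32ViaUdiv(x, y int32) int32 {
//		mx, my := x>>31, y>>31
//		ux := uint32((x ^ mx) - mx) // |x|
//		uy := uint32((y ^ my) - my) // |y|
//		r := int32(ux % uy)         // Select1 (CALLudiv ux uy)
//		return (r ^ mx) - mx        // remainder carries the sign of x
//	}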
-(Mod32u x y) -> (Select1 <typ.UInt32> (CALLudiv x y))
-(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
-(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
-(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
-(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Mod32u x y) => (Select1 <typ.UInt32> (CALLudiv x y))
+(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
// (x + y) / 2 with x>=y -> (x - y) / 2 + y
-(Avg32u <t> x y) -> (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+(Avg32u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
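// Not a rule: the identity avoids computing x+y, which can overflow 32 bits
// even though the average always fits. E.g. x = 0xFFFFFFFF, y = 0xFFFFFFFD:
// (x-y)>>1 + y = 1 + 0xFFFFFFFD = 0xFFFFFFFE, the exact average. Sketch:
//
//	func avg32u(x, y uint32) uint32 { // callers guarantee x >= y
//		return (x-y)>>1 + y // SRLconst [1] (SUB x y), then ADD y
//	}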
-(And(32|16|8) ...) -> (AND ...)
-(Or(32|16|8) ...) -> (OR ...)
-(Xor(32|16|8) ...) -> (XOR ...)
+(And(32|16|8) ...) => (AND ...)
+(Or(32|16|8) ...) => (OR ...)
+(Xor(32|16|8) ...) => (XOR ...)
// unary ops
-(Neg(32|16|8) x) -> (RSBconst [0] x)
-(Neg(32|64)F ...) -> (NEG(F|D) ...)
+(Neg(32|16|8) x) => (RSBconst [0] x)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
-(Com(32|16|8) ...) -> (MVN ...)
+(Com(32|16|8) ...) => (MVN ...)
-(Sqrt ...) -> (SQRTD ...)
-(Abs ...) -> (ABSD ...)
+(Sqrt ...) => (SQRTD ...)
+(Abs ...) => (ABSD ...)
// TODO: optimize this for ARMv5 and ARMv6
-(Ctz32NonZero ...) -> (Ctz32 ...)
-(Ctz16NonZero ...) -> (Ctz32 ...)
-(Ctz8NonZero ...) -> (Ctz32 ...)
+(Ctz32NonZero ...) => (Ctz32 ...)
+(Ctz16NonZero ...) => (Ctz32 ...)
+(Ctz8NonZero ...) => (Ctz32 ...)
// count trailing zero for ARMv5 and ARMv6
// 32 - CLZ(x&-x - 1)
(Ctz8 <t> x) && objabi.GOARM==7 -> (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
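// Not a rule: the ARMv5/v6 formula works because x&-x isolates the lowest set
// bit, subtracting 1 turns it into a mask covering exactly the trailing zeros,
// and CLZ of that mask is 32 minus their count. Sketch (math/bits for the CLZ):
//
//	func ctz32ViaClz(x uint32) int {
//		return 32 - bits.LeadingZeros32((x & -x) - 1)
//	}
//
// e.g. x = 8: x&-x = 8, minus 1 = 7, LeadingZeros32(7) = 29, 32-29 = 3.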
// bit length
-(BitLen32 <t> x) -> (RSBconst [32] (CLZ <t> x))
+(BitLen32 <t> x) => (RSBconst [32] (CLZ <t> x))
// byte swap for ARMv5
// let (a, b, c, d) be the bytes of x from high to low
(Bswap32 x) && objabi.GOARM>=6 -> (REV x)
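// Not a rule: only the ARMv6+ case (REV) is shown in this hunk; the ARMv5
// expansion (not shown) computes the same byte reversal with shifts and masks.
// The value being computed, in the terms of the comment above:
//
//	func bswap32(x uint32) uint32 {
//		a, b, c, d := x>>24, x>>16&0xff, x>>8&0xff, x&0xff // bytes, high to low
//		return d<<24 | c<<16 | b<<8 | a
//	}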
// boolean ops -- booleans are represented with 0=false, 1=true
-(AndB ...) -> (AND ...)
-(OrB ...) -> (OR ...)
-(EqB x y) -> (XORconst [1] (XOR <typ.Bool> x y))
-(NeqB ...) -> (XOR ...)
-(Not x) -> (XORconst [1] x)
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XORconst [1] (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
// shifts
// hardware instruction uses only the low byte of the shift
// we compare to 256 to ensure Go semantics for large shifts
-(Lsh32x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
-(Lsh32x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-(Lsh32x8 x y) -> (SLL x (ZeroExt8to32 y))
+(Lsh32x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh32x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh32x8 x y) => (SLL x (ZeroExt8to32 y))
-(Lsh16x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
-(Lsh16x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-(Lsh16x8 x y) -> (SLL x (ZeroExt8to32 y))
+(Lsh16x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh16x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh16x8 x y) => (SLL x (ZeroExt8to32 y))
-(Lsh8x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
-(Lsh8x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-(Lsh8x8 x y) -> (SLL x (ZeroExt8to32 y))
+(Lsh8x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh8x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh8x8 x y) => (SLL x (ZeroExt8to32 y))
-(Rsh32Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
-(Rsh32Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-(Rsh32Ux8 x y) -> (SRL x (ZeroExt8to32 y))
+(Rsh32Ux32 x y) => (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+(Rsh32Ux16 x y) => (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh32Ux8 x y) => (SRL x (ZeroExt8to32 y))
-(Rsh16Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
-(Rsh16Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-(Rsh16Ux8 x y) -> (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+(Rsh16Ux32 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+(Rsh16Ux16 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh16Ux8 x y) => (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
-(Rsh8Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
-(Rsh8Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-(Rsh8Ux8 x y) -> (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Rsh8Ux32 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+(Rsh8Ux16 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh8Ux8 x y) => (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
-(Rsh32x32 x y) -> (SRAcond x y (CMPconst [256] y))
-(Rsh32x16 x y) -> (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
-(Rsh32x8 x y) -> (SRA x (ZeroExt8to32 y))
+(Rsh32x32 x y) => (SRAcond x y (CMPconst [256] y))
+(Rsh32x16 x y) => (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh32x8 x y) => (SRA x (ZeroExt8to32 y))
-(Rsh16x32 x y) -> (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
-(Rsh16x16 x y) -> (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
-(Rsh16x8 x y) -> (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+(Rsh16x32 x y) => (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+(Rsh16x16 x y) => (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh16x8 x y) => (SRA (SignExt16to32 x) (ZeroExt8to32 y))
-(Rsh8x32 x y) -> (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
-(Rsh8x16 x y) -> (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
-(Rsh8x8 x y) -> (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+(Rsh8x32 x y) => (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+(Rsh8x16 x y) => (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh8x8 x y) => (SRA (SignExt8to32 x) (ZeroExt8to32 y))
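// Not a rule: a sketch of the semantics the variable-shift rules above
// preserve. Go defines a shift by >= the operand width to yield 0 (or all
// copies of the sign bit for signed right shifts), while ARM register shifts
// consume only the low byte of the count. Counts of 32..255 already behave
// correctly in hardware, so only counts >= 256 need the CMPconst [256] /
// CMOVWHSconst (or SRAcond) fix-up:
//
//	func lsh32x32(x, y uint32) uint32 {
//		if y >= 256 { // CMPconst [256]; higher-or-same picks the constant 0
//			return 0
//		}
//		return x << (y & 255) // SLL sees only the low byte; 32..255 give 0 anyway
//	}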
// constant shifts
// generic opt rewrites all constant shifts to shift by Const64
(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24])
// large constant shifts
-(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
-(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
-(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
-(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
-(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0])
-(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0])
+(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
// large constant signed right shift, we leave the sign bit
-(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 => (SRAconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
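// Not a rule: when the constant is at least the operand width, every result
// bit is a copy of the sign bit. For sub-word operands the sign bit is first
// moved to bit 31 with SLLconst, then smeared across the word with SRAconst
// [31]. Sketch for the 16-bit case:
//
//	func rsh16xLargeConst(x int16) int16 {
//		return int16((int32(x) << 16) >> 31) // SLLconst [16], then SRAconst [31]
//	}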
// constants
(Const(8|16|32) ...) -> (MOVWconst ...)
(Const(32F|64F) ...) -> (MOV(F|D)const ...)
-(ConstNil) -> (MOVWconst [0])
+(ConstNil) => (MOVWconst [0])
(ConstBool ...) -> (MOVWconst ...)
// truncations
// Because we ignore high parts of registers, truncates are just copies.
-(Trunc16to8 ...) -> (Copy ...)
-(Trunc32to8 ...) -> (Copy ...)
-(Trunc32to16 ...) -> (Copy ...)
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
// Zero-/Sign-extensions
-(ZeroExt8to16 ...) -> (MOVBUreg ...)
-(ZeroExt8to32 ...) -> (MOVBUreg ...)
-(ZeroExt16to32 ...) -> (MOVHUreg ...)
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
-(SignExt8to16 ...) -> (MOVBreg ...)
-(SignExt8to32 ...) -> (MOVBreg ...)
-(SignExt16to32 ...) -> (MOVHreg ...)
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
-(Signmask x) -> (SRAconst x [31])
-(Zeromask x) -> (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
-(Slicemask <t> x) -> (SRAconst (RSBconst <t> [0] x) [31])
+(Signmask x) => (SRAconst x [31])
+(Zeromask x) => (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
+(Slicemask <t> x) => (SRAconst (RSBconst <t> [0] x) [31])
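// Not a rule: sketches of the three mask helpers above (illustrative names).
// Signmask is -1 for negative x, Zeromask is -1 for any non-zero x (the
// subtraction uint32(x)>>1 - x borrows whenever x != 0), and Slicemask is -1
// for positive lengths:
//
//	func signmask(x int32) int32  { return x >> 31 }
//	func zeromask(x uint32) int32 { return int32(x>>1-x) >> 31 }
//	func slicemask(x int32) int32 { return -x >> 31 }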
// float <-> int conversion
-(Cvt32to32F ...) -> (MOVWF ...)
-(Cvt32to64F ...) -> (MOVWD ...)
-(Cvt32Uto32F ...) -> (MOVWUF ...)
-(Cvt32Uto64F ...) -> (MOVWUD ...)
-(Cvt32Fto32 ...) -> (MOVFW ...)
-(Cvt64Fto32 ...) -> (MOVDW ...)
-(Cvt32Fto32U ...) -> (MOVFWU ...)
-(Cvt64Fto32U ...) -> (MOVDWU ...)
-(Cvt32Fto64F ...) -> (MOVFD ...)
-(Cvt64Fto32F ...) -> (MOVDF ...)
+(Cvt32to32F ...) => (MOVWF ...)
+(Cvt32to64F ...) => (MOVWD ...)
+(Cvt32Uto32F ...) => (MOVWUF ...)
+(Cvt32Uto64F ...) => (MOVWUD ...)
+(Cvt32Fto32 ...) => (MOVFW ...)
+(Cvt64Fto32 ...) => (MOVDW ...)
+(Cvt32Fto32U ...) => (MOVFWU ...)
+(Cvt64Fto32U ...) => (MOVDWU ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
-(Round(32|64)F ...) -> (Copy ...)
+(Round(32|64)F ...) => (Copy ...)
-(CvtBoolToUint8 ...) -> (Copy ...)
+(CvtBoolToUint8 ...) => (Copy ...)
// fused-multiply-add
-(FMA x y z) -> (FMULAD z x y)
+(FMA x y z) => (FMULAD z x y)
// comparisons
-(Eq8 x y) -> (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Eq16 x y) -> (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Eq32 x y) -> (Equal (CMP x y))
-(EqPtr x y) -> (Equal (CMP x y))
-(Eq(32|64)F x y) -> (Equal (CMP(F|D) x y))
-
-(Neq8 x y) -> (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Neq16 x y) -> (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Neq32 x y) -> (NotEqual (CMP x y))
-(NeqPtr x y) -> (NotEqual (CMP x y))
-(Neq(32|64)F x y) -> (NotEqual (CMP(F|D) x y))
-
-(Less8 x y) -> (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
-(Less16 x y) -> (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
-(Less32 x y) -> (LessThan (CMP x y))
-(Less(32|64)F x y) -> (GreaterThan (CMP(F|D) y x)) // reverse operands to work around NaN
-
-(Less8U x y) -> (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Less16U x y) -> (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Less32U x y) -> (LessThanU (CMP x y))
-
-(Leq8 x y) -> (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
-(Leq16 x y) -> (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
-(Leq32 x y) -> (LessEqual (CMP x y))
-(Leq(32|64)F x y) -> (GreaterEqual (CMP(F|D) y x)) // reverse operands to work around NaN
-
-(Leq8U x y) -> (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Leq16U x y) -> (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Leq32U x y) -> (LessEqualU (CMP x y))
+(Eq8 x y) => (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMP x y))
+(EqPtr x y) => (Equal (CMP x y))
+(Eq(32|64)F x y) => (Equal (CMP(F|D) x y))
+
+(Neq8 x y) => (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMP x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+(Neq(32|64)F x y) => (NotEqual (CMP(F|D) x y))
+
+(Less8 x y) => (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) => (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) => (LessThan (CMP x y))
+(Less(32|64)F x y) => (GreaterThan (CMP(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThanU (CMP x y))
+
+(Leq8 x y) => (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMP x y))
+(Leq(32|64)F x y) => (GreaterEqual (CMP(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqualU (CMP x y))
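// Not a rule: the operand swap in the float compares exists because Go
// requires any ordered comparison involving a NaN to be false. An unordered
// VCMP on ARM leaves the GT/GE conditions false but LT/LE true, so the rules
// swap the operands and use GreaterThan/GreaterEqual instead. In Go terms:
//
//	func less32F(x, y float32) bool {
//		return y > x // false whenever x or y is NaN, as required
//	}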
(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDconst [off] ptr)
(Addr ...) -> (MOVWaddr ...)
-(LocalAddr {sym} base _) -> (MOVWaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVWaddr {sym} base)
// loads
-(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
-(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
-(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
-(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
-(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
-(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVWload ptr mem)
-(Load <t> ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem)
-(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
// stores
(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
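// The remaining hunks are from the generated rewriter (rewriteARM.go, rebuilt
// by rulegen): every rule converted to => now reads and writes AuxInt/Aux
// through typed conversion helpers rather than raw int64/interface{}
// assignments. The helpers in rewrite.go are thin wrappers, roughly:
//
//	func auxIntToInt64(i int64) int64 { return i }
//	func int8ToAuxInt(i int8) int64   { return int64(i) }
//	func int16ToAuxInt(i int16) int64 { return int64(i) }
//	func int32ToAuxInt(i int32) int64 { return int64(i) }
//	func auxToSym(i interface{}) Sym  { s, _ := i.(Sym); return s }
//	func symToAux(s Sym) interface{}  { return s }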
y := v_1
v.reset(OpARMADD)
v0 := b.NewValue0(v.Pos, OpARMSRLconst, t)
- v0.AuxInt = 1
+ v0.AuxInt = int32ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpARMSUB, t)
v1.AddArg2(x, y)
v0.AddArg(v1)
t := v.Type
x := v_0
v.reset(OpARMRSBconst)
- v.AuxInt = 32
+ v.AuxInt = int32ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
v0.AddArg(x)
v.AddArg(v0)
// result: (MOVWconst [0])
for {
v.reset(OpARMMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
}
x := v_0
y := v_1
v.reset(OpARMXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool)
v0.AddArg2(x, y)
v.AddArg(v0)
// match: (LocalAddr {sym} base _)
// result: (MOVWaddr {sym} base)
for {
- sym := v.Aux
+ sym := auxToSym(v.Aux)
base := v_0
v.reset(OpARMMOVWaddr)
- v.Aux = sym
+ v.Aux = symToAux(sym)
v.AddArg(base)
return true
}
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v2.AuxInt = 256
+ v2.AuxInt = int32ToAuxInt(256)
v2.AddArg(v1)
v.AddArg2(v0, v2)
return true
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v1.AuxInt = 256
+ v1.AuxInt = int32ToAuxInt(256)
v1.AddArg(y)
v.AddArg2(v0, v1)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 16) {
break
}
v.reset(OpConst16)
- v.AuxInt = 0
+ v.AuxInt = int16ToAuxInt(0)
return true
}
return false
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v2.AuxInt = 256
+ v2.AuxInt = int32ToAuxInt(256)
v2.AddArg(v1)
v.AddArg2(v0, v2)
return true
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v1.AuxInt = 256
+ v1.AuxInt = int32ToAuxInt(256)
v1.AddArg(y)
v.AddArg2(v0, v1)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 32) {
break
}
v.reset(OpConst32)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v2.AuxInt = 256
+ v2.AuxInt = int32ToAuxInt(256)
v2.AddArg(v1)
v.AddArg2(v0, v2)
return true
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v1.AuxInt = 256
+ v1.AuxInt = int32ToAuxInt(256)
v1.AddArg(y)
v.AddArg2(v0, v1)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 8) {
break
}
v.reset(OpConst8)
- v.AuxInt = 0
+ v.AuxInt = int8ToAuxInt(0)
return true
}
return false
for {
x := v_0
v.reset(OpARMRSBconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpARMRSBconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpARMRSBconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpARMXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v.AddArg(x)
return true
}
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v3.AuxInt = 256
+ v3.AuxInt = int32ToAuxInt(256)
v3.AddArg(v2)
v.AddArg2(v0, v3)
return true
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v2.AuxInt = 256
+ v2.AuxInt = int32ToAuxInt(256)
v2.AddArg(y)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 16) {
break
}
v.reset(OpConst16)
- v.AuxInt = 0
+ v.AuxInt = int16ToAuxInt(0)
return true
}
return false
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v2.AuxInt = 256
+ v2.AuxInt = int32ToAuxInt(256)
v2.AddArg(v1)
v.AddArg3(v0, v1, v2)
return true
v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v1.AuxInt = 256
+ v1.AuxInt = int32ToAuxInt(256)
v1.AddArg(y)
v.AddArg3(v0, y, v1)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 16) {
break
}
v.reset(OpARMSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
- v0.AuxInt = 16
+ v0.AuxInt = int32ToAuxInt(16)
v0.AddArg(x)
v.AddArg(v0)
return true
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v2.AuxInt = 256
+ v2.AuxInt = int32ToAuxInt(256)
v2.AddArg(v1)
v.AddArg2(v0, v2)
return true
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v1.AuxInt = 256
+ v1.AuxInt = int32ToAuxInt(256)
v1.AddArg(y)
v.AddArg2(v0, v1)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 32) {
break
}
v.reset(OpConst32)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v1.AuxInt = 256
+ v1.AuxInt = int32ToAuxInt(256)
v1.AddArg(v0)
v.AddArg3(x, v0, v1)
return true
y := v_1
v.reset(OpARMSRAcond)
v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v0.AuxInt = 256
+ v0.AuxInt = int32ToAuxInt(256)
v0.AddArg(y)
v.AddArg3(x, y, v0)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 32) {
break
}
v.reset(OpARMSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v.AddArg(x)
return true
}
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v3.AuxInt = 256
+ v3.AuxInt = int32ToAuxInt(256)
v3.AddArg(v2)
v.AddArg2(v0, v3)
return true
x := v_0
y := v_1
v.reset(OpARMCMOVWHSconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v2.AuxInt = 256
+ v2.AuxInt = int32ToAuxInt(256)
v2.AddArg(y)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 8) {
break
}
v.reset(OpConst8)
- v.AuxInt = 0
+ v.AuxInt = int8ToAuxInt(0)
return true
}
return false
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v2.AuxInt = 256
+ v2.AuxInt = int32ToAuxInt(256)
v2.AddArg(v1)
v.AddArg3(v0, v1, v2)
return true
v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
- v1.AuxInt = 256
+ v1.AuxInt = int32ToAuxInt(256)
v1.AddArg(y)
v.AddArg3(v0, y, v1)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 8) {
break
}
v.reset(OpARMSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
- v0.AuxInt = 24
+ v0.AuxInt = int32ToAuxInt(24)
v0.AddArg(x)
v.AddArg(v0)
return true
for {
x := v_0
v.reset(OpARMSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v.AddArg(x)
return true
}
t := v.Type
x := v_0
v.reset(OpARMSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v0 := b.NewValue0(v.Pos, OpARMRSBconst, t)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v0.AddArg(x)
v.AddArg(v0)
return true
for {
x := v_0
v.reset(OpARMSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32)
- v0.AuxInt = 1
+ v0.AuxInt = int32ToAuxInt(1)
v0.AddArg2(x, x)
v.AddArg(v0)
return true