// Unsigned divide, not a power of 2. Strength reduce to a multiply.
// For 8-bit divides, we just do a direct 9-bit by 8-bit multiply.
-(Div8u x (Const8 [c])) && umagicOK(8, c) ->
+(Div8u x (Const8 [c])) && umagicOK8(c) =>
(Trunc32to8
(Rsh32Ux64 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(1<<8+umagic(8,c).m)])
+ (Const32 <typ.UInt32> [int32(1<<8+umagic8(c).m)])
(ZeroExt8to32 x))
- (Const64 <typ.UInt64> [8+umagic(8,c).s])))
+ (Const64 <typ.UInt64> [8+umagic8(c).s])))
// For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply.
-(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 8 ->
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 8 =>
(Trunc64to16
(Rsh64Ux64 <typ.UInt64>
(Mul64 <typ.UInt64>
- (Const64 <typ.UInt64> [int64(1<<16+umagic(16,c).m)])
+ (Const64 <typ.UInt64> [int64(1<<16+umagic16(c).m)])
(ZeroExt16to64 x))
- (Const64 <typ.UInt64> [16+umagic(16,c).s])))
+ (Const64 <typ.UInt64> [16+umagic16(c).s])))
// For 16-bit divides on 32-bit machines
-(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && umagic(16,c).m&1 == 0 ->
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0 =>
(Trunc32to16
(Rsh32Ux64 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(1<<15+umagic(16,c).m/2)])
+ (Const32 <typ.UInt32> [int32(1<<15+umagic16(c).m/2)])
(ZeroExt16to32 x))
- (Const64 <typ.UInt64> [16+umagic(16,c).s-1])))
-(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && c&1 == 0 ->
+ (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && c&1 == 0 =>
(Trunc32to16
(Rsh32Ux64 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(1<<15+(umagic(16,c).m+1)/2)])
+ (Const32 <typ.UInt32> [int32(1<<15+(umagic16(c).m+1)/2)])
(Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1])))
- (Const64 <typ.UInt64> [16+umagic(16,c).s-2])))
-(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && config.useAvg ->
+ (Const64 <typ.UInt64> [16+umagic16(c).s-2])))
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && config.useAvg =>
(Trunc32to16
(Rsh32Ux64 <typ.UInt32>
(Avg32u
(Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16]))
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(umagic(16,c).m)])
+ (Const32 <typ.UInt32> [int32(umagic16(c).m)])
(ZeroExt16to32 x)))
- (Const64 <typ.UInt64> [16+umagic(16,c).s-1])))
+ (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
// For 32-bit divides on 32-bit machines
-(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && umagic(32,c).m&1 == 0 && config.useHmul ->
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul =>
(Rsh32Ux64 <typ.UInt32>
(Hmul32u <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(1<<31+umagic(32,c).m/2))])
+ (Const32 <typ.UInt32> [int32(1<<31+umagic32(c).m/2)])
x)
- (Const64 <typ.UInt64> [umagic(32,c).s-1]))
-(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && c&1 == 0 && config.useHmul ->
+ (Const64 <typ.UInt64> [umagic32(c).s-1]))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul =>
(Rsh32Ux64 <typ.UInt32>
(Hmul32u <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(1<<31+(umagic(32,c).m+1)/2))])
+ (Const32 <typ.UInt32> [int32(1<<31+(umagic32(c).m+1)/2)])
(Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1])))
- (Const64 <typ.UInt64> [umagic(32,c).s-2]))
-(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && config.useAvg && config.useHmul ->
+ (Const64 <typ.UInt64> [umagic32(c).s-2]))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul =>
(Rsh32Ux64 <typ.UInt32>
(Avg32u
x
(Hmul32u <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(umagic(32,c).m))])
+ (Const32 <typ.UInt32> [int32(umagic32(c).m)])
x))
- (Const64 <typ.UInt64> [umagic(32,c).s-1]))
+ (Const64 <typ.UInt64> [umagic32(c).s-1]))
// For 32-bit divides on 64-bit machines
// We'll use a regular (non-hi) multiply for this case.
-(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && umagic(32,c).m&1 == 0 ->
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0 =>
(Trunc64to32
(Rsh64Ux64 <typ.UInt64>
(Mul64 <typ.UInt64>
- (Const64 <typ.UInt64> [int64(1<<31+umagic(32,c).m/2)])
+ (Const64 <typ.UInt64> [int64(1<<31+umagic32(c).m/2)])
(ZeroExt32to64 x))
- (Const64 <typ.UInt64> [32+umagic(32,c).s-1])))
-(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && c&1 == 0 ->
+ (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && c&1 == 0 =>
(Trunc64to32
(Rsh64Ux64 <typ.UInt64>
(Mul64 <typ.UInt64>
- (Const64 <typ.UInt64> [int64(1<<31+(umagic(32,c).m+1)/2)])
+ (Const64 <typ.UInt64> [int64(1<<31+(umagic32(c).m+1)/2)])
(Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1])))
- (Const64 <typ.UInt64> [32+umagic(32,c).s-2])))
-(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && config.useAvg ->
+ (Const64 <typ.UInt64> [32+umagic32(c).s-2])))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && config.useAvg =>
(Trunc64to32
(Rsh64Ux64 <typ.UInt64>
(Avg64u
(Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32]))
(Mul64 <typ.UInt64>
- (Const64 <typ.UInt32> [int64(umagic(32,c).m)])
+ (Const64 <typ.UInt32> [int64(umagic32(c).m)])
(ZeroExt32to64 x)))
- (Const64 <typ.UInt64> [32+umagic(32,c).s-1])))
+ (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
// For 64-bit divides on 64-bit machines
// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.)
-(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && umagic(64,c).m&1 == 0 && config.useHmul ->
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul =>
(Rsh64Ux64 <typ.UInt64>
(Hmul64u <typ.UInt64>
- (Const64 <typ.UInt64> [int64(1<<63+umagic(64,c).m/2)])
+ (Const64 <typ.UInt64> [int64(1<<63+umagic64(c).m/2)])
x)
- (Const64 <typ.UInt64> [umagic(64,c).s-1]))
-(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && c&1 == 0 && config.useHmul ->
+ (Const64 <typ.UInt64> [umagic64(c).s-1]))
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul =>
(Rsh64Ux64 <typ.UInt64>
(Hmul64u <typ.UInt64>
- (Const64 <typ.UInt64> [int64(1<<63+(umagic(64,c).m+1)/2)])
+ (Const64 <typ.UInt64> [int64(1<<63+(umagic64(c).m+1)/2)])
(Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1])))
- (Const64 <typ.UInt64> [umagic(64,c).s-2]))
-(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && config.useAvg && config.useHmul ->
+ (Const64 <typ.UInt64> [umagic64(c).s-2]))
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul =>
(Rsh64Ux64 <typ.UInt64>
(Avg64u
x
(Hmul64u <typ.UInt64>
- (Const64 <typ.UInt64> [int64(umagic(64,c).m)])
+ (Const64 <typ.UInt64> [int64(umagic64(c).m)])
x))
- (Const64 <typ.UInt64> [umagic(64,c).s-1]))
+ (Const64 <typ.UInt64> [umagic64(c).s-1]))
// Signed divide by a negative constant. Rewrite to divide by a positive constant.
(Div8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Neg8 (Div8 <t> n (Const8 <t> [-c])))
(Const64 <typ.UInt64> [int64(log64(c))]))
// Signed divide, not a power of 2. Strength reduce to a multiply.
-(Div8 <t> x (Const8 [c])) && smagicOK(8,c) ->
+(Div8 <t> x (Const8 [c])) && smagicOK8(c) =>
(Sub8 <t>
(Rsh32x64 <t>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(smagic(8,c).m)])
+ (Const32 <typ.UInt32> [int32(smagic8(c).m)])
(SignExt8to32 x))
- (Const64 <typ.UInt64> [8+smagic(8,c).s]))
+ (Const64 <typ.UInt64> [8+smagic8(c).s]))
(Rsh32x64 <t>
(SignExt8to32 x)
(Const64 <typ.UInt64> [31])))
-(Div16 <t> x (Const16 [c])) && smagicOK(16,c) ->
+(Div16 <t> x (Const16 [c])) && smagicOK16(c) =>
(Sub16 <t>
(Rsh32x64 <t>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(smagic(16,c).m)])
+ (Const32 <typ.UInt32> [int32(smagic16(c).m)])
(SignExt16to32 x))
- (Const64 <typ.UInt64> [16+smagic(16,c).s]))
+ (Const64 <typ.UInt64> [16+smagic16(c).s]))
(Rsh32x64 <t>
(SignExt16to32 x)
(Const64 <typ.UInt64> [31])))
-(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 8 ->
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 8 =>
(Sub32 <t>
(Rsh64x64 <t>
(Mul64 <typ.UInt64>
- (Const64 <typ.UInt64> [int64(smagic(32,c).m)])
+ (Const64 <typ.UInt64> [int64(smagic32(c).m)])
(SignExt32to64 x))
- (Const64 <typ.UInt64> [32+smagic(32,c).s]))
+ (Const64 <typ.UInt64> [32+smagic32(c).s]))
(Rsh64x64 <t>
(SignExt32to64 x)
(Const64 <typ.UInt64> [63])))
-(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 == 0 && config.useHmul ->
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul =>
(Sub32 <t>
(Rsh32x64 <t>
(Hmul32 <t>
- (Const32 <typ.UInt32> [int64(int32(smagic(32,c).m/2))])
+ (Const32 <typ.UInt32> [int32(smagic32(c).m/2)])
x)
- (Const64 <typ.UInt64> [smagic(32,c).s-1]))
+ (Const64 <typ.UInt64> [smagic32(c).s-1]))
(Rsh32x64 <t>
x
(Const64 <typ.UInt64> [31])))
-(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 != 0 && config.useHmul ->
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul =>
(Sub32 <t>
(Rsh32x64 <t>
(Add32 <t>
(Hmul32 <t>
- (Const32 <typ.UInt32> [int64(int32(smagic(32,c).m))])
+ (Const32 <typ.UInt32> [int32(smagic32(c).m)])
x)
x)
- (Const64 <typ.UInt64> [smagic(32,c).s]))
+ (Const64 <typ.UInt64> [smagic32(c).s]))
(Rsh32x64 <t>
x
(Const64 <typ.UInt64> [31])))
-(Div64 <t> x (Const64 [c])) && smagicOK(64,c) && smagic(64,c).m&1 == 0 && config.useHmul ->
+(Div64 <t> x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul =>
(Sub64 <t>
(Rsh64x64 <t>
(Hmul64 <t>
- (Const64 <typ.UInt64> [int64(smagic(64,c).m/2)])
+ (Const64 <typ.UInt64> [int64(smagic64(c).m/2)])
x)
- (Const64 <typ.UInt64> [smagic(64,c).s-1]))
+ (Const64 <typ.UInt64> [smagic64(c).s-1]))
(Rsh64x64 <t>
x
(Const64 <typ.UInt64> [63])))
-(Div64 <t> x (Const64 [c])) && smagicOK(64,c) && smagic(64,c).m&1 != 0 && config.useHmul ->
+(Div64 <t> x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul =>
(Sub64 <t>
(Rsh64x64 <t>
(Add64 <t>
(Hmul64 <t>
- (Const64 <typ.UInt64> [int64(smagic(64,c).m)])
+ (Const64 <typ.UInt64> [int64(smagic64(c).m)])
x)
x)
- (Const64 <typ.UInt64> [smagic(64,c).s]))
+ (Const64 <typ.UInt64> [smagic64(c).s]))
(Rsh64x64 <t>
x
(Const64 <typ.UInt64> [63])))
=> (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
// For architectures without rotates on less than 32-bits, promote these checks to 32-bit.
-(Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && udivisibleOK(8,c) && !hasSmallRotate(config) ->
- (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [c&0xff])) (Const32 <typ.UInt32> [0]))
-(Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && udivisibleOK(16,c) && !hasSmallRotate(config) ->
- (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [c&0xffff])) (Const32 <typ.UInt32> [0]))
-(Eq8 (Mod8 x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && sdivisibleOK(8,c) && !hasSmallRotate(config) ->
- (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [c])) (Const32 <typ.Int32> [0]))
-(Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && sdivisibleOK(16,c) && !hasSmallRotate(config) ->
- (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [c])) (Const32 <typ.Int32> [0]))
+(Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint8(c))])) (Const32 <typ.UInt32> [0]))
+(Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint16(c))])) (Const32 <typ.UInt32> [0]))
+(Eq8 (Mod8 x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+(Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
// Divisibility checks x%c == 0 convert to multiply and rotate.
// Note, x%c == 0 is rewritten as x == c*(x/c) during the opt pass
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s
- && x.Op != OpConst8 && udivisibleOK(8,c)
- -> (Leq8U
+ && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s
+ && x.Op != OpConst8 && udivisibleOK8(c)
+ => (Leq8U
(RotateLeft8 <typ.UInt8>
(Mul8 <typ.UInt8>
- (Const8 <typ.UInt8> [int64(int8(udivisible(8,c).m))])
+ (Const8 <typ.UInt8> [int8(udivisible8(c).m)])
x)
- (Const8 <typ.UInt8> [int64(8-udivisible(8,c).k)])
+ (Const8 <typ.UInt8> [int8(8-udivisible8(c).k)])
)
- (Const8 <typ.UInt8> [int64(int8(udivisible(8,c).max))])
+ (Const8 <typ.UInt8> [int8(udivisible8(c).max)])
)
(Eq16 x (Mul16 (Const16 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s
- && x.Op != OpConst16 && udivisibleOK(16,c)
- -> (Leq16U
+ && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
(RotateLeft16 <typ.UInt16>
(Mul16 <typ.UInt16>
- (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).m))])
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
x)
- (Const16 <typ.UInt16> [int64(16-udivisible(16,c).k)])
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
)
- (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).max))])
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
)
(Eq16 x (Mul16 (Const16 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1
- && x.Op != OpConst16 && udivisibleOK(16,c)
- -> (Leq16U
+ && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
(RotateLeft16 <typ.UInt16>
(Mul16 <typ.UInt16>
- (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).m))])
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
x)
- (Const16 <typ.UInt16> [int64(16-udivisible(16,c).k)])
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
)
- (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).max))])
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
)
(Eq16 x (Mul16 (Const16 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2
- && x.Op != OpConst16 && udivisibleOK(16,c)
- -> (Leq16U
+ && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
(RotateLeft16 <typ.UInt16>
(Mul16 <typ.UInt16>
- (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).m))])
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
x)
- (Const16 <typ.UInt16> [int64(16-udivisible(16,c).k)])
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
)
- (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).max))])
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
)
(Eq16 x (Mul16 (Const16 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1
- && x.Op != OpConst16 && udivisibleOK(16,c)
- -> (Leq16U
+ && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
(RotateLeft16 <typ.UInt16>
(Mul16 <typ.UInt16>
- (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).m))])
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
x)
- (Const16 <typ.UInt16> [int64(16-udivisible(16,c).k)])
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
)
- (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).max))])
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
)
(Eq32 x (Mul32 (Const32 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(int32(1<<31+umagic(32,c).m/2)) && s == umagic(32,c).s-1
- && x.Op != OpConst32 && udivisibleOK(32,c)
- -> (Leq32U
+ && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
(RotateLeft32 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
x)
- (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)])
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
)
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
)
(Eq32 x (Mul32 (Const32 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2
- && x.Op != OpConst32 && udivisibleOK(32,c)
- -> (Leq32U
+ && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
(RotateLeft32 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
x)
- (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)])
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
)
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
)
(Eq32 x (Mul32 (Const32 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1
- && x.Op != OpConst32 && udivisibleOK(32,c)
- -> (Leq32U
+ && m == int32(umagic32(c).m) && s == umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
(RotateLeft32 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
x)
- (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)])
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
)
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
)
(Eq32 x (Mul32 (Const32 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1
- && x.Op != OpConst32 && udivisibleOK(32,c)
- -> (Leq32U
+ && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
(RotateLeft32 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
x)
- (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)])
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
)
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
)
(Eq32 x (Mul32 (Const32 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2
- && x.Op != OpConst32 && udivisibleOK(32,c)
- -> (Leq32U
+ && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
(RotateLeft32 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
x)
- (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)])
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
)
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
)
(Eq32 x (Mul32 (Const32 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1
- && x.Op != OpConst32 && udivisibleOK(32,c)
- -> (Leq32U
+ && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
(RotateLeft32 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
x)
- (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)])
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
)
- (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))])
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
)
(Eq64 x (Mul64 (Const64 [c])
(Const64 [s]))
)
) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1
- && x.Op != OpConst64 && udivisibleOK(64,c)
- -> (Leq64U
+ && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
(RotateLeft64 <typ.UInt64>
(Mul64 <typ.UInt64>
- (Const64 <typ.UInt64> [int64(udivisible(64,c).m)])
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
x)
- (Const64 <typ.UInt64> [int64(64-udivisible(64,c).k)])
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
)
- (Const64 <typ.UInt64> [int64(udivisible(64,c).max)])
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
)
(Eq64 x (Mul64 (Const64 [c])
(Rsh64Ux64
(Const64 [s]))
)
) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2
- && x.Op != OpConst64 && udivisibleOK(64,c)
- -> (Leq64U
+ && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
(RotateLeft64 <typ.UInt64>
(Mul64 <typ.UInt64>
- (Const64 <typ.UInt64> [int64(udivisible(64,c).m)])
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
x)
- (Const64 <typ.UInt64> [int64(64-udivisible(64,c).k)])
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
)
- (Const64 <typ.UInt64> [int64(udivisible(64,c).max)])
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
)
(Eq64 x (Mul64 (Const64 [c])
(Rsh64Ux64
(Const64 [s]))
)
) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1
- && x.Op != OpConst64 && udivisibleOK(64,c)
- -> (Leq64U
+ && m == int64(umagic64(c).m) && s == umagic64(c).s-1
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
(RotateLeft64 <typ.UInt64>
(Mul64 <typ.UInt64>
- (Const64 <typ.UInt64> [int64(udivisible(64,c).m)])
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
x)
- (Const64 <typ.UInt64> [int64(64-udivisible(64,c).k)])
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
)
- (Const64 <typ.UInt64> [int64(udivisible(64,c).max)])
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
)
// Signed divisibility checks convert to multiply, add and rotate.
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s
- && x.Op != OpConst8 && sdivisibleOK(8,c)
- -> (Leq8U
+ && m == int32(smagic8(c).m) && s == 8+smagic8(c).s
+ && x.Op != OpConst8 && sdivisibleOK8(c)
+ => (Leq8U
(RotateLeft8 <typ.UInt8>
(Add8 <typ.UInt8>
(Mul8 <typ.UInt8>
- (Const8 <typ.UInt8> [int64(int8(sdivisible(8,c).m))])
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).m)])
x)
- (Const8 <typ.UInt8> [int64(int8(sdivisible(8,c).a))])
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).a)])
)
- (Const8 <typ.UInt8> [int64(8-sdivisible(8,c).k)])
+ (Const8 <typ.UInt8> [int8(8-sdivisible8(c).k)])
)
- (Const8 <typ.UInt8> [int64(int8(sdivisible(8,c).max))])
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).max)])
)
(Eq16 x (Mul16 (Const16 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s
- && x.Op != OpConst16 && sdivisibleOK(16,c)
- -> (Leq16U
+ && m == int32(smagic16(c).m) && s == 16+smagic16(c).s
+ && x.Op != OpConst16 && sdivisibleOK16(c)
+ => (Leq16U
(RotateLeft16 <typ.UInt16>
(Add16 <typ.UInt16>
(Mul16 <typ.UInt16>
- (Const16 <typ.UInt16> [int64(int16(sdivisible(16,c).m))])
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).m)])
x)
- (Const16 <typ.UInt16> [int64(int16(sdivisible(16,c).a))])
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).a)])
)
- (Const16 <typ.UInt16> [int64(16-sdivisible(16,c).k)])
+ (Const16 <typ.UInt16> [int16(16-sdivisible16(c).k)])
)
- (Const16 <typ.UInt16> [int64(int16(sdivisible(16,c).max))])
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).max)])
)
(Eq32 x (Mul32 (Const32 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s
- && x.Op != OpConst32 && sdivisibleOK(32,c)
- -> (Leq32U
+ && m == int64(smagic32(c).m) && s == 32+smagic32(c).s
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
(RotateLeft32 <typ.UInt32>
(Add32 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).m))])
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
x)
- (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).a))])
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
)
- (Const32 <typ.UInt32> [int64(32-sdivisible(32,c).k)])
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
)
- (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).max))])
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
)
(Eq32 x (Mul32 (Const32 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1
- && x.Op != OpConst32 && sdivisibleOK(32,c)
- -> (Leq32U
+ && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
(RotateLeft32 <typ.UInt32>
(Add32 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).m))])
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
x)
- (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).a))])
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
)
- (Const32 <typ.UInt32> [int64(32-sdivisible(32,c).k)])
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
)
- (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).max))])
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
)
(Eq32 x (Mul32 (Const32 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s
- && x.Op != OpConst32 && sdivisibleOK(32,c)
- -> (Leq32U
+ && m == int32(smagic32(c).m) && s == smagic32(c).s
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
(RotateLeft32 <typ.UInt32>
(Add32 <typ.UInt32>
(Mul32 <typ.UInt32>
- (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).m))])
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
x)
- (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).a))])
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
)
- (Const32 <typ.UInt32> [int64(32-sdivisible(32,c).k)])
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
)
- (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).max))])
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
)
(Eq64 x (Mul64 (Const64 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1
- && x.Op != OpConst64 && sdivisibleOK(64,c)
- -> (Leq64U
+ && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1
+ && x.Op != OpConst64 && sdivisibleOK64(c)
+ => (Leq64U
(RotateLeft64 <typ.UInt64>
(Add64 <typ.UInt64>
(Mul64 <typ.UInt64>
- (Const64 <typ.UInt64> [int64(sdivisible(64,c).m)])
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).m)])
x)
- (Const64 <typ.UInt64> [int64(sdivisible(64,c).a)])
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).a)])
)
- (Const64 <typ.UInt64> [int64(64-sdivisible(64,c).k)])
+ (Const64 <typ.UInt64> [64-sdivisible64(c).k])
)
- (Const64 <typ.UInt64> [int64(sdivisible(64,c).max)])
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).max)])
)
(Eq64 x (Mul64 (Const64 [c])
)
)
&& v.Block.Func.pass.name != "opt" && mul.Uses == 1
- && m == int64(smagic(64,c).m) && s == smagic(64,c).s
- && x.Op != OpConst64 && sdivisibleOK(64,c)
- -> (Leq64U
+ && m == int64(smagic64(c).m) && s == smagic64(c).s
+ && x.Op != OpConst64 && sdivisibleOK64(c)
+ => (Leq64U
(RotateLeft64 <typ.UInt64>
(Add64 <typ.UInt64>
(Mul64 <typ.UInt64>
- (Const64 <typ.UInt64> [int64(sdivisible(64,c).m)])
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).m)])
x)
- (Const64 <typ.UInt64> [int64(sdivisible(64,c).a)])
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).a)])
)
- (Const64 <typ.UInt64> [int64(64-sdivisible(64,c).k)])
+ (Const64 <typ.UInt64> [64-sdivisible64(c).k])
)
- (Const64 <typ.UInt64> [int64(sdivisible(64,c).max)])
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).max)])
)
// Divisibility checks for signed integers by power-of-two constants reduce to a simple mask.
return true
}
// match: (Div16 <t> x (Const16 [c]))
- // cond: smagicOK(16,c)
- // result: (Sub16 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(smagic(16,c).m)]) (SignExt16to32 x)) (Const64 <typ.UInt64> [16+smagic(16,c).s])) (Rsh32x64 <t> (SignExt16to32 x) (Const64 <typ.UInt64> [31])))
+ // cond: smagicOK16(c)
+ // result: (Sub16 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(smagic16(c).m)]) (SignExt16to32 x)) (Const64 <typ.UInt64> [16+smagic16(c).s])) (Rsh32x64 <t> (SignExt16to32 x) (Const64 <typ.UInt64> [31])))
for {
t := v.Type
x := v_0
if v_1.Op != OpConst16 {
break
}
- c := v_1.AuxInt
- if !(smagicOK(16, c)) {
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(smagicOK16(c)) {
break
}
v.reset(OpSub16)
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(smagic(16, c).m)
+ v2.AuxInt = int32ToAuxInt(int32(smagic16(c).m))
v3 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v3.AddArg(x)
v1.AddArg2(v2, v3)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = 16 + smagic(16, c).s
+ v4.AuxInt = int64ToAuxInt(16 + smagic16(c).s)
v0.AddArg2(v1, v4)
v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v6.AuxInt = 31
+ v6.AuxInt = int64ToAuxInt(31)
v5.AddArg2(v3, v6)
v.AddArg2(v0, v5)
return true
return true
}
// match: (Div16u x (Const16 [c]))
- // cond: umagicOK(16, c) && config.RegSize == 8
- // result: (Trunc64to16 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<16+umagic(16,c).m)]) (ZeroExt16to64 x)) (Const64 <typ.UInt64> [16+umagic(16,c).s])))
+ // cond: umagicOK16(c) && config.RegSize == 8
+ // result: (Trunc64to16 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<16+umagic16(c).m)]) (ZeroExt16to64 x)) (Const64 <typ.UInt64> [16+umagic16(c).s])))
for {
x := v_0
if v_1.Op != OpConst16 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(16, c) && config.RegSize == 8) {
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 8) {
break
}
v.reset(OpTrunc64to16)
v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = int64(1<<16 + umagic(16, c).m)
+ v2.AuxInt = int64ToAuxInt(int64(1<<16 + umagic16(c).m))
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(x)
v1.AddArg2(v2, v3)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = 16 + umagic(16, c).s
+ v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s)
v0.AddArg2(v1, v4)
v.AddArg(v0)
return true
}
// match: (Div16u x (Const16 [c]))
- // cond: umagicOK(16, c) && config.RegSize == 4 && umagic(16,c).m&1 == 0
- // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(1<<15+umagic(16,c).m/2)]) (ZeroExt16to32 x)) (Const64 <typ.UInt64> [16+umagic(16,c).s-1])))
+ // cond: umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<15+umagic16(c).m/2)]) (ZeroExt16to32 x)) (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
for {
x := v_0
if v_1.Op != OpConst16 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(16, c) && config.RegSize == 4 && umagic(16, c).m&1 == 0) {
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0) {
break
}
v.reset(OpTrunc32to16)
v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(1<<15 + umagic(16, c).m/2)
+ v2.AuxInt = int32ToAuxInt(int32(1<<15 + umagic16(c).m/2))
v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v3.AddArg(x)
v1.AddArg2(v2, v3)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = 16 + umagic(16, c).s - 1
+ v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1)
v0.AddArg2(v1, v4)
v.AddArg(v0)
return true
}
// match: (Div16u x (Const16 [c]))
- // cond: umagicOK(16, c) && config.RegSize == 4 && c&1 == 0
- // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(1<<15+(umagic(16,c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [16+umagic(16,c).s-2])))
+ // cond: umagicOK16(c) && config.RegSize == 4 && c&1 == 0
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<15+(umagic16(c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [16+umagic16(c).s-2])))
for {
x := v_0
if v_1.Op != OpConst16 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(16, c) && config.RegSize == 4 && c&1 == 0) {
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && c&1 == 0) {
break
}
v.reset(OpTrunc32to16)
v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(1<<15 + (umagic(16, c).m+1)/2)
+ v2.AuxInt = int32ToAuxInt(int32(1<<15 + (umagic16(c).m+1)/2))
v3 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(x)
v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v5.AuxInt = 1
+ v5.AuxInt = int64ToAuxInt(1)
v3.AddArg2(v4, v5)
v1.AddArg2(v2, v3)
v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v6.AuxInt = 16 + umagic(16, c).s - 2
+ v6.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 2)
v0.AddArg2(v1, v6)
v.AddArg(v0)
return true
}
// match: (Div16u x (Const16 [c]))
- // cond: umagicOK(16, c) && config.RegSize == 4 && config.useAvg
- // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Avg32u (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16])) (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(umagic(16,c).m)]) (ZeroExt16to32 x))) (Const64 <typ.UInt64> [16+umagic(16,c).s-1])))
+ // cond: umagicOK16(c) && config.RegSize == 4 && config.useAvg
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Avg32u (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16])) (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(umagic16(c).m)]) (ZeroExt16to32 x))) (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
for {
x := v_0
if v_1.Op != OpConst16 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(16, c) && config.RegSize == 4 && config.useAvg) {
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && config.useAvg) {
break
}
v.reset(OpTrunc32to16)
v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v3.AddArg(x)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = 16
+ v4.AuxInt = int64ToAuxInt(16)
v2.AddArg2(v3, v4)
v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v6.AuxInt = int64(umagic(16, c).m)
+ v6.AuxInt = int32ToAuxInt(int32(umagic16(c).m))
v5.AddArg2(v6, v3)
v1.AddArg2(v2, v5)
v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v7.AuxInt = 16 + umagic(16, c).s - 1
+ v7.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1)
v0.AddArg2(v1, v7)
v.AddArg(v0)
return true
return true
}
// match: (Div32 <t> x (Const32 [c]))
- // cond: smagicOK(32,c) && config.RegSize == 8
- // result: (Sub32 <t> (Rsh64x64 <t> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(smagic(32,c).m)]) (SignExt32to64 x)) (Const64 <typ.UInt64> [32+smagic(32,c).s])) (Rsh64x64 <t> (SignExt32to64 x) (Const64 <typ.UInt64> [63])))
+ // cond: smagicOK32(c) && config.RegSize == 8
+ // result: (Sub32 <t> (Rsh64x64 <t> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(smagic32(c).m)]) (SignExt32to64 x)) (Const64 <typ.UInt64> [32+smagic32(c).s])) (Rsh64x64 <t> (SignExt32to64 x) (Const64 <typ.UInt64> [63])))
for {
t := v.Type
x := v_0
if v_1.Op != OpConst32 {
break
}
- c := v_1.AuxInt
- if !(smagicOK(32, c) && config.RegSize == 8) {
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 8) {
break
}
v.reset(OpSub32)
v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = int64(smagic(32, c).m)
+ v2.AuxInt = int64ToAuxInt(int64(smagic32(c).m))
v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v3.AddArg(x)
v1.AddArg2(v2, v3)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = 32 + smagic(32, c).s
+ v4.AuxInt = int64ToAuxInt(32 + smagic32(c).s)
v0.AddArg2(v1, v4)
v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v6.AuxInt = 63
+ v6.AuxInt = int64ToAuxInt(63)
v5.AddArg2(v3, v6)
v.AddArg2(v0, v5)
return true
}
// match: (Div32 <t> x (Const32 [c]))
- // cond: smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 == 0 && config.useHmul
- // result: (Sub32 <t> (Rsh32x64 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int64(int32(smagic(32,c).m/2))]) x) (Const64 <typ.UInt64> [smagic(32,c).s-1])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
+ // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul
+ // result: (Sub32 <t> (Rsh32x64 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int32(smagic32(c).m/2)]) x) (Const64 <typ.UInt64> [smagic32(c).s-1])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
for {
t := v.Type
x := v_0
if v_1.Op != OpConst32 {
break
}
- c := v_1.AuxInt
- if !(smagicOK(32, c) && config.RegSize == 4 && smagic(32, c).m&1 == 0 && config.useHmul) {
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul) {
break
}
v.reset(OpSub32)
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
v1 := b.NewValue0(v.Pos, OpHmul32, t)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(int32(smagic(32, c).m / 2))
+ v2.AuxInt = int32ToAuxInt(int32(smagic32(c).m / 2))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = smagic(32, c).s - 1
+ v3.AuxInt = int64ToAuxInt(smagic32(c).s - 1)
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpRsh32x64, t)
v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v5.AuxInt = 31
+ v5.AuxInt = int64ToAuxInt(31)
v4.AddArg2(x, v5)
v.AddArg2(v0, v4)
return true
}
// match: (Div32 <t> x (Const32 [c]))
- // cond: smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 != 0 && config.useHmul
- // result: (Sub32 <t> (Rsh32x64 <t> (Add32 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int64(int32(smagic(32,c).m))]) x) x) (Const64 <typ.UInt64> [smagic(32,c).s])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
+ // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul
+ // result: (Sub32 <t> (Rsh32x64 <t> (Add32 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int32(smagic32(c).m)]) x) x) (Const64 <typ.UInt64> [smagic32(c).s])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
for {
t := v.Type
x := v_0
if v_1.Op != OpConst32 {
break
}
- c := v_1.AuxInt
- if !(smagicOK(32, c) && config.RegSize == 4 && smagic(32, c).m&1 != 0 && config.useHmul) {
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul) {
break
}
v.reset(OpSub32)
v1 := b.NewValue0(v.Pos, OpAdd32, t)
v2 := b.NewValue0(v.Pos, OpHmul32, t)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = int64(int32(smagic(32, c).m))
+ v3.AuxInt = int32ToAuxInt(int32(smagic32(c).m))
v2.AddArg2(v3, x)
v1.AddArg2(v2, x)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = smagic(32, c).s
+ v4.AuxInt = int64ToAuxInt(smagic32(c).s)
v0.AddArg2(v1, v4)
v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v6.AuxInt = 31
+ v6.AuxInt = int64ToAuxInt(31)
v5.AddArg2(x, v6)
v.AddArg2(v0, v5)
return true
return true
}
// match: (Div32u x (Const32 [c]))
- // cond: umagicOK(32, c) && config.RegSize == 4 && umagic(32,c).m&1 == 0 && config.useHmul
- // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(1<<31+umagic(32,c).m/2))]) x) (Const64 <typ.UInt64> [umagic(32,c).s-1]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<31+umagic32(c).m/2)]) x) (Const64 <typ.UInt64> [umagic32(c).s-1]))
for {
x := v_0
if v_1.Op != OpConst32 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(32, c) && config.RegSize == 4 && umagic(32, c).m&1 == 0 && config.useHmul) {
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul) {
break
}
v.reset(OpRsh32Ux64)
v.Type = typ.UInt32
v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v1.AuxInt = int64(int32(1<<31 + umagic(32, c).m/2))
+ v1.AuxInt = int32ToAuxInt(int32(1<<31 + umagic32(c).m/2))
v0.AddArg2(v1, x)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = umagic(32, c).s - 1
+ v2.AuxInt = int64ToAuxInt(umagic32(c).s - 1)
v.AddArg2(v0, v2)
return true
}
// match: (Div32u x (Const32 [c]))
- // cond: umagicOK(32, c) && config.RegSize == 4 && c&1 == 0 && config.useHmul
- // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(1<<31+(umagic(32,c).m+1)/2))]) (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic(32,c).s-2]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<31+(umagic32(c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic32(c).s-2]))
for {
x := v_0
if v_1.Op != OpConst32 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(32, c) && config.RegSize == 4 && c&1 == 0 && config.useHmul) {
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul) {
break
}
v.reset(OpRsh32Ux64)
v.Type = typ.UInt32
v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v1.AuxInt = int64(int32(1<<31 + (umagic(32, c).m+1)/2))
+ v1.AuxInt = int32ToAuxInt(int32(1<<31 + (umagic32(c).m+1)/2))
v2 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = 1
+ v3.AuxInt = int64ToAuxInt(1)
v2.AddArg2(x, v3)
v0.AddArg2(v1, v2)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = umagic(32, c).s - 2
+ v4.AuxInt = int64ToAuxInt(umagic32(c).s - 2)
v.AddArg2(v0, v4)
return true
}
// match: (Div32u x (Const32 [c]))
- // cond: umagicOK(32, c) && config.RegSize == 4 && config.useAvg && config.useHmul
- // result: (Rsh32Ux64 <typ.UInt32> (Avg32u x (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(umagic(32,c).m))]) x)) (Const64 <typ.UInt64> [umagic(32,c).s-1]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Avg32u x (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(umagic32(c).m)]) x)) (Const64 <typ.UInt64> [umagic32(c).s-1]))
for {
x := v_0
if v_1.Op != OpConst32 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(32, c) && config.RegSize == 4 && config.useAvg && config.useHmul) {
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul) {
break
}
v.reset(OpRsh32Ux64)
v0 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(int32(umagic(32, c).m))
+ v2.AuxInt = int32ToAuxInt(int32(umagic32(c).m))
v1.AddArg2(v2, x)
v0.AddArg2(x, v1)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = umagic(32, c).s - 1
+ v3.AuxInt = int64ToAuxInt(umagic32(c).s - 1)
v.AddArg2(v0, v3)
return true
}
// match: (Div32u x (Const32 [c]))
- // cond: umagicOK(32, c) && config.RegSize == 8 && umagic(32,c).m&1 == 0
- // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+umagic(32,c).m/2)]) (ZeroExt32to64 x)) (Const64 <typ.UInt64> [32+umagic(32,c).s-1])))
+ // cond: umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+umagic32(c).m/2)]) (ZeroExt32to64 x)) (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
for {
x := v_0
if v_1.Op != OpConst32 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(32, c) && config.RegSize == 8 && umagic(32, c).m&1 == 0) {
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0) {
break
}
v.reset(OpTrunc64to32)
v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = int64(1<<31 + umagic(32, c).m/2)
+ v2.AuxInt = int64ToAuxInt(int64(1<<31 + umagic32(c).m/2))
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(x)
v1.AddArg2(v2, v3)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = 32 + umagic(32, c).s - 1
+ v4.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1)
v0.AddArg2(v1, v4)
v.AddArg(v0)
return true
}
// match: (Div32u x (Const32 [c]))
- // cond: umagicOK(32, c) && config.RegSize == 8 && c&1 == 0
- // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+(umagic(32,c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [32+umagic(32,c).s-2])))
+ // cond: umagicOK32(c) && config.RegSize == 8 && c&1 == 0
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+(umagic32(c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [32+umagic32(c).s-2])))
for {
x := v_0
if v_1.Op != OpConst32 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(32, c) && config.RegSize == 8 && c&1 == 0) {
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && c&1 == 0) {
break
}
v.reset(OpTrunc64to32)
v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = int64(1<<31 + (umagic(32, c).m+1)/2)
+ v2.AuxInt = int64ToAuxInt(int64(1<<31 + (umagic32(c).m+1)/2))
v3 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(x)
v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v5.AuxInt = 1
+ v5.AuxInt = int64ToAuxInt(1)
v3.AddArg2(v4, v5)
v1.AddArg2(v2, v3)
v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v6.AuxInt = 32 + umagic(32, c).s - 2
+ v6.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 2)
v0.AddArg2(v1, v6)
v.AddArg(v0)
return true
}
// match: (Div32u x (Const32 [c]))
- // cond: umagicOK(32, c) && config.RegSize == 8 && config.useAvg
- // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Avg64u (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32])) (Mul64 <typ.UInt64> (Const64 <typ.UInt32> [int64(umagic(32,c).m)]) (ZeroExt32to64 x))) (Const64 <typ.UInt64> [32+umagic(32,c).s-1])))
+ // cond: umagicOK32(c) && config.RegSize == 8 && config.useAvg
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Avg64u (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32])) (Mul64 <typ.UInt64> (Const64 <typ.UInt32> [int64(umagic32(c).m)]) (ZeroExt32to64 x))) (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
for {
x := v_0
if v_1.Op != OpConst32 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(32, c) && config.RegSize == 8 && config.useAvg) {
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && config.useAvg) {
break
}
v.reset(OpTrunc64to32)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(x)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = 32
+ v4.AuxInt = int64ToAuxInt(32)
v2.AddArg2(v3, v4)
v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32)
- v6.AuxInt = int64(umagic(32, c).m)
+ v6.AuxInt = int64ToAuxInt(int64(umagic32(c).m))
v5.AddArg2(v6, v3)
v1.AddArg2(v2, v5)
v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v7.AuxInt = 32 + umagic(32, c).s - 1
+ v7.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1)
v0.AddArg2(v1, v7)
v.AddArg(v0)
return true
return true
}
// match: (Div64 <t> x (Const64 [c]))
- // cond: smagicOK(64,c) && smagic(64,c).m&1 == 0 && config.useHmul
- // result: (Sub64 <t> (Rsh64x64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic(64,c).m/2)]) x) (Const64 <typ.UInt64> [smagic(64,c).s-1])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
+ // cond: smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul
+ // result: (Sub64 <t> (Rsh64x64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic64(c).m/2)]) x) (Const64 <typ.UInt64> [smagic64(c).s-1])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
for {
t := v.Type
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
- if !(smagicOK(64, c) && smagic(64, c).m&1 == 0 && config.useHmul) {
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul) {
break
}
v.reset(OpSub64)
v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
v1 := b.NewValue0(v.Pos, OpHmul64, t)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = int64(smagic(64, c).m / 2)
+ v2.AuxInt = int64ToAuxInt(int64(smagic64(c).m / 2))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = smagic(64, c).s - 1
+ v3.AuxInt = int64ToAuxInt(smagic64(c).s - 1)
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpRsh64x64, t)
v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v5.AuxInt = 63
+ v5.AuxInt = int64ToAuxInt(63)
v4.AddArg2(x, v5)
v.AddArg2(v0, v4)
return true
}
// match: (Div64 <t> x (Const64 [c]))
- // cond: smagicOK(64,c) && smagic(64,c).m&1 != 0 && config.useHmul
- // result: (Sub64 <t> (Rsh64x64 <t> (Add64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic(64,c).m)]) x) x) (Const64 <typ.UInt64> [smagic(64,c).s])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
+ // cond: smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul
+ // result: (Sub64 <t> (Rsh64x64 <t> (Add64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic64(c).m)]) x) x) (Const64 <typ.UInt64> [smagic64(c).s])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
for {
t := v.Type
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
- if !(smagicOK(64, c) && smagic(64, c).m&1 != 0 && config.useHmul) {
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul) {
break
}
v.reset(OpSub64)
v1 := b.NewValue0(v.Pos, OpAdd64, t)
v2 := b.NewValue0(v.Pos, OpHmul64, t)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = int64(smagic(64, c).m)
+ v3.AuxInt = int64ToAuxInt(int64(smagic64(c).m))
v2.AddArg2(v3, x)
v1.AddArg2(v2, x)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = smagic(64, c).s
+ v4.AuxInt = int64ToAuxInt(smagic64(c).s)
v0.AddArg2(v1, v4)
v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v6.AuxInt = 63
+ v6.AuxInt = int64ToAuxInt(63)
v5.AddArg2(x, v6)
v.AddArg2(v0, v5)
return true
return true
}
// match: (Div64u x (Const64 [c]))
- // cond: umagicOK(64, c) && config.RegSize == 8 && umagic(64,c).m&1 == 0 && config.useHmul
- // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+umagic(64,c).m/2)]) x) (Const64 <typ.UInt64> [umagic(64,c).s-1]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+umagic64(c).m/2)]) x) (Const64 <typ.UInt64> [umagic64(c).s-1]))
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(64, c) && config.RegSize == 8 && umagic(64, c).m&1 == 0 && config.useHmul) {
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul) {
break
}
v.reset(OpRsh64Ux64)
v.Type = typ.UInt64
v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v1.AuxInt = int64(1<<63 + umagic(64, c).m/2)
+ v1.AuxInt = int64ToAuxInt(int64(1<<63 + umagic64(c).m/2))
v0.AddArg2(v1, x)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = umagic(64, c).s - 1
+ v2.AuxInt = int64ToAuxInt(umagic64(c).s - 1)
v.AddArg2(v0, v2)
return true
}
// match: (Div64u x (Const64 [c]))
- // cond: umagicOK(64, c) && config.RegSize == 8 && c&1 == 0 && config.useHmul
- // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+(umagic(64,c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic(64,c).s-2]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+(umagic64(c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic64(c).s-2]))
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(64, c) && config.RegSize == 8 && c&1 == 0 && config.useHmul) {
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul) {
break
}
v.reset(OpRsh64Ux64)
v.Type = typ.UInt64
v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v1.AuxInt = int64(1<<63 + (umagic(64, c).m+1)/2)
+ v1.AuxInt = int64ToAuxInt(int64(1<<63 + (umagic64(c).m+1)/2))
v2 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = 1
+ v3.AuxInt = int64ToAuxInt(1)
v2.AddArg2(x, v3)
v0.AddArg2(v1, v2)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = umagic(64, c).s - 2
+ v4.AuxInt = int64ToAuxInt(umagic64(c).s - 2)
v.AddArg2(v0, v4)
return true
}
// match: (Div64u x (Const64 [c]))
- // cond: umagicOK(64, c) && config.RegSize == 8 && config.useAvg && config.useHmul
- // result: (Rsh64Ux64 <typ.UInt64> (Avg64u x (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(umagic(64,c).m)]) x)) (Const64 <typ.UInt64> [umagic(64,c).s-1]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Avg64u x (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(umagic64(c).m)]) x)) (Const64 <typ.UInt64> [umagic64(c).s-1]))
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(64, c) && config.RegSize == 8 && config.useAvg && config.useHmul) {
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul) {
break
}
v.reset(OpRsh64Ux64)
v0 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = int64(umagic(64, c).m)
+ v2.AuxInt = int64ToAuxInt(int64(umagic64(c).m))
v1.AddArg2(v2, x)
v0.AddArg2(x, v1)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = umagic(64, c).s - 1
+ v3.AuxInt = int64ToAuxInt(umagic64(c).s - 1)
v.AddArg2(v0, v3)
return true
}
return true
}
// match: (Div8 <t> x (Const8 [c]))
- // cond: smagicOK(8,c)
- // result: (Sub8 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(smagic(8,c).m)]) (SignExt8to32 x)) (Const64 <typ.UInt64> [8+smagic(8,c).s])) (Rsh32x64 <t> (SignExt8to32 x) (Const64 <typ.UInt64> [31])))
+ // cond: smagicOK8(c)
+ // result: (Sub8 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(smagic8(c).m)]) (SignExt8to32 x)) (Const64 <typ.UInt64> [8+smagic8(c).s])) (Rsh32x64 <t> (SignExt8to32 x) (Const64 <typ.UInt64> [31])))
for {
t := v.Type
x := v_0
if v_1.Op != OpConst8 {
break
}
- c := v_1.AuxInt
- if !(smagicOK(8, c)) {
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(smagicOK8(c)) {
break
}
v.reset(OpSub8)
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(smagic(8, c).m)
+ v2.AuxInt = int32ToAuxInt(int32(smagic8(c).m))
v3 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v3.AddArg(x)
v1.AddArg2(v2, v3)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = 8 + smagic(8, c).s
+ v4.AuxInt = int64ToAuxInt(8 + smagic8(c).s)
v0.AddArg2(v1, v4)
v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v6.AuxInt = 31
+ v6.AuxInt = int64ToAuxInt(31)
v5.AddArg2(v3, v6)
v.AddArg2(v0, v5)
return true
return true
}
// match: (Div8u x (Const8 [c]))
- // cond: umagicOK(8, c)
- // result: (Trunc32to8 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(1<<8+umagic(8,c).m)]) (ZeroExt8to32 x)) (Const64 <typ.UInt64> [8+umagic(8,c).s])))
+ // cond: umagicOK8(c)
+ // result: (Trunc32to8 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<8+umagic8(c).m)]) (ZeroExt8to32 x)) (Const64 <typ.UInt64> [8+umagic8(c).s])))
for {
x := v_0
if v_1.Op != OpConst8 {
break
}
- c := v_1.AuxInt
- if !(umagicOK(8, c)) {
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(umagicOK8(c)) {
break
}
v.reset(OpTrunc32to8)
v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(1<<8 + umagic(8, c).m)
+ v2.AuxInt = int32ToAuxInt(int32(1<<8 + umagic8(c).m))
v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v3.AddArg(x)
v1.AddArg2(v2, v3)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = 8 + umagic(8, c).s
+ v4.AuxInt = int64ToAuxInt(8 + umagic8(c).s)
v0.AddArg2(v1, v4)
v.AddArg(v0)
return true
break
}
// match: (Eq16 (Mod16u x (Const16 [c])) (Const16 [0]))
- // cond: x.Op != OpConst16 && udivisibleOK(16,c) && !hasSmallRotate(config)
- // result: (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [c&0xffff])) (Const32 <typ.UInt32> [0]))
+ // cond: x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint16(c))])) (Const32 <typ.UInt32> [0]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpMod16u {
if v_0_1.Op != OpConst16 {
continue
}
- c := v_0_1.AuxInt
- if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(x.Op != OpConst16 && udivisibleOK(16, c) && !hasSmallRotate(config)) {
+ c := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config)) {
continue
}
v.reset(OpEq32)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = c & 0xffff
+ v2.AuxInt = int32ToAuxInt(int32(uint16(c)))
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, v3)
return true
}
break
}
// match: (Eq16 (Mod16 x (Const16 [c])) (Const16 [0]))
- // cond: x.Op != OpConst16 && sdivisibleOK(16,c) && !hasSmallRotate(config)
- // result: (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [c])) (Const32 <typ.Int32> [0]))
+ // cond: x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpMod16 {
if v_0_1.Op != OpConst16 {
continue
}
- c := v_0_1.AuxInt
- if v_1.Op != OpConst16 || v_1.AuxInt != 0 || !(x.Op != OpConst16 && sdivisibleOK(16, c) && !hasSmallRotate(config)) {
+ c := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config)) {
continue
}
v.reset(OpEq32)
v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
- v2.AuxInt = c
+ v2.AuxInt = int32ToAuxInt(int32(c))
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, v3)
return true
}
break
}
// match: (Eq16 x (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16,c).m) && s == 16+umagic(16,c).s && x.Op != OpConst16 && udivisibleOK(16,c)
- // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).m))]) x) (Const16 <typ.UInt16> [int64(16-udivisible(16,c).k)]) ) (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst16 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt16(v_1_0.AuxInt)
if v_1_1.Op != OpTrunc64to16 {
continue
}
if mul_0.Op != OpConst64 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt64(mul_0.AuxInt)
if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic(16, c).m) && s == 16+umagic(16, c).s && x.Op != OpConst16 && udivisibleOK(16, c)) {
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c)) {
continue
}
v.reset(OpLeq16U)
v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v2.AuxInt = int64(int16(udivisible(16, c).m))
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v3.AuxInt = int64(16 - udivisible(16, c).k)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v4.AuxInt = int64(int16(udivisible(16, c).max))
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16,c).m/2) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c)
- // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).m))]) x) (Const16 <typ.UInt16> [int64(16-udivisible(16,c).k)]) ) (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst16 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt16(v_1_0.AuxInt)
if v_1_1.Op != OpTrunc32to16 {
continue
}
if mul_0.Op != OpConst32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+umagic(16, c).m/2) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) {
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) {
continue
}
v.reset(OpLeq16U)
v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v2.AuxInt = int64(int16(udivisible(16, c).m))
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v3.AuxInt = int64(16 - udivisible(16, c).k)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v4.AuxInt = int64(int16(udivisible(16, c).max))
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16,c).m+1)/2) && s == 16+umagic(16,c).s-2 && x.Op != OpConst16 && udivisibleOK(16,c)
- // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).m))]) x) (Const16 <typ.UInt16> [int64(16-udivisible(16,c).k)]) ) (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst16 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt16(v_1_0.AuxInt)
if v_1_1.Op != OpTrunc32to16 {
continue
}
if mul_0.Op != OpConst32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if mul_1.Op != OpRsh32Ux64 {
continue
}
continue
}
mul_1_1 := mul_1.Args[1]
- if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 {
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<15+(umagic(16, c).m+1)/2) && s == 16+umagic(16, c).s-2 && x.Op != OpConst16 && udivisibleOK(16, c)) {
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c)) {
continue
}
v.reset(OpLeq16U)
v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v2.AuxInt = int64(int16(udivisible(16, c).m))
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v3.AuxInt = int64(16 - udivisible(16, c).k)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v4.AuxInt = int64(int16(udivisible(16, c).max))
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16,c).m) && s == 16+umagic(16,c).s-1 && x.Op != OpConst16 && udivisibleOK(16,c)
- // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).m))]) x) (Const16 <typ.UInt16> [int64(16-udivisible(16,c).k)]) ) (Const16 <typ.UInt16> [int64(int16(udivisible(16,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst16 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt16(v_1_0.AuxInt)
if v_1_1.Op != OpTrunc32to16 {
continue
}
continue
}
v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1]
- if v_1_1_0_0_0_1.Op != OpConst64 || v_1_1_0_0_0_1.AuxInt != 16 {
+ if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 16 {
continue
}
mul := v_1_1_0_0.Args[1]
if mul_0.Op != OpConst32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(16, c).m) && s == 16+umagic(16, c).s-1 && x.Op != OpConst16 && udivisibleOK(16, c)) {
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) {
continue
}
v.reset(OpLeq16U)
v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v2.AuxInt = int64(int16(udivisible(16, c).m))
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v3.AuxInt = int64(16 - udivisible(16, c).k)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v4.AuxInt = int64(int16(udivisible(16, c).max))
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq16 x (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16,c).m) && s == 16+smagic(16,c).s && x.Op != OpConst16 && sdivisibleOK(16,c)
- // result: (Leq16U (RotateLeft16 <typ.UInt16> (Add16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int64(int16(sdivisible(16,c).m))]) x) (Const16 <typ.UInt16> [int64(int16(sdivisible(16,c).a))]) ) (Const16 <typ.UInt16> [int64(16-sdivisible(16,c).k)]) ) (Const16 <typ.UInt16> [int64(int16(sdivisible(16,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Add16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(sdivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(sdivisible16(c).a)]) ) (Const16 <typ.UInt16> [int16(16-sdivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(sdivisible16(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst16 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt16(v_1_0.AuxInt)
if v_1_1.Op != OpSub16 {
continue
}
if mul_0.Op != OpConst32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpRsh32x64 {
continue
continue
}
v_1_1_1_1 := v_1_1_1.Args[1]
- if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(16, c).m) && s == 16+smagic(16, c).s && x.Op != OpConst16 && sdivisibleOK(16, c)) {
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c)) {
continue
}
v.reset(OpLeq16U)
v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16)
v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v3.AuxInt = int64(int16(sdivisible(16, c).m))
+ v3.AuxInt = int16ToAuxInt(int16(sdivisible16(c).m))
v2.AddArg2(v3, x)
v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v4.AuxInt = int64(int16(sdivisible(16, c).a))
+ v4.AuxInt = int16ToAuxInt(int16(sdivisible16(c).a))
v1.AddArg2(v2, v4)
v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v5.AuxInt = int64(16 - sdivisible(16, c).k)
+ v5.AuxInt = int16ToAuxInt(int16(16 - sdivisible16(c).k))
v0.AddArg2(v1, v5)
v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v6.AuxInt = int64(int16(sdivisible(16, c).max))
+ v6.AuxInt = int16ToAuxInt(int16(sdivisible16(c).max))
v.AddArg2(v0, v6)
return true
}
break
}
// match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s])) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32,c).m/2)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c)
- // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))]) x) (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)]) ) (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst32 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt32(v_1_0.AuxInt)
if v_1_1.Op != OpRsh32Ux64 {
continue
}
if mul_0.Op != OpConst32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if x != mul_1 {
continue
}
if v_1_1_1.Op != OpConst64 {
continue
}
- s := v_1_1_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+umagic(32, c).m/2)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) {
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
continue
}
v.reset(OpLeq32U)
v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(int32(udivisible(32, c).m))
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = int64(32 - udivisible(32, c).k)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v4.AuxInt = int64(int32(udivisible(32, c).max))
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 <typ.UInt32> [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s])) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32,c).m+1)/2)) && s == umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c)
- // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))]) x) (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)]) ) (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst32 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt32(v_1_0.AuxInt)
if v_1_1.Op != OpRsh32Ux64 {
continue
}
if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if mul_1.Op != OpRsh32Ux64 {
continue
}
continue
}
mul_1_1 := mul_1.Args[1]
- if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 {
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
continue
}
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpConst64 {
continue
}
- s := v_1_1_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(1<<31+(umagic(32, c).m+1)/2)) && s == umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) {
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) {
continue
}
v.reset(OpLeq32U)
v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(int32(udivisible(32, c).m))
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = int64(32 - udivisible(32, c).k)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v4.AuxInt = int64(int32(udivisible(32, c).max))
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s])) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32,c).m)) && s == umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c)
- // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))]) x) (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)]) ) (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst32 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt32(v_1_0.AuxInt)
if v_1_1.Op != OpRsh32Ux64 {
continue
}
if mul_0.Op != OpConst32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if x != mul_1 {
continue
}
if v_1_1_1.Op != OpConst64 {
continue
}
- s := v_1_1_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(umagic(32, c).m)) && s == umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) {
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
continue
}
v.reset(OpLeq32U)
v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(int32(udivisible(32, c).m))
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = int64(32 - udivisible(32, c).k)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v4.AuxInt = int64(int32(udivisible(32, c).max))
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32,c).m/2) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c)
- // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))]) x) (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)]) ) (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst32 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt32(v_1_0.AuxInt)
if v_1_1.Op != OpTrunc64to32 {
continue
}
if mul_0.Op != OpConst64 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt64(mul_0.AuxInt)
if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic(32, c).m/2) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) {
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
continue
}
v.reset(OpLeq32U)
v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(int32(udivisible(32, c).m))
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = int64(32 - udivisible(32, c).k)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v4.AuxInt = int64(int32(udivisible(32, c).max))
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32,c).m+1)/2) && s == 32+umagic(32,c).s-2 && x.Op != OpConst32 && udivisibleOK(32,c)
- // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))]) x) (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)]) ) (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst32 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt32(v_1_0.AuxInt)
if v_1_1.Op != OpTrunc64to32 {
continue
}
if mul_0.Op != OpConst64 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt64(mul_0.AuxInt)
if mul_1.Op != OpRsh64Ux64 {
continue
}
continue
}
mul_1_1 := mul_1.Args[1]
- if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 {
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic(32, c).m+1)/2) && s == 32+umagic(32, c).s-2 && x.Op != OpConst32 && udivisibleOK(32, c)) {
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) {
continue
}
v.reset(OpLeq32U)
v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(int32(udivisible(32, c).m))
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = int64(32 - udivisible(32, c).k)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v4.AuxInt = int64(int32(udivisible(32, c).max))
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32,c).m) && s == 32+umagic(32,c).s-1 && x.Op != OpConst32 && udivisibleOK(32,c)
- // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).m))]) x) (Const32 <typ.UInt32> [int64(32-udivisible(32,c).k)]) ) (Const32 <typ.UInt32> [int64(int32(udivisible(32,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst32 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt32(v_1_0.AuxInt)
if v_1_1.Op != OpTrunc64to32 {
continue
}
continue
}
v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1]
- if v_1_1_0_0_0_1.Op != OpConst64 || v_1_1_0_0_0_1.AuxInt != 32 {
+ if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 32 {
continue
}
mul := v_1_1_0_0.Args[1]
if mul_0.Op != OpConst64 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt64(mul_0.AuxInt)
if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(32, c).m) && s == 32+umagic(32, c).s-1 && x.Op != OpConst32 && udivisibleOK(32, c)) {
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
continue
}
v.reset(OpLeq32U)
v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = int64(int32(udivisible(32, c).m))
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = int64(32 - udivisible(32, c).k)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v4.AuxInt = int64(int32(udivisible(32, c).max))
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32,c).m) && s == 32+smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c)
- // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).m))]) x) (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).a))]) ) (Const32 <typ.UInt32> [int64(32-sdivisible(32,c).k)]) ) (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst32 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt32(v_1_0.AuxInt)
if v_1_1.Op != OpSub32 {
continue
}
if mul_0.Op != OpConst64 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt64(mul_0.AuxInt)
if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpRsh64x64 {
continue
continue
}
v_1_1_1_1 := v_1_1_1.Args[1]
- if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(32, c).m) && s == 32+smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) {
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) {
continue
}
v.reset(OpLeq32U)
v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = int64(int32(sdivisible(32, c).m))
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
v2.AddArg2(v3, x)
v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v4.AuxInt = int64(int32(sdivisible(32, c).a))
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
v1.AddArg2(v2, v4)
v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v5.AuxInt = int64(32 - sdivisible(32, c).k)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
v0.AddArg2(v1, v5)
v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v6.AuxInt = int64(int32(sdivisible(32, c).max))
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
v.AddArg2(v0, v6)
return true
}
break
}
// match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m/2)) && s == smagic(32,c).s-1 && x.Op != OpConst32 && sdivisibleOK(32,c)
- // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).m))]) x) (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).a))]) ) (Const32 <typ.UInt32> [int64(32-sdivisible(32,c).k)]) ) (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst32 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt32(v_1_0.AuxInt)
if v_1_1.Op != OpSub32 {
continue
}
if mul_0.Op != OpConst32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if x != mul_1 {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpRsh32x64 {
continue
continue
}
v_1_1_1_1 := v_1_1_1.Args[1]
- if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m/2)) && s == smagic(32, c).s-1 && x.Op != OpConst32 && sdivisibleOK(32, c)) {
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c)) {
continue
}
v.reset(OpLeq32U)
v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = int64(int32(sdivisible(32, c).m))
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
v2.AddArg2(v3, x)
v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v4.AuxInt = int64(int32(sdivisible(32, c).a))
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
v1.AddArg2(v2, v4)
v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v5.AuxInt = int64(32 - sdivisible(32, c).k)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
v0.AddArg2(v1, v5)
v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v6.AuxInt = int64(int32(sdivisible(32, c).max))
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
v.AddArg2(v0, v6)
return true
}
break
}
// match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32,c).m)) && s == smagic(32,c).s && x.Op != OpConst32 && sdivisibleOK(32,c)
- // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).m))]) x) (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).a))]) ) (Const32 <typ.UInt32> [int64(32-sdivisible(32,c).k)]) ) (Const32 <typ.UInt32> [int64(int32(sdivisible(32,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst32 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt32(v_1_0.AuxInt)
if v_1_1.Op != OpSub32 {
continue
}
if mul_0.Op != OpConst32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if x != mul_1 || x != v_1_1_0_0_1 {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpRsh32x64 {
continue
continue
}
v_1_1_1_1 := v_1_1_1.Args[1]
- if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(int32(smagic(32, c).m)) && s == smagic(32, c).s && x.Op != OpConst32 && sdivisibleOK(32, c)) {
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) {
continue
}
v.reset(OpLeq32U)
v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = int64(int32(sdivisible(32, c).m))
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
v2.AddArg2(v3, x)
v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v4.AuxInt = int64(int32(sdivisible(32, c).a))
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
v1.AddArg2(v2, v4)
v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v5.AuxInt = int64(32 - sdivisible(32, c).k)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
v0.AddArg2(v1, v5)
v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v6.AuxInt = int64(int32(sdivisible(32, c).max))
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
v.AddArg2(v0, v6)
return true
}
break
}
// match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) x) (Const64 [s])) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64,c).m/2) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c)
- // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible(64,c).m)]) x) (Const64 <typ.UInt64> [int64(64-udivisible(64,c).k)]) ) (Const64 <typ.UInt64> [int64(udivisible(64,c).max)]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst64 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt64(v_1_0.AuxInt)
if v_1_1.Op != OpRsh64Ux64 {
continue
}
if mul_0.Op != OpConst64 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt64(mul_0.AuxInt)
if x != mul_1 {
continue
}
if v_1_1_1.Op != OpConst64 {
continue
}
- s := v_1_1_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic(64, c).m/2) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) {
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) {
continue
}
v.reset(OpLeq64U)
v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = int64(udivisible(64, c).m)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = int64(64 - udivisible(64, c).k)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = int64(udivisible(64, c).max)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s])) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64,c).m+1)/2) && s == umagic(64,c).s-2 && x.Op != OpConst64 && udivisibleOK(64,c)
- // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible(64,c).m)]) x) (Const64 <typ.UInt64> [int64(64-udivisible(64,c).k)]) ) (Const64 <typ.UInt64> [int64(udivisible(64,c).max)]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst64 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt64(v_1_0.AuxInt)
if v_1_1.Op != OpRsh64Ux64 {
continue
}
if mul_0.Op != OpConst64 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt64(mul_0.AuxInt)
if mul_1.Op != OpRsh64Ux64 {
continue
}
continue
}
mul_1_1 := mul_1.Args[1]
- if mul_1_1.Op != OpConst64 || mul_1_1.AuxInt != 1 {
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
continue
}
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpConst64 {
continue
}
- s := v_1_1_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic(64, c).m+1)/2) && s == umagic(64, c).s-2 && x.Op != OpConst64 && udivisibleOK(64, c)) {
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c)) {
continue
}
v.reset(OpLeq64U)
v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = int64(udivisible(64, c).m)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = int64(64 - udivisible(64, c).k)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = int64(udivisible(64, c).max)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s])) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64,c).m) && s == umagic(64,c).s-1 && x.Op != OpConst64 && udivisibleOK(64,c)
- // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible(64,c).m)]) x) (Const64 <typ.UInt64> [int64(64-udivisible(64,c).k)]) ) (Const64 <typ.UInt64> [int64(udivisible(64,c).max)]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst64 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt64(v_1_0.AuxInt)
if v_1_1.Op != OpRsh64Ux64 {
continue
}
if mul_0.Op != OpConst64 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt64(mul_0.AuxInt)
if x != mul_1 {
continue
}
if v_1_1_1.Op != OpConst64 {
continue
}
- s := v_1_1_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic(64, c).m) && s == umagic(64, c).s-1 && x.Op != OpConst64 && udivisibleOK(64, c)) {
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) {
continue
}
v.reset(OpLeq64U)
v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v2.AuxInt = int64(udivisible(64, c).m)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = int64(64 - udivisible(64, c).k)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = int64(udivisible(64, c).max)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m/2) && s == smagic(64,c).s-1 && x.Op != OpConst64 && sdivisibleOK(64,c)
- // result: (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible(64,c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible(64,c).a)]) ) (Const64 <typ.UInt64> [int64(64-sdivisible(64,c).k)]) ) (Const64 <typ.UInt64> [int64(sdivisible(64,c).max)]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible64(c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible64(c).a)]) ) (Const64 <typ.UInt64> [64-sdivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(sdivisible64(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst64 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt64(v_1_0.AuxInt)
if v_1_1.Op != OpSub64 {
continue
}
if mul_0.Op != OpConst64 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt64(mul_0.AuxInt)
if x != mul_1 {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpRsh64x64 {
continue
continue
}
v_1_1_1_1 := v_1_1_1.Args[1]
- if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m/2) && s == smagic(64, c).s-1 && x.Op != OpConst64 && sdivisibleOK(64, c)) {
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c)) {
continue
}
v.reset(OpLeq64U)
v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = int64(sdivisible(64, c).m)
+ v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m))
v2.AddArg2(v3, x)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = int64(sdivisible(64, c).a)
+ v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a))
v1.AddArg2(v2, v4)
v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v5.AuxInt = int64(64 - sdivisible(64, c).k)
+ v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k)
v0.AddArg2(v1, v5)
v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v6.AuxInt = int64(sdivisible(64, c).max)
+ v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max))
v.AddArg2(v0, v6)
return true
}
break
}
// match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64,c).m) && s == smagic(64,c).s && x.Op != OpConst64 && sdivisibleOK(64,c)
- // result: (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible(64,c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible(64,c).a)]) ) (Const64 <typ.UInt64> [int64(64-sdivisible(64,c).k)]) ) (Const64 <typ.UInt64> [int64(sdivisible(64,c).max)]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible64(c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible64(c).a)]) ) (Const64 <typ.UInt64> [64-sdivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(sdivisible64(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst64 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt64(v_1_0.AuxInt)
if v_1_1.Op != OpSub64 {
continue
}
if mul_0.Op != OpConst64 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt64(mul_0.AuxInt)
if x != mul_1 || x != v_1_1_0_0_1 {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpRsh64x64 {
continue
continue
}
v_1_1_1_1 := v_1_1_1.Args[1]
- if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(64, c).m) && s == smagic(64, c).s && x.Op != OpConst64 && sdivisibleOK(64, c)) {
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c)) {
continue
}
v.reset(OpLeq64U)
v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v3.AuxInt = int64(sdivisible(64, c).m)
+ v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m))
v2.AddArg2(v3, x)
v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v4.AuxInt = int64(sdivisible(64, c).a)
+ v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a))
v1.AddArg2(v2, v4)
v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v5.AuxInt = int64(64 - sdivisible(64, c).k)
+ v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k)
v0.AddArg2(v1, v5)
v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v6.AuxInt = int64(sdivisible(64, c).max)
+ v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max))
v.AddArg2(v0, v6)
return true
}
break
}
// match: (Eq8 (Mod8u x (Const8 [c])) (Const8 [0]))
- // cond: x.Op != OpConst8 && udivisibleOK(8,c) && !hasSmallRotate(config)
- // result: (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [c&0xff])) (Const32 <typ.UInt32> [0]))
+ // cond: x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint8(c))])) (Const32 <typ.UInt32> [0]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpMod8u {
if v_0_1.Op != OpConst8 {
continue
}
- c := v_0_1.AuxInt
- if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(x.Op != OpConst8 && udivisibleOK(8, c) && !hasSmallRotate(config)) {
+ c := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config)) {
continue
}
v.reset(OpEq32)
v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v2.AuxInt = c & 0xff
+ v2.AuxInt = int32ToAuxInt(int32(uint8(c)))
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, v3)
return true
}
break
}
// match: (Eq8 (Mod8 x (Const8 [c])) (Const8 [0]))
- // cond: x.Op != OpConst8 && sdivisibleOK(8,c) && !hasSmallRotate(config)
- // result: (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [c])) (Const32 <typ.Int32> [0]))
+ // cond: x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpMod8 {
if v_0_1.Op != OpConst8 {
continue
}
- c := v_0_1.AuxInt
- if v_1.Op != OpConst8 || v_1.AuxInt != 0 || !(x.Op != OpConst8 && sdivisibleOK(8, c) && !hasSmallRotate(config)) {
+ c := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config)) {
continue
}
v.reset(OpEq32)
v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
- v2.AuxInt = c
+ v2.AuxInt = int32ToAuxInt(int32(c))
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, v3)
return true
}
break
}
// match: (Eq8 x (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8,c).m) && s == 8+umagic(8,c).s && x.Op != OpConst8 && udivisibleOK(8,c)
- // result: (Leq8U (RotateLeft8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int64(int8(udivisible(8,c).m))]) x) (Const8 <typ.UInt8> [int64(8-udivisible(8,c).k)]) ) (Const8 <typ.UInt8> [int64(int8(udivisible(8,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c)
+ // result: (Leq8U (RotateLeft8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int8(udivisible8(c).m)]) x) (Const8 <typ.UInt8> [int8(8-udivisible8(c).k)]) ) (Const8 <typ.UInt8> [int8(udivisible8(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst8 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt8(v_1_0.AuxInt)
if v_1_1.Op != OpTrunc32to8 {
continue
}
if mul_0.Op != OpConst32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
- if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<8+umagic(8, c).m) && s == 8+umagic(8, c).s && x.Op != OpConst8 && udivisibleOK(8, c)) {
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c)) {
continue
}
v.reset(OpLeq8U)
v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8)
v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8)
v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
- v2.AuxInt = int64(int8(udivisible(8, c).m))
+ v2.AuxInt = int8ToAuxInt(int8(udivisible8(c).m))
v1.AddArg2(v2, x)
v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
- v3.AuxInt = int64(8 - udivisible(8, c).k)
+ v3.AuxInt = int8ToAuxInt(int8(8 - udivisible8(c).k))
v0.AddArg2(v1, v3)
v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
- v4.AuxInt = int64(int8(udivisible(8, c).max))
+ v4.AuxInt = int8ToAuxInt(int8(udivisible8(c).max))
v.AddArg2(v0, v4)
return true
}
break
}
// match: (Eq8 x (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) ) )
- // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8,c).m) && s == 8+smagic(8,c).s && x.Op != OpConst8 && sdivisibleOK(8,c)
- // result: (Leq8U (RotateLeft8 <typ.UInt8> (Add8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int64(int8(sdivisible(8,c).m))]) x) (Const8 <typ.UInt8> [int64(int8(sdivisible(8,c).a))]) ) (Const8 <typ.UInt8> [int64(8-sdivisible(8,c).k)]) ) (Const8 <typ.UInt8> [int64(int8(sdivisible(8,c).max))]) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c)
+ // result: (Leq8U (RotateLeft8 <typ.UInt8> (Add8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int8(sdivisible8(c).m)]) x) (Const8 <typ.UInt8> [int8(sdivisible8(c).a)]) ) (Const8 <typ.UInt8> [int8(8-sdivisible8(c).k)]) ) (Const8 <typ.UInt8> [int8(sdivisible8(c).max)]) )
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1_0.Op != OpConst8 {
continue
}
- c := v_1_0.AuxInt
+ c := auxIntToInt8(v_1_0.AuxInt)
if v_1_1.Op != OpSub8 {
continue
}
if mul_0.Op != OpConst32 {
continue
}
- m := mul_0.AuxInt
+ m := auxIntToInt32(mul_0.AuxInt)
if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] {
continue
}
if v_1_1_0_1.Op != OpConst64 {
continue
}
- s := v_1_1_0_1.AuxInt
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpRsh32x64 {
continue
continue
}
v_1_1_1_1 := v_1_1_1.Args[1]
- if v_1_1_1_1.Op != OpConst64 || v_1_1_1_1.AuxInt != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic(8, c).m) && s == 8+smagic(8, c).s && x.Op != OpConst8 && sdivisibleOK(8, c)) {
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c)) {
continue
}
v.reset(OpLeq8U)
v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8)
v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8)
v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
- v3.AuxInt = int64(int8(sdivisible(8, c).m))
+ v3.AuxInt = int8ToAuxInt(int8(sdivisible8(c).m))
v2.AddArg2(v3, x)
v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
- v4.AuxInt = int64(int8(sdivisible(8, c).a))
+ v4.AuxInt = int8ToAuxInt(int8(sdivisible8(c).a))
v1.AddArg2(v2, v4)
v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
- v5.AuxInt = int64(8 - sdivisible(8, c).k)
+ v5.AuxInt = int8ToAuxInt(int8(8 - sdivisible8(c).k))
v0.AddArg2(v1, v5)
v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
- v6.AuxInt = int64(int8(sdivisible(8, c).max))
+ v6.AuxInt = int8ToAuxInt(int8(sdivisible8(c).max))
v.AddArg2(v0, v6)
return true
}