// Remove redundant ops
// Not in generic rules, because they may appear after lowering, e.g. Slicemask
-(NEG(Q|L) (NEG(Q|L) x)) -> x
-(NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 -> (SUB(Q|L) y x)
+(NEG(Q|L) (NEG(Q|L) x)) => x
+(NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x)
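Both rewrites are pure two's-complement identities, so they hold even when the inner operation overflows; the s.Uses == 1 condition on the second rule ensures the SUB has no other consumers, so swapping its operands actually removes a NEG rather than duplicating work. A minimal sketch of the identities (illustrative, not part of the CL):

package main

import (
	"fmt"
	"math"
)

func main() {
	x, y := int64(math.MinInt64), int64(1)
	fmt.Println(-(-x) == x)    // first rule; true even though -x overflows here
	fmt.Println(-(x-y) == y-x) // second rule; true even though x-y wraps
}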
// Convert constant subtracts to constant adds
-(SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x)
-(SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
+(SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x)
+(SUBLconst [c] x) => (ADDLconst [-c] x)
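The c != -(1<<31) guard on the Q form exists because the AuxInt immediate is a 32-bit value here and negating math.MinInt32 wraps back onto itself; the L form needs no guard, since wraparound is exactly the semantics of a 32-bit op anyway. A quick illustration (my example, relying on Go's ordinary wrapping arithmetic):

package main

import (
	"fmt"
	"math"
)

func main() {
	c := int32(math.MinInt32)
	// -(-2^31) overflows back to -2^31, so rewriting
	// (SUBQconst [-1<<31] x) as (ADDQconst [1<<31] x) is impossible:
	// 1<<31 is not representable as an int32 immediate.
	fmt.Println(-c == c) // true
}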
// generic constant folding
// TODO: more of this
-(ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
-(ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
-(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
-(ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
-(SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c])
-(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
-(SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
-(SARLconst [c] (MOVQconst [d])) -> (MOVQconst [int64(int32(d))>>uint64(c)])
-(SARWconst [c] (MOVQconst [d])) -> (MOVQconst [int64(int16(d))>>uint64(c)])
-(SARBconst [c] (MOVQconst [d])) -> (MOVQconst [int64(int8(d))>>uint64(c)])
-(NEGQ (MOVQconst [c])) -> (MOVQconst [-c])
-(NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
-(MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
-(MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
-(ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
-(ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
-(ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d])
-(ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
-(XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d])
-(XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
-(NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
-(NOTL (MOVLconst [c])) -> (MOVLconst [^c])
-(BTSQconst [c] (MOVQconst [d])) -> (MOVQconst [d|(1<<uint32(c))])
-(BTSLconst [c] (MOVLconst [d])) -> (MOVLconst [d|(1<<uint32(c))])
-(BTRQconst [c] (MOVQconst [d])) -> (MOVQconst [d&^(1<<uint32(c))])
-(BTRLconst [c] (MOVLconst [d])) -> (MOVLconst [d&^(1<<uint32(c))])
-(BTCQconst [c] (MOVQconst [d])) -> (MOVQconst [d^(1<<uint32(c))])
-(BTCLconst [c] (MOVLconst [d])) -> (MOVLconst [d^(1<<uint32(c))])
+(ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d])
+(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
+(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x)
+(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
+(SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)])
+(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x)
+(SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)])
+(SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)])
+(SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)])
+(SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)])
+(NEGQ (MOVQconst [c])) => (MOVQconst [-c])
+(NEGL (MOVLconst [c])) => (MOVLconst [-c])
+(MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d])
+(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
+(ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d])
+(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
+(ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d])
+(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
+(XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d])
+(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
+(NOTQ (MOVQconst [c])) => (MOVQconst [^c])
+(NOTL (MOVLconst [c])) => (MOVLconst [^c])
+(BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
+(BTSLconst [c] (MOVLconst [d])) => (MOVLconst [d|(1<<uint32(c))])
+(BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
+(BTRLconst [c] (MOVLconst [d])) => (MOVLconst [d&^(1<<uint32(c))])
+(BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])
+(BTCLconst [c] (MOVLconst [d])) => (MOVLconst [d^(1<<uint32(c))])
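In the narrow SAR folds above, d is first truncated to the operand width and sign-extended back to 64 bits, so the fold shifts exactly the value an arithmetic shift on the narrow register would see. For instance (illustrative values):

package main

import "fmt"

func main() {
	d := int64(0x1FF) // a byte-wide op only observes the low 8 bits, 0xFF
	fmt.Println(int64(int8(d)) >> 1) // -1: 0xFF sign-extends to -1, and -1>>1 stays -1
}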
// If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
// but we can still constant-fold.
// In theory this applies to any of the simplifications above,
// but ORQ is the only one I've actually seen occur.
-(ORQ (MOVQconst [c]) (MOVQconst [d])) -> (MOVQconst [c|d])
+(ORQ (MOVQconst [c]) (MOVQconst [d])) => (MOVQconst [c|d])
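ORQconst carries its constant in a sign-extended 32-bit immediate, which is why a large constant cannot simply be pushed into an ORQconst; with two constant operands, though, the whole OR folds away. A compile-time illustration (values hypothetical):

package main

import "fmt"

func main() {
	// Neither constant fits in a sign-extended 32-bit immediate, so no
	// ORQconst can be formed, but the OR of two constants still folds.
	const c, d = int64(1) << 40, int64(1) << 41
	fmt.Printf("%#x\n", c|d) // 0x30000000000
}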
// generic simplifications
// TODO: more of this
-(ADDQ x (NEGQ y)) -> (SUBQ x y)
-(ADDL x (NEGL y)) -> (SUBL x y)
-(SUBQ x x) -> (MOVQconst [0])
-(SUBL x x) -> (MOVLconst [0])
-(ANDQ x x) -> x
-(ANDL x x) -> x
-(ORQ x x) -> x
-(ORL x x) -> x
-(XORQ x x) -> (MOVQconst [0])
-(XORL x x) -> (MOVLconst [0])
-
-(SHLLconst [d] (MOVLconst [c])) -> (MOVLconst [int64(int32(c)) << uint64(d)])
-(SHLQconst [d] (MOVQconst [c])) -> (MOVQconst [c << uint64(d)])
-(SHLQconst [d] (MOVLconst [c])) -> (MOVQconst [int64(int32(c)) << uint64(d)])
+(ADDQ x (NEGQ y)) => (SUBQ x y)
+(ADDL x (NEGL y)) => (SUBL x y)
+(SUBQ x x) => (MOVQconst [0])
+(SUBL x x) => (MOVLconst [0])
+(ANDQ x x) => x
+(ANDL x x) => x
+(ORQ x x) => x
+(ORL x x) => x
+(XORQ x x) => (MOVQconst [0])
+(XORL x x) => (MOVLconst [0])
+
+(SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)])
+(SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)])
+(SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)])
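Note that the Q-form fold of a MOVLconst sign-extends the 32-bit constant before shifting (int64(c) << ...), matching how a 32-bit constant is materialized into a 64-bit register. For example:

package main

import "fmt"

func main() {
	c := int32(-1)
	fmt.Println(int64(c) << 8) // -256: sign-extend first, then shift
}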
// Fold NEG into ADDconst/MULconst. Take care to keep c in the 32-bit range.
-(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) -> (ADDQconst [-c] x)
-(MULQconst [c] (NEGQ x)) && c != -(1<<31) -> (MULQconst [-c] x)
+(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x)
+(MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x)
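The NEGQ fold uses the identity -((-x) + c) == (-c) + x, and the MULQconst fold uses c*(-x) == (-c)*x; in both, the guard excludes c == -(1<<31) so that -c stays representable as a 32-bit immediate (see the SUBQconst note above). A check of the identities (illustrative):

package main

import "fmt"

func main() {
	x, c := int64(5), int64(3)
	fmt.Println(-(-x+c) == -c+x)  // ADDconst case
	fmt.Println(c*(-x) == (-c)*x) // MULconst case
}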
// checking AND against 0.
(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y)
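This last rule targets the common x&y == 0 test: when the AND result feeds only the comparison (a.Uses == 1), the ANDQ/CMPQ pair collapses into one flag-setting TESTQ. A sketch of source that should exercise it (my example, not from the CL):

package main

import "fmt"

// disjoint reports whether x and y share no set bits; on amd64 the
// x&y == 0 check is expected to compile down to a single TESTQ.
func disjoint(x, y uint64) bool {
	return x&y == 0
}

func main() {
	fmt.Println(disjoint(0b1010, 0b0101)) // true
}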
return true
}
// match: (ADDLconst [c] (MOVLconst [d]))
- // result: (MOVLconst [int64(int32(c+d))])
+ // result: (MOVLconst [c+d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = int64(int32(c + d))
+ v.AuxInt = int32ToAuxInt(c + d)
return true
}
// match: (ADDLconst [c] (ADDLconst [d] x))
- // result: (ADDLconst [int64(int32(c+d))] x)
+ // result: (ADDLconst [c+d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64ADDLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ADDLconst)
- v.AuxInt = int64(int32(c + d))
+ v.AuxInt = int32ToAuxInt(c + d)
v.AddArg(x)
return true
}
return true
}
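The generated-code side of the change is mechanical: every raw v.AuxInt read or write now goes through a typed helper matching the aux type declared for the op. The helpers look roughly like this (paraphrasing the rewrite-helper definitions in the ssa package; exact set may differ):

// Narrow the stored int64 AuxInt to the type the rule declares...
func auxIntToInt8(i int64) int8   { return int8(i) }
func auxIntToInt32(i int64) int32 { return int32(i) }
func auxIntToInt64(i int64) int64 { return i }

// ...and widen it back for storage in Value.AuxInt.
func int8ToAuxInt(i int8) int64   { return int64(i) }
func int32ToAuxInt(i int32) int64 { return int64(i) }
func int64ToAuxInt(i int64) int64 { return i }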
// match: (ADDQconst [c] (MOVQconst [d]))
- // result: (MOVQconst [c+d])
+ // result: (MOVQconst [int64(c)+d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = c + d
+ v.AuxInt = int64ToAuxInt(int64(c) + d)
return true
}
// match: (ADDQconst [c] (ADDQconst [d] x))
- // cond: is32Bit(c+d)
+ // cond: is32Bit(int64(c)+int64(d))
// result: (ADDQconst [c+d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64ADDQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(is32Bit(c + d)) {
+ if !(is32Bit(int64(c) + int64(d))) {
break
}
v.reset(OpAMD64ADDQconst)
- v.AuxInt = c + d
+ v.AuxInt = int32ToAuxInt(c + d)
v.AddArg(x)
return true
}
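The overflow check is evaluated in int64 (is32Bit(int64(c)+int64(d))) so the sum is computed at full width before any truncation; is32Bit itself is simply a round-trip test through int32, roughly as defined among the ssa package's rewrite helpers:

// is32Bit reports whether n fits in a signed 32-bit integer.
func is32Bit(n int64) bool { return n == int64(int32(n)) }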
// match: (ANDLconst [c] (MOVLconst [d]))
// result: (MOVLconst [c&d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = c & d
+ v.AuxInt = int32ToAuxInt(c & d)
return true
}
return false
return true
}
// match: (ANDQconst [c] (MOVQconst [d]))
- // result: (MOVQconst [c&d])
+ // result: (MOVQconst [int64(c)&d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = c & d
+ v.AuxInt = int64ToAuxInt(int64(c) & d)
return true
}
return false
// match: (BTCLconst [c] (MOVLconst [d]))
// result: (MOVLconst [d^(1<<uint32(c))])
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = d ^ (1 << uint32(c))
+ v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
return true
}
return false
// match: (BTCQconst [c] (MOVQconst [d]))
// result: (MOVQconst [d^(1<<uint32(c))])
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = d ^ (1 << uint32(c))
+ v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
return true
}
return false
// match: (BTRLconst [c] (MOVLconst [d]))
// result: (MOVLconst [d&^(1<<uint32(c))])
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = d &^ (1 << uint32(c))
+ v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
return true
}
return false
// match: (BTRQconst [c] (MOVQconst [d]))
// result: (MOVQconst [d&^(1<<uint32(c))])
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = d &^ (1 << uint32(c))
+ v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
return true
}
return false
// match: (BTSLconst [c] (MOVLconst [d]))
// result: (MOVLconst [d|(1<<uint32(c))])
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = d | (1 << uint32(c))
+ v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
return true
}
return false
// match: (BTSQconst [c] (MOVQconst [d]))
// result: (MOVQconst [d|(1<<uint32(c))])
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = d | (1 << uint32(c))
+ v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
return true
}
return false
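The BT-const folds above map each bit instruction to the matching Go operator: BTS sets bit c with |, BTR clears it with &^ (AND NOT), and BTC flips it with ^. For instance (illustrative values):

package main

import "fmt"

func main() {
	d, c := int64(0xFF), uint32(3)
	fmt.Printf("%#x\n", d|(1<<c))  // 0xff: BTS sets bit 3 (already set)
	fmt.Printf("%#x\n", d&^(1<<c)) // 0xf7: BTR clears bit 3
	fmt.Printf("%#x\n", d^(1<<c))  // 0xf7: BTC flips bit 3
}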
return true
}
// match: (MULLconst [c] (MOVLconst [d]))
- // result: (MOVLconst [int64(int32(c*d))])
+ // result: (MOVLconst [c*d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = int64(int32(c * d))
+ v.AuxInt = int32ToAuxInt(c * d)
return true
}
return false
return true
}
// match: (MULQconst [c] (MOVQconst [d]))
- // result: (MOVQconst [c*d])
+ // result: (MOVQconst [int64(c)*d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = c * d
+ v.AuxInt = int64ToAuxInt(int64(c) * d)
return true
}
// match: (MULQconst [c] (NEGQ x))
// cond: c != -(1<<31)
// result: (MULQconst [-c] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64NEGQ {
break
}
break
}
v.reset(OpAMD64MULQconst)
- v.AuxInt = -c
+ v.AuxInt = int32ToAuxInt(-c)
v.AddArg(x)
return true
}
return true
}
// match: (NEGL (MOVLconst [c]))
- // result: (MOVLconst [int64(int32(-c))])
+ // result: (MOVLconst [-c])
for {
if v_0.Op != OpAMD64MOVLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = int64(int32(-c))
+ v.AuxInt = int32ToAuxInt(-c)
return true
}
return false
if v_0.Op != OpAMD64MOVQconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = -c
+ v.AuxInt = int64ToAuxInt(-c)
return true
}
// match: (NEGQ (ADDQconst [c] (NEGQ x)))
if v_0.Op != OpAMD64ADDQconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpAMD64NEGQ {
break
break
}
v.reset(OpAMD64ADDQconst)
- v.AuxInt = -c
+ v.AuxInt = int32ToAuxInt(-c)
v.AddArg(x)
return true
}
if v_0.Op != OpAMD64MOVLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = ^c
+ v.AuxInt = int32ToAuxInt(^c)
return true
}
return false
if v_0.Op != OpAMD64MOVQconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = ^c
+ v.AuxInt = int64ToAuxInt(^c)
return true
}
return false
// match: (ORLconst [c] (MOVLconst [d]))
// result: (MOVLconst [c|d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = c | d
+ v.AuxInt = int32ToAuxInt(c | d)
return true
}
return false
if v_0.Op != OpAMD64MOVQconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpAMD64MOVQconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = c | d
+ v.AuxInt = int64ToAuxInt(c | d)
return true
}
break
return true
}
// match: (ORQconst [c] (MOVQconst [d]))
- // result: (MOVQconst [c|d])
+ // result: (MOVQconst [int64(c)|d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = c | d
+ v.AuxInt = int64ToAuxInt(int64(c) | d)
return true
}
return false
// match: (SARBconst [c] (MOVQconst [d]))
// result: (MOVQconst [int64(int8(d))>>uint64(c)])
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64(int8(d)) >> uint64(c)
+ v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
return true
}
return false
// match: (SARLconst [c] (MOVQconst [d]))
// result: (MOVQconst [int64(int32(d))>>uint64(c)])
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64(int32(d)) >> uint64(c)
+ v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
return true
}
return false
// match: (SARQconst [c] (MOVQconst [d]))
// result: (MOVQconst [d>>uint64(c)])
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = d >> uint64(c)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
return true
}
return false
// match: (SARWconst [c] (MOVQconst [d]))
// result: (MOVQconst [int64(int16(d))>>uint64(c)])
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64(int16(d)) >> uint64(c)
+ v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
return true
}
return false
return true
}
// match: (SHLLconst [d] (MOVLconst [c]))
- // result: (MOVLconst [int64(int32(c)) << uint64(d)])
+ // result: (MOVLconst [c << uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = int64(int32(c)) << uint64(d)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
return true
}
return false
// match: (SHLQconst [d] (MOVQconst [c]))
// result: (MOVQconst [c << uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = c << uint64(d)
+ v.AuxInt = int64ToAuxInt(c << uint64(d))
return true
}
// match: (SHLQconst [d] (MOVLconst [c]))
- // result: (MOVQconst [int64(int32(c)) << uint64(d)])
+ // result: (MOVQconst [int64(c) << uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64(int32(c)) << uint64(d)
+ v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
return true
}
return false
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
return true
}
// match: (SUBLconst [c] x)
- // result: (ADDLconst [int64(int32(-c))] x)
+ // result: (ADDLconst [-c] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
x := v_0
v.reset(OpAMD64ADDLconst)
- v.AuxInt = int64(int32(-c))
+ v.AuxInt = int32ToAuxInt(-c)
v.AddArg(x)
return true
}
break
}
v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
// cond: c != -(1<<31)
// result: (ADDQconst [-c] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
x := v_0
if !(c != -(1 << 31)) {
break
}
v.reset(OpAMD64ADDQconst)
- v.AuxInt = -c
+ v.AuxInt = int32ToAuxInt(-c)
v.AddArg(x)
return true
}
// match: (SUBQconst (MOVQconst [d]) [c])
- // result: (MOVQconst [d-c])
+ // result: (MOVQconst [d-int64(c)])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = d - c
+ v.AuxInt = int64ToAuxInt(d - int64(c))
return true
}
// match: (SUBQconst (SUBQconst x [d]) [c])
- // cond: is32Bit(-c-d)
+ // cond: is32Bit(int64(-c)-int64(d))
// result: (ADDQconst [-c-d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64SUBQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(is32Bit(-c - d)) {
+ if !(is32Bit(int64(-c) - int64(d))) {
break
}
v.reset(OpAMD64ADDQconst)
- v.AuxInt = -c - d
+ v.AuxInt = int32ToAuxInt(-c - d)
v.AddArg(x)
return true
}
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
// match: (XORLconst [c] (MOVLconst [d]))
// result: (MOVLconst [c^d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = c ^ d
+ v.AuxInt = int32ToAuxInt(c ^ d)
return true
}
return false
break
}
v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
return true
}
// match: (XORQconst [c] (MOVQconst [d]))
- // result: (MOVQconst [c^d])
+ // result: (MOVQconst [int64(c)^d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpAMD64MOVQconst)
- v.AuxInt = c ^ d
+ v.AuxInt = int64ToAuxInt(int64(c) ^ d)
return true
}
return false