(LEAQ [off+int32(scale)*8] {sym} x)
// Absorb InvertFlags into branches.
-(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
-(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
-(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
-(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
-(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
-(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
-(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
-(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
-(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
-(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
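
These ten rules absorb an InvertFlags (which records that a comparison's operands were swapped) into the branch that consumes it: the signed pair LT/GT and the unsigned pair ULT/UGT swap, while EQ and NE are symmetric and pass through unchanged. The only change in this hunk is the -> to => migration. As a sanity check of the underlying identity, a minimal standalone Go program, independent of the compiler:

    package main

    import "fmt"

    func main() {
        // Swapping the operands of a comparison flips the ordering
        // predicates but not equality, which is why (LT (InvertFlags cmp))
        // can become (GT cmp) while (EQ ...) stays (EQ ...).
        for _, p := range [][2]int64{{1, 2}, {2, 1}, {-1, 1}, {3, 3}} {
            x, y := p[0], p[1]
            fmt.Println(
                (x < y) == (y > x),                                 // LT <-> GT
                (uint64(x) < uint64(y)) == (uint64(y) > uint64(x)), // ULT <-> UGT
                (x == y) == (y == x),                               // EQ/NE unchanged
            )
        }
    }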
// Constant comparisons.
-(CMPQconst (MOVQconst [x]) [y]) && x==y -> (FlagEQ)
-(CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
-(CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
-(CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
-(CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
-(CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
-(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
-(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
-(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
-(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
-(CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
-(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
-(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
-(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
-(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
-(CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
-(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
-(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
-(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
-(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
+(CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ)
+(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)<uint64(int64(y)) => (FlagLT_ULT)
+(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)>uint64(int64(y)) => (FlagLT_UGT)
+(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)<uint64(int64(y)) => (FlagGT_ULT)
+(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
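
Each of these rules folds a compare-with-constant of a known constant into one of five flag states: FlagEQ, or one of the four combinations of signed order (LT/GT) and unsigned order (ULT/UGT). The conditions are exhaustive: once x != y, the signed and the unsigned orderings each resolve one way or the other. A sketch of the same decision in plain Go for the 64-bit case (flagOf is an illustrative name, not a compiler helper):

    // flagOf mirrors the CMPQconst conditions above: the signed order
    // picks EQ/LT/GT, the unsigned order refines it to ULT/UGT.
    func flagOf(x, y int64) string {
        switch {
        case x == y:
            return "FlagEQ"
        case x < y && uint64(x) < uint64(y):
            return "FlagLT_ULT"
        case x < y: // signed less, unsigned greater
            return "FlagLT_UGT"
        case uint64(x) < uint64(y): // signed greater, unsigned less
            return "FlagGT_ULT"
        default: // signed greater, unsigned greater
            return "FlagGT_UGT"
        }
    }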
// CMPQconst requires a 32 bit const, but we can still constant-fold 64 bit consts.
// In theory this applies to any of the simplifications above,
// but CMPQ is the only one I've actually seen occur.
-(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y -> (FlagEQ)
-(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
-(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
-(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
-(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) => (FlagLT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) => (FlagLT_UGT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) => (FlagGT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT)
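
The reason CMPQ of two constants needs these dedicated rules: an x86-64 CMP immediate is at most 32 bits, sign-extended to 64, so a MOVQconst operand whose value does not survive an int32 round trip can never be strength-reduced to the CMPQconst form handled above. The ssa package expresses that round trip with a helper of roughly this shape (sketched from memory; is32Bit is the helper's name in rewrite.go):

    // is32Bit reports whether n can be encoded as a sign-extended
    // 32-bit immediate, i.e. whether it survives an int32 round trip.
    func is32Bit(n int64) bool {
        return n == int64(int32(n))
    }

For example, is32Bit(1<<31 - 1) is true, while is32Bit(1<<31) is false.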
// Other known comparisons.
-(CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
-(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT)
-(CMPQconst (MOVLQZX _) [c]) && 0xFFFFFFFF < c -> (FlagLT_ULT)
-(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT_ULT)
-(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT)
-(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
-(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
-(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
-(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
-(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
+(CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT)
+(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT)
+(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= m && int16(m) < n => (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= m && int8(m) < n => (FlagLT_ULT)
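
All of these rules rest on a known upper bound for the left operand: a zero-extended byte or word is at most 0xFF or 0xFFFF, a right shift by c leaves at most 64-c (or 32-c) significant bits, and x AND m for non-negative m is at most m. The unsigned comparison is therefore decided without knowing x. A small exhaustive check of the AND case, as a standalone illustration:

    package main

    import "fmt"

    func main() {
        // For m >= 0, x&m never exceeds m, so the unsigned comparison
        // (x&m) < n is guaranteed whenever m < n, which is the condition
        // the CMP*const/AND*const rules test.
        const m, n = 0x0F, 0x10
        ok := true
        for x := 0; x < 1<<16; x++ {
            if uint64(x&m) >= uint64(n) {
                ok = false
            }
        }
        fmt.Println(ok) // true
    }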
// TESTQ c c sets flags like CMPQ c 0.
(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c == 0 -> (FlagEQ)
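
The hunks below are from the regenerated rewriteAMD64.go. The visible effect of the -> to => migration is that aux ints are no longer read as raw int64 values: each is pulled through a typed accessor matching the op's declared aux type, and the rule conditions adjust their conversions accordingly. The accessors (paraphrased from the ssa package's rewrite.go) are thin conversions:

    func auxIntToInt8(i int64) int8   { return int8(i) }
    func auxIntToInt16(i int64) int16 { return int16(i) }
    func auxIntToInt32(i int64) int32 { return int32(i) }
    func auxIntToInt64(i int64) int64 { return i }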
v_0 := v.Args[0]
b := v.Block
// match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)==int8(y)
+ // cond: int8(x)==y
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int8(x) == int8(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) == y) {
break
}
v.reset(OpAMD64FlagEQ)
return true
}
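
Note the asymmetry the typed form exposes: x comes from a MOVLconst, whose payload is an int32, while y is CMPBconst's own int8 payload, so only x still needs truncation and int8(x)==int8(y) becomes int8(x)==y. (In the CMPLconst hunks further down both payloads are int32 and the truncations vanish entirely; the CMPWconst hunks mirror this one with int16.) A tiny standalone illustration of the truncation that remains:

    package main

    import "fmt"

    func main() {
        x := int32(0x101) // a MOVLconst payload
        y := int8(1)      // a CMPBconst payload
        // (CMPBconst (MOVLconst [0x101]) [1]) folds to FlagEQ:
        fmt.Println(int8(x) == y) // true
    }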
// match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)<int8(y) && uint8(x)<uint8(y)
+ // cond: int8(x)<y && uint8(x)<uint8(y)
// result: (FlagLT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) < uint8(y)) {
break
}
v.reset(OpAMD64FlagLT_ULT)
return true
}
// match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)<int8(y) && uint8(x)>uint8(y)
+ // cond: int8(x)<y && uint8(x)>uint8(y)
// result: (FlagLT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) > uint8(y)) {
break
}
v.reset(OpAMD64FlagLT_UGT)
return true
}
// match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)>int8(y) && uint8(x)<uint8(y)
+ // cond: int8(x)>y && uint8(x)<uint8(y)
// result: (FlagGT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) < uint8(y)) {
break
}
v.reset(OpAMD64FlagGT_ULT)
return true
}
// match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)>int8(y) && uint8(x)>uint8(y)
+ // cond: int8(x)>y && uint8(x)>uint8(y)
// result: (FlagGT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) > uint8(y)) {
break
}
v.reset(OpAMD64FlagGT_UGT)
return true
}
// match: (CMPBconst (ANDLconst _ [m]) [n])
- // cond: 0 <= int8(m) && int8(m) < int8(n)
+ // cond: 0 <= m && int8(m) < n
// result: (FlagLT_ULT)
for {
- n := v.AuxInt
+ n := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64ANDLconst {
break
}
- m := v_0.AuxInt
- if !(0 <= int8(m) && int8(m) < int8(n)) {
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && int8(m) < n) {
break
}
v.reset(OpAMD64FlagLT_ULT)
v_0 := v.Args[0]
b := v.Block
// match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)==int32(y)
+ // cond: x==y
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) == int32(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x == y) {
break
}
v.reset(OpAMD64FlagEQ)
return true
}
// match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+ // cond: x<y && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) < uint32(y)) {
break
}
v.reset(OpAMD64FlagLT_ULT)
return true
}
// match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+ // cond: x<y && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) > uint32(y)) {
break
}
v.reset(OpAMD64FlagLT_UGT)
return true
}
// match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+ // cond: x>y && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) < uint32(y)) {
break
}
v.reset(OpAMD64FlagGT_ULT)
return true
}
// match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+ // cond: x>y && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) > uint32(y)) {
break
}
v.reset(OpAMD64FlagGT_UGT)
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
// result: (FlagLT_ULT)
for {
- n := v.AuxInt
+ n := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64SHRLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt8(v_0.AuxInt)
if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
break
}
v.reset(OpAMD64FlagLT_ULT)
return true
}
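
Two different aux widths meet inside this one match: the SHRLconst shift count is an int8 under typed aux, while the CMPLconst constant is an int32, hence auxIntToInt8 beside auxIntToInt32. The bound being exploited: a 32-bit value shifted right by c has at most 32-c significant bits, so it is unsigned-below n once 1<<(32-c) <= n. A standalone instance:

    package main

    import "fmt"

    func main() {
        // Any v>>24 fits in 8 bits, so it is always below 0x100 = 1<<(32-24);
        // (CMPLconst (SHRLconst _ [24]) [0x100]) therefore folds to FlagLT_ULT.
        var v uint32 = 0xFFFFFFFF
        fmt.Println(v>>24 < 0x100) // true for every possible v
    }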
// match: (CMPLconst (ANDLconst _ [m]) [n])
- // cond: 0 <= int32(m) && int32(m) < int32(n)
+ // cond: 0 <= m && m < n
// result: (FlagLT_ULT)
for {
- n := v.AuxInt
+ n := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64ANDLconst {
break
}
- m := v_0.AuxInt
- if !(0 <= int32(m) && int32(m) < int32(n)) {
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
break
}
v.reset(OpAMD64FlagLT_ULT)
if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpAMD64MOVQconst {
break
}
- y := v_1.AuxInt
+ y := auxIntToInt64(v_1.AuxInt)
if !(x == y) {
break
}
if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpAMD64MOVQconst {
break
}
- y := v_1.AuxInt
+ y := auxIntToInt64(v_1.AuxInt)
if !(x < y && uint64(x) < uint64(y)) {
break
}
if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpAMD64MOVQconst {
break
}
- y := v_1.AuxInt
+ y := auxIntToInt64(v_1.AuxInt)
if !(x < y && uint64(x) > uint64(y)) {
break
}
if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpAMD64MOVQconst {
break
}
- y := v_1.AuxInt
+ y := auxIntToInt64(v_1.AuxInt)
if !(x > y && uint64(x) < uint64(y)) {
break
}
if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpAMD64MOVQconst {
break
}
- y := v_1.AuxInt
+ y := auxIntToInt64(v_1.AuxInt)
if !(x > y && uint64(x) > uint64(y)) {
break
}
return true
}
// match: (CMPQconst (MOVQconst [x]) [y])
- // cond: x==y
+ // cond: x==int64(y)
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.AuxInt
- if !(x == y) {
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == int64(y)) {
break
}
v.reset(OpAMD64FlagEQ)
return true
}
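
With y now an int32 against a full int64 x, every CMPQconst condition widens y first, and the unsigned legs spell the widening as uint64(int64(y)), making the sign extension explicit. (A direct uint64(y) would sign-extend too under Go's conversion rules; the two-step form simply mirrors the signed leg.) The distinction that matters, as a standalone illustration:

    package main

    import "fmt"

    func main() {
        y := int32(-1) // a CMPQconst payload
        fmt.Printf("%#x\n", uint64(int64(y)))  // 0xffffffffffffffff: sign-extended
        fmt.Printf("%#x\n", uint64(uint32(y))) // 0xffffffff: zero-extension, not what CMPQ wants
    }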
// match: (CMPQconst (MOVQconst [x]) [y])
- // cond: x<y && uint64(x)<uint64(y)
+ // cond: x<int64(y) && uint64(x)<uint64(int64(y))
// result: (FlagLT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.AuxInt
- if !(x < y && uint64(x) < uint64(y)) {
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
break
}
v.reset(OpAMD64FlagLT_ULT)
return true
}
// match: (CMPQconst (MOVQconst [x]) [y])
- // cond: x<y && uint64(x)>uint64(y)
+ // cond: x<int64(y) && uint64(x)>uint64(int64(y))
// result: (FlagLT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.AuxInt
- if !(x < y && uint64(x) > uint64(y)) {
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
break
}
v.reset(OpAMD64FlagLT_UGT)
return true
}
// match: (CMPQconst (MOVQconst [x]) [y])
- // cond: x>y && uint64(x)<uint64(y)
+ // cond: x>int64(y) && uint64(x)<uint64(int64(y))
// result: (FlagGT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.AuxInt
- if !(x > y && uint64(x) < uint64(y)) {
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
break
}
v.reset(OpAMD64FlagGT_ULT)
return true
}
// match: (CMPQconst (MOVQconst [x]) [y])
- // cond: x>y && uint64(x)>uint64(y)
+ // cond: x>int64(y) && uint64(x)>uint64(int64(y))
// result: (FlagGT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.AuxInt
- if !(x > y && uint64(x) > uint64(y)) {
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
break
}
v.reset(OpAMD64FlagGT_UGT)
// cond: 0xFF < c
// result: (FlagLT_ULT)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
break
}
// cond: 0xFFFF < c
// result: (FlagLT_ULT)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
break
}
v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMPQconst (MOVLQZX _) [c])
- // cond: 0xFFFFFFFF < c
- // result: (FlagLT_ULT)
- for {
- c := v.AuxInt
- if v_0.Op != OpAMD64MOVLQZX || !(0xFFFFFFFF < c) {
- break
- }
- v.reset(OpAMD64FlagLT_ULT)
- return true
- }
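
This MOVLQZX case is deleted outright rather than migrated: with typed aux, CMPQconst's constant is an int32, which can never exceed 0xFFFFFFFF, so the old guard is unsatisfiable. As plain Go it would not even compile:

    var c int32
    _ = 0xFFFFFFFF < c // rejected: constant 4294967295 overflows int32

The MOVBQZX and MOVWQZX variants survive because 0xFF and 0xFFFF still fit comfortably in an int32.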
// match: (CMPQconst (SHRQconst _ [c]) [n])
// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
// result: (FlagLT_ULT)
for {
- n := v.AuxInt
+ n := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64SHRQconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt8(v_0.AuxInt)
if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
break
}
// cond: 0 <= m && m < n
// result: (FlagLT_ULT)
for {
- n := v.AuxInt
+ n := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64ANDQconst {
break
}
- m := v_0.AuxInt
+ m := auxIntToInt32(v_0.AuxInt)
if !(0 <= m && m < n) {
break
}
// cond: 0 <= m && m < n
// result: (FlagLT_ULT)
for {
- n := v.AuxInt
+ n := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64ANDLconst {
break
}
- m := v_0.AuxInt
+ m := auxIntToInt32(v_0.AuxInt)
if !(0 <= m && m < n) {
break
}
v_0 := v.Args[0]
b := v.Block
// match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)==int16(y)
+ // cond: int16(x)==y
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt16(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int16(x) == int16(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) == y) {
break
}
v.reset(OpAMD64FlagEQ)
return true
}
// match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)<int16(y) && uint16(x)<uint16(y)
+ // cond: int16(x)<y && uint16(x)<uint16(y)
// result: (FlagLT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt16(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) < uint16(y)) {
break
}
v.reset(OpAMD64FlagLT_ULT)
return true
}
// match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)<int16(y) && uint16(x)>uint16(y)
+ // cond: int16(x)<y && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt16(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) > uint16(y)) {
break
}
v.reset(OpAMD64FlagLT_UGT)
return true
}
// match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)>int16(y) && uint16(x)<uint16(y)
+ // cond: int16(x)>y && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt16(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) < uint16(y)) {
break
}
v.reset(OpAMD64FlagGT_ULT)
return true
}
// match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)>int16(y) && uint16(x)>uint16(y)
+ // cond: int16(x)>y && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt16(v.AuxInt)
if v_0.Op != OpAMD64MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) > uint16(y)) {
break
}
v.reset(OpAMD64FlagGT_UGT)
return true
}
// match: (CMPWconst (ANDLconst _ [m]) [n])
- // cond: 0 <= int16(m) && int16(m) < int16(n)
+ // cond: 0 <= m && int16(m) < n
// result: (FlagLT_ULT)
for {
- n := v.AuxInt
+ n := auxIntToInt16(v.AuxInt)
if v_0.Op != OpAMD64ANDLconst {
break
}
- m := v_0.AuxInt
- if !(0 <= int16(m) && int16(m) < int16(n)) {
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && int16(m) < n) {
break
}
v.reset(OpAMD64FlagLT_ULT)
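
A closing practical note: rewriteAMD64.go is generated from AMD64.rules, so the two halves of this diff must stay in sync. At the time of this change, the rewriter was regenerated by running go run *.go from src/cmd/compile/internal/ssa/gen.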