(CMPB x (MOVBconst [c])) -> (CMPBconst x [c])
(CMPB (MOVBconst [c]) x) -> (InvertFlags (CMPBconst x [c]))
+// Using MOVBQZX instead of ANDQ is cheaper.
+(ANDQconst [0xFF] x) -> (MOVBQZX x)
+(ANDQconst [0xFFFF] x) -> (MOVWQZX x)
+(ANDQconst [0xFFFFFFFF] x) -> (MOVLQZX x)
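+// For example (an illustrative sketch, not part of this change), Go code like
+//     func low8(x uint64) uint64 { return x & 0xFF }
+// is now compiled to a MOVBQZX of x instead of an ANDQ with an immediate.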
+
// strength reduction
// Assumes the following costs from https://gmplib.org/~tege/x86-timing.pdf:
// 1 - addq, shlq, leaq, negq
(CMPBconst (MOVBconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
// Other known comparisons.
+(CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
+(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT)
+(CMPQconst (MOVLQZX _) [c]) && 0xFFFFFFFF < c -> (FlagLT_ULT)
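+// These hold because a zero-extended value is bounded by its source width
+// (MOVBQZX <= 0xFF, MOVWQZX <= 0xFFFF, MOVLQZX <= 0xFFFFFFFF), so comparing it
+// against any larger constant always yields less-than, both signed and unsigned.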
(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
(CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
v.AddArg(x)
return true
}
+ // match: (ANDQconst [0xFF] x)
+ // cond:
+ // result: (MOVBQZX x)
+ for {
+ if v.AuxInt != 0xFF {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [0xFFFF] x)
+ // cond:
+ // result: (MOVWQZX x)
+ for {
+ if v.AuxInt != 0xFFFF {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [0xFFFFFFFF] x)
+ // cond:
+ // result: (MOVLQZX x)
+ for {
+ if v.AuxInt != 0xFFFFFFFF {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpAMD64MOVLQZX)
+ v.AddArg(x)
+ return true
+ }
// match: (ANDQconst [0] _)
// cond:
// result: (MOVQconst [0])
v.reset(OpAMD64FlagGT_UGT)
return true
}
+ // match: (CMPQconst (MOVBQZX _) [c])
+ // cond: 0xFF < c
+ // result: (FlagLT_ULT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ c := v.AuxInt
+ if !(0xFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVWQZX _) [c])
+ // cond: 0xFFFF < c
+ // result: (FlagLT_ULT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVWQZX {
+ break
+ }
+ c := v.AuxInt
+ if !(0xFFFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVLQZX _) [c])
+ // cond: 0xFFFFFFFF < c
+ // result: (FlagLT_ULT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLQZX {
+ break
+ }
+ c := v.AuxInt
+ if !(0xFFFFFFFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
// match: (CMPQconst (ANDQconst _ [m]) [n])
// cond: 0 <= m && m < n
// result: (FlagLT_ULT)