(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no)
(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)
-(Z (MOVDconst [0]) yes no) -> (First yes no)
-(Z (MOVDconst [c]) yes no) && c != 0 -> (First no yes)
-(NZ (MOVDconst [0]) yes no) -> (First no yes)
-(NZ (MOVDconst [c]) yes no) && c != 0 -> (First yes no)
-(ZW (MOVDconst [c]) yes no) && int32(c) == 0 -> (First yes no)
-(ZW (MOVDconst [c]) yes no) && int32(c) != 0 -> (First no yes)
-(NZW (MOVDconst [c]) yes no) && int32(c) == 0 -> (First no yes)
-(NZW (MOVDconst [c]) yes no) && int32(c) != 0 -> (First yes no)
+(Z (MOVDconst [0]) yes no) => (First yes no)
+(Z (MOVDconst [c]) yes no) && c != 0 => (First no yes)
+(NZ (MOVDconst [0]) yes no) => (First no yes)
+(NZ (MOVDconst [c]) yes no) && c != 0 => (First yes no)
+(ZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First yes no)
+(ZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First no yes)
+(NZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First no yes)
+(NZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First yes no)
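// For example (a hypothetical illustration, not from this CL): once earlier
// passes fold the control to a constant, a block such as
//
//	if debug != 0 { slowPath() }   // with debug proven to be 0
//
// reaches here as (NZ (MOVDconst [0]) yes no), and the rules above turn it
// into (First no yes), an unconditional branch to the "no" edge.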
// absorb InvertFlags into branches
-(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
-(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
-(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
-(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
-(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
-(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
-(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
-(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
-(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
-(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
-(FLT (InvertFlags cmp) yes no) -> (FGT cmp yes no)
-(FGT (InvertFlags cmp) yes no) -> (FLT cmp yes no)
-(FLE (InvertFlags cmp) yes no) -> (FGE cmp yes no)
-(FGE (InvertFlags cmp) yes no) -> (FLE cmp yes no)
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+(FLT (InvertFlags cmp) yes no) => (FGT cmp yes no)
+(FGT (InvertFlags cmp) yes no) => (FLT cmp yes no)
+(FLE (InvertFlags cmp) yes no) => (FGE cmp yes no)
+(FGE (InvertFlags cmp) yes no) => (FLE cmp yes no)
(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)
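// InvertFlags wraps flags produced by a comparison whose operands were
// swapped, so each ordered condition flips to its mirror (LT<->GT, LE<->GE,
// and likewise for the unsigned and floating-point variants), while EQ and
// NE are symmetric and pass through unchanged.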
// absorb InvertFlags into CSEL(0)
-(CSEL {cc} x y (InvertFlags cmp)) -> (CSEL {arm64Invert(cc.(Op))} x y cmp)
-(CSEL0 {cc} x (InvertFlags cmp)) -> (CSEL0 {arm64Invert(cc.(Op))} x cmp)
+(CSEL {cc} x y (InvertFlags cmp)) => (CSEL {arm64Invert(cc)} x y cmp)
+(CSEL0 {cc} x (InvertFlags cmp)) => (CSEL0 {arm64Invert(cc)} x cmp)
// absorb flag constants into boolean values
(Equal (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())])
(GreaterEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
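// b2i maps a bool to an int64 0 or 1, so e.g. a FlagConstant with fc.eq()
// true folds (Equal ...) to (MOVDconst [1]).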
// absorb InvertFlags into boolean values
-(Equal (InvertFlags x)) -> (Equal x)
-(NotEqual (InvertFlags x)) -> (NotEqual x)
-(LessThan (InvertFlags x)) -> (GreaterThan x)
-(LessThanU (InvertFlags x)) -> (GreaterThanU x)
-(GreaterThan (InvertFlags x)) -> (LessThan x)
-(GreaterThanU (InvertFlags x)) -> (LessThanU x)
-(LessEqual (InvertFlags x)) -> (GreaterEqual x)
-(LessEqualU (InvertFlags x)) -> (GreaterEqualU x)
-(GreaterEqual (InvertFlags x)) -> (LessEqual x)
-(GreaterEqualU (InvertFlags x)) -> (LessEqualU x)
-(LessThanF (InvertFlags x)) -> (GreaterThanF x)
-(LessEqualF (InvertFlags x)) -> (GreaterEqualF x)
-(GreaterThanF (InvertFlags x)) -> (LessThanF x)
-(GreaterEqualF (InvertFlags x)) -> (LessEqualF x)
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(LessThanU (InvertFlags x)) => (GreaterThanU x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(GreaterThanU (InvertFlags x)) => (LessThanU x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
+(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
+(LessThanF (InvertFlags x)) => (GreaterThanF x)
+(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
+(GreaterThanF (InvertFlags x)) => (LessThanF x)
+(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
// Boolean-generating instructions always
// zero the upper bits of the register; no need to zero-extend.
-(MOVBUreg x) && x.Type.IsBoolean() -> (MOVDreg x)
+(MOVBUreg x) && x.Type.IsBoolean() => (MOVDreg x)
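// A boolean here is typically materialized with CSET, which writes 0 or 1
// into the full 64-bit register, so the (MOVBUreg x) zero-extension is
// already a no-op (a sketch of the rationale; the rule itself keys only on
// x.Type.IsBoolean()).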
// absorb flag constants into conditional instructions
-(CSEL {cc} x _ flag) && ccARM64Eval(cc, flag) > 0 -> x
-(CSEL {cc} _ y flag) && ccARM64Eval(cc, flag) < 0 -> y
-(CSEL0 {cc} x flag) && ccARM64Eval(cc, flag) > 0 -> x
-(CSEL0 {cc} _ flag) && ccARM64Eval(cc, flag) < 0 -> (MOVDconst [0])
+(CSEL {cc} x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSEL {cc} _ y flag) && ccARM64Eval(cc, flag) < 0 => y
+(CSEL0 {cc} x flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSEL0 {cc} _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
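// ccARM64Eval checks the condition cc against a known flag value: a positive
// result means cc is provably true, a negative result provably false, and 0
// means unknown, in which case neither rule fires.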
// absorb flags back into boolean CSEL
-(CSEL {cc} x y (CMPWconst [0] boolval)) && cc.(Op) == OpARM64NotEqual && flagArg(boolval) != nil ->
+(CSEL {cc} x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
(CSEL {boolval.Op} x y flagArg(boolval))
-(CSEL {cc} x y (CMPWconst [0] boolval)) && cc.(Op) == OpARM64Equal && flagArg(boolval) != nil ->
+(CSEL {cc} x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
(CSEL {arm64Negate(boolval.Op)} x y flagArg(boolval))
-(CSEL0 {cc} x (CMPWconst [0] boolval)) && cc.(Op) == OpARM64NotEqual && flagArg(boolval) != nil ->
+(CSEL0 {cc} x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
(CSEL0 {boolval.Op} x flagArg(boolval))
-(CSEL0 {cc} x (CMPWconst [0] boolval)) && cc.(Op) == OpARM64Equal && flagArg(boolval) != nil ->
+(CSEL0 {cc} x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
(CSEL0 {arm64Negate(boolval.Op)} x flagArg(boolval))
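// For example (a hypothetical sketch):
//
//	b := x < y
//	r := a
//	if b { r = c }
//
// re-tests the materialized boolean as (CMPWconst [0] (LessThan cmp)); the
// rules above key the CSEL directly on cmp instead, so the intermediate
// 0/1 value never needs to be computed.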
// absorb shifts into ops
// arm64 does not have a ROL instruction, so ROL(x, y) is replaced by ROR(x, -y).
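// They match Go source code like (a sketch of the intended pattern):
//	y &= 63
//	x << y | x >> (64-y)
// where "|" can also be "^" or "+".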
((ADD|OR|XOR) (SLL x (ANDconst <t> [63] y))
(CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
- (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc.(Op) == OpARM64LessThanU
- -> (ROR x (NEG <t> y))
+ (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
+ => (ROR x (NEG <t> y))
((ADD|OR|XOR) (SRL <typ.UInt64> x (ANDconst <t> [63] y))
(CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
- (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc.(Op) == OpARM64LessThanU
- -> (ROR x y)
+ (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
+ => (ROR x y)
// These rules match Go source code like
//	y &= 31
//	x << y | x >> (32-y)
// where "|" can also be "^" or "+".
// arm64 does not have a ROLW instruction, so ROLW(x, y) is replaced by RORW(x, -y).
((ADD|OR|XOR) (SLL x (ANDconst <t> [31] y))
(CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
- (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc.(Op) == OpARM64LessThanU
- -> (RORW x (NEG <t> y))
+ (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
+ => (RORW x (NEG <t> y))
((ADD|OR|XOR) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y))
(CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
- (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc.(Op) == OpARM64LessThanU
- -> (RORW x y)
+ (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
+ => (RORW x y)
// ((x>>8) | (x<<8)) -> (REV16W x), where x has type uint16; "|" can also be "^" or "+".
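// E.g. (a sketch):
//	var x uint16 = 0xABCD
//	swapped := x>>8 | x<<8 // 0xCDAB, compiled to a single REV16W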
break
}
// match: (ADD (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (ROR x (NEG <t> y))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
break
}
// match: (ADD (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (ROR x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
break
}
// match: (ADD (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (RORW x (NEG <t> y))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
break
}
// match: (ADD (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (RORW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
return true
}
// match: (CSEL {cc} x y (InvertFlags cmp))
- // result: (CSEL {arm64Invert(cc.(Op))} x y cmp)
+ // result: (CSEL {arm64Invert(cc)} x y cmp)
for {
- cc := v.Aux
+ cc := auxToCCop(v.Aux)
x := v_0
y := v_1
if v_2.Op != OpARM64InvertFlags {
}
cmp := v_2.Args[0]
v.reset(OpARM64CSEL)
- v.Aux = arm64Invert(cc.(Op))
+ v.Aux = cCopToAux(arm64Invert(cc))
v.AddArg3(x, y, cmp)
return true
}
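// auxToCCop/cCopToAux (and the auxIntToInt32/int64ToAuxInt helpers used
// further down) are the thin accessors that "=>" rules use for typed aux
// fields. A minimal sketch of their shape, assuming v.Aux is still a plain
// interface{} at this stage of the migration:
//
//	func auxToCCop(a interface{}) Op  { return a.(Op) }
//	func cCopToAux(o Op) interface{}  { return o }
//	func auxIntToInt32(i int64) int32 { return int32(i) }
//	func int64ToAuxInt(i int64) int64 { return i }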
// cond: ccARM64Eval(cc, flag) > 0
// result: x
for {
- cc := v.Aux
+ cc := auxToCCop(v.Aux)
x := v_0
flag := v_2
if !(ccARM64Eval(cc, flag) > 0) {
// cond: ccARM64Eval(cc, flag) < 0
// result: y
for {
- cc := v.Aux
+ cc := auxToCCop(v.Aux)
y := v_1
flag := v_2
if !(ccARM64Eval(cc, flag) < 0) {
return true
}
// match: (CSEL {cc} x y (CMPWconst [0] boolval))
- // cond: cc.(Op) == OpARM64NotEqual && flagArg(boolval) != nil
+ // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil
// result: (CSEL {boolval.Op} x y flagArg(boolval))
for {
- cc := v.Aux
+ cc := auxToCCop(v.Aux)
x := v_0
y := v_1
- if v_2.Op != OpARM64CMPWconst || v_2.AuxInt != 0 {
+ if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
break
}
boolval := v_2.Args[0]
- if !(cc.(Op) == OpARM64NotEqual && flagArg(boolval) != nil) {
+ if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) {
break
}
v.reset(OpARM64CSEL)
- v.Aux = boolval.Op
+ v.Aux = cCopToAux(boolval.Op)
v.AddArg3(x, y, flagArg(boolval))
return true
}
// match: (CSEL {cc} x y (CMPWconst [0] boolval))
- // cond: cc.(Op) == OpARM64Equal && flagArg(boolval) != nil
+ // cond: cc == OpARM64Equal && flagArg(boolval) != nil
// result: (CSEL {arm64Negate(boolval.Op)} x y flagArg(boolval))
for {
- cc := v.Aux
+ cc := auxToCCop(v.Aux)
x := v_0
y := v_1
- if v_2.Op != OpARM64CMPWconst || v_2.AuxInt != 0 {
+ if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
break
}
boolval := v_2.Args[0]
- if !(cc.(Op) == OpARM64Equal && flagArg(boolval) != nil) {
+ if !(cc == OpARM64Equal && flagArg(boolval) != nil) {
break
}
v.reset(OpARM64CSEL)
- v.Aux = arm64Negate(boolval.Op)
+ v.Aux = cCopToAux(arm64Negate(boolval.Op))
v.AddArg3(x, y, flagArg(boolval))
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (CSEL0 {cc} x (InvertFlags cmp))
- // result: (CSEL0 {arm64Invert(cc.(Op))} x cmp)
+ // result: (CSEL0 {arm64Invert(cc)} x cmp)
for {
- cc := v.Aux
+ cc := auxToCCop(v.Aux)
x := v_0
if v_1.Op != OpARM64InvertFlags {
break
}
cmp := v_1.Args[0]
v.reset(OpARM64CSEL0)
- v.Aux = arm64Invert(cc.(Op))
+ v.Aux = cCopToAux(arm64Invert(cc))
v.AddArg2(x, cmp)
return true
}
// cond: ccARM64Eval(cc, flag) > 0
// result: x
for {
- cc := v.Aux
+ cc := auxToCCop(v.Aux)
x := v_0
flag := v_1
if !(ccARM64Eval(cc, flag) > 0) {
// cond: ccARM64Eval(cc, flag) < 0
// result: (MOVDconst [0])
for {
- cc := v.Aux
+ cc := auxToCCop(v.Aux)
flag := v_1
if !(ccARM64Eval(cc, flag) < 0) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
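// CSEL0 selects between its argument and the constant 0, so a condition that
// is provably false folds the whole value to (MOVDconst [0]).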
// match: (CSEL0 {cc} x (CMPWconst [0] boolval))
- // cond: cc.(Op) == OpARM64NotEqual && flagArg(boolval) != nil
+ // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil
// result: (CSEL0 {boolval.Op} x flagArg(boolval))
for {
- cc := v.Aux
+ cc := auxToCCop(v.Aux)
x := v_0
- if v_1.Op != OpARM64CMPWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
boolval := v_1.Args[0]
- if !(cc.(Op) == OpARM64NotEqual && flagArg(boolval) != nil) {
+ if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) {
break
}
v.reset(OpARM64CSEL0)
- v.Aux = boolval.Op
+ v.Aux = cCopToAux(boolval.Op)
v.AddArg2(x, flagArg(boolval))
return true
}
// match: (CSEL0 {cc} x (CMPWconst [0] boolval))
- // cond: cc.(Op) == OpARM64Equal && flagArg(boolval) != nil
+ // cond: cc == OpARM64Equal && flagArg(boolval) != nil
// result: (CSEL0 {arm64Negate(boolval.Op)} x flagArg(boolval))
for {
- cc := v.Aux
+ cc := auxToCCop(v.Aux)
x := v_0
- if v_1.Op != OpARM64CMPWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
boolval := v_1.Args[0]
- if !(cc.(Op) == OpARM64Equal && flagArg(boolval) != nil) {
+ if !(cc == OpARM64Equal && flagArg(boolval) != nil) {
break
}
v.reset(OpARM64CSEL0)
- v.Aux = arm64Negate(boolval.Op)
+ v.Aux = cCopToAux(arm64Negate(boolval.Op))
v.AddArg2(x, flagArg(boolval))
return true
}
break
}
// match: (OR (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (ROR x (NEG <t> y))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
break
}
// match: (OR (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (ROR x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
break
}
// match: (OR (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (RORW x (NEG <t> y))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
break
}
// match: (OR (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (RORW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
break
}
// match: (XOR (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (ROR x (NEG <t> y))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
break
}
// match: (XOR (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (ROR x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
break
}
// match: (XOR (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (RORW x (NEG <t> y))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
break
}
// match: (XOR (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
- // cond: cc.(Op) == OpARM64LessThanU
+ // cond: cc == OpARM64LessThanU
// result: (RORW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc.(Op) == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
// result: (First no yes)
for b.Controls[0].Op == OpARM64MOVDconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
b.Reset(BlockFirst)
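// BlockFirst always transfers to its first successor; for a (First no yes)
// result the generated code presumably also calls b.swapSuccessors()
// (elided in this excerpt) so that the "no" edge comes first.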
// result: (First yes no)
for b.Controls[0].Op == OpARM64MOVDconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c != 0) {
break
}
// result: (First no yes)
for b.Controls[0].Op == OpARM64MOVDconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(int32(c) == 0) {
break
}
// result: (First yes no)
for b.Controls[0].Op == OpARM64MOVDconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(int32(c) != 0) {
break
}
// result: (First yes no)
for b.Controls[0].Op == OpARM64MOVDconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
b.Reset(BlockFirst)
// result: (First no yes)
for b.Controls[0].Op == OpARM64MOVDconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c != 0) {
break
}
// result: (First yes no)
for b.Controls[0].Op == OpARM64MOVDconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(int32(c) == 0) {
break
}
// result: (First no yes)
for b.Controls[0].Op == OpARM64MOVDconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(int32(c) != 0) {
break
}