(CSEL0 {arm64Negate(bool.Op)} x flagArg(bool))
// absorb shifts into ops
+(NEG x:(SLLconst [c] y)) && clobberIfDead(x) -> (NEGshiftLL [c] y)
+(NEG x:(SRLconst [c] y)) && clobberIfDead(x) -> (NEGshiftRL [c] y)
+(NEG x:(SRAconst [c] y)) && clobberIfDead(x) -> (NEGshiftRA [c] y)
+(MVN x:(SLLconst [c] y)) && clobberIfDead(x) -> (MVNshiftLL [c] y)
+(MVN x:(SRLconst [c] y)) && clobberIfDead(x) -> (MVNshiftRL [c] y)
+(MVN x:(SRAconst [c] y)) && clobberIfDead(x) -> (MVNshiftRA [c] y)
(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftLL x0 y [c])
(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRL x0 y [c])
(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRA x0 y [c])
(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRL x1 y [c]))
(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMPshiftRA x0 y [c])
(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRA x1 y [c]))
+(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (CMNshiftLL x0 y [c])
+(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (CMNshiftRL x0 y [c])
+(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMNshiftRA x0 y [c])
+(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (TSTshiftLL x0 y [c])
+(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (TSTshiftRL x0 y [c])
+(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (TSTshiftRA x0 y [c])
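For a rough sense of what the absorb rules above buy (an illustrative sketch, not verified compiler output), Go expressions of the following shape are the kind of pattern that can collapse into a single instruction with a shifted operand:

	// Hypothetical examples; function names are made up for illustration.
	func negShift(x int64) int64    { return -(x << 3) }     // NEG of SLLconst -> NEGshiftLL candidate
	func mvnShift(x uint64) uint64  { return ^(x >> 7) }     // MVN of SRLconst -> MVNshiftRL candidate
	func tstShift(a, b uint64) bool { return a&(b<<5) != 0 } // flag-only AND   -> TSTshiftLL candidate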
// prefer *const ops to *shift ops
(ADDshiftLL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
(CMPshiftLL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
(CMPshiftRL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
(CMPshiftRA (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(CMNshiftLL (MOVDconst [c]) x [d]) -> (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVDconst [c]) x [d]) -> (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVDconst [c]) x [d]) -> (CMNconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftLL (MOVDconst [c]) x [d]) -> (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVDconst [c]) x [d]) -> (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVDconst [c]) x [d]) -> (TSTconst [c] (SRAconst <x.Type> x [d]))
// constant folding in *shift ops
+(MVNshiftLL (MOVDconst [c]) [d]) -> (MOVDconst [^int64(uint64(c)<<uint64(d))])
+(MVNshiftRL (MOVDconst [c]) [d]) -> (MOVDconst [^int64(uint64(c)>>uint64(d))])
+(MVNshiftRA (MOVDconst [c]) [d]) -> (MOVDconst [^(c>>uint64(d))])
+(NEGshiftLL (MOVDconst [c]) [d]) -> (MOVDconst [-int64(uint64(c)<<uint64(d))])
+(NEGshiftRL (MOVDconst [c]) [d]) -> (MOVDconst [-int64(uint64(c)>>uint64(d))])
+(NEGshiftRA (MOVDconst [c]) [d]) -> (MOVDconst [-(c>>uint64(d))])
(ADDshiftLL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)<<uint64(d))])
(ADDshiftRL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)>>uint64(d))])
(ADDshiftRA x (MOVDconst [c]) [d]) -> (ADDconst x [c>>uint64(d)])
(CMPshiftLL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)<<uint64(d))])
(CMPshiftRL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)>>uint64(d))])
(CMPshiftRA x (MOVDconst [c]) [d]) -> (CMPconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVDconst [c]) [d]) -> (CMNconst x [int64(uint64(c)<<uint64(d))])
+(CMNshiftRL x (MOVDconst [c]) [d]) -> (CMNconst x [int64(uint64(c)>>uint64(d))])
+(CMNshiftRA x (MOVDconst [c]) [d]) -> (CMNconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVDconst [c]) [d]) -> (TSTconst x [int64(uint64(c)<<uint64(d))])
+(TSTshiftRL x (MOVDconst [c]) [d]) -> (TSTconst x [int64(uint64(c)>>uint64(d))])
+(TSTshiftRA x (MOVDconst [c]) [d]) -> (TSTconst x [c>>uint64(d)])
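The folds above differ between the RL and RA forms in how the constant is shifted: AuxInt values are signed int64, so a logical right shift must round-trip through uint64 while an arithmetic shift can stay signed. A minimal standalone sketch (not compiler code) of the difference:

	package main

	import "fmt"

	func main() {
		var c, d int64 = -8, 1
		fmt.Println(int64(uint64(c) >> uint64(d))) // logical shift:    9223372036854775804
		fmt.Println(c >> uint64(d))                // arithmetic shift: -4
	}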
// simplification with *shift ops
(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
{name: "FCMPD", argLength: 2, reg: fp2flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to arg1, float64
// shifted ops
+ {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0<<auxInt)
+ {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), unsigned shift
+ {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), signed shift
+ {name: "NEGshiftLL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0<<auxInt)
+ {name: "NEGshiftRL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), unsigned shift
+ {name: "NEGshiftRA", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), signed shift
{name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1<<auxInt
{name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, unsigned shift
{name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, signed shift
{name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<<auxInt
{name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
{name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
+ {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1<<auxInt) compare to 0
+ {name: "CMNshiftRL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, unsigned shift
+ {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, signed shift
+ {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1<<auxInt) compare to 0
+ {name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, unsigned shift
+ {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, signed shift
// bitfield ops
// for all bitfield ops lsb is auxInt>>8, width is auxInt&0xff
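A minimal sketch of the encoding that comment describes. getARM64BFwidth is referenced later in this diff; the packing helper and getARM64BFlsb are written here as assumed counterparts, not taken from the change:

	// lsb lives in the bits above the low byte, width in the low byte.
	func armBFAuxInt(lsb, width int64) int64 { return lsb<<8 | width }         // assumed helper
	func getARM64BFlsb(bfc int64) int64      { return int64(uint64(bfc) >> 8) } // assumed helper
	func getARM64BFwidth(bfc int64) int64    { return bfc & 0xff }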
return rewriteValueARM64_OpARM64CMNWconst_0(v)
case OpARM64CMNconst:
return rewriteValueARM64_OpARM64CMNconst_0(v)
+ case OpARM64CMNshiftLL:
+ return rewriteValueARM64_OpARM64CMNshiftLL_0(v)
+ case OpARM64CMNshiftRA:
+ return rewriteValueARM64_OpARM64CMNshiftRA_0(v)
+ case OpARM64CMNshiftRL:
+ return rewriteValueARM64_OpARM64CMNshiftRL_0(v)
case OpARM64CMP:
return rewriteValueARM64_OpARM64CMP_0(v)
case OpARM64CMPW:
return rewriteValueARM64_OpARM64MULW_0(v) || rewriteValueARM64_OpARM64MULW_10(v) || rewriteValueARM64_OpARM64MULW_20(v)
case OpARM64MVN:
return rewriteValueARM64_OpARM64MVN_0(v)
+ case OpARM64MVNshiftLL:
+ return rewriteValueARM64_OpARM64MVNshiftLL_0(v)
+ case OpARM64MVNshiftRA:
+ return rewriteValueARM64_OpARM64MVNshiftRA_0(v)
+ case OpARM64MVNshiftRL:
+ return rewriteValueARM64_OpARM64MVNshiftRL_0(v)
case OpARM64NEG:
return rewriteValueARM64_OpARM64NEG_0(v)
+ case OpARM64NEGshiftLL:
+ return rewriteValueARM64_OpARM64NEGshiftLL_0(v)
+ case OpARM64NEGshiftRA:
+ return rewriteValueARM64_OpARM64NEGshiftRA_0(v)
+ case OpARM64NEGshiftRL:
+ return rewriteValueARM64_OpARM64NEGshiftRL_0(v)
case OpARM64NotEqual:
return rewriteValueARM64_OpARM64NotEqual_0(v)
case OpARM64OR:
return rewriteValueARM64_OpARM64TSTWconst_0(v)
case OpARM64TSTconst:
return rewriteValueARM64_OpARM64TSTconst_0(v)
+ case OpARM64TSTshiftLL:
+ return rewriteValueARM64_OpARM64TSTshiftLL_0(v)
+ case OpARM64TSTshiftRA:
+ return rewriteValueARM64_OpARM64TSTshiftRA_0(v)
+ case OpARM64TSTshiftRL:
+ return rewriteValueARM64_OpARM64TSTshiftRL_0(v)
case OpARM64UBFIZ:
return rewriteValueARM64_OpARM64UBFIZ_0(v)
case OpARM64UBFX:
v.AddArg(x)
return true
}
+ // match: (CMN x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftLL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftLL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMN x1:(SLLconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftLL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftLL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMN x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftRL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMN x1:(SRLconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftRL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMN x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRA x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftRA)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMN x1:(SRAconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRA x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftRA)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64CMNW_0(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64CMNshiftLL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMNshiftLL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (CMNconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (CMNconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftRA_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMNshiftRA (MOVDconst [c]) x [d])
+ // cond:
+ // result: (CMNconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (CMNconst x [c>>uint64(d)])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = c >> uint64(d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftRL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMNshiftRL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (CMNconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (CMNconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64CMP_0(v *Value) bool {
b := v.Block
_ = b
v.AuxInt = ^c
return true
}
- return false
-}
-func rewriteValueARM64_OpARM64NEG_0(v *Value) bool {
- // match: (NEG (MUL x y))
- // cond:
- // result: (MNEG x y)
+ // match: (MVN x:(SLLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftLL [c] y)
for {
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MUL {
+ x := v.Args[0]
+ if x.Op != OpARM64SLLconst {
break
}
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
- v.reset(OpARM64MNEG)
- v.AddArg(x)
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftLL)
+ v.AuxInt = c
v.AddArg(y)
return true
}
- // match: (NEG (MULW x y))
- // cond:
- // result: (MNEGW x y)
+ // match: (MVN x:(SRLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRL [c] y)
for {
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MULW {
+ x := v.Args[0]
+ if x.Op != OpARM64SRLconst {
break
}
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
- v.reset(OpARM64MNEGW)
- v.AddArg(x)
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRL)
+ v.AuxInt = c
v.AddArg(y)
return true
}
- // match: (NEG (MOVDconst [c]))
- // cond:
- // result: (MOVDconst [-c])
+ // match: (MVN x:(SRAconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRA [c] y)
for {
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDconst {
+ x := v.Args[0]
+ if x.Op != OpARM64SRAconst {
+ break
+ }
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRA)
+ v.AuxInt = c
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftLL_0(v *Value) bool {
+ // match: (MVNshiftLL (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = ^int64(uint64(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRA_0(v *Value) bool {
+ // match: (MVNshiftRA (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [^(c>>uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = ^(c >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRL_0(v *Value) bool {
+ // match: (MVNshiftRL (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = ^int64(uint64(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEG_0(v *Value) bool {
+ // match: (NEG (MUL x y))
+ // cond:
+ // result: (MNEG x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MUL {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARM64MNEG)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG (MULW x y))
+ // cond:
+ // result: (MNEGW x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MULW {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARM64MNEGW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [-c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
break
}
c := v_0.AuxInt
v.AuxInt = -c
return true
}
+ // match: (NEG x:(SLLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftLL [c] y)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64SLLconst {
+ break
+ }
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftLL)
+ v.AuxInt = c
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG x:(SRLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftRL [c] y)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64SRLconst {
+ break
+ }
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftRL)
+ v.AuxInt = c
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG x:(SRAconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftRA [c] y)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64SRAconst {
+ break
+ }
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftRA)
+ v.AuxInt = c
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftLL_0(v *Value) bool {
+ // match: (NEGshiftLL (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [-int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = -int64(uint64(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftRA_0(v *Value) bool {
+ // match: (NEGshiftRA (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [-(c>>uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = -(c >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftRL_0(v *Value) bool {
+ // match: (NEGshiftRL (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [-int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = -int64(uint64(c) >> uint64(d))
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (TST x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftLL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftLL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (TST x1:(SLLconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftLL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftLL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (TST x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftRL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (TST x1:(SRLconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftRL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (TST x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRA x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftRA)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (TST x1:(SRAconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRA x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftRA)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64TSTW_0(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64TSTshiftLL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (TSTshiftLL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (TSTconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (TSTconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRA_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (TSTshiftRA (MOVDconst [c]) x [d])
+ // cond:
+ // result: (TSTconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (TSTconst x [c>>uint64(d)])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = c >> uint64(d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (TSTshiftRL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (TSTconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (TSTconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64UBFIZ_0(v *Value) bool {
// match: (UBFIZ [bfc] (SLLconst [sc] x))
// cond: sc < getARM64BFwidth(bfc)