(Select0 (Mul64uover x y)) => (MUL x y)
(Select1 (Mul64uover x y)) => (NotEqual (CMPconst (UMULH <typ.UInt64> x y) [0]))
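For context, the two existing rules above (from the ARM64 rules file) lower the multiply-with-overflow-check op: Select0 of Mul64uover is the low 64 bits of the product, and Select1 is the overflow flag, which arm64 derives by testing whether the high half of the product (UMULH) is nonzero. A minimal Go sketch of that semantics (illustrative only, not compiler code; the helper name is mine):

package sketch

import "math/bits"

// mul64uover mirrors Select0/Select1 of Mul64uover: the low 64 bits of x*y,
// plus a flag that is true exactly when the product does not fit in 64 bits,
// i.e. when the high half is nonzero.
func mul64uover(x, y uint64) (uint64, bool) {
	hi, lo := bits.Mul64(x, y)
	return lo, hi != 0
}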
+
+// 32 x 32 -> 64 bit multiplication
+(MUL r:(MOVWUreg x) s:(MOVWUreg y)) && r.Uses == 1 && s.Uses == 1 => (UMULL x y)
+(MUL r:(MOVWreg x) s:(MOVWreg y)) && r.Uses == 1 && s.Uses == 1 => (MULL x y)
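The two added rules are the core of the change: a 64-bit MUL whose operands are zero-extended (MOVWUreg) or sign-extended (MOVWreg) 32-bit values can be replaced by a single widening multiply, UMULL for the unsigned case and MULL (emitted as SMULL) for the signed case, so the separate extension instructions can go away. The Uses == 1 conditions presumably restrict the rewrite to the case where each extension feeds nothing but this multiply and therefore becomes dead. A hedged source-level illustration (function names are mine, not part of the CL):

package sketch

// widen32 is the shape the new rules target: both operands are widened only to
// feed the 64-bit multiply, so one UMULL can replace MOVWU+MOVWU+MUL.
func widen32(a, b uint32) uint64 {
	return uint64(a) * uint64(b)
}

// widen32Reused fails the Uses == 1 condition: the widened values are also
// returned, so the zero-extensions stay live and the rule leaves the plain
// 64-bit MUL in place.
func widen32Reused(a, b uint32) (uint64, uint64, uint64) {
	x, y := uint64(a), uint64(b)
	return x * y, x, y
}

The corresponding generated matcher (rewriteARM64.go) follows.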
}
break
}
+ // match: (MUL r:(MOVWUreg x) s:(MOVWUreg y))
+ // cond: r.Uses == 1 && s.Uses == 1
+ // result: (UMULL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r := v_0
+ if r.Op != OpARM64MOVWUreg {
+ continue
+ }
+ x := r.Args[0]
+ s := v_1
+ if s.Op != OpARM64MOVWUreg {
+ continue
+ }
+ y := s.Args[0]
+ if !(r.Uses == 1 && s.Uses == 1) {
+ continue
+ }
+ v.reset(OpARM64UMULL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (MUL r:(MOVWreg x) s:(MOVWreg y))
+ // cond: r.Uses == 1 && s.Uses == 1
+ // result: (MULL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r := v_0
+ if r.Op != OpARM64MOVWreg {
+ continue
+ }
+ x := r.Args[0]
+ s := v_1
+ if s.Op != OpARM64MOVWreg {
+ continue
+ }
+ y := s.Args[0]
+ if !(r.Uses == 1 && s.Uses == 1) {
+ continue
+ }
+ v.reset(OpARM64MULL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
return false
}
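The wrapper loop with the trailing break and the _i0 loop above are the shape the rule generator emits for a commutative operand match: the inner loop runs at most twice, swapping v_0 and v_1 on the second pass so both argument orders are tried before giving up. A standalone sketch of that idiom (simplified, not generated code):

package sketch

// tryBothOrders mirrors the _i0 loop in the generated matcher: try the match
// with (a, b), then once more with the operands swapped, and report failure
// only after both orders have been attempted.
func tryBothOrders[T any](a, b T, match func(x, y T) bool) bool {
	for i := 0; i <= 1; i, a, b = i+1, b, a {
		if match(a, b) {
			return true
		}
	}
	return false
}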
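The remaining hunk adds codegen asmcheck tests (under test/codegen): a comment of the form // arm64:"REGEXP" requires the regexp to match the assembly the compiler generates for the enclosing function on that architecture, and a -"REGEXP" entry requires it not to match. So Mul32 must compile to an SMULL with no MOVW (sign-extension) in its output, and Mul32U to a UMULL with no MOVWU (zero-extension). A hypothetical directive of the same form, for illustration only:

package codegen

// mulFull64 is not part of the CL; it only shows the positive and negative
// directive forms used below: a plain 64 x 64 multiply should produce MUL and
// no widening UMULL.
func mulFull64(a, b uint64) uint64 {
	// arm64:"MUL" -"UMULL"
	return a * b
}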
+func Mul32(a, b int32) int64 {
+ // arm64:"SMULL" -"MOVW"
+ return int64(a) * int64(b)
+}
+func Mul32U(a, b uint32) uint64 {
+ // arm64:"UMULL" -"MOVWU"
+ return uint64(a) * uint64(b)
+}
+
// -------------- //
// Division //
// -------------- //