func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
},
- sys.AMD64)
+ sys.AMD64, sys.ARM64)
addF("math/bits", "Len32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
		if s.config.PtrSize == 4 {
			return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
		}
		x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
		return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
	},
- sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
+ sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "Len16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
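For reference, the registration above now lowers Len32 straight to OpBitLen32 on amd64 and arm64, while the remaining architectures keep the fallback that zero-extends the argument and reuses OpBitLen64. A minimal standalone sketch, using only math/bits, of why the two paths agree:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Direct path vs. the widen-to-64 fallback used by architectures
	// without a dedicated 32-bit bit-length op: zero-extension adds only
	// leading zeros, so the bit length is unchanged.
	for _, x := range []uint32{0, 1, 0x8000, 0xffffffff} {
		direct := bits.Len32(x)
		widened := bits.Len64(uint64(x))
		fmt.Println(x, direct, widened, direct == widened) // always equal
	}
}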
return rewriteValueARM64_OpAtomicStorePtrNoWB_0(v)
case OpAvg64u:
return rewriteValueARM64_OpAvg64u_0(v)
+ case OpBitLen32:
+ return rewriteValueARM64_OpBitLen32_0(v)
case OpBitLen64:
return rewriteValueARM64_OpBitLen64_0(v)
case OpBitRev16:
return true
}
}
+func rewriteValueARM64_OpBitLen32_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (BitLen32 x)
+ // cond:
+ // result: (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = 32
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64CLZW, typ.Int)
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
func rewriteValueARM64_OpBitLen64_0(v *Value) bool {
b := v.Block
_ = b
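The generated rewrite encodes the identity Len32(x) = 32 - clz32(x): CLZW counts the leading zeros of the low 32 bits, and because CLZW returns 32 for a zero input, x == 0 correctly yields a bit length of 0 with no branch. A small standalone sketch of that identity, with bits.LeadingZeros32 standing in for CLZW:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Stand-in for the arm64 lowering SUB (MOVDconst [32]) (CLZW x),
	// with bits.LeadingZeros32 playing the role of CLZW.
	clzw := func(x uint32) int { return bits.LeadingZeros32(x) }
	for _, x := range []uint32{0, 1, 0x7fffffff, 0xffffffff} {
		fmt.Println(x, bits.Len32(x) == 32-clzw(x)) // true for every x
	}
}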
func LeadingZeros32(n uint32) int {
// amd64:"BSRQ","LEAQ",-"CMOVQEQ"
// s390x:"FLOGR"
- // arm:"CLZ" arm64:"CLZ"
+ // arm:"CLZ" arm64:"CLZW"
// mips:"CLZ"
return bits.LeadingZeros32(n)
}
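math/bits implements LeadingZeros32(x) as 32 - Len32(x), so once Len32 is lowered through the 32-bit CLZW instruction this function exercises CLZW as well, which is why the arm64 expectation moves from CLZ to CLZW. A short standalone sketch of that relationship:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// LeadingZeros32 is 32 - Len32 by definition, so the arm64 lowering of
	// Len32 (32 - CLZW) feeds straight into this function's code generation.
	for _, n := range []uint32{0, 1, 1 << 31, 0xffffffff} {
		fmt.Println(n, bits.LeadingZeros32(n), 32-bits.Len32(n)) // equal columns
	}
}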