// bit length
(BitLen32 <t> x) => (RSBconst [32] (CLZ <t> x))
+(BitLen(16|8) x) => (BitLen32 (ZeroExt(16|8)to32 x))
// byte swap for ARMv5
// let (a, b, c, d) be the bytes of x from high to low
(BitLen64 x) => (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
(BitLen32 x) => (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
+(BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))
(Bswap64 ...) => (REV ...)
(Bswap32 ...) => (REVW ...)
(BitLen64 <t> x) => (NEGV <t> (SUBVconst <t> [64] (CLZV <t> x)))
(BitLen32 <t> x) => (NEGV <t> (SUBVconst <t> [32] (CLZW <t> x)))
+(BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))
(Bswap(16|32|64) ...) => (REVB(2H|2W|V) ...)
(BitRev8 ...) => (BITREV4B ...)
(BitRev16 <t> x) => (REVB2H (BITREV4B <t> x))
// bit length
(BitLen32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> x))
+(BitLen(16|8) x) => (BitLen32 (ZeroExt(16|8)to32 x))
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(BitLen64 x) => (SUBFCconst [64] (CNTLZD <typ.Int> x))
(BitLen32 x) => (SUBFCconst [32] (CNTLZW <typ.Int> x))
+(BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))
(PopCount64 ...) => (POPCNTD ...)
(PopCount(32|16|8) x) => (POPCNT(W|W|B) (MOV(W|H|B)Zreg x))
(Ctz32 <t> x) => (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
(BitLen64 x) => (SUB (MOVDconst [64]) (FLOGR x))
+(BitLen(32|16|8) x) => (BitLen64 (ZeroExt(32|16|8)to64 x))
// POPCNT treats the input register as a vector of 8 bytes, producing
// a population count for each individual byte. For inputs larger than
(Ctz(64|32|16|8)NonZero ...) => (I64Ctz ...)
(BitLen64 x) => (I64Sub (I64Const [64]) (I64Clz x))
+(BitLen(32|16|8) x) => (BitLen64 (ZeroExt(32|16|8)to64 x))
(PopCount64 ...) => (I64Popcnt ...)
(PopCount32 x) => (I64Popcnt (ZeroExt32to64 x))
return true
case OpAvg32u:
return rewriteValueARM_OpAvg32u(v)
+ case OpBitLen16:
+ return rewriteValueARM_OpBitLen16(v)
case OpBitLen32:
return rewriteValueARM_OpBitLen32(v)
+ case OpBitLen8:
+ return rewriteValueARM_OpBitLen8(v)
case OpBswap32:
return rewriteValueARM_OpBswap32(v)
case OpClosureCall:
return true
}
}
+// rewriteValueARM_OpBitLen16 lowers BitLen16 on ARM by zero-extending the
+// operand to 32 bits and deferring to the BitLen32 lowering.
+// NOTE(review): generated by rulegen from the ARM.rules BitLen(16|8) rule —
+// do not hand-edit; regenerate instead.
+func rewriteValueARM_OpBitLen16(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen16 x)
+	// result: (BitLen32 (ZeroExt16to32 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen32)
+		v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueARM_OpBitLen32(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return true
}
}
+// rewriteValueARM_OpBitLen8 lowers BitLen8 on ARM by zero-extending the
+// operand to 32 bits and deferring to the BitLen32 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueARM_OpBitLen8(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen8 x)
+	// result: (BitLen32 (ZeroExt8to32 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen32)
+		v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueARM_OpBswap32(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return true
case OpAvg64u:
return rewriteValueARM64_OpAvg64u(v)
+ case OpBitLen16:
+ return rewriteValueARM64_OpBitLen16(v)
case OpBitLen32:
return rewriteValueARM64_OpBitLen32(v)
case OpBitLen64:
return rewriteValueARM64_OpBitLen64(v)
+ case OpBitLen8:
+ return rewriteValueARM64_OpBitLen8(v)
case OpBitRev16:
return rewriteValueARM64_OpBitRev16(v)
case OpBitRev32:
return true
}
}
+// rewriteValueARM64_OpBitLen16 lowers BitLen16 on ARM64 by zero-extending the
+// operand to 64 bits and deferring to the BitLen64 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueARM64_OpBitLen16(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen16 x)
+	// result: (BitLen64 (ZeroExt16to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueARM64_OpBitLen32(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return true
}
}
+// rewriteValueARM64_OpBitLen8 lowers BitLen8 on ARM64 by zero-extending the
+// operand to 64 bits and deferring to the BitLen64 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueARM64_OpBitLen8(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen8 x)
+	// result: (BitLen64 (ZeroExt8to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueARM64_OpBitRev16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return true
case OpAvg64u:
return rewriteValueLOONG64_OpAvg64u(v)
+ case OpBitLen16:
+ return rewriteValueLOONG64_OpBitLen16(v)
case OpBitLen32:
return rewriteValueLOONG64_OpBitLen32(v)
case OpBitLen64:
return rewriteValueLOONG64_OpBitLen64(v)
+ case OpBitLen8:
+ return rewriteValueLOONG64_OpBitLen8(v)
case OpBitRev16:
return rewriteValueLOONG64_OpBitRev16(v)
case OpBitRev32:
return true
}
}
+// rewriteValueLOONG64_OpBitLen16 lowers BitLen16 on LOONG64 by zero-extending
+// the operand to 64 bits and deferring to the BitLen64 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueLOONG64_OpBitLen16(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen16 x)
+	// result: (BitLen64 (ZeroExt16to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueLOONG64_OpBitLen32(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return true
}
}
+// rewriteValueLOONG64_OpBitLen8 lowers BitLen8 on LOONG64 by zero-extending
+// the operand to 64 bits and deferring to the BitLen64 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueLOONG64_OpBitLen8(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen8 x)
+	// result: (BitLen64 (ZeroExt8to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueLOONG64_OpBitRev16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return true
case OpAvg32u:
return rewriteValueMIPS_OpAvg32u(v)
+ case OpBitLen16:
+ return rewriteValueMIPS_OpBitLen16(v)
case OpBitLen32:
return rewriteValueMIPS_OpBitLen32(v)
+ case OpBitLen8:
+ return rewriteValueMIPS_OpBitLen8(v)
case OpClosureCall:
v.Op = OpMIPSCALLclosure
return true
return true
}
}
+// rewriteValueMIPS_OpBitLen16 lowers BitLen16 on 32-bit MIPS by
+// zero-extending the operand to 32 bits and deferring to the BitLen32
+// lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueMIPS_OpBitLen16(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen16 x)
+	// result: (BitLen32 (ZeroExt16to32 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen32)
+		v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueMIPS_OpBitLen32(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return true
}
}
+// rewriteValueMIPS_OpBitLen8 lowers BitLen8 on 32-bit MIPS by zero-extending
+// the operand to 32 bits and deferring to the BitLen32 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueMIPS_OpBitLen8(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen8 x)
+	// result: (BitLen32 (ZeroExt8to32 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen32)
+		v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueMIPS_OpCom16(v *Value) bool {
v_0 := v.Args[0]
// match: (Com16 x)
return rewriteValuePPC64_OpAtomicStoreRel64(v)
case OpAvg64u:
return rewriteValuePPC64_OpAvg64u(v)
+ case OpBitLen16:
+ return rewriteValuePPC64_OpBitLen16(v)
case OpBitLen32:
return rewriteValuePPC64_OpBitLen32(v)
case OpBitLen64:
return rewriteValuePPC64_OpBitLen64(v)
+ case OpBitLen8:
+ return rewriteValuePPC64_OpBitLen8(v)
case OpBswap16:
return rewriteValuePPC64_OpBswap16(v)
case OpBswap32:
return true
}
}
+// rewriteValuePPC64_OpBitLen16 lowers BitLen16 on PPC64 by zero-extending the
+// operand to 64 bits and deferring to the BitLen64 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValuePPC64_OpBitLen16(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen16 x)
+	// result: (BitLen64 (ZeroExt16to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValuePPC64_OpBitLen32(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return true
}
}
+// rewriteValuePPC64_OpBitLen8 lowers BitLen8 on PPC64 by zero-extending the
+// operand to 64 bits and deferring to the BitLen64 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValuePPC64_OpBitLen8(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen8 x)
+	// result: (BitLen64 (ZeroExt8to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValuePPC64_OpBswap16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return rewriteValueS390X_OpAtomicStoreRel32(v)
case OpAvg64u:
return rewriteValueS390X_OpAvg64u(v)
+ case OpBitLen16:
+ return rewriteValueS390X_OpBitLen16(v)
+ case OpBitLen32:
+ return rewriteValueS390X_OpBitLen32(v)
case OpBitLen64:
return rewriteValueS390X_OpBitLen64(v)
+ case OpBitLen8:
+ return rewriteValueS390X_OpBitLen8(v)
case OpBswap16:
return rewriteValueS390X_OpBswap16(v)
case OpBswap32:
return true
}
}
+// rewriteValueS390X_OpBitLen16 lowers BitLen16 on s390x by zero-extending the
+// operand to 64 bits and deferring to the BitLen64 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueS390X_OpBitLen16(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen16 x)
+	// result: (BitLen64 (ZeroExt16to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpBitLen32 lowers BitLen32 on s390x by zero-extending the
+// operand to 64 bits and deferring to the BitLen64 lowering (FLOGR operates
+// on 64-bit values — see the BitLen64 rule in the same file).
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueS390X_OpBitLen32(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen32 x)
+	// result: (BitLen64 (ZeroExt32to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueS390X_OpBitLen64(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return true
}
}
+// rewriteValueS390X_OpBitLen8 lowers BitLen8 on s390x by zero-extending the
+// operand to 64 bits and deferring to the BitLen64 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueS390X_OpBitLen8(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen8 x)
+	// result: (BitLen64 (ZeroExt8to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueS390X_OpBswap16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
case OpAndB:
v.Op = OpWasmI64And
return true
+ case OpBitLen16:
+ return rewriteValueWasm_OpBitLen16(v)
+ case OpBitLen32:
+ return rewriteValueWasm_OpBitLen32(v)
case OpBitLen64:
return rewriteValueWasm_OpBitLen64(v)
+ case OpBitLen8:
+ return rewriteValueWasm_OpBitLen8(v)
case OpCeil:
v.Op = OpWasmF64Ceil
return true
return true
}
}
+// rewriteValueWasm_OpBitLen16 lowers BitLen16 on Wasm by zero-extending the
+// operand to 64 bits and deferring to the BitLen64 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueWasm_OpBitLen16(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen16 x)
+	// result: (BitLen64 (ZeroExt16to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueWasm_OpBitLen32 lowers BitLen32 on Wasm by zero-extending the
+// operand to 64 bits and deferring to the BitLen64 lowering (I64Clz operates
+// on 64-bit values — see the BitLen64 rule in the same file).
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueWasm_OpBitLen32(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen32 x)
+	// result: (BitLen64 (ZeroExt32to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueWasm_OpBitLen64(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
return true
}
}
+// rewriteValueWasm_OpBitLen8 lowers BitLen8 on Wasm by zero-extending the
+// operand to 64 bits and deferring to the BitLen64 lowering.
+// NOTE(review): generated by rulegen — do not hand-edit; regenerate instead.
+func rewriteValueWasm_OpBitLen8(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (BitLen8 x)
+	// result: (BitLen64 (ZeroExt8to64 x))
+	for {
+		x := v_0
+		v.reset(OpBitLen64)
+		v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
func rewriteValueWasm_OpCom16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
- sys.AMD64, sys.ARM64, sys.ARM, sys.Loong64, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.PPC64, sys.S390X, sys.Wasm)
addF("math/bits", "Len32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
},
- sys.AMD64, sys.ARM64, sys.Loong64, sys.PPC64)
- addF("math/bits", "Len32",
- func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
- if s.config.PtrSize == 4 {
- return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
- }
- x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
- },
- sys.ARM, sys.S390X, sys.MIPS, sys.Wasm)
- addF("math/bits", "Len16",
- func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
- if s.config.PtrSize == 4 {
- x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
- return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
- }
- x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
- },
- sys.ARM64, sys.ARM, sys.Loong64, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.PPC64, sys.S390X, sys.Wasm)
addF("math/bits", "Len16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
},
- sys.AMD64)
- addF("math/bits", "Len8",
- func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
- if s.config.PtrSize == 4 {
- x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
- return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
- }
- x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
- },
- sys.ARM64, sys.ARM, sys.Loong64, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.PPC64, sys.S390X, sys.Wasm)
addF("math/bits", "Len8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
},
- sys.AMD64)
+ sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.PPC64, sys.S390X, sys.Wasm)
addF("math/bits", "Len",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
}
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
- sys.AMD64, sys.ARM64, sys.ARM, sys.Loong64, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.PPC64, sys.S390X, sys.Wasm)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {