addF(simdPackage, "Float32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64)
addF(simdPackage, "LoadMaskedFloat32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64)
addF(simdPackage, "Float32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64)
+ addF(simdPackage, "LoadMaskedFloat32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64)
+ addF(simdPackage, "Float32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64)
addF(simdPackage, "LoadMaskedFloat64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64)
addF(simdPackage, "Float64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64)
addF(simdPackage, "LoadMaskedFloat64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64)
addF(simdPackage, "Float64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64)
+ addF(simdPackage, "LoadMaskedFloat64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64)
+ addF(simdPackage, "Float64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64)
+ addF(simdPackage, "LoadMaskedInt8x64", simdMaskedLoad(ssa.OpLoadMasked8), sys.AMD64)
+ addF(simdPackage, "Int8x64.StoreMasked", simdMaskedStore(ssa.OpStoreMasked8), sys.AMD64)
+ addF(simdPackage, "LoadMaskedInt16x32", simdMaskedLoad(ssa.OpLoadMasked16), sys.AMD64)
+ addF(simdPackage, "Int16x32.StoreMasked", simdMaskedStore(ssa.OpStoreMasked16), sys.AMD64)
addF(simdPackage, "LoadMaskedInt32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64)
addF(simdPackage, "Int32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64)
addF(simdPackage, "LoadMaskedInt32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64)
addF(simdPackage, "Int32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64)
+ addF(simdPackage, "LoadMaskedInt32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64)
+ addF(simdPackage, "Int32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64)
addF(simdPackage, "LoadMaskedInt64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64)
addF(simdPackage, "Int64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64)
addF(simdPackage, "LoadMaskedInt64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64)
addF(simdPackage, "Int64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64)
+ addF(simdPackage, "LoadMaskedInt64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64)
+ addF(simdPackage, "Int64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64)
+ addF(simdPackage, "LoadMaskedUint8x64", simdMaskedLoad(ssa.OpLoadMasked8), sys.AMD64)
+ addF(simdPackage, "Uint8x64.StoreMasked", simdMaskedStore(ssa.OpStoreMasked8), sys.AMD64)
+ addF(simdPackage, "LoadMaskedUint16x32", simdMaskedLoad(ssa.OpLoadMasked16), sys.AMD64)
+ addF(simdPackage, "Uint16x32.StoreMasked", simdMaskedStore(ssa.OpStoreMasked16), sys.AMD64)
addF(simdPackage, "LoadMaskedUint32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64)
addF(simdPackage, "Uint32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64)
addF(simdPackage, "LoadMaskedUint32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64)
addF(simdPackage, "Uint32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64)
+ addF(simdPackage, "LoadMaskedUint32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64)
+ addF(simdPackage, "Uint32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64)
addF(simdPackage, "LoadMaskedUint64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64)
addF(simdPackage, "Uint64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64)
addF(simdPackage, "LoadMaskedUint64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64)
addF(simdPackage, "Uint64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64)
+ addF(simdPackage, "LoadMaskedUint64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64)
+ addF(simdPackage, "Uint64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64)
+ addF(simdPackage, "LoadMaskedMask8x64", simdMaskedLoad(ssa.OpLoadMasked8), sys.AMD64)
+ addF(simdPackage, "Mask8x64.StoreMasked", simdMaskedStore(ssa.OpStoreMasked8), sys.AMD64)
+ addF(simdPackage, "LoadMaskedMask16x32", simdMaskedLoad(ssa.OpLoadMasked16), sys.AMD64)
+ addF(simdPackage, "Mask16x32.StoreMasked", simdMaskedStore(ssa.OpStoreMasked16), sys.AMD64)
+ addF(simdPackage, "LoadMaskedMask32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64)
+ addF(simdPackage, "Mask32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64)
+ addF(simdPackage, "LoadMaskedMask64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64)
+ addF(simdPackage, "Mask64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64)
addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64)
// LoadMaskedFloat32x4 loads a Float32x4 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedFloat32x4(y *[4]float32, mask Mask32x4) Float32x4
// StoreMasked stores a Float32x4 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func (x Float32x4) StoreMasked(y *[4]float32, mask Mask32x4)
// LoadMaskedFloat64x2 loads a Float64x2 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedFloat64x2(y *[2]float64, mask Mask64x2) Float64x2
// StoreMasked stores a Float64x2 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func (x Float64x2) StoreMasked(y *[2]float64, mask Mask64x2)
// LoadMaskedInt32x4 loads a Int32x4 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedInt32x4(y *[4]int32, mask Mask32x4) Int32x4
// StoreMasked stores a Int32x4 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func (x Int32x4) StoreMasked(y *[4]int32, mask Mask32x4)
// LoadMaskedInt64x2 loads a Int64x2 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedInt64x2(y *[2]int64, mask Mask64x2) Int64x2
// StoreMasked stores a Int64x2 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func (x Int64x2) StoreMasked(y *[2]int64, mask Mask64x2)
// LoadMaskedUint32x4 loads a Uint32x4 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedUint32x4(y *[4]uint32, mask Mask32x4) Uint32x4
// StoreMasked stores a Uint32x4 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func (x Uint32x4) StoreMasked(y *[4]uint32, mask Mask32x4)
// LoadMaskedUint64x2 loads a Uint64x2 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedUint64x2(y *[2]uint64, mask Mask64x2) Uint64x2
// StoreMasked stores a Uint64x2 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func (x Uint64x2) StoreMasked(y *[2]uint64, mask Mask64x2)
// Mask8x16FromBits constructs a Mask8x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 16 bits of y are used.
+//
+// Asm: KMOVB, CPU Feature: AVX512
func Mask8x16FromBits(y uint16) Mask8x16
// Mask16x8 is a 128-bit SIMD vector of 8 int16
// Mask16x8FromBits constructs a Mask16x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 8 bits of y are used.
+//
+// Asm: KMOVW, CPU Feature: AVX512
func Mask16x8FromBits(y uint8) Mask16x8
// Mask32x4 is a 128-bit SIMD vector of 4 int32
// Mask32x4FromBits constructs a Mask32x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 4 bits of y are used.
+//
+// Asm: KMOVD, CPU Feature: AVX512
func Mask32x4FromBits(y uint8) Mask32x4
// Mask64x2 is a 128-bit SIMD vector of 2 int64
// Mask64x2FromBits constructs a Mask64x2 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 2 bits of y are used.
+//
+// Asm: KMOVQ, CPU Feature: AVX512
func Mask64x2FromBits(y uint8) Mask64x2
// v256 is a tag type that tells the compiler that this is really 256-bit SIMD
// LoadMaskedFloat32x8 loads a Float32x8 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedFloat32x8(y *[8]float32, mask Mask32x8) Float32x8
// StoreMasked stores a Float32x8 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func (x Float32x8) StoreMasked(y *[8]float32, mask Mask32x8)
// LoadMaskedFloat64x4 loads a Float64x4 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedFloat64x4(y *[4]float64, mask Mask64x4) Float64x4
// StoreMasked stores a Float64x4 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func (x Float64x4) StoreMasked(y *[4]float64, mask Mask64x4)
// LoadMaskedInt32x8 loads a Int32x8 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedInt32x8(y *[8]int32, mask Mask32x8) Int32x8
// StoreMasked stores a Int32x8 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func (x Int32x8) StoreMasked(y *[8]int32, mask Mask32x8)
// LoadMaskedInt64x4 loads a Int64x4 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedInt64x4(y *[4]int64, mask Mask64x4) Int64x4
// StoreMasked stores a Int64x4 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func (x Int64x4) StoreMasked(y *[4]int64, mask Mask64x4)
// LoadMaskedUint32x8 loads a Uint32x8 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedUint32x8(y *[8]uint32, mask Mask32x8) Uint32x8
// StoreMasked stores a Uint32x8 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVD, CPU Feature: AVX2
+//
//go:noescape
func (x Uint32x8) StoreMasked(y *[8]uint32, mask Mask32x8)
// LoadMaskedUint64x4 loads a Uint64x4 from an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func LoadMaskedUint64x4(y *[4]uint64, mask Mask64x4) Uint64x4
// StoreMasked stores a Uint64x4 to an array,
// at those elements enabled by mask
//
+// Asm: VMASKMOVQ, CPU Feature: AVX2
+//
//go:noescape
func (x Uint64x4) StoreMasked(y *[4]uint64, mask Mask64x4)
// Mask8x32FromBits constructs a Mask8x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 32 bits of y are used.
+//
+// Asm: KMOVB, CPU Feature: AVX512
func Mask8x32FromBits(y uint32) Mask8x32
// Mask16x16 is a 256-bit SIMD vector of 16 int16
// Mask16x16FromBits constructs a Mask16x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 16 bits of y are used.
+//
+// Asm: KMOVW, CPU Feature: AVX512
func Mask16x16FromBits(y uint16) Mask16x16
// Mask32x8 is a 256-bit SIMD vector of 8 int32
// Mask32x8FromBits constructs a Mask32x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 8 bits of y are used.
+//
+// Asm: KMOVD, CPU Feature: AVX512
func Mask32x8FromBits(y uint8) Mask32x8
// Mask64x4 is a 256-bit SIMD vector of 4 int64
// Mask64x4FromBits constructs a Mask64x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 4 bits of y are used.
+//
+// Asm: KMOVQ, CPU Feature: AVX512
func Mask64x4FromBits(y uint8) Mask64x4
// v512 is a tag type that tells the compiler that this is really 512-bit SIMD
//go:noescape
func (x Float32x16) Store(y *[16]float32)
+// LoadMaskedFloat32x16 loads a Float32x16 from an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU32.Z, CPU Feature: AVX512
+//
+//go:noescape
+func LoadMaskedFloat32x16(y *[16]float32, mask Mask32x16) Float32x16
+
+// StoreMasked stores a Float32x16 to an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU32, CPU Feature: AVX512
+//
+//go:noescape
+func (x Float32x16) StoreMasked(y *[16]float32, mask Mask32x16)
+
// Float64x8 is a 512-bit SIMD vector of 8 float64
type Float64x8 struct {
float64x8 v512
//go:noescape
func (x Float64x8) Store(y *[8]float64)
+// LoadMaskedFloat64x8 loads a Float64x8 from an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU64.Z, CPU Feature: AVX512
+//
+//go:noescape
+func LoadMaskedFloat64x8(y *[8]float64, mask Mask64x8) Float64x8
+
+// StoreMasked stores a Float64x8 to an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU64, CPU Feature: AVX512
+//
+//go:noescape
+func (x Float64x8) StoreMasked(y *[8]float64, mask Mask64x8)
+
// Int8x64 is a 512-bit SIMD vector of 64 int8
type Int8x64 struct {
int8x64 v512
//go:noescape
func (x Int8x64) Store(y *[64]int8)
+// LoadMaskedInt8x64 loads a Int8x64 from an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU8.Z, CPU Feature: AVX512
+//
+//go:noescape
+func LoadMaskedInt8x64(y *[64]int8, mask Mask8x64) Int8x64
+
+// StoreMasked stores a Int8x64 to an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU8, CPU Feature: AVX512
+//
+//go:noescape
+func (x Int8x64) StoreMasked(y *[64]int8, mask Mask8x64)
+
// Int16x32 is a 512-bit SIMD vector of 32 int16
type Int16x32 struct {
int16x32 v512
//go:noescape
func (x Int16x32) Store(y *[32]int16)
+// LoadMaskedInt16x32 loads a Int16x32 from an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU16.Z, CPU Feature: AVX512
+//
+//go:noescape
+func LoadMaskedInt16x32(y *[32]int16, mask Mask16x32) Int16x32
+
+// StoreMasked stores a Int16x32 to an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU16, CPU Feature: AVX512
+//
+//go:noescape
+func (x Int16x32) StoreMasked(y *[32]int16, mask Mask16x32)
+
// Int32x16 is a 512-bit SIMD vector of 16 int32
type Int32x16 struct {
int32x16 v512
//go:noescape
func (x Int32x16) Store(y *[16]int32)
+// LoadMaskedInt32x16 loads a Int32x16 from an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU32.Z, CPU Feature: AVX512
+//
+//go:noescape
+func LoadMaskedInt32x16(y *[16]int32, mask Mask32x16) Int32x16
+
+// StoreMasked stores a Int32x16 to an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU32, CPU Feature: AVX512
+//
+//go:noescape
+func (x Int32x16) StoreMasked(y *[16]int32, mask Mask32x16)
+
// Int64x8 is a 512-bit SIMD vector of 8 int64
type Int64x8 struct {
int64x8 v512
//go:noescape
func (x Int64x8) Store(y *[8]int64)
+// LoadMaskedInt64x8 loads a Int64x8 from an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU64.Z, CPU Feature: AVX512
+//
+//go:noescape
+func LoadMaskedInt64x8(y *[8]int64, mask Mask64x8) Int64x8
+
+// StoreMasked stores a Int64x8 to an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU64, CPU Feature: AVX512
+//
+//go:noescape
+func (x Int64x8) StoreMasked(y *[8]int64, mask Mask64x8)
+
// Uint8x64 is a 512-bit SIMD vector of 64 uint8
type Uint8x64 struct {
uint8x64 v512
//go:noescape
func (x Uint8x64) Store(y *[64]uint8)
+// LoadMaskedUint8x64 loads a Uint8x64 from an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU8.Z, CPU Feature: AVX512
+//
+//go:noescape
+func LoadMaskedUint8x64(y *[64]uint8, mask Mask8x64) Uint8x64
+
+// StoreMasked stores a Uint8x64 to an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU8, CPU Feature: AVX512
+//
+//go:noescape
+func (x Uint8x64) StoreMasked(y *[64]uint8, mask Mask8x64)
+
// Uint16x32 is a 512-bit SIMD vector of 32 uint16
type Uint16x32 struct {
uint16x32 v512
//go:noescape
func (x Uint16x32) Store(y *[32]uint16)
+// LoadMaskedUint16x32 loads a Uint16x32 from an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU16.Z, CPU Feature: AVX512
+//
+//go:noescape
+func LoadMaskedUint16x32(y *[32]uint16, mask Mask16x32) Uint16x32
+
+// StoreMasked stores a Uint16x32 to an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU16, CPU Feature: AVX512
+//
+//go:noescape
+func (x Uint16x32) StoreMasked(y *[32]uint16, mask Mask16x32)
+
// Uint32x16 is a 512-bit SIMD vector of 16 uint32
type Uint32x16 struct {
uint32x16 v512
//go:noescape
func (x Uint32x16) Store(y *[16]uint32)
+// LoadMaskedUint32x16 loads a Uint32x16 from an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU32.Z, CPU Feature: AVX512
+//
+//go:noescape
+func LoadMaskedUint32x16(y *[16]uint32, mask Mask32x16) Uint32x16
+
+// StoreMasked stores a Uint32x16 to an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU32, CPU Feature: AVX512
+//
+//go:noescape
+func (x Uint32x16) StoreMasked(y *[16]uint32, mask Mask32x16)
+
// Uint64x8 is a 512-bit SIMD vector of 8 uint64
type Uint64x8 struct {
uint64x8 v512
//go:noescape
func (x Uint64x8) Store(y *[8]uint64)
+// LoadMaskedUint64x8 loads a Uint64x8 from an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU64.Z, CPU Feature: AVX512
+//
+//go:noescape
+func LoadMaskedUint64x8(y *[8]uint64, mask Mask64x8) Uint64x8
+
+// StoreMasked stores a Uint64x8 to an array,
+// at those elements enabled by mask
+//
+// Asm: VMOVDQU64, CPU Feature: AVX512
+//
+//go:noescape
+func (x Uint64x8) StoreMasked(y *[8]uint64, mask Mask64x8)
+
// Mask8x64 is a 512-bit SIMD vector of 64 int8
type Mask8x64 struct {
int8x64 v512
// Mask8x64FromBits constructs a Mask8x64 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 64 bits of y are used.
+//
+// Asm: KMOVB, CPU Feature: AVX512
func Mask8x64FromBits(y uint64) Mask8x64
// Mask16x32 is a 512-bit SIMD vector of 32 int16
// Mask16x32FromBits constructs a Mask16x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 32 bits of y are used.
+//
+// Asm: KMOVW, CPU Feature: AVX512
func Mask16x32FromBits(y uint32) Mask16x32
// Mask32x16 is a 512-bit SIMD vector of 16 int32
// Mask32x16FromBits constructs a Mask32x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 16 bits of y are used.
+//
+// Asm: KMOVD, CPU Feature: AVX512
func Mask32x16FromBits(y uint16) Mask32x16
// Mask64x8 is a 512-bit SIMD vector of 8 int64
// Mask64x8FromBits constructs a Mask64x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset.
// Only the lower 8 bits of y are used.
+//
+// Asm: KMOVQ, CPU Feature: AVX512
func Mask64x8FromBits(y uint8) Mask64x8