p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.OpAMD64KMOVQ, ssa.OpAMD64KMOVD, ssa.OpAMD64KMOVW, ssa.OpAMD64KMOVB:
+ // See also ssa.OpAMD64KMOVQload
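+ // This moves an integer bitmap from a GP register into a K (mask)
+ // register, e.g. KMOVW AX, K1.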
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
default:
if !ssaGenSIMDValue(s, v) {
v.Fatalf("genValue not implemented: %s", v.LongString())
(Select0 a:(ADD(Q|L)constflags [c] x)) && a.Uses == 1 => (ADD(Q|L)const [c] x)
// XXX SIMD
-(LoadMask8x16 <t> ptr mem) => (VPMOVMToVec8x16 <types.TypeVec128> (KMOVQload <t> ptr mem))
-(LoadMask8x32 <t> ptr mem) => (VPMOVMToVec8x32 <types.TypeVec256> (KMOVQload <t> ptr mem))
-(LoadMask8x64 <t> ptr mem) => (VPMOVMToVec8x64 <types.TypeVec512> (KMOVQload <t> ptr mem))
-(LoadMask16x8 <t> ptr mem) => (VPMOVMToVec16x8 <types.TypeVec128> (KMOVQload <t> ptr mem))
-(LoadMask16x16 <t> ptr mem) => (VPMOVMToVec16x16 <types.TypeVec256> (KMOVQload <t> ptr mem))
-(LoadMask16x32 <t> ptr mem) => (VPMOVMToVec16x32 <types.TypeVec512> (KMOVQload <t> ptr mem))
+// Mask loads
+(LoadMask8x16 <t> ptr mem) => (VPMOVMToVec8x16 <types.TypeVec128> (KMOVQload <types.TypeMask> ptr mem))
+(LoadMask8x32 <t> ptr mem) => (VPMOVMToVec8x32 <types.TypeVec256> (KMOVQload <types.TypeMask> ptr mem))
+(LoadMask8x64 <t> ptr mem) => (VPMOVMToVec8x64 <types.TypeVec512> (KMOVQload <types.TypeMask> ptr mem))
+
-(LoadMask32x4 <t> ptr mem) => (VPMOVMToVec32x4 <types.TypeVec128> (KMOVQload <t> ptr mem))
-(LoadMask32x8 <t> ptr mem) => (VPMOVMToVec32x8 <types.TypeVec256> (KMOVQload <t> ptr mem))
-(LoadMask32x16 <t> ptr mem) => (VPMOVMToVec32x16 <types.TypeVec512> (KMOVQload <t> ptr mem))
+(LoadMask16x8 <t> ptr mem) => (VPMOVMToVec16x8 <types.TypeVec128> (KMOVQload <types.TypeMask> ptr mem))
+(LoadMask16x16 <t> ptr mem) => (VPMOVMToVec16x16 <types.TypeVec256> (KMOVQload <types.TypeMask> ptr mem))
+(LoadMask16x32 <t> ptr mem) => (VPMOVMToVec16x32 <types.TypeVec512> (KMOVQload <types.TypeMask> ptr mem))
+
-(LoadMask64x2 <t> ptr mem) => (VPMOVMToVec64x2 <types.TypeVec128> (KMOVQload <t> ptr mem))
-(LoadMask64x4 <t> ptr mem) => (VPMOVMToVec64x4 <types.TypeVec256> (KMOVQload <t> ptr mem))
-(LoadMask64x8 <t> ptr mem) => (VPMOVMToVec64x8 <types.TypeVec512> (KMOVQload <t> ptr mem))
+(LoadMask32x4 <t> ptr mem) => (VPMOVMToVec32x4 <types.TypeVec128> (KMOVQload <types.TypeMask> ptr mem))
+(LoadMask32x8 <t> ptr mem) => (VPMOVMToVec32x8 <types.TypeVec256> (KMOVQload <types.TypeMask> ptr mem))
+(LoadMask32x16 <t> ptr mem) => (VPMOVMToVec32x16 <types.TypeVec512> (KMOVQload <types.TypeMask> ptr mem))
+
+(LoadMask64x2 <t> ptr mem) => (VPMOVMToVec64x2 <types.TypeVec128> (KMOVQload <types.TypeMask> ptr mem))
+(LoadMask64x4 <t> ptr mem) => (VPMOVMToVec64x4 <types.TypeVec256> (KMOVQload <types.TypeMask> ptr mem))
+(LoadMask64x8 <t> ptr mem) => (VPMOVMToVec64x8 <types.TypeVec512> (KMOVQload <types.TypeMask> ptr mem))
(StoreMask8x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x16ToM <t> val) mem)
(StoreMask8x32 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x32ToM <t> val) mem)
(StoreMask64x4 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x4ToM <t> val) mem)
(StoreMask64x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x8ToM <t> val) mem)
+// TODO: Is this correct? Should we just do it all from 64 bits?
+
+// Mask conversions (from integers)
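+// Each conversion moves the integer bitmap into a K register with the KMOV
+// of the matching width, then expands it to a per-lane vector mask with
+// VPMOVMToVec.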
+(Cvt16toMask8x16 <t> x) => (VPMOVMToVec8x16 <types.TypeVec128> (KMOVW <types.TypeMask> x))
+(Cvt32toMask8x32 <t> x) => (VPMOVMToVec8x32 <types.TypeVec256> (KMOVD <types.TypeMask> x))
+(Cvt64toMask8x64 <t> x) => (VPMOVMToVec8x64 <types.TypeVec512> (KMOVQ <types.TypeMask> x))
+
+(Cvt8toMask16x8 <t> x) => (VPMOVMToVec16x8 <types.TypeVec128> (KMOVB <types.TypeMask> x))
+(Cvt16toMask16x16 <t> x) => (VPMOVMToVec16x16 <types.TypeVec256> (KMOVW <types.TypeMask> x))
+(Cvt32toMask16x32 <t> x) => (VPMOVMToVec16x32 <types.TypeVec512> (KMOVD <types.TypeMask> x))
+
+(Cvt8toMask32x4 <t> x) => (VPMOVMToVec32x4 <types.TypeVec128> (KMOVB <types.TypeMask> x))
+(Cvt8toMask32x8 <t> x) => (VPMOVMToVec32x8 <types.TypeVec256> (KMOVB <types.TypeMask> x))
+(Cvt16toMask32x16 <t> x) => (VPMOVMToVec32x16 <types.TypeVec512> (KMOVW <types.TypeMask> x))
+
+(Cvt8toMask64x2 <t> x) => (VPMOVMToVec64x2 <types.TypeVec128> (KMOVB <types.TypeMask> x))
+(Cvt8toMask64x4 <t> x) => (VPMOVMToVec64x4 <types.TypeVec256> (KMOVB <types.TypeMask> x))
+(Cvt8toMask64x8 <t> x) => (VPMOVMToVec64x8 <types.TypeVec512> (KMOVB <types.TypeMask> x))
+
+// SIMD vector loads and stores
(Load <t> ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem)
(Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem)
(Load <t> ptr mem) && t.Size() == 64 => (VMOVDQUload512 ptr mem)
(Store {t} ptr val mem) && t.Size() == 64 => (VMOVDQUstore512 ptr val mem)
+// SIMD vector integer-vector-masked loads and stores.
(LoadMasked32 <t> ptr mask mem) && t.Size() == 16 => (VPMASK32load128 ptr mask mem)
(LoadMasked32 <t> ptr mask mem) && t.Size() == 32 => (VPMASK32load256 ptr mask mem)
(LoadMasked64 <t> ptr mask mem) && t.Size() == 16 => (VPMASK64load128 ptr mask mem)
kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly}
kstore = regInfo{inputs: []regMask{gpspsb, mask, 0}}
+ gpk = regInfo{inputs: gponly, outputs: maskonly}
prefreg = regInfo{inputs: []regMask{gpspsbg}}
)
{name: "KMOVQload", argLength: 2, reg: kload, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
{name: "KMOVQstore", argLength: 3, reg: kstore, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"},
+
+ // Move GP directly to mask register
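+ // Unlike KMOVQload/KMOVQstore above, these have no memory argument or aux.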
+ {name: "KMOVQ", argLength: 1, reg: gpk, asm: "KMOVQ"},
+ {name: "KMOVD", argLength: 1, reg: gpk, asm: "KMOVD"},
+ {name: "KMOVW", argLength: 1, reg: gpk, asm: "KMOVW"},
+ {name: "KMOVB", argLength: 1, reg: gpk, asm: "KMOVB"},
}
var AMD64blocks = []blockData{
{name: "StoreMask64x2", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
{name: "StoreMask64x4", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
{name: "StoreMask64x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
+
+ // Convert integers to masks
+ {name: "Cvt16toMask8x16", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt32toMask8x32", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt64toMask8x64", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt8toMask16x8", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt16toMask16x16", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt32toMask16x32", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt8toMask32x4", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt8toMask32x8", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt16toMask32x16", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt8toMask64x2", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt8toMask64x4", argLength: 1}, // arg0 = integer mask value
+ {name: "Cvt8toMask64x8", argLength: 1}, // arg0 = integer mask value
}
//     kind          controls        successors   implicit exit
OpAMD64VZEROALL
OpAMD64KMOVQload
OpAMD64KMOVQstore
+ OpAMD64KMOVQ
+ OpAMD64KMOVD
+ OpAMD64KMOVW
+ OpAMD64KMOVB
OpAMD64VADDPD128
OpAMD64VADDPD256
OpAMD64VADDPD512
OpStoreMask64x2
OpStoreMask64x4
OpStoreMask64x8
+ OpCvt16toMask8x16
+ OpCvt32toMask8x32
+ OpCvt64toMask8x64
+ OpCvt8toMask16x8
+ OpCvt16toMask16x16
+ OpCvt32toMask16x32
+ OpCvt8toMask32x4
+ OpCvt8toMask32x8
+ OpCvt16toMask32x16
+ OpCvt8toMask64x2
+ OpCvt8toMask64x4
+ OpCvt8toMask64x8
OpAbsoluteInt8x16
OpAbsoluteInt8x32
OpAbsoluteInt8x64
},
},
},
+ {
+ name: "KMOVQ",
+ argLen: 1,
+ asm: x86.AKMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ },
+ },
+ },
+ {
+ name: "KMOVD",
+ argLen: 1,
+ asm: x86.AKMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ },
+ },
+ },
+ {
+ name: "KMOVW",
+ argLen: 1,
+ asm: x86.AKMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ },
+ },
+ },
+ {
+ name: "KMOVB",
+ argLen: 1,
+ asm: x86.AKMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ },
+ },
+ },
{
name: "VADDPD128",
argLen: 2,
argLen: 3,
generic: true,
},
+ {
+ name: "Cvt16toMask8x16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32toMask8x32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64toMask8x64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt8toMask16x8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt16toMask16x16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32toMask16x32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt8toMask32x4",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt8toMask32x8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt16toMask32x16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt8toMask64x2",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt8toMask64x4",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt8toMask64x8",
+ argLen: 1,
+ generic: true,
+ },
{
name: "AbsoluteInt8x16",
argLen: 1,
return rewriteValueAMD64_OpCtz8(v)
case OpCtz8NonZero:
return rewriteValueAMD64_OpCtz8NonZero(v)
+ case OpCvt16toMask16x16:
+ return rewriteValueAMD64_OpCvt16toMask16x16(v)
+ case OpCvt16toMask32x16:
+ return rewriteValueAMD64_OpCvt16toMask32x16(v)
+ case OpCvt16toMask8x16:
+ return rewriteValueAMD64_OpCvt16toMask8x16(v)
case OpCvt32Fto32:
v.Op = OpAMD64CVTTSS2SL
return true
case OpCvt32to64F:
v.Op = OpAMD64CVTSL2SD
return true
+ case OpCvt32toMask16x32:
+ return rewriteValueAMD64_OpCvt32toMask16x32(v)
+ case OpCvt32toMask8x32:
+ return rewriteValueAMD64_OpCvt32toMask8x32(v)
case OpCvt64Fto32:
v.Op = OpAMD64CVTTSD2SL
return true
case OpCvt64to64F:
v.Op = OpAMD64CVTSQ2SD
return true
+ case OpCvt64toMask8x64:
+ return rewriteValueAMD64_OpCvt64toMask8x64(v)
+ case OpCvt8toMask16x8:
+ return rewriteValueAMD64_OpCvt8toMask16x8(v)
+ case OpCvt8toMask32x4:
+ return rewriteValueAMD64_OpCvt8toMask32x4(v)
+ case OpCvt8toMask32x8:
+ return rewriteValueAMD64_OpCvt8toMask32x8(v)
+ case OpCvt8toMask64x2:
+ return rewriteValueAMD64_OpCvt8toMask64x2(v)
+ case OpCvt8toMask64x4:
+ return rewriteValueAMD64_OpCvt8toMask64x4(v)
+ case OpCvt8toMask64x8:
+ return rewriteValueAMD64_OpCvt8toMask64x8(v)
case OpCvtBoolToUint8:
v.Op = OpCopy
return true
}
return false
}
+func rewriteValueAMD64_OpCvt16toMask16x16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt16toMask16x16 <t> x)
+ // result: (VPMOVMToVec16x16 <types.TypeVec256> (KMOVW <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec16x16)
+ v.Type = types.TypeVec256
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt16toMask32x16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt16toMask32x16 <t> x)
+ // result: (VPMOVMToVec32x16 <types.TypeVec512> (KMOVW <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec32x16)
+ v.Type = types.TypeVec512
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt16toMask8x16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt16toMask8x16 <t> x)
+ // result: (VPMOVMToVec8x16 <types.TypeVec128> (KMOVW <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec8x16)
+ v.Type = types.TypeVec128
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32toMask16x32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt32toMask16x32 <t> x)
+ // result: (VPMOVMToVec16x32 <types.TypeVec512> (KMOVD <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec16x32)
+ v.Type = types.TypeVec512
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVD, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32toMask8x32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt32toMask8x32 <t> x)
+ // result: (VPMOVMToVec8x32 <types.TypeVec256> (KMOVD <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec8x32)
+ v.Type = types.TypeVec256
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVD, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64toMask8x64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt64toMask8x64 <t> x)
+ // result: (VPMOVMToVec8x64 <types.TypeVec512> (KMOVQ <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec8x64)
+ v.Type = types.TypeVec512
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQ, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt8toMask16x8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt8toMask16x8 <t> x)
+ // result: (VPMOVMToVec16x8 <types.TypeVec128> (KMOVB <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec16x8)
+ v.Type = types.TypeVec128
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt8toMask32x4(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt8toMask32x4 <t> x)
+ // result: (VPMOVMToVec32x4 <types.TypeVec128> (KMOVB <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec32x4)
+ v.Type = types.TypeVec128
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt8toMask32x8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt8toMask32x8 <t> x)
+ // result: (VPMOVMToVec32x8 <types.TypeVec256> (KMOVB <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec32x8)
+ v.Type = types.TypeVec256
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt8toMask64x2(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt8toMask64x2 <t> x)
+ // result: (VPMOVMToVec64x2 <types.TypeVec128> (KMOVB <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec64x2)
+ v.Type = types.TypeVec128
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt8toMask64x4(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt8toMask64x4 <t> x)
+ // result: (VPMOVMToVec64x4 <types.TypeVec256> (KMOVB <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec64x4)
+ v.Type = types.TypeVec256
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt8toMask64x8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Cvt8toMask64x8 <t> x)
+ // result: (VPMOVMToVec64x8 <types.TypeVec512> (KMOVB <types.TypeMask> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64VPMOVMToVec64x8)
+ v.Type = types.TypeVec512
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool {
v_0 := v.Args[0]
// match: (DiffWithCeilWithPrecisionFloat32x16 [a] x)
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask16x16 <t> ptr mem)
- // result: (VPMOVMToVec16x16 <types.TypeVec256> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec16x16 <types.TypeVec256> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v.Type = types.TypeVec256
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask16x32 <t> ptr mem)
- // result: (VPMOVMToVec16x32 <types.TypeVec512> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec16x32 <types.TypeVec512> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v.Type = types.TypeVec512
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask16x8 <t> ptr mem)
- // result: (VPMOVMToVec16x8 <types.TypeVec128> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec16x8 <types.TypeVec128> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v.Type = types.TypeVec128
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask32x16 <t> ptr mem)
- // result: (VPMOVMToVec32x16 <types.TypeVec512> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec32x16 <types.TypeVec512> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v.Type = types.TypeVec512
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask32x4 <t> ptr mem)
- // result: (VPMOVMToVec32x4 <types.TypeVec128> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec32x4 <types.TypeVec128> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v.Type = types.TypeVec128
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask32x8 <t> ptr mem)
- // result: (VPMOVMToVec32x8 <types.TypeVec256> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec32x8 <types.TypeVec256> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v.Type = types.TypeVec256
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask64x2 <t> ptr mem)
- // result: (VPMOVMToVec64x2 <types.TypeVec128> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec64x2 <types.TypeVec128> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v.Type = types.TypeVec128
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask64x4 <t> ptr mem)
- // result: (VPMOVMToVec64x4 <types.TypeVec256> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec64x4 <types.TypeVec256> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v.Type = types.TypeVec256
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask64x8 <t> ptr mem)
- // result: (VPMOVMToVec64x8 <types.TypeVec512> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec64x8 <types.TypeVec512> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v.Type = types.TypeVec512
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask8x16 <t> ptr mem)
- // result: (VPMOVMToVec8x16 <types.TypeVec128> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec8x16 <types.TypeVec128> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v.Type = types.TypeVec128
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask8x32 <t> ptr mem)
- // result: (VPMOVMToVec8x32 <types.TypeVec256> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec8x32 <types.TypeVec256> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v.Type = types.TypeVec256
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (LoadMask8x64 <t> ptr mem)
- // result: (VPMOVMToVec8x64 <types.TypeVec512> (KMOVQload <t> ptr mem))
+ // result: (VPMOVMToVec8x64 <types.TypeVec512> (KMOVQload <types.TypeMask> ptr mem))
for {
- t := v.Type
ptr := v_0
mem := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v.Type = types.TypeVec512
- v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t)
+ v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
}
+var loadMaskOpcodes = map[int]map[int]ssa.Op{
+ 8: {16: ssa.OpLoadMask8x16, 32: ssa.OpLoadMask8x32, 64: ssa.OpLoadMask8x64},
+ 16: {8: ssa.OpLoadMask16x8, 16: ssa.OpLoadMask16x16, 32: ssa.OpLoadMask16x32},
+ 32: {4: ssa.OpLoadMask32x4, 8: ssa.OpLoadMask32x8, 16: ssa.OpLoadMask32x16},
+ 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8},
+}
+
+var cvtMaskOpcodes = map[int]map[int]ssa.Op{
+ 8: {16: ssa.OpCvt16toMask8x16, 32: ssa.OpCvt32toMask8x32, 64: ssa.OpCvt64toMask8x64},
+ 16: {8: ssa.OpCvt8toMask16x8, 16: ssa.OpCvt16toMask16x16, 32: ssa.OpCvt32toMask16x32},
+ 32: {4: ssa.OpCvt8toMask32x4, 8: ssa.OpCvt8toMask32x8, 16: ssa.OpCvt16toMask32x16},
+ 64: {2: ssa.OpCvt8toMask64x2, 4: ssa.OpCvt8toMask64x4, 8: ssa.OpCvt8toMask64x8},
+}
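+
+// For example, loadMaskOpcodes[32][8] is ssa.OpLoadMask32x8 (a Mask32x8 has
+// 8 lanes of 32 bits each); a missing entry is the zero Op, which callers
+// treat as an unsupported mask shape.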
+
func simdLoadMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
- opCodes := map[int]map[int]ssa.Op{
- 8: {16: ssa.OpLoadMask8x16, 32: ssa.OpLoadMask8x32, 64: ssa.OpLoadMask8x64},
- 16: {8: ssa.OpLoadMask16x8, 16: ssa.OpLoadMask16x16, 32: ssa.OpLoadMask16x32},
- 32: {4: ssa.OpLoadMask32x4, 8: ssa.OpLoadMask32x8, 16: ssa.OpLoadMask32x16},
- 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8},
- }
- op := opCodes[elemBits][lanes]
+ op := loadMaskOpcodes[elemBits][lanes]
if op == 0 {
panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes))
}
}
}
+func simdCvtMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ op := cvtMaskOpcodes[elemBits][lanes]
+ if op == 0 {
+ panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes))
+ }
+ return s.newValue1(op, types.TypeMask, args[0])
+ }
+}
+
func simdMaskedLoad(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(op, n.Type(), args[0], args[1], s.mem())
addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64)
addF(simdPackage, "Mask8x16.StoreToBits", simdStoreMask(8, 16), sys.AMD64)
+ addF(simdPackage, "Mask8x16FromBits", simdCvtMask(8, 16), sys.AMD64)
addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64)
addF(simdPackage, "Mask8x32.StoreToBits", simdStoreMask(8, 32), sys.AMD64)
+ addF(simdPackage, "Mask8x32FromBits", simdCvtMask(8, 32), sys.AMD64)
addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64)
addF(simdPackage, "Mask8x64.StoreToBits", simdStoreMask(8, 64), sys.AMD64)
+ addF(simdPackage, "Mask8x64FromBits", simdCvtMask(8, 64), sys.AMD64)
addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64)
addF(simdPackage, "Mask16x8.StoreToBits", simdStoreMask(16, 8), sys.AMD64)
+ addF(simdPackage, "Mask16x8FromBits", simdCvtMask(16, 8), sys.AMD64)
addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64)
addF(simdPackage, "Mask16x16.StoreToBits", simdStoreMask(16, 16), sys.AMD64)
+ addF(simdPackage, "Mask16x16FromBits", simdCvtMask(16, 16), sys.AMD64)
addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64)
addF(simdPackage, "Mask16x32.StoreToBits", simdStoreMask(16, 32), sys.AMD64)
+ addF(simdPackage, "Mask16x32FromBits", simdCvtMask(16, 32), sys.AMD64)
addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64)
addF(simdPackage, "Mask32x4.StoreToBits", simdStoreMask(32, 4), sys.AMD64)
+ addF(simdPackage, "Mask32x4FromBits", simdCvtMask(32, 4), sys.AMD64)
addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64)
addF(simdPackage, "Mask32x8.StoreToBits", simdStoreMask(32, 8), sys.AMD64)
+ addF(simdPackage, "Mask32x8FromBits", simdCvtMask(32, 8), sys.AMD64)
addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64)
addF(simdPackage, "Mask32x16.StoreToBits", simdStoreMask(32, 16), sys.AMD64)
+ addF(simdPackage, "Mask32x16FromBits", simdCvtMask(32, 16), sys.AMD64)
addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 2), sys.AMD64)
addF(simdPackage, "Mask64x2.StoreToBits", simdStoreMask(64, 2), sys.AMD64)
+ addF(simdPackage, "Mask64x2FromBits", simdCvtMask(64, 2), sys.AMD64)
addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64)
addF(simdPackage, "Mask64x4.StoreToBits", simdStoreMask(64, 4), sys.AMD64)
+ addF(simdPackage, "Mask64x4FromBits", simdCvtMask(64, 4), sys.AMD64)
addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64)
addF(simdPackage, "Mask64x8.StoreToBits", simdStoreMask(64, 8), sys.AMD64)
+ addF(simdPackage, "Mask64x8FromBits", simdCvtMask(64, 8), sys.AMD64)
}
//go:noescape
func (x Mask8x16) StoreToBits(y *uint64)
+
+// Mask8x16FromBits constructs a Mask8x16 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 16 bits of y are used.
+func Mask8x16FromBits(y uint16) Mask8x16
+
// Mask16x8 is a 128-bit SIMD vector of 8 int16
type Mask16x8 struct {
int16x8 v128
//go:noescape
func (x Mask16x8) StoreToBits(y *uint64)
+
+// Mask16x8FromBits constructs a Mask16x8 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 8 bits of y are used.
+func Mask16x8FromBits(y uint8) Mask16x8
+
// Mask32x4 is a 128-bit SIMD vector of 4 int32
type Mask32x4 struct {
int32x4 v128
//go:noescape
func (x Mask32x4) StoreToBits(y *uint64)
+
+// Mask32x4FromBits constructs a Mask32x4 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 4 bits of y are used.
+func Mask32x4FromBits(y uint8) Mask32x4
+
// Mask64x2 is a 128-bit SIMD vector of 2 int64
type Mask64x2 struct {
int64x2 v128
//go:noescape
func (x Mask64x2) StoreToBits(y *uint64)
+
+// Mask64x2FromBits constructs a Mask64x2 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 2 bits of y are used.
+func Mask64x2FromBits(y uint8) Mask64x2
+
// v256 is a tag type that tells the compiler that this is really 256-bit SIMD
type v256 struct {
_256 struct{}
//go:noescape
func (x Mask8x32) StoreToBits(y *uint64)
+
+// Mask8x32FromBits constructs a Mask8x32 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 32 bits of y are used.
+func Mask8x32FromBits(y uint32) Mask8x32
+
// Mask16x16 is a 256-bit SIMD vector of 16 int16
type Mask16x16 struct {
int16x16 v256
//go:noescape
func (x Mask16x16) StoreToBits(y *uint64)
+
+// Mask16x16FromBits constructs a Mask16x16 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 16 bits of y are used.
+func Mask16x16FromBits(y uint16) Mask16x16
+
// Mask32x8 is a 256-bit SIMD vector of 8 int32
type Mask32x8 struct {
int32x8 v256
//go:noescape
func (x Mask32x8) StoreToBits(y *uint64)
+
+// Mask32x8FromBits constructs a Mask32x8 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 8 bits of y are used.
+func Mask32x8FromBits(y uint8) Mask32x8
+
// Mask64x4 is a 256-bit SIMD vector of 4 int64
type Mask64x4 struct {
int64x4 v256
//go:noescape
func (x Mask64x4) StoreToBits(y *uint64)
+
+// Mask64x4FromBits constructs a Mask64x4 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 4 bits of y are used.
+func Mask64x4FromBits(y uint8) Mask64x4
+
// v512 is a tag type that tells the compiler that this is really 512-bit SIMD
type v512 struct {
_512 struct{}
//go:noescape
func (x Mask8x64) StoreToBits(y *uint64)
+
+// Mask8x64FromBits constructs a Mask8x64 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 64 bits of y are used.
+func Mask8x64FromBits(y uint64) Mask8x64
+
// Mask16x32 is a 512-bit SIMD vector of 32 int16
type Mask16x32 struct {
int16x32 v512
//go:noescape
func (x Mask16x32) StoreToBits(y *uint64)
+
+// Mask16x32FromBits constructs a Mask16x32 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 32 bits of y are used.
+func Mask16x32FromBits(y uint32) Mask16x32
+
// Mask32x16 is a 512-bit SIMD vector of 16 int32
type Mask32x16 struct {
int32x16 v512
//go:noescape
func (x Mask32x16) StoreToBits(y *uint64)
+
+// Mask32x16FromBits constructs a Mask32x16 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 16 bits of y are used.
+func Mask32x16FromBits(y uint16) Mask32x16
+
// Mask64x8 is a 512-bit SIMD vector of 8 int64
type Mask64x8 struct {
int64x8 v512
//
//go:noescape
func (x Mask64x8) StoreToBits(y *uint64)
+
+// Mask64x8FromBits constructs a Mask64x8 from a bitmap value: element i of
+// the mask is set if bit i of y is 1, and unset if it is 0.
+// Only the lower 8 bits of y are used.
+func Mask64x8FromBits(y uint8) Mask64x8
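+
+// A minimal usage sketch of the bitmap round trip (illustrative only; it
+// uses nothing beyond the declarations above):
+//
+//	m := Mask32x4FromBits(0b0101) // mask elements 0 and 2 set
+//	var bits uint64
+//	m.StoreToBits(&bits) // bits should read back as 0b0101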