ssa.OpAMD64VPXORDMasked512,
ssa.OpAMD64VPXORQMasked128,
ssa.OpAMD64VPXORQMasked256,
- ssa.OpAMD64VPXORQMasked512:
+ ssa.OpAMD64VPXORQMasked512,
+ ssa.OpAMD64VPBLENDMBMasked512,
+ ssa.OpAMD64VPBLENDMWMasked512,
+ ssa.OpAMD64VPBLENDMDMasked512,
+ ssa.OpAMD64VPBLENDMQMasked512:
p = simdV2kv(s, v)
case ssa.OpAMD64VPABSBMasked128,
ssa.OpAMD64VSQRTPDMasked512:
p = simdVkv(s, v)
+ case ssa.OpAMD64VPBLENDVB128,
+ ssa.OpAMD64VPBLENDVB256:
+ p = simdV31(s, v)
+
case ssa.OpAMD64VROUNDPS128,
ssa.OpAMD64VROUNDPS256,
ssa.OpAMD64VROUNDPD128,
ssa.OpAMD64VPXORQMasked128,
ssa.OpAMD64VPXORQMasked256,
ssa.OpAMD64VPXORQMasked512,
+ ssa.OpAMD64VPBLENDMBMasked512,
+ ssa.OpAMD64VPBLENDMWMasked512,
+ ssa.OpAMD64VPBLENDMDMasked512,
+ ssa.OpAMD64VPBLENDMQMasked512,
ssa.OpAMD64VPSLLWMasked128const,
ssa.OpAMD64VPSLLWMasked256const,
ssa.OpAMD64VPSLLWMasked512const,
(XorMaskedUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
(XorMaskedUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
(XorMaskedUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
+(blendInt8x16 ...) => (VPBLENDVB128 ...)
+(blendInt8x32 ...) => (VPBLENDVB256 ...)
+(blendMaskedInt8x64 x y mask) => (VPBLENDMBMasked512 x y (VPMOVVec8x64ToM <types.TypeMask> mask))
+(blendMaskedInt16x32 x y mask) => (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM <types.TypeMask> mask))
+(blendMaskedInt32x16 x y mask) => (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
+(blendMaskedInt64x8 x y mask) => (VPBLENDMQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
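
The rules above route the AVX forms through a vector mask operand directly, while the AVX-512 forms first convert the vector mask to a K register via VPMOVVec*ToM. As an illustrative scalar model of the 64x8 masked case (a hypothetical helper, not part of the change, assuming the package's all-ones-for-true mask convention):

// blendMasked64x8Model is a hypothetical scalar model of the
// blendMaskedInt64x8 => VPBLENDMQMasked512 lowering above; it is
// illustration only, not compiler code.
func blendMasked64x8Model(x, y, mask [8]int64) [8]int64 {
	var out [8]int64
	for i := range out {
		// VPMOVVec64x8ToM derives each K-mask bit from the sign bit
		// of the corresponding mask element (all ones == true).
		if mask[i] < 0 {
			out[i] = y[i] // K bit set: take the second operand
		} else {
			out[i] = x[i] // K bit clear: take the first operand
		}
	}
	return out
}
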
{name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPAVGWMasked256", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false},
+ {name: "VPBLENDMBMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMB", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPBLENDMDMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMD", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPBLENDMQMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMQ", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPBLENDMWMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMW", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPBLENDVB128", argLength: 3, reg: v31, asm: "VPBLENDVB", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPBLENDVB256", argLength: 3, reg: v31, asm: "VPBLENDVB", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false},
{name: "XorUint64x2", argLength: 2, commutative: true},
{name: "XorUint64x4", argLength: 2, commutative: true},
{name: "XorUint64x8", argLength: 2, commutative: true},
+ {name: "blendInt8x16", argLength: 3, commutative: false},
+ {name: "blendInt8x32", argLength: 3, commutative: false},
+ {name: "blendMaskedInt8x64", argLength: 3, commutative: false},
+ {name: "blendMaskedInt16x32", argLength: 3, commutative: false},
+ {name: "blendMaskedInt32x16", argLength: 3, commutative: false},
+ {name: "blendMaskedInt64x8", argLength: 3, commutative: false},
{name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
OpAMD64VPAVGWMasked128
OpAMD64VPAVGWMasked256
OpAMD64VPAVGWMasked512
+ OpAMD64VPBLENDMBMasked512
+ OpAMD64VPBLENDMDMasked512
+ OpAMD64VPBLENDMQMasked512
+ OpAMD64VPBLENDMWMasked512
+ OpAMD64VPBLENDVB128
+ OpAMD64VPBLENDVB256
OpAMD64VPCMPEQB128
OpAMD64VPCMPEQB256
OpAMD64VPCMPEQB512
OpXorUint64x2
OpXorUint64x4
OpXorUint64x8
+ OpblendInt8x16
+ OpblendInt8x32
+ OpblendMaskedInt8x64
+ OpblendMaskedInt16x32
+ OpblendMaskedInt32x16
+ OpblendMaskedInt64x8
OpCeilScaledFloat32x4
OpCeilScaledFloat32x8
OpCeilScaledFloat32x16
},
},
},
+ {
+ name: "VPBLENDMBMasked512",
+ argLen: 3,
+ asm: x86.AVPBLENDMB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VPBLENDMDMasked512",
+ argLen: 3,
+ asm: x86.AVPBLENDMD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VPBLENDMQMasked512",
+ argLen: 3,
+ asm: x86.AVPBLENDMQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VPBLENDMWMasked512",
+ argLen: 3,
+ asm: x86.AVPBLENDMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VPBLENDVB128",
+ argLen: 3,
+ asm: x86.AVPBLENDVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VPBLENDVB256",
+ argLen: 3,
+ asm: x86.AVPBLENDVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPCMPEQB128",
argLen: 2,
commutative: true,
generic: true,
},
+ {
+ name: "blendInt8x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "blendInt8x32",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "blendMaskedInt8x64",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "blendMaskedInt16x32",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "blendMaskedInt32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "blendMaskedInt64x8",
+ argLen: 3,
+ generic: true,
+ },
{
name: "CeilScaledFloat32x4",
auxType: auxInt8,
return true
case OpZeroSIMD:
return rewriteValueAMD64_OpZeroSIMD(v)
+ case OpblendInt8x16:
+ v.Op = OpAMD64VPBLENDVB128
+ return true
+ case OpblendInt8x32:
+ v.Op = OpAMD64VPBLENDVB256
+ return true
+ case OpblendMaskedInt16x32:
+ return rewriteValueAMD64_OpblendMaskedInt16x32(v)
+ case OpblendMaskedInt32x16:
+ return rewriteValueAMD64_OpblendMaskedInt32x16(v)
+ case OpblendMaskedInt64x8:
+ return rewriteValueAMD64_OpblendMaskedInt64x8(v)
+ case OpblendMaskedInt8x64:
+ return rewriteValueAMD64_OpblendMaskedInt8x64(v)
}
return false
}
}
return false
}
+func rewriteValueAMD64_OpblendMaskedInt16x32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (blendMaskedInt16x32 x y mask)
+ // result: (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ mask := v_2
+ v.reset(OpAMD64VPBLENDMWMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpblendMaskedInt32x16(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (blendMaskedInt32x16 x y mask)
+ // result: (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ mask := v_2
+ v.reset(OpAMD64VPBLENDMDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpblendMaskedInt64x8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (blendMaskedInt64x8 x y mask)
+ // result: (VPBLENDMQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ mask := v_2
+ v.reset(OpAMD64VPBLENDMQMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpblendMaskedInt8x64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (blendMaskedInt8x64 x y mask)
+ // result: (VPBLENDMBMasked512 x y (VPMOVVec8x64ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ mask := v_2
+ v.reset(OpAMD64VPBLENDMBMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+}
func rewriteBlockAMD64(b *Block) bool {
typ := &b.Func.Config.Types
switch b.Kind {
addF(simdPackage, "Uint64x2.XorMasked", opLen3(ssa.OpXorMaskedUint64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x4.XorMasked", opLen3(ssa.OpXorMaskedUint64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint64x8.XorMasked", opLen3(ssa.OpXorMaskedUint64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Int8x16.blend", opLen3(ssa.OpblendInt8x16, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Int8x32.blend", opLen3(ssa.OpblendInt8x32, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Int8x64.blendMasked", opLen3(ssa.OpblendMaskedInt8x64, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Int16x32.blendMasked", opLen3(ssa.OpblendMaskedInt16x32, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Int32x16.blendMasked", opLen3(ssa.OpblendMaskedInt32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Int64x8.blendMasked", opLen3(ssa.OpblendMaskedInt64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
// Asm: VPXORQ, CPU Feature: AVX512F
func (x Uint64x8) XorMasked(y Uint64x8, mask Mask64x8) Uint64x8
+/* blend */
+
+// blend blends two vectors element-wise, choosing from the first vector
+// where the mask element is false and from the second where it is true.
+//
+// Asm: VPBLENDVB, CPU Feature: AVX
+func (x Int8x16) blend(y Int8x16, mask Int8x16) Int8x16
+
+// blend blends two vectors element-wise, choosing from the first vector
+// where the mask element is false and from the second where it is true.
+//
+// Asm: VPBLENDVB, CPU Feature: AVX2
+func (x Int8x32) blend(y Int8x32, mask Int8x32) Int8x32
+
+/* blendMasked */
+
+// blendMasked blends two vectors element-wise, choosing from the first
+// vector where the mask element is false and from the second where it is true.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPBLENDMB, CPU Feature: AVX512BW
+func (x Int8x64) blendMasked(y Int8x64, mask Mask8x64) Int8x64
+
+// blendMasked blends two vectors element-wise, choosing from the first
+// vector where the mask element is false and from the second where it is true.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPBLENDMW, CPU Feature: AVX512BW
+func (x Int16x32) blendMasked(y Int16x32, mask Mask16x32) Int16x32
+
+// blendMasked blends two vectors element-wise, choosing from the first
+// vector where the mask element is false and from the second where it is true.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPBLENDMD, CPU Feature: AVX512F
+func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16
+
+// blendMasked blends two vectors element-wise, choosing from the first
+// vector where the mask element is false and from the second where it is true.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPBLENDMQ, CPU Feature: AVX512F
+func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8
+
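blend and blendMasked are unexported, so any caller lives inside package simd. A minimal usage sketch, assuming a hypothetical in-package wrapper (selectInt8x16 is an invented name, not part of this change):

// selectInt8x16 is a hypothetical in-package wrapper illustrating the
// calling convention; VPBLENDVB tests the high bit of each mask byte,
// so mask elements are assumed all ones for true, all zeros for false.
func selectInt8x16(x, y, mask Int8x16) Int8x16 {
	return x.blend(y, mask) // per element: y where mask is true, else x
}
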
// Float64x2 converts from Float32x4 to Float64x2
func (from Float32x4) AsFloat64x2() (to Float64x2)