Cypherpunks repositories - gostls13.git/commitdiff
[dev.simd] simd, cmd/compile: sample peephole optimization for .Masked()
author David Chase <drchase@google.com>
Mon, 18 Aug 2025 21:58:30 +0000 (17:58 -0400)
committer David Chase <drchase@google.com>
Wed, 20 Aug 2025 22:10:00 +0000 (15:10 -0700)
This is not the end of such peephole optimizations; there would need
to be many of these to cover the many simd operations.
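
For context, the element-wise meaning of one such rewrite: the rule
(VPANDQ512 x (VPMOVMToVec64x8 k)) => (VMOVDQU64Masked512 x k) replaces
an AND of x with a materialized mask by a single zeroing masked move.
A Go model of that move (illustrative only, not code from this CL):

	// Lane i keeps x[i] where bit i of k is set, and becomes 0 otherwise.
	func maskedMoveModel(x [8]int64, k uint8) [8]int64 {
		var out [8]int64
		for i := range x {
			if k&(1<<i) != 0 {
				out[i] = x[i]
			}
		}
		return out
	}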

Change-Id: I4511f6fac502bc7259c1c4414c96f56eb400c202
Reviewed-on: https://go-review.googlesource.com/c/go/+/697157
TryBot-Bypass: David Chase <drchase@google.com>
Commit-Queue: David Chase <drchase@google.com>
Reviewed-by: Junyang Shao <shaojunyang@google.com>
13 files changed:
src/cmd/compile/internal/amd64/simdssa.go
src/cmd/compile/internal/ssa/_gen/AMD64.rules
src/cmd/compile/internal/ssa/_gen/simdAMD64.rules
src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
src/cmd/compile/internal/ssa/_gen/simdgenericOps.go
src/cmd/compile/internal/ssa/opGen.go
src/cmd/compile/internal/ssa/rewriteAMD64.go
src/cmd/compile/internal/ssagen/simdintrinsics.go
src/simd/_gen/simdgen/gen_simdssa.go
src/simd/_gen/simdgen/godefs.go
src/simd/_gen/simdgen/ops/Moves/categories.yaml
src/simd/_gen/simdgen/ops/Moves/go.yaml
src/simd/ops_amd64.go

diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go
index c535734bd5220f01518002d390600276b9486235..03617d4a5dcbdfddcfff92f81a6ac80e64fc1aad 100644 (file)
@@ -741,7 +741,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
                ssa.OpAMD64VSQRTPSMasked512,
                ssa.OpAMD64VSQRTPDMasked128,
                ssa.OpAMD64VSQRTPDMasked256,
-               ssa.OpAMD64VSQRTPDMasked512:
+               ssa.OpAMD64VSQRTPDMasked512,
+               ssa.OpAMD64VMOVUPSMasked512,
+               ssa.OpAMD64VMOVUPDMasked512,
+               ssa.OpAMD64VMOVDQU8Masked512,
+               ssa.OpAMD64VMOVDQU16Masked512,
+               ssa.OpAMD64VMOVDQU32Masked512,
+               ssa.OpAMD64VMOVDQU64Masked512:
                p = simdVkv(s, v)
 
        case ssa.OpAMD64VPBLENDVB128,
@@ -1672,6 +1678,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
                ssa.OpAMD64VPXORQMasked128,
                ssa.OpAMD64VPXORQMasked256,
                ssa.OpAMD64VPXORQMasked512,
+               ssa.OpAMD64VMOVUPSMasked512,
+               ssa.OpAMD64VMOVUPDMasked512,
+               ssa.OpAMD64VMOVDQU8Masked512,
+               ssa.OpAMD64VMOVDQU16Masked512,
+               ssa.OpAMD64VMOVDQU32Masked512,
+               ssa.OpAMD64VMOVDQU64Masked512,
                ssa.OpAMD64VPSLLWMasked128const,
                ssa.OpAMD64VPSLLWMasked256const,
                ssa.OpAMD64VPSLLWMasked512const,
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
index cec260e9488e7681ed2161dcf4d98c044af5e404..adab859e7bcc1ae4823815016682e7128ae1ad48 100644 (file)
 (VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) => x
 (VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) => x
 (VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) => x
+
+(VPANDQ512 x (VPMOVMToVec64x8 k)) => (VMOVDQU64Masked512 x k)
+(VPANDQ512 x (VPMOVMToVec32x16 k)) => (VMOVDQU32Masked512 x k)
+(VPANDQ512 x (VPMOVMToVec16x32 k)) => (VMOVDQU16Masked512 x k)
+(VPANDQ512 x (VPMOVMToVec8x64 k)) => (VMOVDQU8Masked512 x k)
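
These rules encode the observation that VPMOVMToVec64x8 (and its
siblings) expand each mask bit into an all-ones or all-zeros lane, so
AND-ing x with the expanded mask zeroes exactly the deselected lanes;
a zeroing masked move achieves the same effect in one instruction,
without materializing the mask as a vector. A Go model of the
expansion (illustrative only, using an 8-bit mask for the 64x8 shape):

	// maskToVec64x8 models VPMOVMToVec64x8: bit i of k becomes an
	// all-ones (selected) or all-zeros (deselected) 64-bit lane.
	func maskToVec64x8(k uint8) [8]int64 {
		var v [8]int64
		for i := range v {
			if k&(1<<i) != 0 {
				v[i] = -1 // all ones
			}
		}
		return v
	}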
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules
index f2bb1ffb009567d010564365d0ea7f2a1ae3f685..1be54c738274e2b434d597b1a839b52bd9fce3b5 100644 (file)
 (blendMaskedInt16x32 x y mask) => (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM <types.TypeMask> mask))
 (blendMaskedInt32x16 x y mask) => (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
 (blendMaskedInt64x8 x y mask) => (VPBLENDMQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
+(moveMaskedFloat32x16 x mask) => (VMOVUPSMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(moveMaskedFloat64x8 x mask) => (VMOVUPDMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(moveMaskedInt8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+(moveMaskedInt16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+(moveMaskedInt32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(moveMaskedInt64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(moveMaskedUint8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+(moveMaskedUint16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+(moveMaskedUint32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(moveMaskedUint64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
index c87978cd0d79b63b670b3bca47f58420e7fa10e8..171ae59e32a2d0a662063088f703a244a396c019 100644 (file)
@@ -140,6 +140,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf
                {name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false},
                {name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false},
                {name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false},
+               {name: "VMOVDQU8Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec512", resultInArg0: false},
+               {name: "VMOVDQU16Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec512", resultInArg0: false},
+               {name: "VMOVDQU32Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec512", resultInArg0: false},
+               {name: "VMOVDQU64Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec512", resultInArg0: false},
+               {name: "VMOVUPDMasked512", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec512", resultInArg0: false},
+               {name: "VMOVUPSMasked512", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec512", resultInArg0: false},
                {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
                {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
                {name: "VMULPD512", argLength: 2, reg: w21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false},
diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go
index 4d48e4b16ec21384087ecea65ef177ae45c90dcb..4f9877aa0344b330fb337de9c493590c5fa73754 100644 (file)
@@ -928,6 +928,16 @@ func simdGenericOps() []opData {
                {name: "blendMaskedInt16x32", argLength: 3, commutative: false},
                {name: "blendMaskedInt32x16", argLength: 3, commutative: false},
                {name: "blendMaskedInt64x8", argLength: 3, commutative: false},
+               {name: "moveMaskedFloat32x16", argLength: 2, commutative: false},
+               {name: "moveMaskedFloat64x8", argLength: 2, commutative: false},
+               {name: "moveMaskedInt8x64", argLength: 2, commutative: false},
+               {name: "moveMaskedInt16x32", argLength: 2, commutative: false},
+               {name: "moveMaskedInt32x16", argLength: 2, commutative: false},
+               {name: "moveMaskedInt64x8", argLength: 2, commutative: false},
+               {name: "moveMaskedUint8x64", argLength: 2, commutative: false},
+               {name: "moveMaskedUint16x32", argLength: 2, commutative: false},
+               {name: "moveMaskedUint32x16", argLength: 2, commutative: false},
+               {name: "moveMaskedUint64x8", argLength: 2, commutative: false},
                {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
                {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
                {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"},
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 05ee56d1572658d49e891ccfae190626edc25854..8375b3f8a68e2220ce9e7e95f3be9aae96e63903 100644 (file)
@@ -1363,6 +1363,12 @@ const (
        OpAMD64VMINPSMasked128
        OpAMD64VMINPSMasked256
        OpAMD64VMINPSMasked512
+       OpAMD64VMOVDQU8Masked512
+       OpAMD64VMOVDQU16Masked512
+       OpAMD64VMOVDQU32Masked512
+       OpAMD64VMOVDQU64Masked512
+       OpAMD64VMOVUPDMasked512
+       OpAMD64VMOVUPSMasked512
        OpAMD64VMULPD128
        OpAMD64VMULPD256
        OpAMD64VMULPD512
@@ -5572,6 +5578,16 @@ const (
        OpblendMaskedInt16x32
        OpblendMaskedInt32x16
        OpblendMaskedInt64x8
+       OpmoveMaskedFloat32x16
+       OpmoveMaskedFloat64x8
+       OpmoveMaskedInt8x64
+       OpmoveMaskedInt16x32
+       OpmoveMaskedInt32x16
+       OpmoveMaskedInt64x8
+       OpmoveMaskedUint8x64
+       OpmoveMaskedUint16x32
+       OpmoveMaskedUint32x16
+       OpmoveMaskedUint64x8
        OpCeilScaledFloat32x4
        OpCeilScaledFloat32x8
        OpCeilScaledFloat32x16
@@ -20776,6 +20792,90 @@ var opcodeTable = [...]opInfo{
                        },
                },
        },
+       {
+               name:   "VMOVDQU8Masked512",
+               argLen: 2,
+               asm:    x86.AVMOVDQU8,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+                               {0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+                       outputs: []outputInfo{
+                               {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+               },
+       },
+       {
+               name:   "VMOVDQU16Masked512",
+               argLen: 2,
+               asm:    x86.AVMOVDQU16,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+                               {0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+                       outputs: []outputInfo{
+                               {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+               },
+       },
+       {
+               name:   "VMOVDQU32Masked512",
+               argLen: 2,
+               asm:    x86.AVMOVDQU32,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+                               {0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+                       outputs: []outputInfo{
+                               {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+               },
+       },
+       {
+               name:   "VMOVDQU64Masked512",
+               argLen: 2,
+               asm:    x86.AVMOVDQU64,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+                               {0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+                       outputs: []outputInfo{
+                               {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+               },
+       },
+       {
+               name:   "VMOVUPDMasked512",
+               argLen: 2,
+               asm:    x86.AVMOVUPD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+                               {0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+                       outputs: []outputInfo{
+                               {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+               },
+       },
+       {
+               name:   "VMOVUPSMasked512",
+               argLen: 2,
+               asm:    x86.AVMOVUPS,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+                               {0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+                       outputs: []outputInfo{
+                               {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+                       },
+               },
+       },
        {
                name:        "VMULPD128",
                argLen:      2,
@@ -67992,6 +68092,56 @@ var opcodeTable = [...]opInfo{
                argLen:  3,
                generic: true,
        },
+       {
+               name:    "moveMaskedFloat32x16",
+               argLen:  2,
+               generic: true,
+       },
+       {
+               name:    "moveMaskedFloat64x8",
+               argLen:  2,
+               generic: true,
+       },
+       {
+               name:    "moveMaskedInt8x64",
+               argLen:  2,
+               generic: true,
+       },
+       {
+               name:    "moveMaskedInt16x32",
+               argLen:  2,
+               generic: true,
+       },
+       {
+               name:    "moveMaskedInt32x16",
+               argLen:  2,
+               generic: true,
+       },
+       {
+               name:    "moveMaskedInt64x8",
+               argLen:  2,
+               generic: true,
+       },
+       {
+               name:    "moveMaskedUint8x64",
+               argLen:  2,
+               generic: true,
+       },
+       {
+               name:    "moveMaskedUint16x32",
+               argLen:  2,
+               generic: true,
+       },
+       {
+               name:    "moveMaskedUint32x16",
+               argLen:  2,
+               generic: true,
+       },
+       {
+               name:    "moveMaskedUint64x8",
+               argLen:  2,
+               generic: true,
+       },
        {
                name:    "CeilScaledFloat32x4",
                auxType: auxUInt8,
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 2b2df15bc129d63575517c929ec890545b0c7e38..78c1ddd9dc6fb959b6257ca7be8b18009a5b782f 100644 (file)
@@ -507,6 +507,8 @@ func rewriteValueAMD64(v *Value) bool {
                return rewriteValueAMD64_OpAMD64TESTW(v)
        case OpAMD64TESTWconst:
                return rewriteValueAMD64_OpAMD64TESTWconst(v)
+       case OpAMD64VPANDQ512:
+               return rewriteValueAMD64_OpAMD64VPANDQ512(v)
        case OpAMD64VPMOVVec16x16ToM:
                return rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v)
        case OpAMD64VPMOVVec16x32ToM:
@@ -4255,6 +4257,26 @@ func rewriteValueAMD64(v *Value) bool {
                return rewriteValueAMD64_OpblendMaskedInt64x8(v)
        case OpblendMaskedInt8x64:
                return rewriteValueAMD64_OpblendMaskedInt8x64(v)
+       case OpmoveMaskedFloat32x16:
+               return rewriteValueAMD64_OpmoveMaskedFloat32x16(v)
+       case OpmoveMaskedFloat64x8:
+               return rewriteValueAMD64_OpmoveMaskedFloat64x8(v)
+       case OpmoveMaskedInt16x32:
+               return rewriteValueAMD64_OpmoveMaskedInt16x32(v)
+       case OpmoveMaskedInt32x16:
+               return rewriteValueAMD64_OpmoveMaskedInt32x16(v)
+       case OpmoveMaskedInt64x8:
+               return rewriteValueAMD64_OpmoveMaskedInt64x8(v)
+       case OpmoveMaskedInt8x64:
+               return rewriteValueAMD64_OpmoveMaskedInt8x64(v)
+       case OpmoveMaskedUint16x32:
+               return rewriteValueAMD64_OpmoveMaskedUint16x32(v)
+       case OpmoveMaskedUint32x16:
+               return rewriteValueAMD64_OpmoveMaskedUint32x16(v)
+       case OpmoveMaskedUint64x8:
+               return rewriteValueAMD64_OpmoveMaskedUint64x8(v)
+       case OpmoveMaskedUint8x64:
+               return rewriteValueAMD64_OpmoveMaskedUint8x64(v)
        }
        return false
 }
@@ -25949,6 +25971,71 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
        }
        return false
 }
+func rewriteValueAMD64_OpAMD64VPANDQ512(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       // match: (VPANDQ512 x (VPMOVMToVec64x8 k))
+       // result: (VMOVDQU64Masked512 x k)
+       for {
+               for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+                       x := v_0
+                       if v_1.Op != OpAMD64VPMOVMToVec64x8 {
+                               continue
+                       }
+                       k := v_1.Args[0]
+                       v.reset(OpAMD64VMOVDQU64Masked512)
+                       v.AddArg2(x, k)
+                       return true
+               }
+               break
+       }
+       // match: (VPANDQ512 x (VPMOVMToVec32x16 k))
+       // result: (VMOVDQU32Masked512 x k)
+       for {
+               for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+                       x := v_0
+                       if v_1.Op != OpAMD64VPMOVMToVec32x16 {
+                               continue
+                       }
+                       k := v_1.Args[0]
+                       v.reset(OpAMD64VMOVDQU32Masked512)
+                       v.AddArg2(x, k)
+                       return true
+               }
+               break
+       }
+       // match: (VPANDQ512 x (VPMOVMToVec16x32 k))
+       // result: (VMOVDQU16Masked512 x k)
+       for {
+               for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+                       x := v_0
+                       if v_1.Op != OpAMD64VPMOVMToVec16x32 {
+                               continue
+                       }
+                       k := v_1.Args[0]
+                       v.reset(OpAMD64VMOVDQU16Masked512)
+                       v.AddArg2(x, k)
+                       return true
+               }
+               break
+       }
+       // match: (VPANDQ512 x (VPMOVMToVec8x64 k))
+       // result: (VMOVDQU8Masked512 x k)
+       for {
+               for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+                       x := v_0
+                       if v_1.Op != OpAMD64VPMOVMToVec8x64 {
+                               continue
+                       }
+                       k := v_1.Args[0]
+                       v.reset(OpAMD64VMOVDQU8Masked512)
+                       v.AddArg2(x, k)
+                       return true
+               }
+               break
+       }
+       return false
+}
 func rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v *Value) bool {
        v_0 := v.Args[0]
        // match: (VPMOVVec16x16ToM (VPMOVMToVec16x16 x))
@@ -39220,6 +39307,166 @@ func rewriteValueAMD64_OpblendMaskedInt8x64(v *Value) bool {
                return true
        }
 }
+func rewriteValueAMD64_OpmoveMaskedFloat32x16(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (moveMaskedFloat32x16 x mask)
+       // result: (VMOVUPSMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+       for {
+               x := v_0
+               mask := v_1
+               v.reset(OpAMD64VMOVUPSMasked512)
+               v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+               v0.AddArg(mask)
+               v.AddArg2(x, v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpmoveMaskedFloat64x8(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (moveMaskedFloat64x8 x mask)
+       // result: (VMOVUPDMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+       for {
+               x := v_0
+               mask := v_1
+               v.reset(OpAMD64VMOVUPDMasked512)
+               v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+               v0.AddArg(mask)
+               v.AddArg2(x, v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpmoveMaskedInt16x32(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (moveMaskedInt16x32 x mask)
+       // result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+       for {
+               x := v_0
+               mask := v_1
+               v.reset(OpAMD64VMOVDQU16Masked512)
+               v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
+               v0.AddArg(mask)
+               v.AddArg2(x, v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpmoveMaskedInt32x16(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (moveMaskedInt32x16 x mask)
+       // result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+       for {
+               x := v_0
+               mask := v_1
+               v.reset(OpAMD64VMOVDQU32Masked512)
+               v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+               v0.AddArg(mask)
+               v.AddArg2(x, v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpmoveMaskedInt64x8(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (moveMaskedInt64x8 x mask)
+       // result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+       for {
+               x := v_0
+               mask := v_1
+               v.reset(OpAMD64VMOVDQU64Masked512)
+               v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+               v0.AddArg(mask)
+               v.AddArg2(x, v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpmoveMaskedInt8x64(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (moveMaskedInt8x64 x mask)
+       // result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+       for {
+               x := v_0
+               mask := v_1
+               v.reset(OpAMD64VMOVDQU8Masked512)
+               v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
+               v0.AddArg(mask)
+               v.AddArg2(x, v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpmoveMaskedUint16x32(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (moveMaskedUint16x32 x mask)
+       // result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+       for {
+               x := v_0
+               mask := v_1
+               v.reset(OpAMD64VMOVDQU16Masked512)
+               v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
+               v0.AddArg(mask)
+               v.AddArg2(x, v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpmoveMaskedUint32x16(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (moveMaskedUint32x16 x mask)
+       // result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+       for {
+               x := v_0
+               mask := v_1
+               v.reset(OpAMD64VMOVDQU32Masked512)
+               v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+               v0.AddArg(mask)
+               v.AddArg2(x, v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpmoveMaskedUint64x8(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (moveMaskedUint64x8 x mask)
+       // result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+       for {
+               x := v_0
+               mask := v_1
+               v.reset(OpAMD64VMOVDQU64Masked512)
+               v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+               v0.AddArg(mask)
+               v.AddArg2(x, v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpmoveMaskedUint8x64(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (moveMaskedUint8x64 x mask)
+       // result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+       for {
+               x := v_0
+               mask := v_1
+               v.reset(OpAMD64VMOVDQU8Masked512)
+               v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
+               v0.AddArg(mask)
+               v.AddArg2(x, v0)
+               return true
+       }
+}
 func rewriteBlockAMD64(b *Block) bool {
        typ := &b.Func.Config.Types
        switch b.Kind {
diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go
index a519b7d5b3ebeaea9904de4b60b49fa86771319b..0fd330779eab1f66fee8cf6f4b9d5791553931ad 100644 (file)
@@ -1070,6 +1070,16 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies .
        addF(simdPackage, "Int16x32.blendMasked", opLen3(ssa.OpblendMaskedInt16x32, types.TypeVec512), sys.AMD64)
        addF(simdPackage, "Int32x16.blendMasked", opLen3(ssa.OpblendMaskedInt32x16, types.TypeVec512), sys.AMD64)
        addF(simdPackage, "Int64x8.blendMasked", opLen3(ssa.OpblendMaskedInt64x8, types.TypeVec512), sys.AMD64)
+       addF(simdPackage, "Float32x16.moveMasked", opLen2(ssa.OpmoveMaskedFloat32x16, types.TypeVec512), sys.AMD64)
+       addF(simdPackage, "Float64x8.moveMasked", opLen2(ssa.OpmoveMaskedFloat64x8, types.TypeVec512), sys.AMD64)
+       addF(simdPackage, "Int8x64.moveMasked", opLen2(ssa.OpmoveMaskedInt8x64, types.TypeVec512), sys.AMD64)
+       addF(simdPackage, "Int16x32.moveMasked", opLen2(ssa.OpmoveMaskedInt16x32, types.TypeVec512), sys.AMD64)
+       addF(simdPackage, "Int32x16.moveMasked", opLen2(ssa.OpmoveMaskedInt32x16, types.TypeVec512), sys.AMD64)
+       addF(simdPackage, "Int64x8.moveMasked", opLen2(ssa.OpmoveMaskedInt64x8, types.TypeVec512), sys.AMD64)
+       addF(simdPackage, "Uint8x64.moveMasked", opLen2(ssa.OpmoveMaskedUint8x64, types.TypeVec512), sys.AMD64)
+       addF(simdPackage, "Uint16x32.moveMasked", opLen2(ssa.OpmoveMaskedUint16x32, types.TypeVec512), sys.AMD64)
+       addF(simdPackage, "Uint32x16.moveMasked", opLen2(ssa.OpmoveMaskedUint32x16, types.TypeVec512), sys.AMD64)
+       addF(simdPackage, "Uint64x8.moveMasked", opLen2(ssa.OpmoveMaskedUint64x8, types.TypeVec512), sys.AMD64)
        addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
        addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
        addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
diff --git a/src/simd/_gen/simdgen/gen_simdssa.go b/src/simd/_gen/simdgen/gen_simdssa.go
index 5a5421a815fd2cfb059d2a43dee2f6fbd6aea486..67a029fa457986840d873692e052320cfadf8b16 100644 (file)
@@ -98,7 +98,7 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer {
                seen[asm] = struct{}{}
                caseStr := fmt.Sprintf("ssa.OpAMD64%s", asm)
                if shapeIn == OneKmaskIn || shapeIn == OneKmaskImmIn {
-                       if gOp.Zeroing == nil {
+                       if gOp.Zeroing == nil || *gOp.Zeroing {
                                ZeroingMask = append(ZeroingMask, caseStr)
                        }
                }
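
The condition in this hunk reads Zeroing as a tri-state *bool: nil
means the default (zeroing) behavior, and an explicit true now also
lands the op in the ZeroingMask case list. A minimal sketch of that
idiom (the field's pointer type is inferred from the expression in the
diff):

	// isZeroing treats a nil pointer as "default: zeroing".
	func isZeroing(z *bool) bool {
		return z == nil || *z
	}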
diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go
index 4044addd8c16ad6ec8f8e5ead86d8a7d6b72d63a..e438d7fa6e3ac0b1b636d9fe8c4a399f5e0645f0 100644 (file)
@@ -129,7 +129,7 @@ func (o *Operation) VectorWidth() int {
 
 func machineOpName(maskType maskShape, gOp Operation) string {
        asm := gOp.Asm
-       if maskType == 2 {
+       if maskType == OneMask {
                asm += "Masked"
        }
        asm = fmt.Sprintf("%s%d", asm, gOp.VectorWidth())
diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml
index ef8e036050933391782a97554cb1aee42aeb6b10..438c1ef3092bf371e09e978c374d9b272a3c6d3b 100644 (file)
   documentation: !string |-
     // NAME blends two vectors based on mask values, choosing either
     // the first or the second based on whether the third is false or true
+- go: move
+  commutative: false
+  documentation: !string |-
+    // NAME blends a vector with zero, with the original value where the mask is true
+    // and zero where the mask is false.
 - go: Expand
   commutative: false
   documentation: !string |-
diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml
index d4d1b4b9bd34454ff4473f65a233abf926afd423..2398e5341555cdb7642770546a3d492eb30d4e63 100644 (file)
   out:
   - *v
 
+  # For AVX512
+- go: move
+  asm: VMOVDQU(8|16|32|64)
+  zeroing: true
+  in:
+  - &v
+    go: $t
+    bits: 512
+    class: vreg
+    base: int|uint
+  inVariant:
+  -
+    class: mask
+  out:
+  - *v
+
+  # For AVX512
+- go: move
+  asm: VMOVUP[SD]
+  zeroing: true
+  in:
+  - &v
+    go: $t
+    bits: 512
+    class: vreg
+    base: float
+  inVariant:
+  -
+    class: mask
+  out:
+  - *v
+
 - go: Expand
   asm: "VPEXPAND[BWDQ]|VEXPANDP[SD]"
   in:
diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go
index 79f5dc8523b236df63142c49f5a333f276545cb5..019f9df1ed62df651c5cada6a0829c53ec33bcb8 100644 (file)
@@ -6122,6 +6122,88 @@ func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16
 // Asm: VPBLENDMQ, CPU Feature: AVX512
 func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8
 
+/* moveMasked */
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVUPS, CPU Feature: AVX512
+func (x Float32x16) moveMasked(mask Mask32x16) Float32x16
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVUPD, CPU Feature: AVX512
+func (x Float64x8) moveMasked(mask Mask64x8) Float64x8
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU8, CPU Feature: AVX512
+func (x Int8x64) moveMasked(mask Mask8x64) Int8x64
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU16, CPU Feature: AVX512
+func (x Int16x32) moveMasked(mask Mask16x32) Int16x32
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU32, CPU Feature: AVX512
+func (x Int32x16) moveMasked(mask Mask32x16) Int32x16
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU64, CPU Feature: AVX512
+func (x Int64x8) moveMasked(mask Mask64x8) Int64x8
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU8, CPU Feature: AVX512
+func (x Uint8x64) moveMasked(mask Mask8x64) Uint8x64
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU16, CPU Feature: AVX512
+func (x Uint16x32) moveMasked(mask Mask16x32) Uint16x32
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU32, CPU Feature: AVX512
+func (x Uint32x16) moveMasked(mask Mask32x16) Uint32x16
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU64, CPU Feature: AVX512
+func (x Uint64x8) moveMasked(mask Mask64x8) Uint64x8
+
 // Float64x2 converts from Float32x4 to Float64x2
 func (from Float32x4) AsFloat64x2() (to Float64x2)
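
The moveMasked methods above are unexported; the CL title suggests they
back a .Masked() operation at the public API level, though that wrapper
is not part of this diff. A hypothetical in-package usage, only to pin
down the semantics:

	// zeroUnselected keeps the lanes of x selected by m and zeroes the rest.
	func zeroUnselected(x Int64x8, m Mask64x8) Int64x8 {
		return x.moveMasked(m)
	}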