From ede64cf0d82e49edbdcb5107a80bbdac3217b55b Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 18 Aug 2025 17:58:30 -0400 Subject: [PATCH] [dev.simd] simd, cmd/compile: sample peephole optimization for .Masked() This is not the end of such peephole optimizations, there would need to be many of these for many simd operations. Change-Id: I4511f6fac502bc7259c1c4414c96f56eb400c202 Reviewed-on: https://go-review.googlesource.com/c/go/+/697157 TryBot-Bypass: David Chase Commit-Queue: David Chase Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 14 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 5 + .../compile/internal/ssa/_gen/simdAMD64.rules | 10 + .../compile/internal/ssa/_gen/simdAMD64ops.go | 6 + .../internal/ssa/_gen/simdgenericOps.go | 10 + src/cmd/compile/internal/ssa/opGen.go | 150 +++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 247 ++++++++++++++++++ .../compile/internal/ssagen/simdintrinsics.go | 10 + src/simd/_gen/simdgen/gen_simdssa.go | 2 +- src/simd/_gen/simdgen/godefs.go | 2 +- .../_gen/simdgen/ops/Moves/categories.yaml | 5 + src/simd/_gen/simdgen/ops/Moves/go.yaml | 32 +++ src/simd/ops_amd64.go | 82 ++++++ 13 files changed, 572 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index c535734bd5..03617d4a5d 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -741,7 +741,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VSQRTPDMasked512: + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VMOVUPSMasked512, + ssa.OpAMD64VMOVUPDMasked512, + ssa.OpAMD64VMOVDQU8Masked512, + ssa.OpAMD64VMOVDQU16Masked512, + ssa.OpAMD64VMOVDQU32Masked512, + ssa.OpAMD64VMOVDQU64Masked512: p = simdVkv(s, v) case ssa.OpAMD64VPBLENDVB128, @@ -1672,6 +1678,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMOVUPSMasked512, + ssa.OpAMD64VMOVUPDMasked512, + ssa.OpAMD64VMOVDQU8Masked512, + ssa.OpAMD64VMOVDQU16Masked512, + ssa.OpAMD64VMOVDQU32Masked512, + ssa.OpAMD64VMOVDQU64Masked512, ssa.OpAMD64VPSLLWMasked128const, ssa.OpAMD64VPSLLWMasked256const, ssa.OpAMD64VPSLLWMasked512const, diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index cec260e948..adab859e7b 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1763,3 +1763,8 @@ (VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) => x (VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) => x (VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) => x + +(VPANDQ512 x (VPMOVMToVec64x8 k)) => (VMOVDQU64Masked512 x k) +(VPANDQ512 x (VPMOVMToVec32x16 k)) => (VMOVDQU32Masked512 x k) +(VPANDQ512 x (VPMOVMToVec16x32 k)) => (VMOVDQU16Masked512 x k) +(VPANDQ512 x (VPMOVMToVec8x64 k)) => (VMOVDQU8Masked512 x k) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index f2bb1ffb00..1be54c7382 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1076,3 +1076,13 @@ (blendMaskedInt16x32 x y mask) => (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM mask)) (blendMaskedInt32x16 x y mask) => (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM mask)) (blendMaskedInt64x8 x y mask) => (VPBLENDMQMasked512 x y 
(VPMOVVec64x8ToM mask)) +(moveMaskedFloat32x16 x mask) => (VMOVUPSMasked512 x (VPMOVVec32x16ToM mask)) +(moveMaskedFloat64x8 x mask) => (VMOVUPDMasked512 x (VPMOVVec64x8ToM mask)) +(moveMaskedInt8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) +(moveMaskedInt16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) +(moveMaskedInt32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) +(moveMaskedInt64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) +(moveMaskedUint8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) +(moveMaskedUint16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) +(moveMaskedUint32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) +(moveMaskedUint64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index c87978cd0d..171ae59e32 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -140,6 +140,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU8Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU16Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU32Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU64Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVUPDMasked512", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVUPSMasked512", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD512", argLength: 2, reg: w21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 4d48e4b16e..4f9877aa03 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -928,6 +928,16 @@ func simdGenericOps() []opData { {name: "blendMaskedInt16x32", argLength: 3, commutative: false}, {name: "blendMaskedInt32x16", argLength: 3, commutative: false}, {name: "blendMaskedInt64x8", argLength: 3, commutative: false}, + {name: "moveMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "moveMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "moveMaskedInt8x64", argLength: 2, commutative: false}, + {name: "moveMaskedInt16x32", argLength: 2, commutative: false}, + {name: "moveMaskedInt32x16", argLength: 2, commutative: false}, + {name: "moveMaskedInt64x8", argLength: 2, 
commutative: false}, + {name: "moveMaskedUint8x64", argLength: 2, commutative: false}, + {name: "moveMaskedUint16x32", argLength: 2, commutative: false}, + {name: "moveMaskedUint32x16", argLength: 2, commutative: false}, + {name: "moveMaskedUint64x8", argLength: 2, commutative: false}, {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 05ee56d157..8375b3f8a6 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1363,6 +1363,12 @@ const ( OpAMD64VMINPSMasked128 OpAMD64VMINPSMasked256 OpAMD64VMINPSMasked512 + OpAMD64VMOVDQU8Masked512 + OpAMD64VMOVDQU16Masked512 + OpAMD64VMOVDQU32Masked512 + OpAMD64VMOVDQU64Masked512 + OpAMD64VMOVUPDMasked512 + OpAMD64VMOVUPSMasked512 OpAMD64VMULPD128 OpAMD64VMULPD256 OpAMD64VMULPD512 @@ -5572,6 +5578,16 @@ const ( OpblendMaskedInt16x32 OpblendMaskedInt32x16 OpblendMaskedInt64x8 + OpmoveMaskedFloat32x16 + OpmoveMaskedFloat64x8 + OpmoveMaskedInt8x64 + OpmoveMaskedInt16x32 + OpmoveMaskedInt32x16 + OpmoveMaskedInt64x8 + OpmoveMaskedUint8x64 + OpmoveMaskedUint16x32 + OpmoveMaskedUint32x16 + OpmoveMaskedUint64x8 OpCeilScaledFloat32x4 OpCeilScaledFloat32x8 OpCeilScaledFloat32x16 @@ -20776,6 +20792,90 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMOVDQU8Masked512", + argLen: 2, + asm: x86.AVMOVDQU8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQU16Masked512", + argLen: 2, + asm: x86.AVMOVDQU16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQU32Masked512", + argLen: 2, + asm: x86.AVMOVDQU32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQU64Masked512", + argLen: 2, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVUPDMasked512", + argLen: 2, + asm: x86.AVMOVUPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVUPSMasked512", + argLen: 2, + asm: x86.AVMOVUPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VMULPD128", argLen: 2, @@ -67992,6 +68092,56 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "moveMaskedFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedInt8x64", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedInt32x16", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedInt64x8", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedUint8x64", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedUint16x32", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedUint32x16", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedUint64x8", + argLen: 2, + generic: true, + }, { name: "CeilScaledFloat32x4", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 2b2df15bc1..78c1ddd9dc 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -507,6 +507,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64TESTW(v) case OpAMD64TESTWconst: return rewriteValueAMD64_OpAMD64TESTWconst(v) + case OpAMD64VPANDQ512: + return rewriteValueAMD64_OpAMD64VPANDQ512(v) case OpAMD64VPMOVVec16x16ToM: return rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v) case OpAMD64VPMOVVec16x32ToM: @@ -4255,6 +4257,26 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpblendMaskedInt64x8(v) case OpblendMaskedInt8x64: return rewriteValueAMD64_OpblendMaskedInt8x64(v) + case OpmoveMaskedFloat32x16: + return rewriteValueAMD64_OpmoveMaskedFloat32x16(v) + case OpmoveMaskedFloat64x8: + return rewriteValueAMD64_OpmoveMaskedFloat64x8(v) + case OpmoveMaskedInt16x32: + return rewriteValueAMD64_OpmoveMaskedInt16x32(v) + case OpmoveMaskedInt32x16: + return rewriteValueAMD64_OpmoveMaskedInt32x16(v) + case OpmoveMaskedInt64x8: + return rewriteValueAMD64_OpmoveMaskedInt64x8(v) + case OpmoveMaskedInt8x64: + return rewriteValueAMD64_OpmoveMaskedInt8x64(v) + case OpmoveMaskedUint16x32: + return rewriteValueAMD64_OpmoveMaskedUint16x32(v) + case OpmoveMaskedUint32x16: + return rewriteValueAMD64_OpmoveMaskedUint32x16(v) + case OpmoveMaskedUint64x8: + return rewriteValueAMD64_OpmoveMaskedUint64x8(v) + case OpmoveMaskedUint8x64: + return rewriteValueAMD64_OpmoveMaskedUint8x64(v) } return false } @@ -25949,6 +25971,71 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPANDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDQ512 x (VPMOVMToVec64x8 k)) + // result: (VMOVDQU64Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec64x8 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU64Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDQ512 x (VPMOVMToVec32x16 k)) + // result: (VMOVDQU32Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec32x16 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU32Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDQ512 x (VPMOVMToVec16x32 k)) + // result: (VMOVDQU16Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec16x32 { 
+ continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU16Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDQ512 x (VPMOVMToVec8x64 k)) + // result: (VMOVDQU8Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec8x64 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU8Masked512) + v.AddArg2(x, k) + return true + } + break + } + return false +} func rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v *Value) bool { v_0 := v.Args[0] // match: (VPMOVVec16x16ToM (VPMOVMToVec16x16 x)) @@ -39220,6 +39307,166 @@ func rewriteValueAMD64_OpblendMaskedInt8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpmoveMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedFloat32x16 x mask) + // result: (VMOVUPSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVUPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedFloat64x8 x mask) + // result: (VMOVUPDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVUPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedInt16x32 x mask) + // result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU16Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedInt32x16 x mask) + // result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU32Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedInt64x8 x mask) + // result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU64Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedInt8x64 x mask) + // result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU8Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedUint16x32 x mask) + // result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU16Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } 
+} +func rewriteValueAMD64_OpmoveMaskedUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedUint32x16 x mask) + // result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU32Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedUint64x8 x mask) + // result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU64Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedUint8x64 x mask) + // result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU8Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteBlockAMD64(b *Block) bool { typ := &b.Func.Config.Types switch b.Kind { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a519b7d5b3..0fd330779e 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1070,6 +1070,16 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x32.blendMasked", opLen3(ssa.OpblendMaskedInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.blendMasked", opLen3(ssa.OpblendMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.blendMasked", opLen3(ssa.OpblendMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.moveMasked", opLen2(ssa.OpmoveMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.moveMasked", opLen2(ssa.OpmoveMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.moveMasked", opLen2(ssa.OpmoveMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.moveMasked", opLen2(ssa.OpmoveMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.moveMasked", opLen2(ssa.OpmoveMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.moveMasked", opLen2(ssa.OpmoveMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.moveMasked", opLen2(ssa.OpmoveMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.moveMasked", opLen2(ssa.OpmoveMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.moveMasked", opLen2(ssa.OpmoveMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.moveMasked", opLen2(ssa.OpmoveMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdssa.go b/src/simd/_gen/simdgen/gen_simdssa.go 
index 5a5421a815..67a029fa45 100644 --- a/src/simd/_gen/simdgen/gen_simdssa.go +++ b/src/simd/_gen/simdgen/gen_simdssa.go @@ -98,7 +98,7 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { seen[asm] = struct{}{} caseStr := fmt.Sprintf("ssa.OpAMD64%s", asm) if shapeIn == OneKmaskIn || shapeIn == OneKmaskImmIn { - if gOp.Zeroing == nil { + if gOp.Zeroing == nil || *gOp.Zeroing { ZeroingMask = append(ZeroingMask, caseStr) } } diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index 4044addd8c..e438d7fa6e 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -129,7 +129,7 @@ func (o *Operation) VectorWidth() int { func machineOpName(maskType maskShape, gOp Operation) string { asm := gOp.Asm - if maskType == 2 { + if maskType == OneMask { asm += "Masked" } asm = fmt.Sprintf("%s%d", asm, gOp.VectorWidth()) diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index ef8e036050..438c1ef309 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -50,6 +50,11 @@ documentation: !string |- // NAME blends two vectors based on mask values, choosing either // the first or the second based on whether the third is false or true +- go: move + commutative: false + documentation: !string |- + // NAME blends a vector with zero, with the original value where the mask is true + // and zero where the mask is false. - go: Expand commutative: false documentation: !string |- diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index d4d1b4b9bd..2398e53415 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -284,6 +284,38 @@ out: - *v + # For AVX512 +- go: move + asm: VMOVDQU(8|16|32|64) + zeroing: true + in: + - &v + go: $t + bits: 512 + class: vreg + base: int|uint + inVariant: + - + class: mask + out: + - *v + + # For AVX512 +- go: move + asm: VMOVUP[SD] + zeroing: true + in: + - &v + go: $t + bits: 512 + class: vreg + base: float + inVariant: + - + class: mask + out: + - *v + - go: Expand asm: "VPEXPAND[BWDQ]|VEXPANDP[SD]" in: diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 79f5dc8523..019f9df1ed 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -6122,6 +6122,88 @@ func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 // Asm: VPBLENDMQ, CPU Feature: AVX512 func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 +/* moveMasked */ + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVUPS, CPU Feature: AVX512 +func (x Float32x16) moveMasked(mask Mask32x16) Float32x16 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVUPD, CPU Feature: AVX512 +func (x Float64x8) moveMasked(mask Mask64x8) Float64x8 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VMOVDQU8, CPU Feature: AVX512 +func (x Int8x64) moveMasked(mask Mask8x64) Int8x64 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU16, CPU Feature: AVX512 +func (x Int16x32) moveMasked(mask Mask16x32) Int16x32 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +func (x Int32x16) moveMasked(mask Mask32x16) Int32x16 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +func (x Int64x8) moveMasked(mask Mask64x8) Int64x8 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU8, CPU Feature: AVX512 +func (x Uint8x64) moveMasked(mask Mask8x64) Uint8x64 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU16, CPU Feature: AVX512 +func (x Uint16x32) moveMasked(mask Mask16x32) Uint16x32 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +func (x Uint32x16) moveMasked(mask Mask32x16) Uint32x16 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +func (x Uint64x8) moveMasked(mask Mask64x8) Uint64x8 + // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) -- 2.52.0
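The peephole added to AMD64.rules rests on a lane-wise identity: ANDing a 512-bit vector with a mask that has been expanded to a vector (VPMOVMToVec*) keeps the lanes where the mask is true and zeroes the rest, which is exactly what a zeroing masked move (VMOVDQU*/VMOVUP* under a k-register) does in one instruction. A minimal scalar sketch of that identity in plain Go follows; the 8x64-bit lane model and the function names are illustrative only and are not part of the patch or the simd package API.

package main

import "fmt"

// andWithExpandedMask models VPANDQ512 x (VPMOVMToVec64x8 k): each true mask
// bit is expanded to an all-ones 64-bit lane before the bitwise AND.
func andWithExpandedMask(x [8]uint64, k uint8) [8]uint64 {
	var out [8]uint64
	for i := range x {
		var lane uint64
		if k&(1<<i) != 0 {
			lane = ^uint64(0) // all ones where the mask is true
		}
		out[i] = x[i] & lane
	}
	return out
}

// zeroingMaskedMove models VMOVDQU64Masked512 x k with a zeroing write mask:
// lanes with a true mask bit keep their value, all other lanes become zero.
func zeroingMaskedMove(x [8]uint64, k uint8) [8]uint64 {
	var out [8]uint64
	for i := range x {
		if k&(1<<i) != 0 {
			out[i] = x[i]
		}
	}
	return out
}

func main() {
	x := [8]uint64{1, 2, 3, 4, 5, 6, 7, 8}
	k := uint8(0b10100101)
	// Both forms produce the same result, so the compiler may replace the
	// AND-with-expanded-mask pattern by the single zeroing masked move.
	fmt.Println(andWithExpandedMask(x, k) == zeroingMaskedMove(x, k)) // true
}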