From 972732b245399097e1e59aa2e35c47ef5efbf394 Mon Sep 17 00:00:00 2001
From: Junyang Shao
Date: Tue, 4 Nov 2025 20:33:52 +0000
Subject: [PATCH] [dev.simd] simd, cmd/compile: remove move from API

These should really be machine ops only.

Change-Id: Idcc611719eff068153d88c5162dd2e0883e5e0ca
Reviewed-on: https://go-review.googlesource.com/c/go/+/717821
Reviewed-by: David Chase
LUCI-TryBot-Result: Go LUCI
---
 src/cmd/compile/internal/amd64/simdssa.go     |  24 ++
 .../compile/internal/ssa/_gen/simdAMD64.rules |  10 -
 .../compile/internal/ssa/_gen/simdAMD64ops.go |  12 +
 .../internal/ssa/_gen/simdgenericOps.go       |  10 -
 src/cmd/compile/internal/ssa/opGen.go         | 240 +++++++++++++-----
 src/cmd/compile/internal/ssa/rewriteAMD64.go  | 180 -------------
 .../compile/internal/ssagen/simdintrinsics.go |  10 -
 src/simd/_gen/simdgen/gen_simdTypes.go        |   6 +-
 .../_gen/simdgen/ops/Moves/categories.yaml    |   5 +-
 src/simd/_gen/simdgen/ops/Moves/go.yaml       |   2 -
 src/simd/ops_amd64.go                         |  12 -
 src/simd/ops_internal_amd64.go                |  88 +------
 12 files changed, 232 insertions(+), 367 deletions(-)

diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go
index d365ce8afe..9a265e127f 100644
--- a/src/cmd/compile/internal/amd64/simdssa.go
+++ b/src/cmd/compile/internal/amd64/simdssa.go
@@ -914,11 +914,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 		ssa.OpAMD64VSQRTPDMasked128,
 		ssa.OpAMD64VSQRTPDMasked256,
 		ssa.OpAMD64VSQRTPDMasked512,
+		ssa.OpAMD64VMOVUPSMasked128,
+		ssa.OpAMD64VMOVUPSMasked256,
 		ssa.OpAMD64VMOVUPSMasked512,
+		ssa.OpAMD64VMOVUPDMasked128,
+		ssa.OpAMD64VMOVUPDMasked256,
 		ssa.OpAMD64VMOVUPDMasked512,
+		ssa.OpAMD64VMOVDQU8Masked128,
+		ssa.OpAMD64VMOVDQU8Masked256,
 		ssa.OpAMD64VMOVDQU8Masked512,
+		ssa.OpAMD64VMOVDQU16Masked128,
+		ssa.OpAMD64VMOVDQU16Masked256,
 		ssa.OpAMD64VMOVDQU16Masked512,
+		ssa.OpAMD64VMOVDQU32Masked128,
+		ssa.OpAMD64VMOVDQU32Masked256,
 		ssa.OpAMD64VMOVDQU32Masked512,
+		ssa.OpAMD64VMOVDQU64Masked128,
+		ssa.OpAMD64VMOVDQU64Masked256,
 		ssa.OpAMD64VMOVDQU64Masked512:
 		p = simdVkv(s, v)
 
@@ -2541,11 +2553,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 		ssa.OpAMD64VPXORQMasked128,
 		ssa.OpAMD64VPXORQMasked256,
 		ssa.OpAMD64VPXORQMasked512,
+		ssa.OpAMD64VMOVUPSMasked128,
+		ssa.OpAMD64VMOVUPSMasked256,
 		ssa.OpAMD64VMOVUPSMasked512,
+		ssa.OpAMD64VMOVUPDMasked128,
+		ssa.OpAMD64VMOVUPDMasked256,
 		ssa.OpAMD64VMOVUPDMasked512,
+		ssa.OpAMD64VMOVDQU8Masked128,
+		ssa.OpAMD64VMOVDQU8Masked256,
 		ssa.OpAMD64VMOVDQU8Masked512,
+		ssa.OpAMD64VMOVDQU16Masked128,
+		ssa.OpAMD64VMOVDQU16Masked256,
 		ssa.OpAMD64VMOVDQU16Masked512,
+		ssa.OpAMD64VMOVDQU32Masked128,
+		ssa.OpAMD64VMOVDQU32Masked256,
 		ssa.OpAMD64VMOVDQU32Masked512,
+		ssa.OpAMD64VMOVDQU64Masked128,
+		ssa.OpAMD64VMOVDQU64Masked256,
 		ssa.OpAMD64VMOVDQU64Masked512,
 		ssa.OpAMD64VPSLLWMasked128const,
 		ssa.OpAMD64VPSLLWMasked256const,
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules
index 1fc569017b..06e1020ec4 100644
--- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules
@@ -1324,16 +1324,6 @@
 (concatSelectedConstantGroupedUint32x16 ...) => (VSHUFPS512 ...)
 (concatSelectedConstantGroupedUint64x4 ...) => (VSHUFPD256 ...)
 (concatSelectedConstantGroupedUint64x8 ...) => (VSHUFPD512 ...)
-(moveMaskedFloat32x16 x mask) => (VMOVUPSMasked512 x (VPMOVVec32x16ToM mask))
-(moveMaskedFloat64x8 x mask) => (VMOVUPDMasked512 x (VPMOVVec64x8ToM mask))
-(moveMaskedInt8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask))
-(moveMaskedInt16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask))
-(moveMaskedInt32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask))
-(moveMaskedInt64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask))
-(moveMaskedUint8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask))
-(moveMaskedUint16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask))
-(moveMaskedUint32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask))
-(moveMaskedUint64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask))
 (ternInt32x4 ...) => (VPTERNLOGD128 ...)
 (ternInt32x8 ...) => (VPTERNLOGD256 ...)
 (ternInt32x16 ...) => (VPTERNLOGD512 ...)
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
index 70558de0f3..f867c6e315 100644
--- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
@@ -155,11 +155,23 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf
 		{name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false},
 		{name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false},
 		{name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU8Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVDQU8Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVDQU8Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU16Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVDQU16Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVDQU16Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU32Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVDQU32Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVDQU32Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU64Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVDQU64Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVDQU64Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVUPDMasked128", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVUPDMasked256", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVUPDMasked512", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVUPSMasked128", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVUPSMasked256", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVUPSMasked512", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec512", resultInArg0: false},
 		{name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
 		{name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go
index 53b3984351..71a4cb3ea8 100644
--- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go
+++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go
@@ -1109,16 +1109,6 @@ func simdGenericOps() []opData {
 		{name: "blendMaskedInt16x32", argLength: 3, commutative: false},
 		{name: "blendMaskedInt32x16", argLength: 3, commutative: false},
 		{name: "blendMaskedInt64x8", argLength: 3, commutative: false},
-		{name: "moveMaskedFloat32x16", argLength: 2, commutative: false},
-		{name: "moveMaskedFloat64x8", argLength: 2, commutative: false},
-		{name: "moveMaskedInt8x64", argLength: 2, commutative: false},
-		{name: "moveMaskedInt16x32", argLength: 2, commutative: false},
-		{name: "moveMaskedInt32x16", argLength: 2, commutative: false},
-		{name: "moveMaskedInt64x8", argLength: 2, commutative: false},
-		{name: "moveMaskedUint8x64", argLength: 2, commutative: false},
-		{name: "moveMaskedUint16x32", argLength: 2, commutative: false},
-		{name: "moveMaskedUint32x16", argLength: 2, commutative: false},
-		{name: "moveMaskedUint64x8", argLength: 2, commutative: false},
 		{name: "AESRoundKeyGenAssistUint32x4", argLength: 1, commutative: false, aux: "UInt8"},
 		{name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
 		{name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 11f53f5a56..68bfe68eb4 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -1395,11 +1395,23 @@ const (
 	OpAMD64VMINPSMasked128
 	OpAMD64VMINPSMasked256
 	OpAMD64VMINPSMasked512
+	OpAMD64VMOVDQU8Masked128
+	OpAMD64VMOVDQU8Masked256
 	OpAMD64VMOVDQU8Masked512
+	OpAMD64VMOVDQU16Masked128
+	OpAMD64VMOVDQU16Masked256
 	OpAMD64VMOVDQU16Masked512
+	OpAMD64VMOVDQU32Masked128
+	OpAMD64VMOVDQU32Masked256
 	OpAMD64VMOVDQU32Masked512
+	OpAMD64VMOVDQU64Masked128
+	OpAMD64VMOVDQU64Masked256
 	OpAMD64VMOVDQU64Masked512
+	OpAMD64VMOVUPDMasked128
+	OpAMD64VMOVUPDMasked256
 	OpAMD64VMOVUPDMasked512
+	OpAMD64VMOVUPSMasked128
+	OpAMD64VMOVUPSMasked256
 	OpAMD64VMOVUPSMasked512
 	OpAMD64VMULPD128
 	OpAMD64VMULPD256
@@ -6508,16 +6520,6 @@ const (
 	OpblendMaskedInt16x32
 	OpblendMaskedInt32x16
 	OpblendMaskedInt64x8
-	OpmoveMaskedFloat32x16
-	OpmoveMaskedFloat64x8
-	OpmoveMaskedInt8x64
-	OpmoveMaskedInt16x32
-	OpmoveMaskedInt32x16
-	OpmoveMaskedInt64x8
-	OpmoveMaskedUint8x64
-	OpmoveMaskedUint16x32
-	OpmoveMaskedUint32x16
-	OpmoveMaskedUint64x8
 	OpAESRoundKeyGenAssistUint32x4
 	OpCeilScaledFloat32x4
 	OpCeilScaledFloat32x8
@@ -22218,6 +22220,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:   "VMOVDQU8Masked128",
+		argLen: 2,
+		asm:    x86.AVMOVDQU8,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name:   "VMOVDQU8Masked256",
+		argLen: 2,
+		asm:    x86.AVMOVDQU8,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name:   "VMOVDQU8Masked512",
 		argLen: 2,
@@ -22232,6 +22262,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:   "VMOVDQU16Masked128",
+		argLen: 2,
+		asm:    x86.AVMOVDQU16,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name:   "VMOVDQU16Masked256",
+		argLen: 2,
+		asm:    x86.AVMOVDQU16,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name:   "VMOVDQU16Masked512",
 		argLen: 2,
@@ -22246,6 +22304,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:   "VMOVDQU32Masked128",
+		argLen: 2,
+		asm:    x86.AVMOVDQU32,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name:   "VMOVDQU32Masked256",
+		argLen: 2,
+		asm:    x86.AVMOVDQU32,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name:   "VMOVDQU32Masked512",
 		argLen: 2,
@@ -22260,6 +22346,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:   "VMOVDQU64Masked128",
+		argLen: 2,
+		asm:    x86.AVMOVDQU64,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name:   "VMOVDQU64Masked256",
+		argLen: 2,
+		asm:    x86.AVMOVDQU64,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name:   "VMOVDQU64Masked512",
 		argLen: 2,
@@ -22274,6 +22388,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:   "VMOVUPDMasked128",
+		argLen: 2,
+		asm:    x86.AVMOVUPD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name:   "VMOVUPDMasked256",
+		argLen: 2,
+		asm:    x86.AVMOVUPD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name:   "VMOVUPDMasked512",
 		argLen: 2,
@@ -22288,6 +22430,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:   "VMOVUPSMasked128",
+		argLen: 2,
+		asm:    x86.AVMOVUPS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name:   "VMOVUPSMasked256",
+		argLen: 2,
+		asm:    x86.AVMOVUPS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name:   "VMOVUPSMasked512",
 		argLen: 2,
@@ -82110,56 +82280,6 @@ var opcodeTable = [...]opInfo{
 		argLen:  3,
 		generic: true,
 	},
-	{
-		name:    "moveMaskedFloat32x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "moveMaskedFloat64x8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "moveMaskedInt8x64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "moveMaskedInt16x32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "moveMaskedInt32x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "moveMaskedInt64x8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "moveMaskedUint8x64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "moveMaskedUint16x32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "moveMaskedUint32x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "moveMaskedUint64x8",
-		argLen:  2,
-		generic: true,
-	},
 	{
 		name:    "AESRoundKeyGenAssistUint32x4",
 		auxType: auxUInt8,
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 83f8e0dc2e..610086b88f 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -6095,26 +6095,6 @@ func rewriteValueAMD64(v *Value) bool {
 	case OpconcatSelectedConstantUint64x2:
 		v.Op = OpAMD64VSHUFPD128
 		return true
-	case OpmoveMaskedFloat32x16:
-		return rewriteValueAMD64_OpmoveMaskedFloat32x16(v)
-	case OpmoveMaskedFloat64x8:
-		return rewriteValueAMD64_OpmoveMaskedFloat64x8(v)
-	case OpmoveMaskedInt16x32:
-		return rewriteValueAMD64_OpmoveMaskedInt16x32(v)
-	case OpmoveMaskedInt32x16:
-		return rewriteValueAMD64_OpmoveMaskedInt32x16(v)
-	case OpmoveMaskedInt64x8:
-		return rewriteValueAMD64_OpmoveMaskedInt64x8(v)
-	case OpmoveMaskedInt8x64:
-		return rewriteValueAMD64_OpmoveMaskedInt8x64(v)
-	case OpmoveMaskedUint16x32:
-		return rewriteValueAMD64_OpmoveMaskedUint16x32(v)
-	case OpmoveMaskedUint32x16:
-		return rewriteValueAMD64_OpmoveMaskedUint32x16(v)
-	case OpmoveMaskedUint64x8:
-		return rewriteValueAMD64_OpmoveMaskedUint64x8(v)
-	case OpmoveMaskedUint8x64:
-		return rewriteValueAMD64_OpmoveMaskedUint8x64(v)
 	case OpternInt32x16:
 		v.Op = OpAMD64VPTERNLOGD512
 		return true
@@ -60638,166 +60618,6 @@ func rewriteValueAMD64_OpblendMaskedInt8x64(v *Value) bool {
 		return true
 	}
 }
-func rewriteValueAMD64_OpmoveMaskedFloat32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedFloat32x16 x mask)
-	// result: (VMOVUPSMasked512 x (VPMOVVec32x16ToM mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVUPSMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedFloat64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedFloat64x8 x mask)
-	// result: (VMOVUPDMasked512 x (VPMOVVec64x8ToM mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVUPDMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedInt16x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedInt16x32 x mask)
-	// result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU16Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedInt32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedInt32x16 x mask)
-	// result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU32Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedInt64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedInt64x8 x mask)
-	// result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU64Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedInt8x64(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedInt8x64 x mask)
-	// result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU8Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedUint16x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedUint16x32 x mask)
-	// result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU16Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedUint32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedUint32x16 x mask)
-	// result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU32Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedUint64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedUint64x8 x mask)
-	// result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU64Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedUint8x64(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedUint8x64 x mask)
-	// result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU8Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
 func rewriteBlockAMD64(b *Block) bool {
 	typ := &b.Func.Config.Types
 	switch b.Kind {
diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go
index 6e02860916..710d375ad5 100644
--- a/src/cmd/compile/internal/ssagen/simdintrinsics.go
+++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go
@@ -1300,16 +1300,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies .
 	addF(simdPackage, "Uint32x16.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint32x16, types.TypeVec512, 0), sys.AMD64)
 	addF(simdPackage, "Uint64x4.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint64x4, types.TypeVec256, 0), sys.AMD64)
 	addF(simdPackage, "Uint64x8.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint64x8, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Float32x16.moveMasked", opLen2(ssa.OpmoveMaskedFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x8.moveMasked", opLen2(ssa.OpmoveMaskedFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x64.moveMasked", opLen2(ssa.OpmoveMaskedInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x32.moveMasked", opLen2(ssa.OpmoveMaskedInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x16.moveMasked", opLen2(ssa.OpmoveMaskedInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x8.moveMasked", opLen2(ssa.OpmoveMaskedInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x64.moveMasked", opLen2(ssa.OpmoveMaskedUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x32.moveMasked", opLen2(ssa.OpmoveMaskedUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x16.moveMasked", opLen2(ssa.OpmoveMaskedUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x8.moveMasked", opLen2(ssa.OpmoveMaskedUint64x8, types.TypeVec512), sys.AMD64)
 	addF(simdPackage, "Int32x4.tern", opLen3Imm8(ssa.OpternInt32x4, types.TypeVec128, 0), sys.AMD64)
 	addF(simdPackage, "Int32x8.tern", opLen3Imm8(ssa.OpternInt32x8, types.TypeVec256, 0), sys.AMD64)
 	addF(simdPackage, "Int32x16.tern", opLen3Imm8(ssa.OpternInt32x16, types.TypeVec512, 0), sys.AMD64)
diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go
index 7765327b32..efa3ffabeb 100644
--- a/src/simd/_gen/simdgen/gen_simdTypes.go
+++ b/src/simd/_gen/simdgen/gen_simdTypes.go
@@ -613,7 +613,11 @@ func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) (f, fI *bytes.Buffer)
 			}
 		}
 		if i == 0 || op.Go != ops[i-1].Go {
-			fmt.Fprintf(f, "\n/* %s */\n", op.Go)
+			if unicode.IsUpper([]rune(op.Go)[0]) {
+				fmt.Fprintf(f, "\n/* %s */\n", op.Go)
+			} else {
+				fmt.Fprintf(fI, "\n/* %s */\n", op.Go)
+			}
 		}
 		if unicode.IsUpper([]rune(op.Go)[0]) {
 			if err := t.ExecuteTemplate(f, s, op); err != nil {
diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml
index 49006f8801..b1283f4b6b 100644
--- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml
+++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml
@@ -52,9 +52,8 @@
     // the first or the second based on whether the third is false or true
 - go: move
   commutative: false
-  documentation: !string |-
-    // NAME blends a vector with zero, with the original value where the mask is true
-    // and zero where the mask is false.
+  noTypes: "true"
+  noGenericOps: "true"
 - go: Expand
   commutative: false
   documentation: !string |-
diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml
index 495b9ed6fa..08e857c8ea 100644
--- a/src/simd/_gen/simdgen/ops/Moves/go.yaml
+++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml
@@ -291,7 +291,6 @@
   in:
     - &v
       go: $t
-      bits: 512
       class: vreg
       base: int|uint
   inVariant:
@@ -307,7 +306,6 @@
   in:
     - &v
       go: $t
-      bits: 512
      class: vreg
       base: float
   inVariant:
diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go
index ace2f7aec8..0f21c8594c 100644
--- a/src/simd/ops_amd64.go
+++ b/src/simd/ops_amd64.go
@@ -7606,18 +7606,6 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4
 // Asm: VPXORQ, CPU Feature: AVX512
 func (x Uint64x8) Xor(y Uint64x8) Uint64x8
 
-/* blend */
-
-/* blendMasked */
-
-/* concatSelectedConstant */
-
-/* concatSelectedConstantGrouped */
-
-/* moveMasked */
-
-/* tern */
-
 // Float64x2 converts from Float32x4 to Float64x2
 func (from Float32x4) AsFloat64x2() (to Float64x2)
 
diff --git a/src/simd/ops_internal_amd64.go b/src/simd/ops_internal_amd64.go
index cb18c90e29..8be40995f0 100644
--- a/src/simd/ops_internal_amd64.go
+++ b/src/simd/ops_internal_amd64.go
@@ -4,6 +4,8 @@
 
 package simd
 
+/* blend */
+
 // blend blends two vectors based on mask values, choosing either
 // the first or the second based on whether the third is false or true
 //
@@ -16,6 +18,8 @@ func (x Int8x16) blend(y Int8x16, mask Int8x16) Int8x16
 // Asm: VPBLENDVB, CPU Feature: AVX2
 func (x Int8x32) blend(y Int8x32, mask Int8x32) Int8x32
 
+/* blendMasked */
+
 // blendMasked blends two vectors based on mask values, choosing either
 // the first or the second based on whether the third is false or true
 //
@@ -48,6 +52,8 @@ func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16
 // Asm: VPBLENDMQ, CPU Feature: AVX512
 func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8
 
+/* concatSelectedConstant */
+
 // concatSelectedConstant concatenates selected elements from x and y into the lower and upper
 // halves of the output. The selection is chosen by the constant parameter h1h0l1l0
 // where each {h,l}{1,0} is two bits specify which element from y or x to select.
@@ -117,6 +123,8 @@ func (x Uint32x4) concatSelectedConstant(h1h0l1l0 uint8, y Uint32x4) Uint32x4
 // Asm: VSHUFPD, CPU Feature: AVX
 func (x Uint64x2) concatSelectedConstant(hilo uint8, y Uint64x2) Uint64x2
 
+/* concatSelectedConstantGrouped */
+
 // concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y
 // into the lower and upper halves of corresponding subvectors of the output.
 // The selection is chosen by the constant parameter h1h0l1l0
@@ -330,85 +338,7 @@ func (x Uint64x4) concatSelectedConstantGrouped(hilos uint8, y Uint64x4) Uint64x
 // Asm: VSHUFPD, CPU Feature: AVX512
 func (x Uint64x8) concatSelectedConstantGrouped(hilos uint8, y Uint64x8) Uint64x8
 
-// moveMasked blends a vector with zero, with the original value where the mask is true
-// and zero where the mask is false.
-//
-// This operation is applied selectively under a write mask.
-//
-// Asm: VMOVUPS, CPU Feature: AVX512
-func (x Float32x16) moveMasked(mask Mask32x16) Float32x16
-
-// moveMasked blends a vector with zero, with the original value where the mask is true
-// and zero where the mask is false.
-//
-// This operation is applied selectively under a write mask.
-//
-// Asm: VMOVUPD, CPU Feature: AVX512
-func (x Float64x8) moveMasked(mask Mask64x8) Float64x8
-
-// moveMasked blends a vector with zero, with the original value where the mask is true
-// and zero where the mask is false.
-//
-// This operation is applied selectively under a write mask.
-//
-// Asm: VMOVDQU8, CPU Feature: AVX512
-func (x Int8x64) moveMasked(mask Mask8x64) Int8x64
-
-// moveMasked blends a vector with zero, with the original value where the mask is true
-// and zero where the mask is false.
-//
-// This operation is applied selectively under a write mask.
-//
-// Asm: VMOVDQU16, CPU Feature: AVX512
-func (x Int16x32) moveMasked(mask Mask16x32) Int16x32
-
-// moveMasked blends a vector with zero, with the original value where the mask is true
-// and zero where the mask is false.
-//
-// This operation is applied selectively under a write mask.
-//
-// Asm: VMOVDQU32, CPU Feature: AVX512
-func (x Int32x16) moveMasked(mask Mask32x16) Int32x16
-
-// moveMasked blends a vector with zero, with the original value where the mask is true
-// and zero where the mask is false.
-//
-// This operation is applied selectively under a write mask.
-//
-// Asm: VMOVDQU64, CPU Feature: AVX512
-func (x Int64x8) moveMasked(mask Mask64x8) Int64x8
-
-// moveMasked blends a vector with zero, with the original value where the mask is true
-// and zero where the mask is false.
-//
-// This operation is applied selectively under a write mask.
-//
-// Asm: VMOVDQU8, CPU Feature: AVX512
-func (x Uint8x64) moveMasked(mask Mask8x64) Uint8x64
-
-// moveMasked blends a vector with zero, with the original value where the mask is true
-// and zero where the mask is false.
-//
-// This operation is applied selectively under a write mask.
-//
-// Asm: VMOVDQU16, CPU Feature: AVX512
-func (x Uint16x32) moveMasked(mask Mask16x32) Uint16x32
-
-// moveMasked blends a vector with zero, with the original value where the mask is true
-// and zero where the mask is false.
-//
-// This operation is applied selectively under a write mask.
-//
-// Asm: VMOVDQU32, CPU Feature: AVX512
-func (x Uint32x16) moveMasked(mask Mask32x16) Uint32x16
-
-// moveMasked blends a vector with zero, with the original value where the mask is true
-// and zero where the mask is false.
-//
-// This operation is applied selectively under a write mask.
-//
-// Asm: VMOVDQU64, CPU Feature: AVX512
-func (x Uint64x8) moveMasked(mask Mask64x8) Uint64x8
+/* tern */
 
 // tern performs a logical operation on three vectors based on the 8-bit truth table.
 // Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z))
-- 
2.52.0
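For readers following the CL: the deleted stubs documented moveMasked as blending "a vector with zero, with the original value where the mask is true and zero where the mask is false". That behavior is unchanged; it now lives only in the VMOV*Masked machine ops that instruction selection emits internally, rather than in a nameable intrinsic. A minimal scalar sketch of that zeroing masked move in plain Go follows — the helper name and lane count below are illustrative assumptions, not part of package simd:

	package main

	import "fmt"

	// moveMasked16 models a 16-lane zeroing masked move: each lane keeps
	// x's value where mask is true and becomes zero where mask is false.
	// Illustrative only; not an API of package simd.
	func moveMasked16(x [16]int32, mask [16]bool) [16]int32 {
		var r [16]int32 // zero-initialized, so false lanes stay zero
		for i, keep := range mask {
			if keep {
				r[i] = x[i] // true lane: carry the original element through
			}
		}
		return r
	}

	func main() {
		x := [16]int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
		var mask [16]bool
		for i := range mask {
			mask[i] = i%2 == 0 // keep even lanes, zero odd lanes
		}
		fmt.Println(moveMasked16(x, mask))
		// prints: [1 0 3 0 5 0 7 0 9 0 11 0 13 0 15 0]
	}

Dropping the intrinsic loses no behavior: user code can no longer name the operation, while the compiler remains free to use the masked-move encodings — now defined for 128-, 256-, and 512-bit vectors — wherever they are profitable.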