From: Junyang Shao Date: Thu, 26 Jun 2025 04:07:48 +0000 (+0000) Subject: [dev.simd] cmd/compile, simd: add galois field operations X-Git-Tag: go1.26rc1~147^2~212 X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=10c9621936;p=gostls13.git [dev.simd] cmd/compile, simd: add galois field operations This CL is generated by CL 684275. Change-Id: Ie1efd0979af0ef0a56781bf9013071bf4d2c52c5 Reviewed-on: https://go-review.googlesource.com/c/go/+/684175 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 6c1d365bfa..999f3c200c 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -118,6 +118,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPEQD256, ssa.OpAMD64VPCMPEQQ128, ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VGF2P8MULB128, + ssa.OpAMD64VGF2P8MULB256, + ssa.OpAMD64VGF2P8MULB512, ssa.OpAMD64VPCMPGTB128, ssa.OpAMD64VPCMPGTB256, ssa.OpAMD64VPCMPGTW128, @@ -395,6 +398,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VGF2P8MULBMasked128, + ssa.OpAMD64VGF2P8MULBMasked256, + ssa.OpAMD64VGF2P8MULBMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, ssa.OpAMD64VMAXPSMasked512, @@ -694,6 +700,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VGF2P8AFFINEQB128, + ssa.OpAMD64VGF2P8AFFINEQB256, + ssa.OpAMD64VGF2P8AFFINEQB512, + ssa.OpAMD64VGF2P8AFFINEINVQB128, + ssa.OpAMD64VGF2P8AFFINEINVQB256, + ssa.OpAMD64VGF2P8AFFINEINVQB512, ssa.OpAMD64VPSHLDW128, ssa.OpAMD64VPSHLDW256, ssa.OpAMD64VPSHLDW512, @@ -920,7 +932,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXTRQ128: p = simdFpgpImm8(s, v) - case ssa.OpAMD64VPSHLDWMasked128, + case ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked512, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VPSHLDWMasked128, ssa.OpAMD64VPSHLDWMasked256, ssa.OpAMD64VPSHLDWMasked512, ssa.OpAMD64VPSHLDDMasked128, @@ -1055,6 +1073,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked512, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VGF2P8MULBMasked128, + ssa.OpAMD64VGF2P8MULBMasked256, + ssa.OpAMD64VGF2P8MULBMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, ssa.OpAMD64VMAXPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 968ded2131..6a4ded0ec4 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -251,6 +251,15 @@ (FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) (FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) (FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
+(GaloisFieldAffineTransformUint8x16 [a] x y) => (VGF2P8AFFINEQB128 [a] x y) +(GaloisFieldAffineTransformUint8x32 [a] x y) => (VGF2P8AFFINEQB256 [a] x y) +(GaloisFieldAffineTransformUint8x64 [a] x y) => (VGF2P8AFFINEQB512 [a] x y) +(GaloisFieldAffineTransformInversedUint8x16 [a] x y) => (VGF2P8AFFINEINVQB128 [a] x y) +(GaloisFieldAffineTransformInversedUint8x32 [a] x y) => (VGF2P8AFFINEINVQB256 [a] x y) +(GaloisFieldAffineTransformInversedUint8x64 [a] x y) => (VGF2P8AFFINEINVQB512 [a] x y) +(GaloisFieldMulUint8x16 ...) => (VGF2P8MULB128 ...) +(GaloisFieldMulUint8x32 ...) => (VGF2P8MULB256 ...) +(GaloisFieldMulUint8x64 ...) => (VGF2P8MULB512 ...) (GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) (GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) @@ -607,6 +616,15 @@ (MaskedFusedMultiplySubAddFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) (MaskedFusedMultiplySubAddFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (MaskedFusedMultiplySubAddFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedGaloisFieldAffineTransformUint8x16 [a] x y mask) => (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(MaskedGaloisFieldAffineTransformUint8x32 [a] x y mask) => (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(MaskedGaloisFieldAffineTransformUint8x64 [a] x y mask) => (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) +(MaskedGaloisFieldAffineTransformInversedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(MaskedGaloisFieldAffineTransformInversedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(MaskedGaloisFieldAffineTransformInversedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) +(MaskedGaloisFieldMulUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedGaloisFieldMulUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedGaloisFieldMulUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) (MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index cbddbe0ff6..5e627e696e 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -719,7 +719,9 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB128", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGBMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: 
false}, {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -727,7 +729,9 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGBMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -735,7 +739,9 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGBMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -894,10 +900,22 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPUQ512", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB128", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: fp21, 
asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPUBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPUB256", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 0f3d3f8214..4907b78d12 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1365,6 +1365,7 @@ func simdGenericOps() []opData { {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AverageUint8x16", argLength: 2, commutative: true}, {name: "EqualUint8x16", argLength: 2, commutative: true}, + {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, {name: "GreaterUint8x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, {name: "LessUint8x16", argLength: 2, commutative: false}, @@ -1372,6 +1373,7 @@ func simdGenericOps() []opData { {name: "MaskedAddUint8x16", argLength: 3, commutative: true}, {name: "MaskedAverageUint8x16", argLength: 3, commutative: true}, {name: "MaskedEqualUint8x16", argLength: 3, commutative: true}, + {name: "MaskedGaloisFieldMulUint8x16", argLength: 3, commutative: false}, {name: "MaskedGreaterUint8x16", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint8x16", argLength: 3, commutative: false}, {name: 
"MaskedLessUint8x16", argLength: 3, commutative: false}, @@ -1399,6 +1401,7 @@ func simdGenericOps() []opData { {name: "AndNotUint8x32", argLength: 2, commutative: false}, {name: "AverageUint8x32", argLength: 2, commutative: true}, {name: "EqualUint8x32", argLength: 2, commutative: true}, + {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, {name: "GreaterUint8x32", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, {name: "LessUint8x32", argLength: 2, commutative: false}, @@ -1406,6 +1409,7 @@ func simdGenericOps() []opData { {name: "MaskedAddUint8x32", argLength: 3, commutative: true}, {name: "MaskedAverageUint8x32", argLength: 3, commutative: true}, {name: "MaskedEqualUint8x32", argLength: 3, commutative: true}, + {name: "MaskedGaloisFieldMulUint8x32", argLength: 3, commutative: false}, {name: "MaskedGreaterUint8x32", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint8x32", argLength: 3, commutative: false}, {name: "MaskedLessUint8x32", argLength: 3, commutative: false}, @@ -1431,6 +1435,7 @@ func simdGenericOps() []opData { {name: "AddUint8x64", argLength: 2, commutative: true}, {name: "AverageUint8x64", argLength: 2, commutative: true}, {name: "EqualUint8x64", argLength: 2, commutative: true}, + {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, {name: "GreaterUint8x64", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, {name: "LessUint8x64", argLength: 2, commutative: false}, @@ -1438,6 +1443,7 @@ func simdGenericOps() []opData { {name: "MaskedAddUint8x64", argLength: 3, commutative: true}, {name: "MaskedAverageUint8x64", argLength: 3, commutative: true}, {name: "MaskedEqualUint8x64", argLength: 3, commutative: true}, + {name: "MaskedGaloisFieldMulUint8x64", argLength: 3, commutative: false}, {name: "MaskedGreaterUint8x64", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint8x64", argLength: 3, commutative: false}, {name: "MaskedLessUint8x64", argLength: 3, commutative: false}, @@ -1784,7 +1790,19 @@ func simdGenericOps() []opData { {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformUint8x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformInversedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformInversedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedUint8x64", argLength: 2, commutative: 
false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformInversedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 2bdbd5156e..906bd74cdc 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1912,7 +1912,9 @@ const ( OpAMD64VPMINUQ512 OpAMD64VPMULUDQ512 OpAMD64VPAVGB128 + OpAMD64VGF2P8MULB128 OpAMD64VPAVGBMasked128 + OpAMD64VGF2P8MULBMasked128 OpAMD64VPMAXUBMasked128 OpAMD64VPMINUBMasked128 OpAMD64VPMADDUBSWMasked128 @@ -1920,7 +1922,9 @@ const ( OpAMD64VPMINUB128 OpAMD64VPMADDUBSW128 OpAMD64VPAVGB256 + OpAMD64VGF2P8MULB256 OpAMD64VPAVGBMasked256 + OpAMD64VGF2P8MULBMasked256 OpAMD64VPMAXUBMasked256 OpAMD64VPMINUBMasked256 OpAMD64VPMADDUBSWMasked256 @@ -1928,7 +1932,9 @@ const ( OpAMD64VPMINUB256 OpAMD64VPMADDUBSW256 OpAMD64VPAVGB512 + OpAMD64VGF2P8MULB512 OpAMD64VPAVGBMasked512 + OpAMD64VGF2P8MULBMasked512 OpAMD64VPMAXUBMasked512 OpAMD64VPMINUBMasked512 OpAMD64VPMADDUBSWMasked512 @@ -2087,11 +2093,23 @@ const ( OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 OpAMD64VPCMPUB128 + OpAMD64VGF2P8AFFINEQB128 + OpAMD64VGF2P8AFFINEINVQB128 OpAMD64VPCMPUBMasked128 + OpAMD64VGF2P8AFFINEQBMasked128 + OpAMD64VGF2P8AFFINEINVQBMasked128 OpAMD64VPCMPUB256 + OpAMD64VGF2P8AFFINEQB256 + OpAMD64VGF2P8AFFINEINVQB256 OpAMD64VPCMPUBMasked256 + OpAMD64VGF2P8AFFINEQBMasked256 + OpAMD64VGF2P8AFFINEINVQBMasked256 OpAMD64VPCMPUB512 + OpAMD64VGF2P8AFFINEQB512 + OpAMD64VGF2P8AFFINEINVQB512 OpAMD64VPCMPUBMasked512 + OpAMD64VGF2P8AFFINEQBMasked512 + OpAMD64VGF2P8AFFINEINVQBMasked512 OpARMADD OpARMADDconst @@ -5680,6 +5698,7 @@ const ( OpAndNotUint8x16 OpAverageUint8x16 OpEqualUint8x16 + OpGaloisFieldMulUint8x16 OpGreaterUint8x16 OpGreaterEqualUint8x16 OpLessUint8x16 @@ -5687,6 +5706,7 @@ const ( OpMaskedAddUint8x16 OpMaskedAverageUint8x16 OpMaskedEqualUint8x16 + OpMaskedGaloisFieldMulUint8x16 OpMaskedGreaterUint8x16 OpMaskedGreaterEqualUint8x16 OpMaskedLessUint8x16 @@ -5714,6 +5734,7 @@ const ( OpAndNotUint8x32 OpAverageUint8x32 OpEqualUint8x32 + OpGaloisFieldMulUint8x32 OpGreaterUint8x32 OpGreaterEqualUint8x32 OpLessUint8x32 @@ -5721,6 +5742,7 @@ const ( OpMaskedAddUint8x32 OpMaskedAverageUint8x32 OpMaskedEqualUint8x32 + OpMaskedGaloisFieldMulUint8x32 OpMaskedGreaterUint8x32 OpMaskedGreaterEqualUint8x32 OpMaskedLessUint8x32 @@ -5746,6 +5768,7 @@ const ( OpAddUint8x64 OpAverageUint8x64 OpEqualUint8x64 + OpGaloisFieldMulUint8x64 OpGreaterUint8x64 OpGreaterEqualUint8x64 OpLessUint8x64 @@ -5753,6 +5776,7 @@ const ( OpMaskedAddUint8x64 OpMaskedAverageUint8x64 OpMaskedEqualUint8x64 + OpMaskedGaloisFieldMulUint8x64 OpMaskedGreaterUint8x64 OpMaskedGreaterEqualUint8x64 OpMaskedLessUint8x64 @@ -6099,8 +6123,20 @@ const ( OpRotateAllRightUint64x8 OpShiftAllLeftAndFillUpperFromUint64x8 OpShiftAllRightAndFillUpperFromUint64x8 + OpGaloisFieldAffineTransformUint8x16 + OpGaloisFieldAffineTransformInversedUint8x16 OpGetElemUint8x16 + OpMaskedGaloisFieldAffineTransformUint8x16 + OpMaskedGaloisFieldAffineTransformInversedUint8x16 OpSetElemUint8x16 + OpGaloisFieldAffineTransformUint8x32 + OpGaloisFieldAffineTransformInversedUint8x32 + OpMaskedGaloisFieldAffineTransformUint8x32 + OpMaskedGaloisFieldAffineTransformInversedUint8x32 + OpGaloisFieldAffineTransformUint8x64 + OpGaloisFieldAffineTransformInversedUint8x64 + OpMaskedGaloisFieldAffineTransformUint8x64 + 
OpMaskedGaloisFieldAffineTransformInversedUint8x64 ) var opcodeTable = [...]opInfo{ @@ -29452,6 +29488,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULB128", + argLen: 2, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGBMasked128", argLen: 3, @@ -29468,6 +29518,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULBMasked128", + argLen: 3, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUBMasked128", argLen: 3, @@ -29574,6 +29639,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULB256", + argLen: 2, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGBMasked256", argLen: 3, @@ -29590,6 +29669,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULBMasked256", + argLen: 3, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUBMasked256", argLen: 3, @@ -29696,6 +29790,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULB512", + argLen: 2, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGBMasked512", argLen: 3, @@ -29712,6 +29820,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULBMasked512", + argLen: 3, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUBMasked512", argLen: 3, @@ -32144,6 +32267,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB128", + 
auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUBMasked128", auxType: auxInt8, @@ -32161,6 +32314,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUB256", auxType: auxInt8, @@ -32177,6 +32362,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUBMasked256", auxType: auxInt8, @@ -32194,6 +32409,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUB512", auxType: auxInt8, @@ -32210,6 +32457,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUBMasked512", auxType: auxInt8, @@ -32227,6 +32504,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQBMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "ADD", @@ -66684,6 +66993,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "GaloisFieldMulUint8x16", + argLen: 2, + generic: true, + }, { name: "GreaterUint8x16", argLen: 2, @@ -66722,6 +67036,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedGaloisFieldMulUint8x16", + argLen: 3, + generic: true, + }, { name: "MaskedGreaterUint8x16", argLen: 3, @@ -66871,6 +67190,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "GaloisFieldMulUint8x32", + argLen: 2, + generic: true, + }, { name: "GreaterUint8x32", argLen: 2, @@ -66909,6 +67233,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedGaloisFieldMulUint8x32", + argLen: 3, + generic: true, + }, { name: "MaskedGreaterUint8x32", argLen: 3, @@ -67047,6 +67376,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "GaloisFieldMulUint8x64", + argLen: 2, + generic: true, + }, { name: "GreaterUint8x64", argLen: 2, @@ -67085,6 +67419,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedGaloisFieldMulUint8x64", + argLen: 3, + generic: true, + }, { name: "MaskedGreaterUint8x64", argLen: 3, @@ -69149,18 +69488,90 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "GaloisFieldAffineTransformUint8x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "GaloisFieldAffineTransformInversedUint8x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "GetElemUint8x16", auxType: auxInt8, argLen: 1, generic: true, }, + { + name: "MaskedGaloisFieldAffineTransformUint8x16", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformInversedUint8x16", + auxType: auxInt8, + argLen: 3, + generic: true, + }, { name: "SetElemUint8x16", auxType: 
auxInt8, argLen: 2, generic: true, }, + { + name: "GaloisFieldAffineTransformUint8x32", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "GaloisFieldAffineTransformInversedUint8x32", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformUint8x32", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformInversedUint8x32", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "GaloisFieldAffineTransformUint8x64", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "GaloisFieldAffineTransformInversedUint8x64", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformUint8x64", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformInversedUint8x64", + auxType: auxInt8, + argLen: 3, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d7aa0339e7..22085dc80e 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1439,6 +1439,27 @@ func rewriteValueAMD64(v *Value) bool { case OpFusedMultiplySubAddFloat64x8: v.Op = OpAMD64VFMSUBADD213PD512 return true + case OpGaloisFieldAffineTransformInversedUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v) + case OpGaloisFieldAffineTransformInversedUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v) + case OpGaloisFieldAffineTransformInversedUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v) + case OpGaloisFieldAffineTransformUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v) + case OpGaloisFieldAffineTransformUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v) + case OpGaloisFieldAffineTransformUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v) + case OpGaloisFieldMulUint8x16: + v.Op = OpAMD64VGF2P8MULB128 + return true + case OpGaloisFieldMulUint8x32: + v.Op = OpAMD64VGF2P8MULB256 + return true + case OpGaloisFieldMulUint8x64: + v.Op = OpAMD64VGF2P8MULB512 + return true case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -2268,6 +2289,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v) case OpMaskedFusedMultiplySubAddFloat64x8: return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v) + case OpMaskedGaloisFieldAffineTransformInversedUint8x16: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x16(v) + case OpMaskedGaloisFieldAffineTransformInversedUint8x32: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x32(v) + case OpMaskedGaloisFieldAffineTransformInversedUint8x64: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x64(v) + case OpMaskedGaloisFieldAffineTransformUint8x16: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x16(v) + case OpMaskedGaloisFieldAffineTransformUint8x32: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x32(v) + case OpMaskedGaloisFieldAffineTransformUint8x64: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x64(v) + case OpMaskedGaloisFieldMulUint8x16: + return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x16(v) + case 
OpMaskedGaloisFieldMulUint8x32: + return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x32(v) + case OpMaskedGaloisFieldMulUint8x64: + return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x64(v) case OpMaskedGreaterEqualFloat32x16: return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) case OpMaskedGreaterEqualFloat32x4: @@ -31510,6 +31549,96 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x16 [a] x y) + // result: (VGF2P8AFFINEINVQB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x32 [a] x y) + // result: (VGF2P8AFFINEINVQB256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x64 [a] x y) + // result: (VGF2P8AFFINEINVQB512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x16 [a] x y) + // result: (VGF2P8AFFINEQB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x32 [a] x y) + // result: (VGF2P8AFFINEQB256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x64 [a] x y) + // result: (VGF2P8AFFINEQB512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { v_0 := v.Args[0] // match: (GetElemInt16x8 [a] x) @@ -38990,6 +39119,180 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformInversedUint8x16 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformInversedUint8x32 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformInversedUint8x64 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformUint8x16 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEQBMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformUint8x32 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEQBMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformUint8x64 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEQBMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldMulUint8x16 x y mask) + // result: (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8MULBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedGaloisFieldMulUint8x32 x y mask) + // result: (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8MULBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldMulUint8x64 x y mask) + // result: (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8MULBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d20c939293..d14b6be425 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -262,6 +262,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) @@ -618,6 +627,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) @@ -2197,3 +2215,23 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) } + +func opGaloisFieldAffineTransform(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[0].Op == ssa.OpConst8 { + return s.newValue2I(op, t, args[0].AuxInt, args[0], args[1]) + } + plainPanicSimdImm(s) + return s.newValue2I(op, t, 0, args[0], args[1]) + } +} + +func opGaloisFieldAffineTransformMasked(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[0].Op == ssa.OpConst8 { + return s.newValue3I(op, t, args[0].AuxInt, args[0], args[1], args[3]) + } + plainPanicSimdImm(s) + return s.newValue3I(op, t, 0, args[0], args[1], args[3]) + } +} diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index ad828e9d3f..6399136fb1 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -4884,6 +4884,8 @@ func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, whic gotv = vec0.AndNot(vec1) case "Average": gotv = vec0.Average(vec1) + case "GaloisFieldMul": + gotv = vec0.GaloisFieldMul(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -4922,6 +4924,8 @@ func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) case "MaskedAverage": gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) + case "MaskedGaloisFieldMul": + gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x16()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) case "MaskedMin": @@ -5106,6 +5110,8 @@ func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, whic gotv = vec0.AndNot(vec1) case "Average": gotv = vec0.Average(vec1) + case "GaloisFieldMul": + gotv = vec0.GaloisFieldMul(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -5144,6 +5150,8 @@ func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) case "MaskedAverage": gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) + case "MaskedGaloisFieldMul": + gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x32()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) case "MaskedMin": @@ -5324,6 +5332,8 @@ func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, whic gotv = vec0.Add(vec1) case "Average": gotv = vec0.Average(vec1) + case "GaloisFieldMul": + gotv = vec0.GaloisFieldMul(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -5358,6 +5368,8 @@ func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) case "MaskedAverage": gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x64()) + case "MaskedGaloisFieldMul": + gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x64()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) case "MaskedMin": @@ -7946,6 +7958,8 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // DiffWithTruncWithPrecision // FloorSuppressExceptionWithPrecision // FloorWithPrecision +// GaloisFieldAffineTransform +// GaloisFieldAffineTransformInversed // GetElem // MaskedCeilSuppressExceptionWithPrecision // MaskedCeilWithPrecision @@ -7959,6 +7973,8 @@ func 
testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // MaskedDiffWithTruncWithPrecision // MaskedFloorSuppressExceptionWithPrecision // MaskedFloorWithPrecision +// MaskedGaloisFieldAffineTransform +// MaskedGaloisFieldAffineTransformInversed // MaskedRotateAllLeft // MaskedRotateAllRight // MaskedRoundSuppressExceptionWithPrecision diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 330ad6aca2..f20a9b17ae 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -1426,6 +1426,81 @@ func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 +/* GaloisFieldAffineTransform */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 + +/* GaloisFieldAffineTransformInversed */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64 + +/* GaloisFieldMul */ + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldMul(y Uint8x16) Uint8x16 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 + /* GetElem */ // GetElem retrieves a single constant-indexed element's value. @@ -3494,6 +3569,81 @@ func (x Float64x4) MaskedFusedMultiplySubAdd(y Float64x4, z Float64x4, u Mask64x // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +/* MaskedGaloisFieldAffineTransform */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGaloisFieldAffineTransform(y Uint64x2, b uint8, m Mask8x16) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGaloisFieldAffineTransform(y Uint64x4, b uint8, m Mask8x32) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGaloisFieldAffineTransform(y Uint64x8, b uint8, m Mask8x64) Uint8x64 + +/* MaskedGaloisFieldAffineTransformInversed */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. 
+// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(y Uint64x2, b uint8, m Mask8x16) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(y Uint64x4, b uint8, m Mask8x32) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(y Uint64x8, b uint8, m Mask8x64) Uint8x64 + +/* MaskedGaloisFieldMul */ + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGaloisFieldMul(y Uint8x16, z Mask8x16) Uint8x16 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGaloisFieldMul(y Uint8x32, z Mask8x32) Uint8x32 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGaloisFieldMul(y Uint8x64, z Mask8x64) Uint8x64 + /* MaskedGreater */ // Greater compares for greater than.
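For reference, the per-byte operation that the new GaloisFieldMul stubs document (multiplication in GF(2^8) with reduction polynomial x^8 + x^4 + x^3 + x + 1, i.e. what VGF2P8MULB computes lane by lane) can be sketched in plain Go. This is an illustrative reference only, not code from this CL; gf2p8Mul is a hypothetical helper name.

```go
package main

import "fmt"

// gf2p8Mul is a plain-Go reference (not part of this CL) for the per-byte
// operation GaloisFieldMul / VGF2P8MULB is documented to perform: carry-less
// multiplication of a and b, reduced modulo x^8 + x^4 + x^3 + x + 1 (0x11B),
// i.e. multiplication in GF(2^8).
func gf2p8Mul(a, b byte) byte {
	var p byte
	for i := 0; i < 8; i++ {
		if b&1 != 0 {
			p ^= a // add (XOR) the current shifted copy of a for each set bit of b
		}
		carry := a & 0x80
		a <<= 1
		if carry != 0 {
			a ^= 0x1B // reduce: x^8 ≡ x^4 + x^3 + x + 1 modulo the field polynomial
		}
		b >>= 1
	}
	return p
}

func main() {
	fmt.Printf("%#02x\n", gf2p8Mul(0x57, 0x83)) // 0xc1, the classic FIPS-197 example
}
```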
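The affine-transform stubs describe the result as y * x + b, with y holding one 8x8 bit matrix per 64-bit lane. A hedged scalar sketch of that per-byte behaviour follows; the bit/byte ordering is taken from Intel's published pseudocode for GF2P8AFFINEQB rather than from this CL, and gf2p8Affine is a hypothetical helper name for illustration.

```go
package main

import (
	"fmt"
	"math/bits"
)

// gf2p8Affine sketches the per-byte semantics described for
// GaloisFieldAffineTransform: m packs an 8x8 bit matrix into a uint64, each
// result bit is the GF(2) dot product (parity of the AND) of one matrix row
// with the source byte x, and the immediate b is XORed in at the end.
// Illustration only; ordering follows Intel's GF2P8AFFINEQB pseudocode.
func gf2p8Affine(x byte, m uint64, b byte) byte {
	var out byte
	for i := uint(0); i < 8; i++ {
		row := byte(m >> (8 * (7 - i))) // matrix row feeding result bit i
		if bits.OnesCount8(row&x)&1 == 1 {
			out |= 1 << i
		}
	}
	return out ^ b
}

func main() {
	// With the identity bit matrix and b = 0 the transform returns x unchanged.
	const identity = 0x0102040810204080
	fmt.Printf("%#02x\n", gf2p8Affine(0xA5, identity, 0)) // prints 0xa5
}
```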