cmd/compile: add masked compress operations for SIMD vectors

Lower the generic Compress ops for int, uint, and float vectors to the
AVX-512 VCOMPRESSPS/VCOMPRESSPD and VPCOMPRESS{B,W,D,Q} masked
instructions across 128-, 256-, and 512-bit widths.

This CL is generated by CL 687975.
Change-Id: I21707d108773cc6d8e6f07aaed60e756faa1e6cb
Reviewed-on: https://go-review.googlesource.com/c/go/+/687995
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: David Chase <drchase@google.com>
ssa.OpAMD64VRSQRT14PDMasked128,
ssa.OpAMD64VRSQRT14PDMasked256,
ssa.OpAMD64VRSQRT14PDMasked512,
+ ssa.OpAMD64VCOMPRESSPSMasked128,
+ ssa.OpAMD64VCOMPRESSPSMasked256,
+ ssa.OpAMD64VCOMPRESSPSMasked512,
+ ssa.OpAMD64VCOMPRESSPDMasked128,
+ ssa.OpAMD64VCOMPRESSPDMasked256,
+ ssa.OpAMD64VCOMPRESSPDMasked512,
+ ssa.OpAMD64VPCOMPRESSBMasked128,
+ ssa.OpAMD64VPCOMPRESSBMasked256,
+ ssa.OpAMD64VPCOMPRESSBMasked512,
+ ssa.OpAMD64VPCOMPRESSWMasked128,
+ ssa.OpAMD64VPCOMPRESSWMasked256,
+ ssa.OpAMD64VPCOMPRESSWMasked512,
+ ssa.OpAMD64VPCOMPRESSDMasked128,
+ ssa.OpAMD64VPCOMPRESSDMasked256,
+ ssa.OpAMD64VPCOMPRESSDMasked512,
+ ssa.OpAMD64VPCOMPRESSQMasked128,
+ ssa.OpAMD64VPCOMPRESSQMasked256,
+ ssa.OpAMD64VPCOMPRESSQMasked512,
ssa.OpAMD64VPOPCNTBMasked128,
ssa.OpAMD64VPOPCNTBMasked256,
ssa.OpAMD64VPOPCNTBMasked512,
ssa.OpAMD64VRNDSCALEPDMasked128,
ssa.OpAMD64VRNDSCALEPDMasked256,
ssa.OpAMD64VRNDSCALEPDMasked512,
+ ssa.OpAMD64VCOMPRESSPSMasked128,
+ ssa.OpAMD64VCOMPRESSPSMasked256,
+ ssa.OpAMD64VCOMPRESSPSMasked512,
+ ssa.OpAMD64VCOMPRESSPDMasked128,
+ ssa.OpAMD64VCOMPRESSPDMasked256,
+ ssa.OpAMD64VCOMPRESSPDMasked512,
+ ssa.OpAMD64VPCOMPRESSBMasked128,
+ ssa.OpAMD64VPCOMPRESSBMasked256,
+ ssa.OpAMD64VPCOMPRESSBMasked512,
+ ssa.OpAMD64VPCOMPRESSWMasked128,
+ ssa.OpAMD64VPCOMPRESSWMasked256,
+ ssa.OpAMD64VPCOMPRESSWMasked512,
+ ssa.OpAMD64VPCOMPRESSDMasked128,
+ ssa.OpAMD64VPCOMPRESSDMasked256,
+ ssa.OpAMD64VPCOMPRESSDMasked512,
+ ssa.OpAMD64VPCOMPRESSQMasked128,
+ ssa.OpAMD64VPCOMPRESSQMasked256,
+ ssa.OpAMD64VPCOMPRESSQMasked512,
ssa.OpAMD64VREDUCEPSMasked128,
ssa.OpAMD64VREDUCEPSMasked256,
ssa.OpAMD64VREDUCEPSMasked512,
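
The two lists above register the same new opcodes in two shared case
statements of cmd/compile/internal/amd64/ssa.go. For a two-argument masked
op of this shape (vector in, K mask in, vector out), code generation
plausibly reduces to the pattern sketched below; the helper names
emitVecMaskVec, simdReg, and maskReg are illustrations, not the actual
ssa.go helpers:

	// Minimal sketch: emit "ASM xSrc, K, xDst" for a masked
	// one-vector-input op such as VCOMPRESSPSMasked128.
	func emitVecMaskVec(s *ssagen.State, v *ssa.Value) {
		p := s.Prog(v.Op.Asm())                // e.g. x86.AVCOMPRESSPS
		p.From.Type = obj.TYPE_REG
		p.From.Reg = simdReg(v.Args[0])        // vector source (hypothetical lookup)
		p.AddRestSourceReg(maskReg(v.Args[1])) // K mask register (hypothetical lookup)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = simdReg(v)                  // vector destination
	}
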
(CeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(CeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(CeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(CompressFloat32x4 x mask) => (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM <types.TypeMask> mask))
+(CompressFloat32x8 x mask) => (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM <types.TypeMask> mask))
+(CompressFloat32x16 x mask) => (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(CompressFloat64x2 x mask) => (VCOMPRESSPDMasked128 x (VPMOVVec64x2ToM <types.TypeMask> mask))
+(CompressFloat64x4 x mask) => (VCOMPRESSPDMasked256 x (VPMOVVec64x4ToM <types.TypeMask> mask))
+(CompressFloat64x8 x mask) => (VCOMPRESSPDMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(CompressInt8x16 x mask) => (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM <types.TypeMask> mask))
+(CompressInt8x32 x mask) => (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM <types.TypeMask> mask))
+(CompressInt8x64 x mask) => (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+(CompressInt16x8 x mask) => (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM <types.TypeMask> mask))
+(CompressInt16x16 x mask) => (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM <types.TypeMask> mask))
+(CompressInt16x32 x mask) => (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+(CompressInt32x4 x mask) => (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM <types.TypeMask> mask))
+(CompressInt32x8 x mask) => (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM <types.TypeMask> mask))
+(CompressInt32x16 x mask) => (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(CompressInt64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM <types.TypeMask> mask))
+(CompressInt64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM <types.TypeMask> mask))
+(CompressInt64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(CompressUint8x16 x mask) => (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM <types.TypeMask> mask))
+(CompressUint8x32 x mask) => (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM <types.TypeMask> mask))
+(CompressUint8x64 x mask) => (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+(CompressUint16x8 x mask) => (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM <types.TypeMask> mask))
+(CompressUint16x16 x mask) => (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM <types.TypeMask> mask))
+(CompressUint16x32 x mask) => (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+(CompressUint32x4 x mask) => (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM <types.TypeMask> mask))
+(CompressUint32x8 x mask) => (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM <types.TypeMask> mask))
+(CompressUint32x16 x mask) => (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(CompressUint64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM <types.TypeMask> mask))
+(CompressUint64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM <types.TypeMask> mask))
+(CompressUint64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
(DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x)
(DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x)
(DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x)
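
Each Compress rule above lowers a generic op to its machine op by first
converting the vector-shaped mask to a K register (VPMOVVec*ToM) and then
emitting the masked compress. The AVX-512 compress semantics being targeted
pack the mask-selected elements into the low lanes of the result, in order.
A runnable scalar reference model, assuming the unwritten upper lanes are
zeroed (as with a zero-masked VPCOMPRESSD):

	package main

	import "fmt"

	// compress packs each x[i] whose mask[i] is set into the low slots
	// of the result, preserving order; the remaining slots stay zero.
	func compress(x []int32, mask []bool) []int32 {
		out := make([]int32, len(x))
		j := 0
		for i, m := range mask {
			if m {
				out[j] = x[i]
				j++
			}
		}
		return out
	}

	func main() {
		x := []int32{10, 20, 30, 40}
		mask := []bool{true, false, true, false}
		fmt.Println(compress(x, mask)) // [10 30 0 0]
	}
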
{name: "VRCP14PSMasked512", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PS512", argLength: 1, reg: w11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PSMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VCOMPRESSPSMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPS512", argLength: 2, reg: w21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VFMADD213PS512", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VCOMPRESSPSMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPSMasked128", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VFMADD213PS128", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VCOMPRESSPSMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPS256", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPSMasked256", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VFMADD213PS256", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VRCP14PDMasked128", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PD128", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PDMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VCOMPRESSPDMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPDMasked128", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VRCP14PDMasked256", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PD256", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PDMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VCOMPRESSPDMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPDMasked256", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VRCP14PDMasked512", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PD512", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PDMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VCOMPRESSPDMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPDMasked512", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VPABSWMasked256", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPADDWMasked256", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false},
+ {name: "VPCOMPRESSWMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPABSWMasked512", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false},
+ {name: "VPCOMPRESSWMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMINSW512", argLength: 2, reg: w21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPABSWMasked128", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPADDW128", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPADDWMasked128", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false},
+ {name: "VPCOMPRESSWMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPCMPEQW128", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPANDDMasked512", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPANDND512", argLength: 2, reg: w21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPANDNDMasked512", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPCOMPRESSDMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMAXSDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPADDDMasked128", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPANDDMasked128", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPANDNDMasked128", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPCOMPRESSDMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPCMPEQD128", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPADDDMasked256", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPANDDMasked256", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPANDNDMasked256", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPCOMPRESSDMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPCMPEQD256", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPADDQMasked128", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPCOMPRESSQMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPCMPGTQ128", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPMAXSQ128", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPADDQMasked256", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPANDQMasked256", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPANDNQMasked256", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPCOMPRESSQMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPMAXSQ256", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPANDQMasked512", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPCOMPRESSQMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPMAXSQ512", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPADDBMasked128", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPCOMPRESSBMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPADDBMasked256", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPANDN256", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPCOMPRESSBMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPABSBMasked512", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false},
+ {name: "VPCOMPRESSBMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPERMD512", argLength: 2, reg: w21, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPERMI2PS512", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VPERMI2PSMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VPERMPSMasked512", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPERMDMasked512", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPSRLD512", argLength: 2, reg: wfpw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VPERMI2PD128", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VPERMI2Q128", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VPERMI2PDMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPERMI2PD512", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "ApproximateReciprocalMaskedFloat32x16", argLength: 2, commutative: false},
{name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", argLength: 2, commutative: false},
+ {name: "CompressFloat32x16", argLength: 2, commutative: false},
{name: "DivFloat32x16", argLength: 2, commutative: false},
{name: "DivMaskedFloat32x16", argLength: 3, commutative: false},
{name: "EqualFloat32x16", argLength: 2, commutative: true},
{name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false},
{name: "CeilFloat32x4", argLength: 1, commutative: false},
+ {name: "CompressFloat32x4", argLength: 2, commutative: false},
{name: "DivFloat32x4", argLength: 2, commutative: false},
{name: "DivMaskedFloat32x4", argLength: 3, commutative: false},
{name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true},
{name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false},
{name: "CeilFloat32x8", argLength: 1, commutative: false},
+ {name: "CompressFloat32x8", argLength: 2, commutative: false},
{name: "DivFloat32x8", argLength: 2, commutative: false},
{name: "DivMaskedFloat32x8", argLength: 3, commutative: false},
{name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true},
{name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false},
{name: "CeilFloat64x2", argLength: 1, commutative: false},
+ {name: "CompressFloat64x2", argLength: 2, commutative: false},
{name: "DivFloat64x2", argLength: 2, commutative: false},
{name: "DivMaskedFloat64x2", argLength: 3, commutative: false},
{name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true},
{name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false},
{name: "CeilFloat64x4", argLength: 1, commutative: false},
+ {name: "CompressFloat64x4", argLength: 2, commutative: false},
{name: "DivFloat64x4", argLength: 2, commutative: false},
{name: "DivMaskedFloat64x4", argLength: 3, commutative: false},
{name: "EqualFloat64x4", argLength: 2, commutative: true},
{name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false},
{name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false},
+ {name: "CompressFloat64x8", argLength: 2, commutative: false},
{name: "DivFloat64x8", argLength: 2, commutative: false},
{name: "DivMaskedFloat64x8", argLength: 3, commutative: false},
{name: "EqualFloat64x8", argLength: 2, commutative: true},
{name: "AddMaskedInt16x16", argLength: 3, commutative: true},
{name: "AndInt16x16", argLength: 2, commutative: true},
{name: "AndNotInt16x16", argLength: 2, commutative: false},
+ {name: "CompressInt16x16", argLength: 2, commutative: false},
{name: "EqualInt16x16", argLength: 2, commutative: true},
{name: "EqualMaskedInt16x16", argLength: 3, commutative: true},
{name: "GreaterInt16x16", argLength: 2, commutative: false},
{name: "AbsoluteMaskedInt16x32", argLength: 2, commutative: false},
{name: "AddInt16x32", argLength: 2, commutative: true},
{name: "AddMaskedInt16x32", argLength: 3, commutative: true},
+ {name: "CompressInt16x32", argLength: 2, commutative: false},
{name: "EqualInt16x32", argLength: 2, commutative: true},
{name: "EqualMaskedInt16x32", argLength: 3, commutative: true},
{name: "GreaterInt16x32", argLength: 2, commutative: false},
{name: "AddMaskedInt16x8", argLength: 3, commutative: true},
{name: "AndInt16x8", argLength: 2, commutative: true},
{name: "AndNotInt16x8", argLength: 2, commutative: false},
+ {name: "CompressInt16x8", argLength: 2, commutative: false},
{name: "EqualInt16x8", argLength: 2, commutative: true},
{name: "EqualMaskedInt16x8", argLength: 3, commutative: true},
{name: "GreaterInt16x8", argLength: 2, commutative: false},
{name: "AndMaskedInt32x16", argLength: 3, commutative: true},
{name: "AndNotInt32x16", argLength: 2, commutative: false},
{name: "AndNotMaskedInt32x16", argLength: 3, commutative: false},
+ {name: "CompressInt32x16", argLength: 2, commutative: false},
{name: "EqualInt32x16", argLength: 2, commutative: true},
{name: "EqualMaskedInt32x16", argLength: 3, commutative: true},
{name: "GreaterInt32x16", argLength: 2, commutative: false},
{name: "AndMaskedInt32x4", argLength: 3, commutative: true},
{name: "AndNotInt32x4", argLength: 2, commutative: false},
{name: "AndNotMaskedInt32x4", argLength: 3, commutative: false},
+ {name: "CompressInt32x4", argLength: 2, commutative: false},
{name: "EqualInt32x4", argLength: 2, commutative: true},
{name: "EqualMaskedInt32x4", argLength: 3, commutative: true},
{name: "GreaterInt32x4", argLength: 2, commutative: false},
{name: "AndMaskedInt32x8", argLength: 3, commutative: true},
{name: "AndNotInt32x8", argLength: 2, commutative: false},
{name: "AndNotMaskedInt32x8", argLength: 3, commutative: false},
+ {name: "CompressInt32x8", argLength: 2, commutative: false},
{name: "EqualInt32x8", argLength: 2, commutative: true},
{name: "EqualMaskedInt32x8", argLength: 3, commutative: true},
{name: "GreaterInt32x8", argLength: 2, commutative: false},
{name: "AndMaskedInt64x2", argLength: 3, commutative: true},
{name: "AndNotInt64x2", argLength: 2, commutative: false},
{name: "AndNotMaskedInt64x2", argLength: 3, commutative: false},
+ {name: "CompressInt64x2", argLength: 2, commutative: false},
{name: "EqualInt64x2", argLength: 2, commutative: true},
{name: "EqualMaskedInt64x2", argLength: 3, commutative: true},
{name: "GreaterInt64x2", argLength: 2, commutative: false},
{name: "AndMaskedInt64x4", argLength: 3, commutative: true},
{name: "AndNotInt64x4", argLength: 2, commutative: false},
{name: "AndNotMaskedInt64x4", argLength: 3, commutative: false},
+ {name: "CompressInt64x4", argLength: 2, commutative: false},
{name: "EqualInt64x4", argLength: 2, commutative: true},
{name: "EqualMaskedInt64x4", argLength: 3, commutative: true},
{name: "GreaterInt64x4", argLength: 2, commutative: false},
{name: "AndMaskedInt64x8", argLength: 3, commutative: true},
{name: "AndNotInt64x8", argLength: 2, commutative: false},
{name: "AndNotMaskedInt64x8", argLength: 3, commutative: false},
+ {name: "CompressInt64x8", argLength: 2, commutative: false},
{name: "EqualInt64x8", argLength: 2, commutative: true},
{name: "EqualMaskedInt64x8", argLength: 3, commutative: true},
{name: "GreaterInt64x8", argLength: 2, commutative: false},
{name: "AddMaskedInt8x16", argLength: 3, commutative: true},
{name: "AndInt8x16", argLength: 2, commutative: true},
{name: "AndNotInt8x16", argLength: 2, commutative: false},
+ {name: "CompressInt8x16", argLength: 2, commutative: false},
{name: "EqualInt8x16", argLength: 2, commutative: true},
{name: "EqualMaskedInt8x16", argLength: 3, commutative: true},
{name: "GreaterInt8x16", argLength: 2, commutative: false},
{name: "AddMaskedInt8x32", argLength: 3, commutative: true},
{name: "AndInt8x32", argLength: 2, commutative: true},
{name: "AndNotInt8x32", argLength: 2, commutative: false},
+ {name: "CompressInt8x32", argLength: 2, commutative: false},
{name: "EqualInt8x32", argLength: 2, commutative: true},
{name: "EqualMaskedInt8x32", argLength: 3, commutative: true},
{name: "GreaterInt8x32", argLength: 2, commutative: false},
{name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false},
{name: "AddInt8x64", argLength: 2, commutative: true},
{name: "AddMaskedInt8x64", argLength: 3, commutative: true},
+ {name: "CompressInt8x64", argLength: 2, commutative: false},
{name: "EqualInt8x64", argLength: 2, commutative: true},
{name: "EqualMaskedInt8x64", argLength: 3, commutative: true},
{name: "GreaterInt8x64", argLength: 2, commutative: false},
{name: "AndNotUint16x16", argLength: 2, commutative: false},
{name: "AverageUint16x16", argLength: 2, commutative: true},
{name: "AverageMaskedUint16x16", argLength: 3, commutative: true},
+ {name: "CompressUint16x16", argLength: 2, commutative: false},
{name: "EqualUint16x16", argLength: 2, commutative: true},
{name: "EqualMaskedUint16x16", argLength: 3, commutative: true},
{name: "GreaterUint16x16", argLength: 2, commutative: false},
{name: "PermuteUint16x16", argLength: 2, commutative: false},
{name: "Permute2Uint16x16", argLength: 3, commutative: false},
{name: "Permute2Int16x16", argLength: 3, commutative: false},
- {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false},
{name: "Permute2MaskedInt16x16", argLength: 4, commutative: false},
- {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false},
+ {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false},
{name: "PermuteMaskedInt16x16", argLength: 3, commutative: false},
+ {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false},
{name: "PopCountUint16x16", argLength: 1, commutative: false},
{name: "PopCountMaskedUint16x16", argLength: 2, commutative: false},
{name: "SaturatedAddUint16x16", argLength: 2, commutative: true},
{name: "AddMaskedUint16x32", argLength: 3, commutative: true},
{name: "AverageUint16x32", argLength: 2, commutative: true},
{name: "AverageMaskedUint16x32", argLength: 3, commutative: true},
+ {name: "CompressUint16x32", argLength: 2, commutative: false},
{name: "EqualUint16x32", argLength: 2, commutative: true},
{name: "EqualMaskedUint16x32", argLength: 3, commutative: true},
{name: "GreaterUint16x32", argLength: 2, commutative: false},
{name: "MulHighMaskedUint16x32", argLength: 3, commutative: true},
{name: "NotEqualUint16x32", argLength: 2, commutative: true},
{name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true},
- {name: "PermuteUint16x32", argLength: 2, commutative: false},
{name: "PermuteInt16x32", argLength: 2, commutative: false},
+ {name: "PermuteUint16x32", argLength: 2, commutative: false},
{name: "Permute2Int16x32", argLength: 3, commutative: false},
{name: "Permute2Uint16x32", argLength: 3, commutative: false},
- {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false},
{name: "Permute2MaskedInt16x32", argLength: 4, commutative: false},
+ {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false},
{name: "PermuteMaskedUint16x32", argLength: 3, commutative: false},
{name: "PermuteMaskedInt16x32", argLength: 3, commutative: false},
{name: "PopCountUint16x32", argLength: 1, commutative: false},
{name: "AndNotUint16x8", argLength: 2, commutative: false},
{name: "AverageUint16x8", argLength: 2, commutative: true},
{name: "AverageMaskedUint16x8", argLength: 3, commutative: true},
+ {name: "CompressUint16x8", argLength: 2, commutative: false},
{name: "EqualUint16x8", argLength: 2, commutative: true},
{name: "EqualMaskedUint16x8", argLength: 3, commutative: true},
{name: "GreaterUint16x8", argLength: 2, commutative: false},
{name: "AndMaskedUint32x16", argLength: 3, commutative: true},
{name: "AndNotUint32x16", argLength: 2, commutative: false},
{name: "AndNotMaskedUint32x16", argLength: 3, commutative: false},
+ {name: "CompressUint32x16", argLength: 2, commutative: false},
{name: "EqualUint32x16", argLength: 2, commutative: true},
{name: "EqualMaskedUint32x16", argLength: 3, commutative: true},
{name: "GreaterUint32x16", argLength: 2, commutative: false},
{name: "OrUint32x16", argLength: 2, commutative: true},
{name: "OrMaskedUint32x16", argLength: 3, commutative: true},
{name: "PermuteInt32x16", argLength: 2, commutative: false},
- {name: "PermuteUint32x16", argLength: 2, commutative: false},
{name: "PermuteFloat32x16", argLength: 2, commutative: false},
- {name: "Permute2Int32x16", argLength: 3, commutative: false},
+ {name: "PermuteUint32x16", argLength: 2, commutative: false},
{name: "Permute2Uint32x16", argLength: 3, commutative: false},
{name: "Permute2Float32x16", argLength: 3, commutative: false},
+ {name: "Permute2Int32x16", argLength: 3, commutative: false},
{name: "Permute2MaskedUint32x16", argLength: 4, commutative: false},
{name: "Permute2MaskedInt32x16", argLength: 4, commutative: false},
{name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false},
+ {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false},
{name: "PermuteMaskedUint32x16", argLength: 3, commutative: false},
{name: "PermuteMaskedInt32x16", argLength: 3, commutative: false},
- {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false},
{name: "PopCountUint32x16", argLength: 1, commutative: false},
{name: "PopCountMaskedUint32x16", argLength: 2, commutative: false},
{name: "RotateLeftUint32x16", argLength: 2, commutative: false},
{name: "AndMaskedUint32x4", argLength: 3, commutative: true},
{name: "AndNotUint32x4", argLength: 2, commutative: false},
{name: "AndNotMaskedUint32x4", argLength: 3, commutative: false},
+ {name: "CompressUint32x4", argLength: 2, commutative: false},
{name: "EqualUint32x4", argLength: 2, commutative: true},
{name: "EqualMaskedUint32x4", argLength: 3, commutative: true},
{name: "GreaterUint32x4", argLength: 2, commutative: false},
{name: "PairwiseAddUint32x4", argLength: 2, commutative: false},
{name: "PairwiseSubUint32x4", argLength: 2, commutative: false},
{name: "Permute2Uint32x4", argLength: 3, commutative: false},
- {name: "Permute2Float32x4", argLength: 3, commutative: false},
{name: "Permute2Int32x4", argLength: 3, commutative: false},
- {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false},
- {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false},
+ {name: "Permute2Float32x4", argLength: 3, commutative: false},
{name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false},
+ {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false},
+ {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false},
{name: "PopCountUint32x4", argLength: 1, commutative: false},
{name: "PopCountMaskedUint32x4", argLength: 2, commutative: false},
{name: "RotateLeftUint32x4", argLength: 2, commutative: false},
{name: "AndMaskedUint32x8", argLength: 3, commutative: true},
{name: "AndNotUint32x8", argLength: 2, commutative: false},
{name: "AndNotMaskedUint32x8", argLength: 3, commutative: false},
+ {name: "CompressUint32x8", argLength: 2, commutative: false},
{name: "EqualUint32x8", argLength: 2, commutative: true},
{name: "EqualMaskedUint32x8", argLength: 3, commutative: true},
{name: "GreaterUint32x8", argLength: 2, commutative: false},
{name: "OrMaskedUint32x8", argLength: 3, commutative: true},
{name: "PairwiseAddUint32x8", argLength: 2, commutative: false},
{name: "PairwiseSubUint32x8", argLength: 2, commutative: false},
+ {name: "PermuteUint32x8", argLength: 2, commutative: false},
{name: "PermuteInt32x8", argLength: 2, commutative: false},
{name: "PermuteFloat32x8", argLength: 2, commutative: false},
- {name: "PermuteUint32x8", argLength: 2, commutative: false},
{name: "Permute2Uint32x8", argLength: 3, commutative: false},
{name: "Permute2Float32x8", argLength: 3, commutative: false},
{name: "Permute2Int32x8", argLength: 3, commutative: false},
{name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false},
- {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false},
{name: "Permute2MaskedInt32x8", argLength: 4, commutative: false},
+ {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false},
{name: "PermuteMaskedInt32x8", argLength: 3, commutative: false},
- {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false},
{name: "PermuteMaskedUint32x8", argLength: 3, commutative: false},
+ {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false},
{name: "PopCountUint32x8", argLength: 1, commutative: false},
{name: "PopCountMaskedUint32x8", argLength: 2, commutative: false},
{name: "RotateLeftUint32x8", argLength: 2, commutative: false},
{name: "AndMaskedUint64x2", argLength: 3, commutative: true},
{name: "AndNotUint64x2", argLength: 2, commutative: false},
{name: "AndNotMaskedUint64x2", argLength: 3, commutative: false},
+ {name: "CompressUint64x2", argLength: 2, commutative: false},
{name: "EqualUint64x2", argLength: 2, commutative: true},
{name: "EqualMaskedUint64x2", argLength: 3, commutative: true},
{name: "GreaterUint64x2", argLength: 2, commutative: false},
{name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true},
{name: "OrUint64x2", argLength: 2, commutative: true},
{name: "OrMaskedUint64x2", argLength: 3, commutative: true},
+ {name: "Permute2Float64x2", argLength: 3, commutative: false},
{name: "Permute2Uint64x2", argLength: 3, commutative: false},
{name: "Permute2Int64x2", argLength: 3, commutative: false},
- {name: "Permute2Float64x2", argLength: 3, commutative: false},
- {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false},
{name: "Permute2MaskedInt64x2", argLength: 4, commutative: false},
+ {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false},
{name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false},
{name: "PopCountUint64x2", argLength: 1, commutative: false},
{name: "PopCountMaskedUint64x2", argLength: 2, commutative: false},
{name: "AndMaskedUint64x4", argLength: 3, commutative: true},
{name: "AndNotUint64x4", argLength: 2, commutative: false},
{name: "AndNotMaskedUint64x4", argLength: 3, commutative: false},
+ {name: "CompressUint64x4", argLength: 2, commutative: false},
{name: "EqualUint64x4", argLength: 2, commutative: true},
{name: "EqualMaskedUint64x4", argLength: 3, commutative: true},
{name: "GreaterUint64x4", argLength: 2, commutative: false},
{name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true},
{name: "OrUint64x4", argLength: 2, commutative: true},
{name: "OrMaskedUint64x4", argLength: 3, commutative: true},
+ {name: "PermuteFloat64x4", argLength: 2, commutative: false},
{name: "PermuteUint64x4", argLength: 2, commutative: false},
{name: "PermuteInt64x4", argLength: 2, commutative: false},
- {name: "PermuteFloat64x4", argLength: 2, commutative: false},
- {name: "Permute2Uint64x4", argLength: 3, commutative: false},
{name: "Permute2Int64x4", argLength: 3, commutative: false},
+ {name: "Permute2Uint64x4", argLength: 3, commutative: false},
{name: "Permute2Float64x4", argLength: 3, commutative: false},
- {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false},
- {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false},
{name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false},
+ {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false},
+ {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false},
{name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false},
- {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false},
{name: "PermuteMaskedUint64x4", argLength: 3, commutative: false},
+ {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false},
{name: "PopCountUint64x4", argLength: 1, commutative: false},
{name: "PopCountMaskedUint64x4", argLength: 2, commutative: false},
{name: "RotateLeftUint64x4", argLength: 2, commutative: false},
{name: "AndMaskedUint64x8", argLength: 3, commutative: true},
{name: "AndNotUint64x8", argLength: 2, commutative: false},
{name: "AndNotMaskedUint64x8", argLength: 3, commutative: false},
+ {name: "CompressUint64x8", argLength: 2, commutative: false},
{name: "EqualUint64x8", argLength: 2, commutative: true},
{name: "EqualMaskedUint64x8", argLength: 3, commutative: true},
{name: "GreaterUint64x8", argLength: 2, commutative: false},
{name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true},
{name: "OrUint64x8", argLength: 2, commutative: true},
{name: "OrMaskedUint64x8", argLength: 3, commutative: true},
- {name: "PermuteUint64x8", argLength: 2, commutative: false},
{name: "PermuteInt64x8", argLength: 2, commutative: false},
+ {name: "PermuteUint64x8", argLength: 2, commutative: false},
{name: "PermuteFloat64x8", argLength: 2, commutative: false},
- {name: "Permute2Int64x8", argLength: 3, commutative: false},
{name: "Permute2Uint64x8", argLength: 3, commutative: false},
{name: "Permute2Float64x8", argLength: 3, commutative: false},
+ {name: "Permute2Int64x8", argLength: 3, commutative: false},
{name: "Permute2MaskedUint64x8", argLength: 4, commutative: false},
- {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false},
{name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false},
- {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false},
- {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false},
+ {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false},
{name: "PermuteMaskedUint64x8", argLength: 3, commutative: false},
+ {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false},
+ {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false},
{name: "PopCountUint64x8", argLength: 1, commutative: false},
{name: "PopCountMaskedUint64x8", argLength: 2, commutative: false},
{name: "RotateLeftUint64x8", argLength: 2, commutative: false},
{name: "AndNotUint8x16", argLength: 2, commutative: false},
{name: "AverageUint8x16", argLength: 2, commutative: true},
{name: "AverageMaskedUint8x16", argLength: 3, commutative: true},
+ {name: "CompressUint8x16", argLength: 2, commutative: false},
{name: "EqualUint8x16", argLength: 2, commutative: true},
{name: "EqualMaskedUint8x16", argLength: 3, commutative: true},
{name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false},
{name: "OrUint8x16", argLength: 2, commutative: true},
{name: "PermuteUint8x16", argLength: 2, commutative: false},
{name: "PermuteInt8x16", argLength: 2, commutative: false},
- {name: "Permute2Uint8x16", argLength: 3, commutative: false},
{name: "Permute2Int8x16", argLength: 3, commutative: false},
+ {name: "Permute2Uint8x16", argLength: 3, commutative: false},
{name: "Permute2MaskedInt8x16", argLength: 4, commutative: false},
{name: "Permute2MaskedUint8x16", argLength: 4, commutative: false},
- {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false},
{name: "PermuteMaskedUint8x16", argLength: 3, commutative: false},
+ {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false},
{name: "PopCountUint8x16", argLength: 1, commutative: false},
{name: "PopCountMaskedUint8x16", argLength: 2, commutative: false},
{name: "SaturatedAddUint8x16", argLength: 2, commutative: true},
{name: "AndNotUint8x32", argLength: 2, commutative: false},
{name: "AverageUint8x32", argLength: 2, commutative: true},
{name: "AverageMaskedUint8x32", argLength: 3, commutative: true},
+ {name: "CompressUint8x32", argLength: 2, commutative: false},
{name: "EqualUint8x32", argLength: 2, commutative: true},
{name: "EqualMaskedUint8x32", argLength: 3, commutative: true},
{name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false},
{name: "PermuteInt8x32", argLength: 2, commutative: false},
{name: "Permute2Int8x32", argLength: 3, commutative: false},
{name: "Permute2Uint8x32", argLength: 3, commutative: false},
- {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false},
{name: "Permute2MaskedInt8x32", argLength: 4, commutative: false},
- {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false},
+ {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false},
{name: "PermuteMaskedInt8x32", argLength: 3, commutative: false},
+ {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false},
{name: "PopCountUint8x32", argLength: 1, commutative: false},
{name: "PopCountMaskedUint8x32", argLength: 2, commutative: false},
{name: "SaturatedAddUint8x32", argLength: 2, commutative: true},
{name: "AddMaskedUint8x64", argLength: 3, commutative: true},
{name: "AverageUint8x64", argLength: 2, commutative: true},
{name: "AverageMaskedUint8x64", argLength: 3, commutative: true},
+ {name: "CompressUint8x64", argLength: 2, commutative: false},
{name: "EqualUint8x64", argLength: 2, commutative: true},
{name: "EqualMaskedUint8x64", argLength: 3, commutative: true},
{name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false},
{name: "MinMaskedUint8x64", argLength: 3, commutative: true},
{name: "NotEqualUint8x64", argLength: 2, commutative: true},
{name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true},
- {name: "PermuteUint8x64", argLength: 2, commutative: false},
{name: "PermuteInt8x64", argLength: 2, commutative: false},
- {name: "Permute2Int8x64", argLength: 3, commutative: false},
+ {name: "PermuteUint8x64", argLength: 2, commutative: false},
{name: "Permute2Uint8x64", argLength: 3, commutative: false},
+ {name: "Permute2Int8x64", argLength: 3, commutative: false},
{name: "Permute2MaskedUint8x64", argLength: 4, commutative: false},
{name: "Permute2MaskedInt8x64", argLength: 4, commutative: false},
- {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false},
{name: "PermuteMaskedUint8x64", argLength: 3, commutative: false},
+ {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false},
{name: "PopCountUint8x64", argLength: 1, commutative: false},
{name: "PopCountMaskedUint8x64", argLength: 2, commutative: false},
{name: "SaturatedAddUint8x64", argLength: 2, commutative: true},
OpAMD64VRCP14PSMasked512
OpAMD64VRSQRT14PS512
OpAMD64VRSQRT14PSMasked512
+ OpAMD64VCOMPRESSPSMasked512
OpAMD64VDIVPS512
OpAMD64VDIVPSMasked512
OpAMD64VFMADD213PS512
OpAMD64VRCP14PSMasked128
OpAMD64VRSQRTPS128
OpAMD64VRSQRT14PSMasked128
+ OpAMD64VCOMPRESSPSMasked128
OpAMD64VDIVPS128
OpAMD64VDIVPSMasked128
OpAMD64VFMADD213PS128
OpAMD64VRCP14PSMasked256
OpAMD64VRSQRTPS256
OpAMD64VRSQRT14PSMasked256
+ OpAMD64VCOMPRESSPSMasked256
OpAMD64VDIVPS256
OpAMD64VDIVPSMasked256
OpAMD64VFMADD213PS256
OpAMD64VRCP14PDMasked128
OpAMD64VRSQRT14PD128
OpAMD64VRSQRT14PDMasked128
+ OpAMD64VCOMPRESSPDMasked128
OpAMD64VDIVPD128
OpAMD64VDIVPDMasked128
OpAMD64VFMADD213PD128
OpAMD64VRCP14PDMasked256
OpAMD64VRSQRT14PD256
OpAMD64VRSQRT14PDMasked256
+ OpAMD64VCOMPRESSPDMasked256
OpAMD64VDIVPD256
OpAMD64VDIVPDMasked256
OpAMD64VFMADD213PD256
OpAMD64VRCP14PDMasked512
OpAMD64VRSQRT14PD512
OpAMD64VRSQRT14PDMasked512
+ OpAMD64VCOMPRESSPDMasked512
OpAMD64VDIVPD512
OpAMD64VDIVPDMasked512
OpAMD64VFMADD213PD512
OpAMD64VPABSWMasked256
OpAMD64VPADDW256
OpAMD64VPADDWMasked256
+ OpAMD64VPCOMPRESSWMasked256
OpAMD64VPCMPEQW256
OpAMD64VPCMPGTW256
OpAMD64VPMAXSW256
OpAMD64VPABSWMasked512
OpAMD64VPADDW512
OpAMD64VPADDWMasked512
+ OpAMD64VPCOMPRESSWMasked512
OpAMD64VPMAXSW512
OpAMD64VPMAXSWMasked512
OpAMD64VPMINSW512
OpAMD64VPABSWMasked128
OpAMD64VPADDW128
OpAMD64VPADDWMasked128
+ OpAMD64VPCOMPRESSWMasked128
OpAMD64VPCMPEQW128
OpAMD64VPCMPGTW128
OpAMD64VPMAXSW128
OpAMD64VPANDDMasked512
OpAMD64VPANDND512
OpAMD64VPANDNDMasked512
+ OpAMD64VPCOMPRESSDMasked512
OpAMD64VPMAXSD512
OpAMD64VPMAXSDMasked512
OpAMD64VPMINSD512
OpAMD64VPADDDMasked128
OpAMD64VPANDDMasked128
OpAMD64VPANDNDMasked128
+ OpAMD64VPCOMPRESSDMasked128
OpAMD64VPCMPEQD128
OpAMD64VPCMPGTD128
OpAMD64VPMAXSD128
OpAMD64VPADDDMasked256
OpAMD64VPANDDMasked256
OpAMD64VPANDNDMasked256
+ OpAMD64VPCOMPRESSDMasked256
OpAMD64VPCMPEQD256
OpAMD64VPCMPGTD256
OpAMD64VPMAXSD256
OpAMD64VPADDQMasked128
OpAMD64VPANDQMasked128
OpAMD64VPANDNQMasked128
+ OpAMD64VPCOMPRESSQMasked128
OpAMD64VPCMPEQQ128
OpAMD64VPCMPGTQ128
OpAMD64VPMAXSQ128
OpAMD64VPADDQMasked256
OpAMD64VPANDQMasked256
OpAMD64VPANDNQMasked256
+ OpAMD64VPCOMPRESSQMasked256
OpAMD64VPCMPEQQ256
OpAMD64VPCMPGTQ256
OpAMD64VPMAXSQ256
OpAMD64VPANDQMasked512
OpAMD64VPANDNQ512
OpAMD64VPANDNQMasked512
+ OpAMD64VPCOMPRESSQMasked512
OpAMD64VPMAXSQ512
OpAMD64VPMAXSQMasked512
OpAMD64VPMINSQ512
OpAMD64VPADDBMasked128
OpAMD64VPAND128
OpAMD64VPANDN128
+ OpAMD64VPCOMPRESSBMasked128
OpAMD64VPCMPEQB128
OpAMD64VPCMPGTB128
OpAMD64VPMAXSB128
OpAMD64VPADDBMasked256
OpAMD64VPAND256
OpAMD64VPANDN256
+ OpAMD64VPCOMPRESSBMasked256
OpAMD64VPCMPEQB256
OpAMD64VPCMPGTB256
OpAMD64VPMAXSB256
OpAMD64VPABSBMasked512
OpAMD64VPADDB512
OpAMD64VPADDBMasked512
+ OpAMD64VPCOMPRESSBMasked512
OpAMD64VPMAXSB512
OpAMD64VPMAXSBMasked512
OpAMD64VPMINSB512
OpAMD64VPMAXUDMasked512
OpAMD64VPMINUD512
OpAMD64VPMINUDMasked512
- OpAMD64VPERMPS512
OpAMD64VPERMD512
- OpAMD64VPERMI2D512
+ OpAMD64VPERMPS512
OpAMD64VPERMI2PS512
- OpAMD64VPERMI2DMasked512
+ OpAMD64VPERMI2D512
OpAMD64VPERMI2PSMasked512
+ OpAMD64VPERMI2DMasked512
OpAMD64VPERMPSMasked512
OpAMD64VPERMDMasked512
OpAMD64VPSRLD512
OpAMD64VPMINUD256
OpAMD64VPMINUDMasked256
OpAMD64VPMULUDQ256
- OpAMD64VPERMD256
OpAMD64VPERMPS256
+ OpAMD64VPERMD256
OpAMD64VPERMI2D256
OpAMD64VPERMI2PS256
- OpAMD64VPERMI2PSMasked256
OpAMD64VPERMI2DMasked256
+ OpAMD64VPERMI2PSMasked256
OpAMD64VPERMPSMasked256
OpAMD64VPERMDMasked256
OpAMD64VPSRLD256
OpAMD64VPMULUDQMasked128
OpAMD64VPERMI2PD128
OpAMD64VPERMI2Q128
- OpAMD64VPERMI2QMasked128
OpAMD64VPERMI2PDMasked128
+ OpAMD64VPERMI2QMasked128
OpAMD64VPSRLQ128
OpAMD64VPSRLQMasked128
OpAMD64VPSRLVQ128
OpAMD64VPMULUDQMasked256
OpAMD64VPERMQ256
OpAMD64VPERMPD256
- OpAMD64VPERMI2PD256
OpAMD64VPERMI2Q256
+ OpAMD64VPERMI2PD256
OpAMD64VPERMI2PDMasked256
OpAMD64VPERMI2QMasked256
- OpAMD64VPERMPDMasked256
OpAMD64VPERMQMasked256
+ OpAMD64VPERMPDMasked256
OpAMD64VPSRLQ256
OpAMD64VPSRLQMasked256
OpAMD64VPSRLVQ256
OpAMD64VPERMI2PD512
OpAMD64VPERMI2QMasked512
OpAMD64VPERMI2PDMasked512
- OpAMD64VPERMPDMasked512
OpAMD64VPERMQMasked512
+ OpAMD64VPERMPDMasked512
OpAMD64VPSRLQ512
OpAMD64VPSRLQMasked512
OpAMD64VPSRLVQ512
OpApproximateReciprocalMaskedFloat32x16
OpApproximateReciprocalOfSqrtFloat32x16
OpApproximateReciprocalOfSqrtMaskedFloat32x16
+ OpCompressFloat32x16
OpDivFloat32x16
OpDivMaskedFloat32x16
OpEqualFloat32x16
OpApproximateReciprocalOfSqrtFloat32x4
OpApproximateReciprocalOfSqrtMaskedFloat32x4
OpCeilFloat32x4
+ OpCompressFloat32x4
OpDivFloat32x4
OpDivMaskedFloat32x4
OpDotProdBroadcastFloat32x4
OpApproximateReciprocalOfSqrtFloat32x8
OpApproximateReciprocalOfSqrtMaskedFloat32x8
OpCeilFloat32x8
+ OpCompressFloat32x8
OpDivFloat32x8
OpDivMaskedFloat32x8
OpDotProdBroadcastFloat32x8
OpApproximateReciprocalOfSqrtFloat64x2
OpApproximateReciprocalOfSqrtMaskedFloat64x2
OpCeilFloat64x2
+ OpCompressFloat64x2
OpDivFloat64x2
OpDivMaskedFloat64x2
OpDotProdBroadcastFloat64x2
OpApproximateReciprocalOfSqrtFloat64x4
OpApproximateReciprocalOfSqrtMaskedFloat64x4
OpCeilFloat64x4
+ OpCompressFloat64x4
OpDivFloat64x4
OpDivMaskedFloat64x4
OpEqualFloat64x4
OpApproximateReciprocalMaskedFloat64x8
OpApproximateReciprocalOfSqrtFloat64x8
OpApproximateReciprocalOfSqrtMaskedFloat64x8
+ OpCompressFloat64x8
OpDivFloat64x8
OpDivMaskedFloat64x8
OpEqualFloat64x8
OpAddMaskedInt16x16
OpAndInt16x16
OpAndNotInt16x16
+ OpCompressInt16x16
OpEqualInt16x16
OpEqualMaskedInt16x16
OpGreaterInt16x16
OpAbsoluteMaskedInt16x32
OpAddInt16x32
OpAddMaskedInt16x32
+ OpCompressInt16x32
OpEqualInt16x32
OpEqualMaskedInt16x32
OpGreaterInt16x32
OpAddMaskedInt16x8
OpAndInt16x8
OpAndNotInt16x8
+ OpCompressInt16x8
OpEqualInt16x8
OpEqualMaskedInt16x8
OpGreaterInt16x8
OpAndMaskedInt32x16
OpAndNotInt32x16
OpAndNotMaskedInt32x16
+ OpCompressInt32x16
OpEqualInt32x16
OpEqualMaskedInt32x16
OpGreaterInt32x16
OpAndMaskedInt32x4
OpAndNotInt32x4
OpAndNotMaskedInt32x4
+ OpCompressInt32x4
OpEqualInt32x4
OpEqualMaskedInt32x4
OpGreaterInt32x4
OpAndMaskedInt32x8
OpAndNotInt32x8
OpAndNotMaskedInt32x8
+ OpCompressInt32x8
OpEqualInt32x8
OpEqualMaskedInt32x8
OpGreaterInt32x8
OpAndMaskedInt64x2
OpAndNotInt64x2
OpAndNotMaskedInt64x2
+ OpCompressInt64x2
OpEqualInt64x2
OpEqualMaskedInt64x2
OpGreaterInt64x2
OpAndMaskedInt64x4
OpAndNotInt64x4
OpAndNotMaskedInt64x4
+ OpCompressInt64x4
OpEqualInt64x4
OpEqualMaskedInt64x4
OpGreaterInt64x4
OpAndMaskedInt64x8
OpAndNotInt64x8
OpAndNotMaskedInt64x8
+ OpCompressInt64x8
OpEqualInt64x8
OpEqualMaskedInt64x8
OpGreaterInt64x8
OpAddMaskedInt8x16
OpAndInt8x16
OpAndNotInt8x16
+ OpCompressInt8x16
OpEqualInt8x16
OpEqualMaskedInt8x16
OpGreaterInt8x16
OpAddMaskedInt8x32
OpAndInt8x32
OpAndNotInt8x32
+ OpCompressInt8x32
OpEqualInt8x32
OpEqualMaskedInt8x32
OpGreaterInt8x32
OpAbsoluteMaskedInt8x64
OpAddInt8x64
OpAddMaskedInt8x64
+ OpCompressInt8x64
OpEqualInt8x64
OpEqualMaskedInt8x64
OpGreaterInt8x64
OpAndNotUint16x16
OpAverageUint16x16
OpAverageMaskedUint16x16
+ OpCompressUint16x16
OpEqualUint16x16
OpEqualMaskedUint16x16
OpGreaterUint16x16
OpPermuteUint16x16
OpPermute2Uint16x16
OpPermute2Int16x16
- OpPermute2MaskedUint16x16
OpPermute2MaskedInt16x16
- OpPermuteMaskedUint16x16
+ OpPermute2MaskedUint16x16
OpPermuteMaskedInt16x16
+ OpPermuteMaskedUint16x16
OpPopCountUint16x16
OpPopCountMaskedUint16x16
OpSaturatedAddUint16x16
OpAddMaskedUint16x32
OpAverageUint16x32
OpAverageMaskedUint16x32
+ OpCompressUint16x32
OpEqualUint16x32
OpEqualMaskedUint16x32
OpGreaterUint16x32
OpMulHighMaskedUint16x32
OpNotEqualUint16x32
OpNotEqualMaskedUint16x32
- OpPermuteUint16x32
OpPermuteInt16x32
+ OpPermuteUint16x32
OpPermute2Int16x32
OpPermute2Uint16x32
- OpPermute2MaskedUint16x32
OpPermute2MaskedInt16x32
+ OpPermute2MaskedUint16x32
OpPermuteMaskedUint16x32
OpPermuteMaskedInt16x32
OpPopCountUint16x32
OpAndNotUint16x8
OpAverageUint16x8
OpAverageMaskedUint16x8
+ OpCompressUint16x8
OpEqualUint16x8
OpEqualMaskedUint16x8
OpGreaterUint16x8
OpAndMaskedUint32x16
OpAndNotUint32x16
OpAndNotMaskedUint32x16
+ OpCompressUint32x16
OpEqualUint32x16
OpEqualMaskedUint32x16
OpGreaterUint32x16
OpOrUint32x16
OpOrMaskedUint32x16
OpPermuteInt32x16
- OpPermuteUint32x16
OpPermuteFloat32x16
- OpPermute2Int32x16
+ OpPermuteUint32x16
OpPermute2Uint32x16
OpPermute2Float32x16
+ OpPermute2Int32x16
OpPermute2MaskedUint32x16
OpPermute2MaskedInt32x16
OpPermute2MaskedFloat32x16
+ OpPermuteMaskedFloat32x16
OpPermuteMaskedUint32x16
OpPermuteMaskedInt32x16
- OpPermuteMaskedFloat32x16
OpPopCountUint32x16
OpPopCountMaskedUint32x16
OpRotateLeftUint32x16
OpAndMaskedUint32x4
OpAndNotUint32x4
OpAndNotMaskedUint32x4
+ OpCompressUint32x4
OpEqualUint32x4
OpEqualMaskedUint32x4
OpGreaterUint32x4
OpPairwiseAddUint32x4
OpPairwiseSubUint32x4
OpPermute2Uint32x4
- OpPermute2Float32x4
OpPermute2Int32x4
- OpPermute2MaskedUint32x4
- OpPermute2MaskedInt32x4
+ OpPermute2Float32x4
OpPermute2MaskedFloat32x4
+ OpPermute2MaskedInt32x4
+ OpPermute2MaskedUint32x4
OpPopCountUint32x4
OpPopCountMaskedUint32x4
OpRotateLeftUint32x4
OpAndMaskedUint32x8
OpAndNotUint32x8
OpAndNotMaskedUint32x8
+ OpCompressUint32x8
OpEqualUint32x8
OpEqualMaskedUint32x8
OpGreaterUint32x8
OpOrMaskedUint32x8
OpPairwiseAddUint32x8
OpPairwiseSubUint32x8
+ OpPermuteUint32x8
OpPermuteInt32x8
OpPermuteFloat32x8
- OpPermuteUint32x8
OpPermute2Uint32x8
OpPermute2Float32x8
OpPermute2Int32x8
OpPermute2MaskedFloat32x8
- OpPermute2MaskedUint32x8
OpPermute2MaskedInt32x8
+ OpPermute2MaskedUint32x8
OpPermuteMaskedInt32x8
- OpPermuteMaskedFloat32x8
OpPermuteMaskedUint32x8
+ OpPermuteMaskedFloat32x8
OpPopCountUint32x8
OpPopCountMaskedUint32x8
OpRotateLeftUint32x8
OpAndMaskedUint64x2
OpAndNotUint64x2
OpAndNotMaskedUint64x2
+ OpCompressUint64x2
OpEqualUint64x2
OpEqualMaskedUint64x2
OpGreaterUint64x2
OpNotEqualMaskedUint64x2
OpOrUint64x2
OpOrMaskedUint64x2
+ OpPermute2Float64x2
OpPermute2Uint64x2
OpPermute2Int64x2
- OpPermute2Float64x2
- OpPermute2MaskedUint64x2
OpPermute2MaskedInt64x2
+ OpPermute2MaskedUint64x2
OpPermute2MaskedFloat64x2
OpPopCountUint64x2
OpPopCountMaskedUint64x2
OpAndMaskedUint64x4
OpAndNotUint64x4
OpAndNotMaskedUint64x4
+ OpCompressUint64x4
OpEqualUint64x4
OpEqualMaskedUint64x4
OpGreaterUint64x4
OpNotEqualMaskedUint64x4
OpOrUint64x4
OpOrMaskedUint64x4
+ OpPermuteFloat64x4
OpPermuteUint64x4
OpPermuteInt64x4
- OpPermuteFloat64x4
- OpPermute2Uint64x4
OpPermute2Int64x4
+ OpPermute2Uint64x4
OpPermute2Float64x4
- OpPermute2MaskedInt64x4
- OpPermute2MaskedUint64x4
OpPermute2MaskedFloat64x4
+ OpPermute2MaskedUint64x4
+ OpPermute2MaskedInt64x4
OpPermuteMaskedFloat64x4
- OpPermuteMaskedInt64x4
OpPermuteMaskedUint64x4
+ OpPermuteMaskedInt64x4
OpPopCountUint64x4
OpPopCountMaskedUint64x4
OpRotateLeftUint64x4
OpAndMaskedUint64x8
OpAndNotUint64x8
OpAndNotMaskedUint64x8
+ OpCompressUint64x8
OpEqualUint64x8
OpEqualMaskedUint64x8
OpGreaterUint64x8
OpNotEqualMaskedUint64x8
OpOrUint64x8
OpOrMaskedUint64x8
- OpPermuteUint64x8
OpPermuteInt64x8
+ OpPermuteUint64x8
OpPermuteFloat64x8
- OpPermute2Int64x8
OpPermute2Uint64x8
OpPermute2Float64x8
+ OpPermute2Int64x8
OpPermute2MaskedUint64x8
- OpPermute2MaskedInt64x8
OpPermute2MaskedFloat64x8
- OpPermuteMaskedFloat64x8
- OpPermuteMaskedInt64x8
+ OpPermute2MaskedInt64x8
OpPermuteMaskedUint64x8
+ OpPermuteMaskedInt64x8
+ OpPermuteMaskedFloat64x8
OpPopCountUint64x8
OpPopCountMaskedUint64x8
OpRotateLeftUint64x8
OpAndNotUint8x16
OpAverageUint8x16
OpAverageMaskedUint8x16
+ OpCompressUint8x16
OpEqualUint8x16
OpEqualMaskedUint8x16
OpGaloisFieldMulUint8x16
OpOrUint8x16
OpPermuteUint8x16
OpPermuteInt8x16
- OpPermute2Uint8x16
OpPermute2Int8x16
+ OpPermute2Uint8x16
OpPermute2MaskedInt8x16
OpPermute2MaskedUint8x16
- OpPermuteMaskedInt8x16
OpPermuteMaskedUint8x16
+ OpPermuteMaskedInt8x16
OpPopCountUint8x16
OpPopCountMaskedUint8x16
OpSaturatedAddUint8x16
OpAndNotUint8x32
OpAverageUint8x32
OpAverageMaskedUint8x32
+ OpCompressUint8x32
OpEqualUint8x32
OpEqualMaskedUint8x32
OpGaloisFieldMulUint8x32
OpPermuteInt8x32
OpPermute2Int8x32
OpPermute2Uint8x32
- OpPermute2MaskedUint8x32
OpPermute2MaskedInt8x32
- OpPermuteMaskedUint8x32
+ OpPermute2MaskedUint8x32
OpPermuteMaskedInt8x32
+ OpPermuteMaskedUint8x32
OpPopCountUint8x32
OpPopCountMaskedUint8x32
OpSaturatedAddUint8x32
OpAddMaskedUint8x64
OpAverageUint8x64
OpAverageMaskedUint8x64
+ OpCompressUint8x64
OpEqualUint8x64
OpEqualMaskedUint8x64
OpGaloisFieldMulUint8x64
OpMinMaskedUint8x64
OpNotEqualUint8x64
OpNotEqualMaskedUint8x64
- OpPermuteUint8x64
OpPermuteInt8x64
- OpPermute2Int8x64
+ OpPermuteUint8x64
OpPermute2Uint8x64
+ OpPermute2Int8x64
OpPermute2MaskedUint8x64
OpPermute2MaskedInt8x64
- OpPermuteMaskedInt8x64
OpPermuteMaskedUint8x64
+ OpPermuteMaskedInt8x64
OpPopCountUint8x64
OpPopCountMaskedUint8x64
OpSaturatedAddUint8x64
},
},
},
+ {
+ name: "VCOMPRESSPSMasked512",
+ argLen: 2,
+ asm: x86.AVCOMPRESSPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VDIVPS512",
argLen: 2,
},
},
},
+ {
+ name: "VCOMPRESSPSMasked128",
+ argLen: 2,
+ asm: x86.AVCOMPRESSPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VDIVPS128",
argLen: 2,
},
},
},
+ {
+ name: "VCOMPRESSPSMasked256",
+ argLen: 2,
+ asm: x86.AVCOMPRESSPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VDIVPS256",
argLen: 2,
},
},
},
+ {
+ name: "VCOMPRESSPDMasked128",
+ argLen: 2,
+ asm: x86.AVCOMPRESSPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VDIVPD128",
argLen: 2,
},
},
},
+ {
+ name: "VCOMPRESSPDMasked256",
+ argLen: 2,
+ asm: x86.AVCOMPRESSPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VDIVPD256",
argLen: 2,
},
},
},
+ {
+ name: "VCOMPRESSPDMasked512",
+ argLen: 2,
+ asm: x86.AVCOMPRESSPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VDIVPD512",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSWMasked256",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPCMPEQW256",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSWMasked512",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPMAXSW512",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSWMasked128",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPCMPEQW128",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSDMasked512",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPMAXSD512",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSDMasked128",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPCMPEQD128",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSDMasked256",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPCMPEQD256",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSQMasked128",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPCMPEQQ128",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSQMasked256",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPCMPEQQ256",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSQMasked512",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPMAXSQ512",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSBMasked128",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPCMPEQB128",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSBMasked256",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPCMPEQB256",
argLen: 2,
},
},
},
+ {
+ name: "VPCOMPRESSBMasked512",
+ argLen: 2,
+ asm: x86.AVPCOMPRESSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
{
name: "VPMAXSB512",
argLen: 2,
},
},
{
- name: "VPERMPS512",
+ name: "VPERMD512",
argLen: 2,
- asm: x86.AVPERMPS,
+ asm: x86.AVPERMD,
reg: regInfo{
inputs: []inputInfo{
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
},
},
{
- name: "VPERMD512",
+ name: "VPERMPS512",
argLen: 2,
- asm: x86.AVPERMD,
+ asm: x86.AVPERMPS,
reg: regInfo{
inputs: []inputInfo{
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
},
},
{
- name: "VPERMI2D512",
+ name: "VPERMI2PS512",
argLen: 3,
resultInArg0: true,
- asm: x86.AVPERMI2D,
+ asm: x86.AVPERMI2PS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VPERMI2PS512",
+ name: "VPERMI2D512",
argLen: 3,
resultInArg0: true,
- asm: x86.AVPERMI2PS,
+ asm: x86.AVPERMI2D,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VPERMI2DMasked512",
+ name: "VPERMI2PSMasked512",
argLen: 4,
resultInArg0: true,
- asm: x86.AVPERMI2D,
+ asm: x86.AVPERMI2PS,
reg: regInfo{
inputs: []inputInfo{
{3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VPERMI2PSMasked512",
+ name: "VPERMI2DMasked512",
argLen: 4,
resultInArg0: true,
- asm: x86.AVPERMI2PS,
+ asm: x86.AVPERMI2D,
reg: regInfo{
inputs: []inputInfo{
{3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VPERMD256",
+ name: "VPERMPS256",
argLen: 2,
- asm: x86.AVPERMD,
+ asm: x86.AVPERMPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VPERMPS256",
+ name: "VPERMD256",
argLen: 2,
- asm: x86.AVPERMPS,
+ asm: x86.AVPERMD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VPERMI2PSMasked256",
+ name: "VPERMI2DMasked256",
argLen: 4,
resultInArg0: true,
- asm: x86.AVPERMI2PS,
+ asm: x86.AVPERMI2D,
reg: regInfo{
inputs: []inputInfo{
{3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VPERMI2DMasked256",
+ name: "VPERMI2PSMasked256",
argLen: 4,
resultInArg0: true,
- asm: x86.AVPERMI2D,
+ asm: x86.AVPERMI2PS,
reg: regInfo{
inputs: []inputInfo{
{3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VPERMI2QMasked128",
+ name: "VPERMI2PDMasked128",
argLen: 4,
resultInArg0: true,
- asm: x86.AVPERMI2Q,
+ asm: x86.AVPERMI2PD,
reg: regInfo{
inputs: []inputInfo{
{3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VPERMI2PDMasked128",
+ name: "VPERMI2QMasked128",
argLen: 4,
resultInArg0: true,
- asm: x86.AVPERMI2PD,
+ asm: x86.AVPERMI2Q,
reg: regInfo{
inputs: []inputInfo{
{3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VPERMI2PD256",
+ name: "VPERMI2Q256",
argLen: 3,
resultInArg0: true,
- asm: x86.AVPERMI2PD,
+ asm: x86.AVPERMI2Q,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VPERMI2Q256",
+ name: "VPERMI2PD256",
argLen: 3,
resultInArg0: true,
- asm: x86.AVPERMI2Q,
+ asm: x86.AVPERMI2PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VPERMPDMasked256",
+ name: "VPERMQMasked256",
argLen: 3,
- asm: x86.AVPERMPD,
+ asm: x86.AVPERMQ,
reg: regInfo{
inputs: []inputInfo{
{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VPERMQMasked256",
+ name: "VPERMPDMasked256",
argLen: 3,
- asm: x86.AVPERMQ,
+ asm: x86.AVPERMPD,
reg: regInfo{
inputs: []inputInfo{
{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VPERMPDMasked512",
+ name: "VPERMQMasked512",
argLen: 3,
- asm: x86.AVPERMPD,
+ asm: x86.AVPERMQ,
reg: regInfo{
inputs: []inputInfo{
{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VPERMQMasked512",
+ name: "VPERMPDMasked512",
argLen: 3,
- asm: x86.AVPERMQ,
+ asm: x86.AVPERMPD,
reg: regInfo{
inputs: []inputInfo{
{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
argLen: 2,
generic: true,
},
+ {
+ name: "CompressFloat32x16",
+ argLen: 2,
+ generic: true,
+ },
{
name: "DivFloat32x16",
argLen: 2,
argLen: 1,
generic: true,
},
+ {
+ name: "CompressFloat32x4",
+ argLen: 2,
+ generic: true,
+ },
{
name: "DivFloat32x4",
argLen: 2,
argLen: 1,
generic: true,
},
+ {
+ name: "CompressFloat32x8",
+ argLen: 2,
+ generic: true,
+ },
{
name: "DivFloat32x8",
argLen: 2,
argLen: 1,
generic: true,
},
+ {
+ name: "CompressFloat64x2",
+ argLen: 2,
+ generic: true,
+ },
{
name: "DivFloat64x2",
argLen: 2,
argLen: 1,
generic: true,
},
+ {
+ name: "CompressFloat64x4",
+ argLen: 2,
+ generic: true,
+ },
{
name: "DivFloat64x4",
argLen: 2,
argLen: 2,
generic: true,
},
+ {
+ name: "CompressFloat64x8",
+ argLen: 2,
+ generic: true,
+ },
{
name: "DivFloat64x8",
argLen: 2,
argLen: 2,
generic: true,
},
+ {
+ name: "CompressInt16x16",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt16x16",
argLen: 2,
commutative: true,
generic: true,
},
+ {
+ name: "CompressInt16x32",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt16x32",
argLen: 2,
argLen: 2,
generic: true,
},
+ {
+ name: "CompressInt16x8",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt16x8",
argLen: 2,
argLen: 3,
generic: true,
},
+ {
+ name: "CompressInt32x16",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt32x16",
argLen: 2,
argLen: 3,
generic: true,
},
+ {
+ name: "CompressInt32x4",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt32x4",
argLen: 2,
argLen: 3,
generic: true,
},
+ {
+ name: "CompressInt32x8",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt32x8",
argLen: 2,
argLen: 3,
generic: true,
},
+ {
+ name: "CompressInt64x2",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt64x2",
argLen: 2,
argLen: 3,
generic: true,
},
+ {
+ name: "CompressInt64x4",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt64x4",
argLen: 2,
argLen: 3,
generic: true,
},
+ {
+ name: "CompressInt64x8",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt64x8",
argLen: 2,
argLen: 2,
generic: true,
},
+ {
+ name: "CompressInt8x16",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt8x16",
argLen: 2,
argLen: 2,
generic: true,
},
+ {
+ name: "CompressInt8x32",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt8x32",
argLen: 2,
commutative: true,
generic: true,
},
+ {
+ name: "CompressInt8x64",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualInt8x64",
argLen: 2,
commutative: true,
generic: true,
},
+ {
+ name: "CompressUint16x16",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint16x16",
argLen: 2,
generic: true,
},
{
- name: "Permute2MaskedUint16x16",
+ name: "Permute2MaskedInt16x16",
argLen: 4,
generic: true,
},
{
- name: "Permute2MaskedInt16x16",
+ name: "Permute2MaskedUint16x16",
argLen: 4,
generic: true,
},
{
- name: "PermuteMaskedUint16x16",
+ name: "PermuteMaskedInt16x16",
argLen: 3,
generic: true,
},
{
- name: "PermuteMaskedInt16x16",
+ name: "PermuteMaskedUint16x16",
argLen: 3,
generic: true,
},
commutative: true,
generic: true,
},
+ {
+ name: "CompressUint16x32",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint16x32",
argLen: 2,
generic: true,
},
{
- name: "PermuteUint16x32",
+ name: "PermuteInt16x32",
argLen: 2,
generic: true,
},
{
- name: "PermuteInt16x32",
+ name: "PermuteUint16x32",
argLen: 2,
generic: true,
},
generic: true,
},
{
- name: "Permute2MaskedUint16x32",
+ name: "Permute2MaskedInt16x32",
argLen: 4,
generic: true,
},
{
- name: "Permute2MaskedInt16x32",
+ name: "Permute2MaskedUint16x32",
argLen: 4,
generic: true,
},
commutative: true,
generic: true,
},
+ {
+ name: "CompressUint16x8",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint16x8",
argLen: 2,
argLen: 3,
generic: true,
},
+ {
+ name: "CompressUint32x16",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint32x16",
argLen: 2,
generic: true,
},
{
- name: "PermuteUint32x16",
+ name: "PermuteFloat32x16",
argLen: 2,
generic: true,
},
{
- name: "PermuteFloat32x16",
+ name: "PermuteUint32x16",
argLen: 2,
generic: true,
},
{
- name: "Permute2Int32x16",
+ name: "Permute2Uint32x16",
argLen: 3,
generic: true,
},
{
- name: "Permute2Uint32x16",
+ name: "Permute2Float32x16",
argLen: 3,
generic: true,
},
{
- name: "Permute2Float32x16",
+ name: "Permute2Int32x16",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "PermuteMaskedUint32x16",
+ name: "PermuteMaskedFloat32x16",
argLen: 3,
generic: true,
},
{
- name: "PermuteMaskedInt32x16",
+ name: "PermuteMaskedUint32x16",
argLen: 3,
generic: true,
},
{
- name: "PermuteMaskedFloat32x16",
+ name: "PermuteMaskedInt32x16",
argLen: 3,
generic: true,
},
argLen: 3,
generic: true,
},
+ {
+ name: "CompressUint32x4",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint32x4",
argLen: 2,
generic: true,
},
{
- name: "Permute2Float32x4",
+ name: "Permute2Int32x4",
argLen: 3,
generic: true,
},
{
- name: "Permute2Int32x4",
+ name: "Permute2Float32x4",
argLen: 3,
generic: true,
},
{
- name: "Permute2MaskedUint32x4",
+ name: "Permute2MaskedFloat32x4",
argLen: 4,
generic: true,
},
generic: true,
},
{
- name: "Permute2MaskedFloat32x4",
+ name: "Permute2MaskedUint32x4",
argLen: 4,
generic: true,
},
argLen: 3,
generic: true,
},
+ {
+ name: "CompressUint32x8",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint32x8",
argLen: 2,
generic: true,
},
{
- name: "PermuteInt32x8",
+ name: "PermuteUint32x8",
argLen: 2,
generic: true,
},
{
- name: "PermuteFloat32x8",
+ name: "PermuteInt32x8",
argLen: 2,
generic: true,
},
{
- name: "PermuteUint32x8",
+ name: "PermuteFloat32x8",
argLen: 2,
generic: true,
},
generic: true,
},
{
- name: "Permute2MaskedUint32x8",
+ name: "Permute2MaskedInt32x8",
argLen: 4,
generic: true,
},
{
- name: "Permute2MaskedInt32x8",
+ name: "Permute2MaskedUint32x8",
argLen: 4,
generic: true,
},
generic: true,
},
{
- name: "PermuteMaskedFloat32x8",
+ name: "PermuteMaskedUint32x8",
argLen: 3,
generic: true,
},
{
- name: "PermuteMaskedUint32x8",
+ name: "PermuteMaskedFloat32x8",
argLen: 3,
generic: true,
},
argLen: 3,
generic: true,
},
+ {
+ name: "CompressUint64x2",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint64x2",
argLen: 2,
generic: true,
},
{
- name: "Permute2Uint64x2",
+ name: "Permute2Float64x2",
argLen: 3,
generic: true,
},
{
- name: "Permute2Int64x2",
+ name: "Permute2Uint64x2",
argLen: 3,
generic: true,
},
{
- name: "Permute2Float64x2",
+ name: "Permute2Int64x2",
argLen: 3,
generic: true,
},
{
- name: "Permute2MaskedUint64x2",
+ name: "Permute2MaskedInt64x2",
argLen: 4,
generic: true,
},
{
- name: "Permute2MaskedInt64x2",
+ name: "Permute2MaskedUint64x2",
argLen: 4,
generic: true,
},
argLen: 3,
generic: true,
},
+ {
+ name: "CompressUint64x4",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint64x4",
argLen: 2,
generic: true,
},
{
- name: "PermuteUint64x4",
+ name: "PermuteFloat64x4",
argLen: 2,
generic: true,
},
{
- name: "PermuteInt64x4",
+ name: "PermuteUint64x4",
argLen: 2,
generic: true,
},
{
- name: "PermuteFloat64x4",
+ name: "PermuteInt64x4",
argLen: 2,
generic: true,
},
{
- name: "Permute2Uint64x4",
+ name: "Permute2Int64x4",
argLen: 3,
generic: true,
},
{
- name: "Permute2Int64x4",
+ name: "Permute2Uint64x4",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "Permute2MaskedInt64x4",
+ name: "Permute2MaskedFloat64x4",
argLen: 4,
generic: true,
},
generic: true,
},
{
- name: "Permute2MaskedFloat64x4",
+ name: "Permute2MaskedInt64x4",
argLen: 4,
generic: true,
},
generic: true,
},
{
- name: "PermuteMaskedInt64x4",
+ name: "PermuteMaskedUint64x4",
argLen: 3,
generic: true,
},
{
- name: "PermuteMaskedUint64x4",
+ name: "PermuteMaskedInt64x4",
argLen: 3,
generic: true,
},
argLen: 3,
generic: true,
},
+ {
+ name: "CompressUint64x8",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint64x8",
argLen: 2,
generic: true,
},
{
- name: "PermuteUint64x8",
+ name: "PermuteInt64x8",
argLen: 2,
generic: true,
},
{
- name: "PermuteInt64x8",
+ name: "PermuteUint64x8",
argLen: 2,
generic: true,
},
generic: true,
},
{
- name: "Permute2Int64x8",
+ name: "Permute2Uint64x8",
argLen: 3,
generic: true,
},
{
- name: "Permute2Uint64x8",
+ name: "Permute2Float64x8",
argLen: 3,
generic: true,
},
{
- name: "Permute2Float64x8",
+ name: "Permute2Int64x8",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "Permute2MaskedInt64x8",
+ name: "Permute2MaskedFloat64x8",
argLen: 4,
generic: true,
},
{
- name: "Permute2MaskedFloat64x8",
+ name: "Permute2MaskedInt64x8",
argLen: 4,
generic: true,
},
{
- name: "PermuteMaskedFloat64x8",
+ name: "PermuteMaskedUint64x8",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "PermuteMaskedUint64x8",
+ name: "PermuteMaskedFloat64x8",
argLen: 3,
generic: true,
},
commutative: true,
generic: true,
},
+ {
+ name: "CompressUint8x16",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint8x16",
argLen: 2,
generic: true,
},
{
- name: "Permute2Uint8x16",
+ name: "Permute2Int8x16",
argLen: 3,
generic: true,
},
{
- name: "Permute2Int8x16",
+ name: "Permute2Uint8x16",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "PermuteMaskedInt8x16",
+ name: "PermuteMaskedUint8x16",
argLen: 3,
generic: true,
},
{
- name: "PermuteMaskedUint8x16",
+ name: "PermuteMaskedInt8x16",
argLen: 3,
generic: true,
},
commutative: true,
generic: true,
},
+ {
+ name: "CompressUint8x32",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint8x32",
argLen: 2,
generic: true,
},
{
- name: "Permute2MaskedUint8x32",
+ name: "Permute2MaskedInt8x32",
argLen: 4,
generic: true,
},
{
- name: "Permute2MaskedInt8x32",
+ name: "Permute2MaskedUint8x32",
argLen: 4,
generic: true,
},
{
- name: "PermuteMaskedUint8x32",
+ name: "PermuteMaskedInt8x32",
argLen: 3,
generic: true,
},
{
- name: "PermuteMaskedInt8x32",
+ name: "PermuteMaskedUint8x32",
argLen: 3,
generic: true,
},
commutative: true,
generic: true,
},
+ {
+ name: "CompressUint8x64",
+ argLen: 2,
+ generic: true,
+ },
{
name: "EqualUint8x64",
argLen: 2,
generic: true,
},
{
- name: "PermuteUint8x64",
+ name: "PermuteInt8x64",
argLen: 2,
generic: true,
},
{
- name: "PermuteInt8x64",
+ name: "PermuteUint8x64",
argLen: 2,
generic: true,
},
{
- name: "Permute2Int8x64",
+ name: "Permute2Uint8x64",
argLen: 3,
generic: true,
},
{
- name: "Permute2Uint8x64",
+ name: "Permute2Int8x64",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "PermuteMaskedInt8x64",
+ name: "PermuteMaskedUint8x64",
argLen: 3,
generic: true,
},
{
- name: "PermuteMaskedUint8x64",
+ name: "PermuteMaskedInt8x64",
argLen: 3,
generic: true,
},
case OpCom8:
v.Op = OpAMD64NOTL
return true
+ case OpCompressFloat32x16:
+ return rewriteValueAMD64_OpCompressFloat32x16(v)
+ case OpCompressFloat32x4:
+ return rewriteValueAMD64_OpCompressFloat32x4(v)
+ case OpCompressFloat32x8:
+ return rewriteValueAMD64_OpCompressFloat32x8(v)
+ case OpCompressFloat64x2:
+ return rewriteValueAMD64_OpCompressFloat64x2(v)
+ case OpCompressFloat64x4:
+ return rewriteValueAMD64_OpCompressFloat64x4(v)
+ case OpCompressFloat64x8:
+ return rewriteValueAMD64_OpCompressFloat64x8(v)
+ case OpCompressInt16x16:
+ return rewriteValueAMD64_OpCompressInt16x16(v)
+ case OpCompressInt16x32:
+ return rewriteValueAMD64_OpCompressInt16x32(v)
+ case OpCompressInt16x8:
+ return rewriteValueAMD64_OpCompressInt16x8(v)
+ case OpCompressInt32x16:
+ return rewriteValueAMD64_OpCompressInt32x16(v)
+ case OpCompressInt32x4:
+ return rewriteValueAMD64_OpCompressInt32x4(v)
+ case OpCompressInt32x8:
+ return rewriteValueAMD64_OpCompressInt32x8(v)
+ case OpCompressInt64x2:
+ return rewriteValueAMD64_OpCompressInt64x2(v)
+ case OpCompressInt64x4:
+ return rewriteValueAMD64_OpCompressInt64x4(v)
+ case OpCompressInt64x8:
+ return rewriteValueAMD64_OpCompressInt64x8(v)
+ case OpCompressInt8x16:
+ return rewriteValueAMD64_OpCompressInt8x16(v)
+ case OpCompressInt8x32:
+ return rewriteValueAMD64_OpCompressInt8x32(v)
+ case OpCompressInt8x64:
+ return rewriteValueAMD64_OpCompressInt8x64(v)
+ case OpCompressUint16x16:
+ return rewriteValueAMD64_OpCompressUint16x16(v)
+ case OpCompressUint16x32:
+ return rewriteValueAMD64_OpCompressUint16x32(v)
+ case OpCompressUint16x8:
+ return rewriteValueAMD64_OpCompressUint16x8(v)
+ case OpCompressUint32x16:
+ return rewriteValueAMD64_OpCompressUint32x16(v)
+ case OpCompressUint32x4:
+ return rewriteValueAMD64_OpCompressUint32x4(v)
+ case OpCompressUint32x8:
+ return rewriteValueAMD64_OpCompressUint32x8(v)
+ case OpCompressUint64x2:
+ return rewriteValueAMD64_OpCompressUint64x2(v)
+ case OpCompressUint64x4:
+ return rewriteValueAMD64_OpCompressUint64x4(v)
+ case OpCompressUint64x8:
+ return rewriteValueAMD64_OpCompressUint64x8(v)
+ case OpCompressUint8x16:
+ return rewriteValueAMD64_OpCompressUint8x16(v)
+ case OpCompressUint8x32:
+ return rewriteValueAMD64_OpCompressUint8x32(v)
+ case OpCompressUint8x64:
+ return rewriteValueAMD64_OpCompressUint8x64(v)
case OpCondSelect:
return rewriteValueAMD64_OpCondSelect(v)
case OpConst16:
return true
}
}
+func rewriteValueAMD64_OpCompressFloat32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressFloat32x16 x mask)
+ // result: (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VCOMPRESSPSMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressFloat32x4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressFloat32x4 x mask)
+ // result: (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VCOMPRESSPSMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressFloat32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressFloat32x8 x mask)
+ // result: (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VCOMPRESSPSMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressFloat64x2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressFloat64x2 x mask)
+ // result: (VCOMPRESSPDMasked128 x (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VCOMPRESSPDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressFloat64x4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressFloat64x4 x mask)
+ // result: (VCOMPRESSPDMasked256 x (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VCOMPRESSPDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressFloat64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressFloat64x8 x mask)
+ // result: (VCOMPRESSPDMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VCOMPRESSPDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt16x16 x mask)
+ // result: (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSWMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt16x32 x mask)
+ // result: (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSWMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt16x8 x mask)
+ // result: (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSWMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt32x16 x mask)
+ // result: (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt32x4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt32x4 x mask)
+ // result: (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt32x8 x mask)
+ // result: (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt64x2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt64x2 x mask)
+ // result: (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSQMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt64x4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt64x4 x mask)
+ // result: (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSQMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt64x8 x mask)
+ // result: (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSQMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt8x16 x mask)
+ // result: (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSBMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt8x32 x mask)
+ // result: (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSBMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressInt8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressInt8x64 x mask)
+ // result: (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSBMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint16x16 x mask)
+ // result: (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSWMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint16x32 x mask)
+ // result: (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSWMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint16x8 x mask)
+ // result: (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSWMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint32x16 x mask)
+ // result: (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint32x4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint32x4 x mask)
+ // result: (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint32x8 x mask)
+ // result: (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint64x2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint64x2 x mask)
+ // result: (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSQMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint64x4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint64x4 x mask)
+ // result: (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSQMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint64x8 x mask)
+ // result: (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSQMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint8x16 x mask)
+ // result: (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSBMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint8x32 x mask)
+ // result: (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSBMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCompressUint8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CompressUint8x64 x mask)
+ // result: (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ mask := v_1
+ v.reset(OpAMD64VPCOMPRESSBMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
func rewriteValueAMD64_OpCondSelect(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
addF(simdPackage, "Float64x2.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64)
+ addF(simdPackage, "Float32x4.Compress", opLen2(ssa.OpCompressFloat32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x8.Compress", opLen2(ssa.OpCompressFloat32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x16.Compress", opLen2(ssa.OpCompressFloat32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x2.Compress", opLen2(ssa.OpCompressFloat64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x4.Compress", opLen2(ssa.OpCompressFloat64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x8.Compress", opLen2(ssa.OpCompressFloat64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Int8x16.Compress", opLen2(ssa.OpCompressInt8x16, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Int8x32.Compress", opLen2(ssa.OpCompressInt8x32, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Int8x64.Compress", opLen2(ssa.OpCompressInt8x64, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Int16x8.Compress", opLen2(ssa.OpCompressInt16x8, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Int16x16.Compress", opLen2(ssa.OpCompressInt16x16, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Int16x32.Compress", opLen2(ssa.OpCompressInt16x32, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Int32x4.Compress", opLen2(ssa.OpCompressInt32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Int32x8.Compress", opLen2(ssa.OpCompressInt32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Int32x16.Compress", opLen2(ssa.OpCompressInt32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Int64x2.Compress", opLen2(ssa.OpCompressInt64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Int64x4.Compress", opLen2(ssa.OpCompressInt64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Int64x8.Compress", opLen2(ssa.OpCompressInt64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Uint8x16.Compress", opLen2(ssa.OpCompressUint8x16, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Uint8x32.Compress", opLen2(ssa.OpCompressUint8x32, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Uint8x64.Compress", opLen2(ssa.OpCompressUint8x64, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Uint16x8.Compress", opLen2(ssa.OpCompressUint16x8, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Uint16x16.Compress", opLen2(ssa.OpCompressUint16x16, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Uint16x32.Compress", opLen2(ssa.OpCompressUint16x32, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Uint32x4.Compress", opLen2(ssa.OpCompressUint32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Uint32x8.Compress", opLen2(ssa.OpCompressUint32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Uint32x16.Compress", opLen2(ssa.OpCompressUint32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Uint64x2.Compress", opLen2(ssa.OpCompressUint64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Uint64x4.Compress", opLen2(ssa.OpCompressUint64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Uint64x8.Compress", opLen2(ssa.OpCompressUint64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x8) CeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8
+/* Compress */
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
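+//
+// For example (see TestCompress in this CL), compressing {1, 2, 3, 4}
+// with a mask selecting elements 1 and 3 yields {2, 4, 0, 0}.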
+//
+// Asm: VCOMPRESSPS, CPU Feature: AVX512F
+func (x Float32x4) Compress(mask Mask32x4) Float32x4
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VCOMPRESSPS, CPU Feature: AVX512F
+func (x Float32x8) Compress(mask Mask32x8) Float32x8
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VCOMPRESSPS, CPU Feature: AVX512F
+func (x Float32x16) Compress(mask Mask32x16) Float32x16
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VCOMPRESSPD, CPU Feature: AVX512F
+func (x Float64x2) Compress(mask Mask64x2) Float64x2
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VCOMPRESSPD, CPU Feature: AVX512F
+func (x Float64x4) Compress(mask Mask64x4) Float64x4
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VCOMPRESSPD, CPU Feature: AVX512F
+func (x Float64x8) Compress(mask Mask64x8) Float64x8
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Int8x16) Compress(mask Mask8x16) Int8x16
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Int8x32) Compress(mask Mask8x32) Int8x32
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Int8x64) Compress(mask Mask8x64) Int8x64
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Int16x8) Compress(mask Mask16x8) Int16x8
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Int16x16) Compress(mask Mask16x16) Int16x16
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Int16x32) Compress(mask Mask16x32) Int16x32
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Int32x4) Compress(mask Mask32x4) Int32x4
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Int32x8) Compress(mask Mask32x8) Int32x8
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Int32x16) Compress(mask Mask32x16) Int32x16
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Int64x2) Compress(mask Mask64x2) Int64x2
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Int64x4) Compress(mask Mask64x4) Int64x4
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Int64x8) Compress(mask Mask64x8) Int64x8
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Uint8x16) Compress(mask Mask8x16) Uint8x16
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Uint8x32) Compress(mask Mask8x32) Uint8x32
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2
+func (x Uint8x64) Compress(mask Mask8x64) Uint8x64
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Uint16x8) Compress(mask Mask16x8) Uint16x8
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Uint16x16) Compress(mask Mask16x16) Uint16x16
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2
+func (x Uint16x32) Compress(mask Mask16x32) Uint16x32
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Uint32x4) Compress(mask Mask32x4) Uint32x4
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Uint32x8) Compress(mask Mask32x8) Uint32x8
+
+// Compress performs a compression on vector x, selecting the elements
+// indicated by mask and packing them into the lower-indexed elements of the result.
+//
+// Asm: VPCOMPRESSD, CPU Feature: AVX512F
+func (x Uint32x16) Compress(mask Mask32x16) Uint32x16
+
+// Compress performs a compression on vector x using mask, packing the
+// elements selected by mask into the lower-indexed elements and zeroing the rest.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Uint64x2) Compress(mask Mask64x2) Uint64x2
+
+// Compress performs a compression on vector x using mask, packing the
+// elements selected by mask into the lower-indexed elements and zeroing the rest.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Uint64x4) Compress(mask Mask64x4) Uint64x4
+
+// Compress performs a compression on vector x using mask, packing the
+// elements selected by mask into the lower-indexed elements and zeroing the rest.
+//
+// Asm: VPCOMPRESSQ, CPU Feature: AVX512F
+func (x Uint64x8) Compress(mask Mask64x8) Uint64x8
+
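+// For illustration only (a hypothetical usage sketch, not part of the
+// generated API): given x = {10, 20, 30, 40} and a mask whose lanes 1 and 3
+// are set, x.Compress(mask) packs the selected elements downward:
+//
+//	x := simd.LoadInt32x4Slice([]int32{10, 20, 30, 40})
+//	m := simd.LoadInt32x4Slice([]int32{0, -1, 0, -1}).AsMask32x4()
+//	out := make([]int32, 4)
+//	x.Compress(m).StoreSlice(out) // out == [20, 40, 0, 0]
+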
/* DiffWithCeilWithPrecision */
// DiffWithCeilWithPrecision computes the difference after ceiling with the specified precision.
}
}
+func TestCompress(t *testing.T) {
+ if !simd.HasAVX512() {
+ t.Skip("Test requires HasAVX512, not available on this hardware")
+ }
+ testInt32x4Mask32x4Int32x4(t, []int32{1, 2, 3, 4},
+ []int32{0, -1, 0, -1},
+ []int32{2, 4, 0, 0}, "Compress")
+}
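+
+// TestCompressFloat64x2 is a hand-written companion to the generated test
+// above (an illustrative sketch, not generator output): it exercises the
+// floating-point path, with the same packing rule, via the generated helper.
+func TestCompressFloat64x2(t *testing.T) {
+ if !simd.HasAVX512() {
+ t.Skip("Test requires HasAVX512, not available on this hardware")
+ }
+ testFloat64x2Mask64x2Float64x2(t, []float64{1.5, 2.5},
+ []int64{-1, 0},
+ []float64{1.5, 0}, "Compress")
+}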
+
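+// compressRef is a scalar reference model (hypothetical, for illustration
+// only) of the semantics Compress implements: elements whose mask lane is
+// nonzero are packed to the front of the result, and the remaining lanes
+// stay zero. compressRef([1,2,3,4], [0,-1,0,-1]) yields [2,4,0,0], matching
+// the expected values in TestCompress above.
+func compressRef(x, mask []int32) []int32 {
+ out := make([]int32, len(x))
+ j := 0
+ for i := range x {
+ if mask[i] != 0 {
+ out[j] = x[i]
+ j++
+ }
+ }
+ return out
+}
+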
// checkInt8Slices ensures that a and b are equal, up to the length of b.
// It also serves to use the slices, preventing accidental optimization.
func checkInt8Slices(t *testing.T, a, b []int8) {
}
}
+func testFloat32x4Mask32x4Float32x4(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) {
+ t.Helper()
+ var gotv simd.Float32x4
+ got := make([]float32, len(want))
+ vec0 := simd.LoadFloat32x4Slice(v0)
+ vec1 := simd.LoadInt32x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x4())
+
+ default:
+ t.Errorf("Unknown method: Float32x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat32x4MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x4
}
}
+func testFloat32x8Mask32x8Float32x8(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) {
+ t.Helper()
+ var gotv simd.Float32x8
+ got := make([]float32, len(want))
+ vec0 := simd.LoadFloat32x8Slice(v0)
+ vec1 := simd.LoadInt32x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x8())
+
+ default:
+ t.Errorf("Unknown method: Float32x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat32x8MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x8
}
}
+func testFloat32x16Mask32x16Float32x16(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) {
+ t.Helper()
+ var gotv simd.Float32x16
+ got := make([]float32, len(want))
+ vec0 := simd.LoadFloat32x16Slice(v0)
+ vec1 := simd.LoadInt32x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x16())
+
+ default:
+ t.Errorf("Unknown method: Float32x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x16
}
}
+func testFloat64x2Mask64x2Float64x2(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) {
+ t.Helper()
+ var gotv simd.Float64x2
+ got := make([]float64, len(want))
+ vec0 := simd.LoadFloat64x2Slice(v0)
+ vec1 := simd.LoadInt64x2Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x2())
+
+ default:
+ t.Errorf("Unknown method: Float64x2.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat64x2MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x2
}
}
+func testFloat64x4Mask64x4Float64x4(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) {
+ t.Helper()
+ var gotv simd.Float64x4
+ got := make([]float64, len(want))
+ vec0 := simd.LoadFloat64x4Slice(v0)
+ vec1 := simd.LoadInt64x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x4())
+
+ default:
+ t.Errorf("Unknown method: Float64x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat64x4MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x4
}
}
+func testFloat64x8Mask64x8Float64x8(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) {
+ t.Helper()
+ var gotv simd.Float64x8
+ got := make([]float64, len(want))
+ vec0 := simd.LoadFloat64x8Slice(v0)
+ vec1 := simd.LoadInt64x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x8())
+
+ default:
+ t.Errorf("Unknown method: Float64x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testFloat64x8MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x8
}
}
+func testInt8x16Mask8x16Int8x16(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) {
+ t.Helper()
+ var gotv simd.Int8x16
+ got := make([]int8, len(want))
+ vec0 := simd.LoadInt8x16Slice(v0)
+ vec1 := simd.LoadInt8x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x16())
+
+ default:
+ t.Errorf("Unknown method: Int8x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x16
}
}
+func testInt8x32Mask8x32Int8x32(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) {
+ t.Helper()
+ var gotv simd.Int8x32
+ got := make([]int8, len(want))
+ vec0 := simd.LoadInt8x32Slice(v0)
+ vec1 := simd.LoadInt8x32Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x32())
+
+ default:
+ t.Errorf("Unknown method: Int8x32.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x32
}
}
+func testInt8x64Mask8x64Int8x64(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) {
+ t.Helper()
+ var gotv simd.Int8x64
+ got := make([]int8, len(want))
+ vec0 := simd.LoadInt8x64Slice(v0)
+ vec1 := simd.LoadInt8x64Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x64())
+
+ default:
+ t.Errorf("Unknown method: Int8x64.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x64
}
}
+func testInt16x8Mask16x8Int16x8(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) {
+ t.Helper()
+ var gotv simd.Int16x8
+ got := make([]int16, len(want))
+ vec0 := simd.LoadInt16x8Slice(v0)
+ vec1 := simd.LoadInt16x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x8())
+
+ default:
+ t.Errorf("Unknown method: Int16x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x8
}
}
+func testInt16x16Mask16x16Int16x16(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) {
+ t.Helper()
+ var gotv simd.Int16x16
+ got := make([]int16, len(want))
+ vec0 := simd.LoadInt16x16Slice(v0)
+ vec1 := simd.LoadInt16x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x16())
+
+ default:
+ t.Errorf("Unknown method: Int16x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x16
}
}
+func testInt16x32Mask16x32Int16x32(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) {
+ t.Helper()
+ var gotv simd.Int16x32
+ got := make([]int16, len(want))
+ vec0 := simd.LoadInt16x32Slice(v0)
+ vec1 := simd.LoadInt16x32Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x32())
+
+ default:
+ t.Errorf("Unknown method: Int16x32.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x32
}
}
+func testInt32x4Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) {
+ t.Helper()
+ var gotv simd.Int32x4
+ got := make([]int32, len(want))
+ vec0 := simd.LoadInt32x4Slice(v0)
+ vec1 := simd.LoadInt32x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x4())
+
+ default:
+ t.Errorf("Unknown method: Int32x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x4
}
}
+func testInt32x8Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) {
+ t.Helper()
+ var gotv simd.Int32x8
+ got := make([]int32, len(want))
+ vec0 := simd.LoadInt32x8Slice(v0)
+ vec1 := simd.LoadInt32x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x8())
+
+ default:
+ t.Errorf("Unknown method: Int32x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x8
}
}
-func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) {
+func testInt32x16Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x16
got := make([]int32, len(want))
vec0 := simd.LoadInt32x16Slice(v0)
vec1 := simd.LoadInt32x16Slice(v1)
- vec2 := simd.LoadInt32x16Slice(v2)
switch which {
- case "EqualMasked":
- gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16()
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x16())
+
+ default:
+ t.Errorf("Unknown method: Int32x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
+func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) {
+ t.Helper()
+ var gotv simd.Int32x16
+ got := make([]int32, len(want))
+ vec0 := simd.LoadInt32x16Slice(v0)
+ vec1 := simd.LoadInt32x16Slice(v1)
+ vec2 := simd.LoadInt32x16Slice(v2)
+ switch which {
+ case "EqualMasked":
+ gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16()
case "GreaterEqualMasked":
gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16()
case "GreaterMasked":
}
}
+func testInt64x2Mask64x2Int64x2(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) {
+ t.Helper()
+ var gotv simd.Int64x2
+ got := make([]int64, len(want))
+ vec0 := simd.LoadInt64x2Slice(v0)
+ vec1 := simd.LoadInt64x2Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x2())
+
+ default:
+ t.Errorf("Unknown method: Int64x2.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x2
}
}
+func testInt64x4Mask64x4Int64x4(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) {
+ t.Helper()
+ var gotv simd.Int64x4
+ got := make([]int64, len(want))
+ vec0 := simd.LoadInt64x4Slice(v0)
+ vec1 := simd.LoadInt64x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x4())
+
+ default:
+ t.Errorf("Unknown method: Int64x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x4
}
}
+func testInt64x8Mask64x8Int64x8(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) {
+ t.Helper()
+ var gotv simd.Int64x8
+ got := make([]int64, len(want))
+ vec0 := simd.LoadInt64x8Slice(v0)
+ vec1 := simd.LoadInt64x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x8())
+
+ default:
+ t.Errorf("Unknown method: Int64x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x8
}
}
+func testUint8x16Mask8x16Uint8x16(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) {
+ t.Helper()
+ var gotv simd.Uint8x16
+ got := make([]uint8, len(want))
+ vec0 := simd.LoadUint8x16Slice(v0)
+ vec1 := simd.LoadInt8x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x16())
+
+ default:
+ t.Errorf("Unknown method: Uint8x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x16
}
}
+func testUint8x32Mask8x32Uint8x32(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) {
+ t.Helper()
+ var gotv simd.Uint8x32
+ got := make([]uint8, len(want))
+ vec0 := simd.LoadUint8x32Slice(v0)
+ vec1 := simd.LoadInt8x32Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x32())
+
+ default:
+ t.Errorf("Unknown method: Uint8x32.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x32
}
}
+func testUint8x64Mask8x64Uint8x64(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) {
+ t.Helper()
+ var gotv simd.Uint8x64
+ got := make([]uint8, len(want))
+ vec0 := simd.LoadUint8x64Slice(v0)
+ vec1 := simd.LoadInt8x64Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask8x64())
+
+ default:
+ t.Errorf("Unknown method: Uint8x64.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) {
t.Helper()
var gotv simd.Int8x64
}
}
+func testUint16x8Mask16x8Uint16x8(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) {
+ t.Helper()
+ var gotv simd.Uint16x8
+ got := make([]uint16, len(want))
+ vec0 := simd.LoadUint16x8Slice(v0)
+ vec1 := simd.LoadInt16x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x8())
+
+ default:
+ t.Errorf("Unknown method: Uint16x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x8
}
}
+func testUint16x16Mask16x16Uint16x16(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) {
+ t.Helper()
+ var gotv simd.Uint16x16
+ got := make([]uint16, len(want))
+ vec0 := simd.LoadUint16x16Slice(v0)
+ vec1 := simd.LoadInt16x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x16())
+
+ default:
+ t.Errorf("Unknown method: Uint16x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x16
}
}
+func testUint16x32Mask16x32Uint16x32(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) {
+ t.Helper()
+ var gotv simd.Uint16x32
+ got := make([]uint16, len(want))
+ vec0 := simd.LoadUint16x32Slice(v0)
+ vec1 := simd.LoadInt16x32Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask16x32())
+
+ default:
+ t.Errorf("Unknown method: Uint16x32.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) {
t.Helper()
var gotv simd.Int16x32
}
}
+func testUint32x4Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) {
+ t.Helper()
+ var gotv simd.Uint32x4
+ got := make([]uint32, len(want))
+ vec0 := simd.LoadUint32x4Slice(v0)
+ vec1 := simd.LoadInt32x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x4())
+
+ default:
+ t.Errorf("Unknown method: Uint32x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x4
}
}
+func testUint32x8Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) {
+ t.Helper()
+ var gotv simd.Uint32x8
+ got := make([]uint32, len(want))
+ vec0 := simd.LoadUint32x8Slice(v0)
+ vec1 := simd.LoadInt32x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x8())
+
+ default:
+ t.Errorf("Unknown method: Uint32x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x8
}
}
+func testUint32x16Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) {
+ t.Helper()
+ var gotv simd.Uint32x16
+ got := make([]uint32, len(want))
+ vec0 := simd.LoadUint32x16Slice(v0)
+ vec1 := simd.LoadInt32x16Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask32x16())
+
+ default:
+ t.Errorf("Unknown method: Uint32x16.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) {
t.Helper()
var gotv simd.Int32x16
}
}
+func testUint64x2Mask64x2Uint64x2(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) {
+ t.Helper()
+ var gotv simd.Uint64x2
+ got := make([]uint64, len(want))
+ vec0 := simd.LoadUint64x2Slice(v0)
+ vec1 := simd.LoadInt64x2Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x2())
+
+ default:
+ t.Errorf("Unknown method: Uint64x2.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x2
}
}
+func testUint64x4Mask64x4Uint64x4(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) {
+ t.Helper()
+ var gotv simd.Uint64x4
+ got := make([]uint64, len(want))
+ vec0 := simd.LoadUint64x4Slice(v0)
+ vec1 := simd.LoadInt64x4Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x4())
+
+ default:
+ t.Errorf("Unknown method: Uint64x4.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x4
}
}
+func testUint64x8Mask64x8Uint64x8(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) {
+ t.Helper()
+ var gotv simd.Uint64x8
+ got := make([]uint64, len(want))
+ vec0 := simd.LoadUint64x8Slice(v0)
+ vec1 := simd.LoadInt64x8Slice(v1)
+ switch which {
+ case "Compress":
+ gotv = vec0.Compress(vec1.AsMask64x8())
+
+ default:
+ t.Errorf("Unknown method: Uint64x8.%s", which)
+ }
+ gotv.StoreSlice(got)
+ for i := range len(want) {
+ if got[i] != want[i] {
+ t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i])
+ }
+ }
+}
+
func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) {
t.Helper()
var gotv simd.Int64x8