This CL was generated by CL 686555.
Change-Id: I0efb86a919692cd97c1c5b6365d77361a30bf7cf
Reviewed-on: https://go-review.googlesource.com/c/go/+/686496
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: David Chase <drchase@google.com>
ssa.OpAMD64VADDSUBPS256,
ssa.OpAMD64VADDSUBPD128,
ssa.OpAMD64VADDSUBPD256,
- ssa.OpAMD64VANDPS128,
- ssa.OpAMD64VANDPS256,
- ssa.OpAMD64VANDPS512,
- ssa.OpAMD64VANDPD128,
- ssa.OpAMD64VANDPD256,
- ssa.OpAMD64VANDPD512,
ssa.OpAMD64VPAND128,
ssa.OpAMD64VPAND256,
ssa.OpAMD64VPANDD512,
ssa.OpAMD64VPANDQ512,
- ssa.OpAMD64VANDNPS128,
- ssa.OpAMD64VANDNPS256,
- ssa.OpAMD64VANDNPS512,
- ssa.OpAMD64VANDNPD128,
- ssa.OpAMD64VANDNPD256,
- ssa.OpAMD64VANDNPD512,
ssa.OpAMD64VPANDN128,
ssa.OpAMD64VPANDN256,
ssa.OpAMD64VPANDND512,
ssa.OpAMD64VPMULLQ128,
ssa.OpAMD64VPMULLQ256,
ssa.OpAMD64VPMULLQ512,
- ssa.OpAMD64VORPS128,
- ssa.OpAMD64VORPS256,
- ssa.OpAMD64VORPS512,
- ssa.OpAMD64VORPD128,
- ssa.OpAMD64VORPD256,
- ssa.OpAMD64VORPD512,
ssa.OpAMD64VPOR128,
ssa.OpAMD64VPOR256,
ssa.OpAMD64VPORD512,
ssa.OpAMD64VPSUBQ128,
ssa.OpAMD64VPSUBQ256,
ssa.OpAMD64VPSUBQ512,
- ssa.OpAMD64VXORPS128,
- ssa.OpAMD64VXORPS256,
- ssa.OpAMD64VXORPS512,
- ssa.OpAMD64VXORPD128,
- ssa.OpAMD64VXORPD256,
- ssa.OpAMD64VXORPD512,
ssa.OpAMD64VPXOR128,
ssa.OpAMD64VPXOR256,
ssa.OpAMD64VPXORD512,
ssa.OpAMD64VPADDQMasked128,
ssa.OpAMD64VPADDQMasked256,
ssa.OpAMD64VPADDQMasked512,
- ssa.OpAMD64VANDPSMasked128,
- ssa.OpAMD64VANDPSMasked256,
- ssa.OpAMD64VANDPSMasked512,
- ssa.OpAMD64VANDPDMasked128,
- ssa.OpAMD64VANDPDMasked256,
- ssa.OpAMD64VANDPDMasked512,
ssa.OpAMD64VPANDDMasked128,
ssa.OpAMD64VPANDDMasked256,
ssa.OpAMD64VPANDDMasked512,
ssa.OpAMD64VPANDQMasked128,
ssa.OpAMD64VPANDQMasked256,
ssa.OpAMD64VPANDQMasked512,
- ssa.OpAMD64VANDNPSMasked128,
- ssa.OpAMD64VANDNPSMasked256,
- ssa.OpAMD64VANDNPSMasked512,
- ssa.OpAMD64VANDNPDMasked128,
- ssa.OpAMD64VANDNPDMasked256,
- ssa.OpAMD64VANDNPDMasked512,
ssa.OpAMD64VPANDNDMasked128,
ssa.OpAMD64VPANDNDMasked256,
ssa.OpAMD64VPANDNDMasked512,
ssa.OpAMD64VPMULLQMasked128,
ssa.OpAMD64VPMULLQMasked256,
ssa.OpAMD64VPMULLQMasked512,
- ssa.OpAMD64VORPSMasked128,
- ssa.OpAMD64VORPSMasked256,
- ssa.OpAMD64VORPSMasked512,
- ssa.OpAMD64VORPDMasked128,
- ssa.OpAMD64VORPDMasked256,
- ssa.OpAMD64VORPDMasked512,
ssa.OpAMD64VPORDMasked128,
ssa.OpAMD64VPORDMasked256,
ssa.OpAMD64VPORDMasked512,
ssa.OpAMD64VPSUBQMasked128,
ssa.OpAMD64VPSUBQMasked256,
ssa.OpAMD64VPSUBQMasked512,
- ssa.OpAMD64VXORPSMasked128,
- ssa.OpAMD64VXORPSMasked256,
- ssa.OpAMD64VXORPSMasked512,
- ssa.OpAMD64VXORPDMasked128,
- ssa.OpAMD64VXORPDMasked256,
- ssa.OpAMD64VXORPDMasked512,
ssa.OpAMD64VPXORDMasked128,
ssa.OpAMD64VPXORDMasked256,
ssa.OpAMD64VPXORDMasked512,
ssa.OpAMD64VPADDQMasked128,
ssa.OpAMD64VPADDQMasked256,
ssa.OpAMD64VPADDQMasked512,
- ssa.OpAMD64VANDPSMasked128,
- ssa.OpAMD64VANDPSMasked256,
- ssa.OpAMD64VANDPSMasked512,
- ssa.OpAMD64VANDPDMasked128,
- ssa.OpAMD64VANDPDMasked256,
- ssa.OpAMD64VANDPDMasked512,
ssa.OpAMD64VPANDDMasked128,
ssa.OpAMD64VPANDDMasked256,
ssa.OpAMD64VPANDDMasked512,
ssa.OpAMD64VPANDQMasked128,
ssa.OpAMD64VPANDQMasked256,
ssa.OpAMD64VPANDQMasked512,
- ssa.OpAMD64VANDNPSMasked128,
- ssa.OpAMD64VANDNPSMasked256,
- ssa.OpAMD64VANDNPSMasked512,
- ssa.OpAMD64VANDNPDMasked128,
- ssa.OpAMD64VANDNPDMasked256,
- ssa.OpAMD64VANDNPDMasked512,
ssa.OpAMD64VPANDNDMasked128,
ssa.OpAMD64VPANDNDMasked256,
ssa.OpAMD64VPANDNDMasked512,
ssa.OpAMD64VPMULLQMasked128,
ssa.OpAMD64VPMULLQMasked256,
ssa.OpAMD64VPMULLQMasked512,
- ssa.OpAMD64VORPSMasked128,
- ssa.OpAMD64VORPSMasked256,
- ssa.OpAMD64VORPSMasked512,
- ssa.OpAMD64VORPDMasked128,
- ssa.OpAMD64VORPDMasked256,
- ssa.OpAMD64VORPDMasked512,
ssa.OpAMD64VPORDMasked128,
ssa.OpAMD64VPORDMasked256,
ssa.OpAMD64VPORDMasked512,
ssa.OpAMD64VPDPBUSDMasked128,
ssa.OpAMD64VPDPBUSDMasked256,
ssa.OpAMD64VPDPBUSDMasked512,
- ssa.OpAMD64VXORPSMasked128,
- ssa.OpAMD64VXORPSMasked256,
- ssa.OpAMD64VXORPSMasked512,
- ssa.OpAMD64VXORPDMasked128,
- ssa.OpAMD64VXORPDMasked256,
- ssa.OpAMD64VXORPDMasked512,
ssa.OpAMD64VPXORDMasked128,
ssa.OpAMD64VPXORDMasked256,
ssa.OpAMD64VPXORDMasked512,
(AddSubFloat32x8 ...) => (VADDSUBPS256 ...)
(AddSubFloat64x2 ...) => (VADDSUBPD128 ...)
(AddSubFloat64x4 ...) => (VADDSUBPD256 ...)
-(AndFloat32x4 ...) => (VANDPS128 ...)
-(AndFloat32x8 ...) => (VANDPS256 ...)
-(AndFloat32x16 ...) => (VANDPS512 ...)
-(AndFloat64x2 ...) => (VANDPD128 ...)
-(AndFloat64x4 ...) => (VANDPD256 ...)
-(AndFloat64x8 ...) => (VANDPD512 ...)
(AndInt8x16 ...) => (VPAND128 ...)
(AndInt8x32 ...) => (VPAND256 ...)
(AndInt16x8 ...) => (VPAND128 ...)
(AndUint64x2 ...) => (VPAND128 ...)
(AndUint64x4 ...) => (VPAND256 ...)
(AndUint64x8 ...) => (VPANDQ512 ...)
-(AndNotFloat32x4 ...) => (VANDNPS128 ...)
-(AndNotFloat32x8 ...) => (VANDNPS256 ...)
-(AndNotFloat32x16 ...) => (VANDNPS512 ...)
-(AndNotFloat64x2 ...) => (VANDNPD128 ...)
-(AndNotFloat64x4 ...) => (VANDNPD256 ...)
-(AndNotFloat64x8 ...) => (VANDNPD512 ...)
(AndNotInt8x16 ...) => (VPANDN128 ...)
(AndNotInt8x32 ...) => (VPANDN256 ...)
(AndNotInt16x8 ...) => (VPANDN128 ...)
(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
(MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
(MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
-(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedOrInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedOrInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
(MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
(NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y))
(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y))
(NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y))
-(OrFloat32x4 ...) => (VORPS128 ...)
-(OrFloat32x8 ...) => (VORPS256 ...)
-(OrFloat32x16 ...) => (VORPS512 ...)
-(OrFloat64x2 ...) => (VORPD128 ...)
-(OrFloat64x4 ...) => (VORPD256 ...)
-(OrFloat64x8 ...) => (VORPD512 ...)
(OrInt8x16 ...) => (VPOR128 ...)
(OrInt8x32 ...) => (VPOR256 ...)
(OrInt16x8 ...) => (VPOR128 ...)
(UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...)
(UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...)
(UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...)
-(XorFloat32x4 ...) => (VXORPS128 ...)
-(XorFloat32x8 ...) => (VXORPS256 ...)
-(XorFloat32x16 ...) => (VXORPS512 ...)
-(XorFloat64x2 ...) => (VXORPD128 ...)
-(XorFloat64x4 ...) => (VXORPD256 ...)
-(XorFloat64x8 ...) => (VXORPD512 ...)
(XorInt8x16 ...) => (VPXOR128 ...)
(XorInt8x32 ...) => (VPXOR256 ...)
(XorInt16x8 ...) => (VPXOR128 ...)
func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, fpgp regInfo) []opData {
return []opData{
{name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VANDNPS512", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VADDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VANDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VANDNPSMasked512", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRCP14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPSMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VORPSMasked512", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VXORPSMasked512", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VORPS512", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VSUBPS512", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VXORPS512", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VANDPS128", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VANDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VANDNPSMasked128", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPSMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VORPSMasked128", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VSQRTPSMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VXORPSMasked128", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VSCALEFPS128", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VORPS128", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VSUBPS128", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VXORPS128", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VANDPS256", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VANDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VANDNPSMasked256", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPSMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VSCALEFPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VORPSMasked256", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VXORPSMasked256", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VSCALEFPS256", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VORPS256", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VHSUBPS256", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VSUBPS256", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VXORPS256", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VANDPD128", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VANDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VANDNPDMasked128", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPDMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VORPDMasked128", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VXORPDMasked128", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VSCALEFPD128", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VORPD128", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VSUBPD128", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VXORPD128", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VANDPD256", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VANDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VANDNPDMasked256", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPDMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VORPDMasked256", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VXORPDMasked256", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VSCALEFPD256", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VORPD256", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VSUBPD256", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VXORPD256", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VANDPD512", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VANDNPD512", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VADDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VANDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false},
- {name: "VANDNPDMasked512", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRCP14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPDMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VORPDMasked512", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VXORPDMasked512", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VORPD512", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VSUBPD512", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VXORPD512", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false},
func simdGenericOps() []opData {
return []opData{
{name: "AddFloat32x16", argLength: 2, commutative: true},
- {name: "AndFloat32x16", argLength: 2, commutative: true},
- {name: "AndNotFloat32x16", argLength: 2, commutative: false},
{name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false},
{name: "DivFloat32x16", argLength: 2, commutative: false},
{name: "LessFloat32x16", argLength: 2, commutative: false},
{name: "LessEqualFloat32x16", argLength: 2, commutative: false},
{name: "MaskedAddFloat32x16", argLength: 3, commutative: true},
- {name: "MaskedAndFloat32x16", argLength: 3, commutative: true},
- {name: "MaskedAndNotFloat32x16", argLength: 3, commutative: false},
{name: "MaskedApproximateReciprocalFloat32x16", argLength: 2, commutative: false},
{name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false},
{name: "MaskedDivFloat32x16", argLength: 3, commutative: false},
{name: "MaskedMulFloat32x16", argLength: 3, commutative: true},
{name: "MaskedMulByPowOf2Float32x16", argLength: 3, commutative: false},
{name: "MaskedNotEqualFloat32x16", argLength: 3, commutative: true},
- {name: "MaskedOrFloat32x16", argLength: 3, commutative: true},
{name: "MaskedSqrtFloat32x16", argLength: 2, commutative: false},
{name: "MaskedSubFloat32x16", argLength: 3, commutative: false},
- {name: "MaskedXorFloat32x16", argLength: 3, commutative: true},
{name: "MaxFloat32x16", argLength: 2, commutative: true},
{name: "MinFloat32x16", argLength: 2, commutative: true},
{name: "MulFloat32x16", argLength: 2, commutative: true},
{name: "MulByPowOf2Float32x16", argLength: 2, commutative: false},
{name: "NotEqualFloat32x16", argLength: 2, commutative: true},
- {name: "OrFloat32x16", argLength: 2, commutative: true},
{name: "SqrtFloat32x16", argLength: 1, commutative: false},
{name: "SubFloat32x16", argLength: 2, commutative: false},
- {name: "XorFloat32x16", argLength: 2, commutative: true},
{name: "AddFloat32x4", argLength: 2, commutative: true},
{name: "AddSubFloat32x4", argLength: 2, commutative: false},
- {name: "AndFloat32x4", argLength: 2, commutative: true},
- {name: "AndNotFloat32x4", argLength: 2, commutative: false},
{name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false},
{name: "CeilFloat32x4", argLength: 1, commutative: false},
{name: "LessFloat32x4", argLength: 2, commutative: false},
{name: "LessEqualFloat32x4", argLength: 2, commutative: false},
{name: "MaskedAddFloat32x4", argLength: 3, commutative: true},
- {name: "MaskedAndFloat32x4", argLength: 3, commutative: true},
- {name: "MaskedAndNotFloat32x4", argLength: 3, commutative: false},
{name: "MaskedApproximateReciprocalFloat32x4", argLength: 2, commutative: false},
{name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false},
{name: "MaskedDivFloat32x4", argLength: 3, commutative: false},
{name: "MaskedMulFloat32x4", argLength: 3, commutative: true},
{name: "MaskedMulByPowOf2Float32x4", argLength: 3, commutative: false},
{name: "MaskedNotEqualFloat32x4", argLength: 3, commutative: true},
- {name: "MaskedOrFloat32x4", argLength: 3, commutative: true},
{name: "MaskedSqrtFloat32x4", argLength: 2, commutative: false},
{name: "MaskedSubFloat32x4", argLength: 3, commutative: false},
- {name: "MaskedXorFloat32x4", argLength: 3, commutative: true},
{name: "MaxFloat32x4", argLength: 2, commutative: true},
{name: "MinFloat32x4", argLength: 2, commutative: true},
{name: "MulFloat32x4", argLength: 2, commutative: true},
{name: "MulByPowOf2Float32x4", argLength: 2, commutative: false},
{name: "NotEqualFloat32x4", argLength: 2, commutative: true},
- {name: "OrFloat32x4", argLength: 2, commutative: true},
{name: "PairwiseAddFloat32x4", argLength: 2, commutative: false},
{name: "PairwiseSubFloat32x4", argLength: 2, commutative: false},
{name: "RoundFloat32x4", argLength: 1, commutative: false},
{name: "SqrtFloat32x4", argLength: 1, commutative: false},
{name: "SubFloat32x4", argLength: 2, commutative: false},
{name: "TruncFloat32x4", argLength: 1, commutative: false},
- {name: "XorFloat32x4", argLength: 2, commutative: true},
{name: "AddFloat32x8", argLength: 2, commutative: true},
{name: "AddSubFloat32x8", argLength: 2, commutative: false},
- {name: "AndFloat32x8", argLength: 2, commutative: true},
- {name: "AndNotFloat32x8", argLength: 2, commutative: false},
{name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false},
{name: "CeilFloat32x8", argLength: 1, commutative: false},
{name: "LessFloat32x8", argLength: 2, commutative: false},
{name: "LessEqualFloat32x8", argLength: 2, commutative: false},
{name: "MaskedAddFloat32x8", argLength: 3, commutative: true},
- {name: "MaskedAndFloat32x8", argLength: 3, commutative: true},
- {name: "MaskedAndNotFloat32x8", argLength: 3, commutative: false},
{name: "MaskedApproximateReciprocalFloat32x8", argLength: 2, commutative: false},
{name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false},
{name: "MaskedDivFloat32x8", argLength: 3, commutative: false},
{name: "MaskedMulFloat32x8", argLength: 3, commutative: true},
{name: "MaskedMulByPowOf2Float32x8", argLength: 3, commutative: false},
{name: "MaskedNotEqualFloat32x8", argLength: 3, commutative: true},
- {name: "MaskedOrFloat32x8", argLength: 3, commutative: true},
{name: "MaskedSqrtFloat32x8", argLength: 2, commutative: false},
{name: "MaskedSubFloat32x8", argLength: 3, commutative: false},
- {name: "MaskedXorFloat32x8", argLength: 3, commutative: true},
{name: "MaxFloat32x8", argLength: 2, commutative: true},
{name: "MinFloat32x8", argLength: 2, commutative: true},
{name: "MulFloat32x8", argLength: 2, commutative: true},
{name: "MulByPowOf2Float32x8", argLength: 2, commutative: false},
{name: "NotEqualFloat32x8", argLength: 2, commutative: true},
- {name: "OrFloat32x8", argLength: 2, commutative: true},
{name: "PairwiseAddFloat32x8", argLength: 2, commutative: false},
{name: "PairwiseSubFloat32x8", argLength: 2, commutative: false},
{name: "RoundFloat32x8", argLength: 1, commutative: false},
{name: "SqrtFloat32x8", argLength: 1, commutative: false},
{name: "SubFloat32x8", argLength: 2, commutative: false},
{name: "TruncFloat32x8", argLength: 1, commutative: false},
- {name: "XorFloat32x8", argLength: 2, commutative: true},
{name: "AddFloat64x2", argLength: 2, commutative: true},
{name: "AddSubFloat64x2", argLength: 2, commutative: false},
- {name: "AndFloat64x2", argLength: 2, commutative: true},
- {name: "AndNotFloat64x2", argLength: 2, commutative: false},
{name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false},
{name: "CeilFloat64x2", argLength: 1, commutative: false},
{name: "LessFloat64x2", argLength: 2, commutative: false},
{name: "LessEqualFloat64x2", argLength: 2, commutative: false},
{name: "MaskedAddFloat64x2", argLength: 3, commutative: true},
- {name: "MaskedAndFloat64x2", argLength: 3, commutative: true},
- {name: "MaskedAndNotFloat64x2", argLength: 3, commutative: false},
{name: "MaskedApproximateReciprocalFloat64x2", argLength: 2, commutative: false},
{name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false},
{name: "MaskedDivFloat64x2", argLength: 3, commutative: false},
{name: "MaskedMulFloat64x2", argLength: 3, commutative: true},
{name: "MaskedMulByPowOf2Float64x2", argLength: 3, commutative: false},
{name: "MaskedNotEqualFloat64x2", argLength: 3, commutative: true},
- {name: "MaskedOrFloat64x2", argLength: 3, commutative: true},
{name: "MaskedSqrtFloat64x2", argLength: 2, commutative: false},
{name: "MaskedSubFloat64x2", argLength: 3, commutative: false},
- {name: "MaskedXorFloat64x2", argLength: 3, commutative: true},
{name: "MaxFloat64x2", argLength: 2, commutative: true},
{name: "MinFloat64x2", argLength: 2, commutative: true},
{name: "MulFloat64x2", argLength: 2, commutative: true},
{name: "MulByPowOf2Float64x2", argLength: 2, commutative: false},
{name: "NotEqualFloat64x2", argLength: 2, commutative: true},
- {name: "OrFloat64x2", argLength: 2, commutative: true},
{name: "PairwiseAddFloat64x2", argLength: 2, commutative: false},
{name: "PairwiseSubFloat64x2", argLength: 2, commutative: false},
{name: "RoundFloat64x2", argLength: 1, commutative: false},
{name: "SqrtFloat64x2", argLength: 1, commutative: false},
{name: "SubFloat64x2", argLength: 2, commutative: false},
{name: "TruncFloat64x2", argLength: 1, commutative: false},
- {name: "XorFloat64x2", argLength: 2, commutative: true},
{name: "AddFloat64x4", argLength: 2, commutative: true},
{name: "AddSubFloat64x4", argLength: 2, commutative: false},
- {name: "AndFloat64x4", argLength: 2, commutative: true},
- {name: "AndNotFloat64x4", argLength: 2, commutative: false},
{name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false},
{name: "CeilFloat64x4", argLength: 1, commutative: false},
{name: "LessFloat64x4", argLength: 2, commutative: false},
{name: "LessEqualFloat64x4", argLength: 2, commutative: false},
{name: "MaskedAddFloat64x4", argLength: 3, commutative: true},
- {name: "MaskedAndFloat64x4", argLength: 3, commutative: true},
- {name: "MaskedAndNotFloat64x4", argLength: 3, commutative: false},
{name: "MaskedApproximateReciprocalFloat64x4", argLength: 2, commutative: false},
{name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false},
{name: "MaskedDivFloat64x4", argLength: 3, commutative: false},
{name: "MaskedMulFloat64x4", argLength: 3, commutative: true},
{name: "MaskedMulByPowOf2Float64x4", argLength: 3, commutative: false},
{name: "MaskedNotEqualFloat64x4", argLength: 3, commutative: true},
- {name: "MaskedOrFloat64x4", argLength: 3, commutative: true},
{name: "MaskedSqrtFloat64x4", argLength: 2, commutative: false},
{name: "MaskedSubFloat64x4", argLength: 3, commutative: false},
- {name: "MaskedXorFloat64x4", argLength: 3, commutative: true},
{name: "MaxFloat64x4", argLength: 2, commutative: true},
{name: "MinFloat64x4", argLength: 2, commutative: true},
{name: "MulFloat64x4", argLength: 2, commutative: true},
{name: "MulByPowOf2Float64x4", argLength: 2, commutative: false},
{name: "NotEqualFloat64x4", argLength: 2, commutative: true},
- {name: "OrFloat64x4", argLength: 2, commutative: true},
{name: "PairwiseAddFloat64x4", argLength: 2, commutative: false},
{name: "PairwiseSubFloat64x4", argLength: 2, commutative: false},
{name: "RoundFloat64x4", argLength: 1, commutative: false},
{name: "SqrtFloat64x4", argLength: 1, commutative: false},
{name: "SubFloat64x4", argLength: 2, commutative: false},
{name: "TruncFloat64x4", argLength: 1, commutative: false},
- {name: "XorFloat64x4", argLength: 2, commutative: true},
{name: "AddFloat64x8", argLength: 2, commutative: true},
- {name: "AndFloat64x8", argLength: 2, commutative: true},
- {name: "AndNotFloat64x8", argLength: 2, commutative: false},
{name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false},
{name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false},
{name: "DivFloat64x8", argLength: 2, commutative: false},
{name: "LessFloat64x8", argLength: 2, commutative: false},
{name: "LessEqualFloat64x8", argLength: 2, commutative: false},
{name: "MaskedAddFloat64x8", argLength: 3, commutative: true},
- {name: "MaskedAndFloat64x8", argLength: 3, commutative: true},
- {name: "MaskedAndNotFloat64x8", argLength: 3, commutative: false},
{name: "MaskedApproximateReciprocalFloat64x8", argLength: 2, commutative: false},
{name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false},
{name: "MaskedDivFloat64x8", argLength: 3, commutative: false},
{name: "MaskedMulFloat64x8", argLength: 3, commutative: true},
{name: "MaskedMulByPowOf2Float64x8", argLength: 3, commutative: false},
{name: "MaskedNotEqualFloat64x8", argLength: 3, commutative: true},
- {name: "MaskedOrFloat64x8", argLength: 3, commutative: true},
{name: "MaskedSqrtFloat64x8", argLength: 2, commutative: false},
{name: "MaskedSubFloat64x8", argLength: 3, commutative: false},
- {name: "MaskedXorFloat64x8", argLength: 3, commutative: true},
{name: "MaxFloat64x8", argLength: 2, commutative: true},
{name: "MinFloat64x8", argLength: 2, commutative: true},
{name: "MulFloat64x8", argLength: 2, commutative: true},
{name: "MulByPowOf2Float64x8", argLength: 2, commutative: false},
{name: "NotEqualFloat64x8", argLength: 2, commutative: true},
- {name: "OrFloat64x8", argLength: 2, commutative: true},
{name: "SqrtFloat64x8", argLength: 1, commutative: false},
{name: "SubFloat64x8", argLength: 2, commutative: false},
- {name: "XorFloat64x8", argLength: 2, commutative: true},
{name: "AbsoluteInt16x16", argLength: 1, commutative: false},
{name: "AddInt16x16", argLength: 2, commutative: true},
{name: "AndInt16x16", argLength: 2, commutative: true},
OpAMD64Zero256
OpAMD64Zero512
OpAMD64VADDPS512
- OpAMD64VANDPS512
- OpAMD64VANDNPS512
OpAMD64VRCP14PS512
OpAMD64VRSQRT14PS512
OpAMD64VDIVPS512
OpAMD64VFMADDSUB213PS512
OpAMD64VFMSUBADD213PS512
OpAMD64VADDPSMasked512
- OpAMD64VANDPSMasked512
- OpAMD64VANDNPSMasked512
OpAMD64VRCP14PSMasked512
OpAMD64VRSQRT14PSMasked512
OpAMD64VDIVPSMasked512
OpAMD64VMINPSMasked512
OpAMD64VMULPSMasked512
OpAMD64VSCALEFPSMasked512
- OpAMD64VORPSMasked512
OpAMD64VSQRTPSMasked512
OpAMD64VSUBPSMasked512
- OpAMD64VXORPSMasked512
OpAMD64VMAXPS512
OpAMD64VMINPS512
OpAMD64VMULPS512
OpAMD64VSCALEFPS512
- OpAMD64VORPS512
OpAMD64VSQRTPS512
OpAMD64VSUBPS512
- OpAMD64VXORPS512
OpAMD64VADDPS128
OpAMD64VADDSUBPS128
- OpAMD64VANDPS128
- OpAMD64VANDNPS128
OpAMD64VRCP14PS128
OpAMD64VRSQRTPS128
OpAMD64VDIVPS128
OpAMD64VFMADDSUB213PS128
OpAMD64VFMSUBADD213PS128
OpAMD64VADDPSMasked128
- OpAMD64VANDPSMasked128
- OpAMD64VANDNPSMasked128
OpAMD64VRCP14PSMasked128
OpAMD64VRSQRT14PSMasked128
OpAMD64VDIVPSMasked128
OpAMD64VMINPSMasked128
OpAMD64VMULPSMasked128
OpAMD64VSCALEFPSMasked128
- OpAMD64VORPSMasked128
OpAMD64VSQRTPSMasked128
OpAMD64VSUBPSMasked128
- OpAMD64VXORPSMasked128
OpAMD64VMAXPS128
OpAMD64VMINPS128
OpAMD64VMULPS128
OpAMD64VSCALEFPS128
- OpAMD64VORPS128
OpAMD64VHADDPS128
OpAMD64VHSUBPS128
OpAMD64VSQRTPS128
OpAMD64VSUBPS128
- OpAMD64VXORPS128
OpAMD64VADDPS256
OpAMD64VADDSUBPS256
- OpAMD64VANDPS256
- OpAMD64VANDNPS256
OpAMD64VRCP14PS256
OpAMD64VRSQRTPS256
OpAMD64VDIVPS256
OpAMD64VFMADDSUB213PS256
OpAMD64VFMSUBADD213PS256
OpAMD64VADDPSMasked256
- OpAMD64VANDPSMasked256
- OpAMD64VANDNPSMasked256
OpAMD64VRCP14PSMasked256
OpAMD64VRSQRT14PSMasked256
OpAMD64VDIVPSMasked256
OpAMD64VMINPSMasked256
OpAMD64VMULPSMasked256
OpAMD64VSCALEFPSMasked256
- OpAMD64VORPSMasked256
OpAMD64VSQRTPSMasked256
OpAMD64VSUBPSMasked256
- OpAMD64VXORPSMasked256
OpAMD64VMAXPS256
OpAMD64VMINPS256
OpAMD64VMULPS256
OpAMD64VSCALEFPS256
- OpAMD64VORPS256
OpAMD64VHADDPS256
OpAMD64VHSUBPS256
OpAMD64VSQRTPS256
OpAMD64VSUBPS256
- OpAMD64VXORPS256
OpAMD64VADDPD128
OpAMD64VADDSUBPD128
- OpAMD64VANDPD128
- OpAMD64VANDNPD128
OpAMD64VRCP14PD128
OpAMD64VRSQRT14PD128
OpAMD64VDIVPD128
OpAMD64VFMADDSUB213PD128
OpAMD64VFMSUBADD213PD128
OpAMD64VADDPDMasked128
- OpAMD64VANDPDMasked128
- OpAMD64VANDNPDMasked128
OpAMD64VRCP14PDMasked128
OpAMD64VRSQRT14PDMasked128
OpAMD64VDIVPDMasked128
OpAMD64VMINPDMasked128
OpAMD64VMULPDMasked128
OpAMD64VSCALEFPDMasked128
- OpAMD64VORPDMasked128
OpAMD64VSQRTPDMasked128
OpAMD64VSUBPDMasked128
- OpAMD64VXORPDMasked128
OpAMD64VMAXPD128
OpAMD64VMINPD128
OpAMD64VMULPD128
OpAMD64VSCALEFPD128
- OpAMD64VORPD128
OpAMD64VHADDPD128
OpAMD64VHSUBPD128
OpAMD64VSQRTPD128
OpAMD64VSUBPD128
- OpAMD64VXORPD128
OpAMD64VADDPD256
OpAMD64VADDSUBPD256
- OpAMD64VANDPD256
- OpAMD64VANDNPD256
OpAMD64VRCP14PD256
OpAMD64VRSQRT14PD256
OpAMD64VDIVPD256
OpAMD64VFMADDSUB213PD256
OpAMD64VFMSUBADD213PD256
OpAMD64VADDPDMasked256
- OpAMD64VANDPDMasked256
- OpAMD64VANDNPDMasked256
OpAMD64VRCP14PDMasked256
OpAMD64VRSQRT14PDMasked256
OpAMD64VDIVPDMasked256
OpAMD64VMINPDMasked256
OpAMD64VMULPDMasked256
OpAMD64VSCALEFPDMasked256
- OpAMD64VORPDMasked256
OpAMD64VSQRTPDMasked256
OpAMD64VSUBPDMasked256
- OpAMD64VXORPDMasked256
OpAMD64VMAXPD256
OpAMD64VMINPD256
OpAMD64VMULPD256
OpAMD64VSCALEFPD256
- OpAMD64VORPD256
OpAMD64VHADDPD256
OpAMD64VHSUBPD256
OpAMD64VSQRTPD256
OpAMD64VSUBPD256
- OpAMD64VXORPD256
OpAMD64VADDPD512
- OpAMD64VANDPD512
- OpAMD64VANDNPD512
OpAMD64VRCP14PD512
OpAMD64VRSQRT14PD512
OpAMD64VDIVPD512
OpAMD64VFMADDSUB213PD512
OpAMD64VFMSUBADD213PD512
OpAMD64VADDPDMasked512
- OpAMD64VANDPDMasked512
- OpAMD64VANDNPDMasked512
OpAMD64VRCP14PDMasked512
OpAMD64VRSQRT14PDMasked512
OpAMD64VDIVPDMasked512
OpAMD64VMINPDMasked512
OpAMD64VMULPDMasked512
OpAMD64VSCALEFPDMasked512
- OpAMD64VORPDMasked512
OpAMD64VSQRTPDMasked512
OpAMD64VSUBPDMasked512
- OpAMD64VXORPDMasked512
OpAMD64VMAXPD512
OpAMD64VMINPD512
OpAMD64VMULPD512
OpAMD64VSCALEFPD512
- OpAMD64VORPD512
OpAMD64VSQRTPD512
OpAMD64VSUBPD512
- OpAMD64VXORPD512
OpAMD64VPABSW256
OpAMD64VPADDW256
OpAMD64VPCMPEQW256
OpAdd32x4
OpZeroSIMD
OpAddFloat32x16
- OpAndFloat32x16
- OpAndNotFloat32x16
OpApproximateReciprocalFloat32x16
OpApproximateReciprocalOfSqrtFloat32x16
OpDivFloat32x16
OpLessFloat32x16
OpLessEqualFloat32x16
OpMaskedAddFloat32x16
- OpMaskedAndFloat32x16
- OpMaskedAndNotFloat32x16
OpMaskedApproximateReciprocalFloat32x16
OpMaskedApproximateReciprocalOfSqrtFloat32x16
OpMaskedDivFloat32x16
OpMaskedMulFloat32x16
OpMaskedMulByPowOf2Float32x16
OpMaskedNotEqualFloat32x16
- OpMaskedOrFloat32x16
OpMaskedSqrtFloat32x16
OpMaskedSubFloat32x16
- OpMaskedXorFloat32x16
OpMaxFloat32x16
OpMinFloat32x16
OpMulFloat32x16
OpMulByPowOf2Float32x16
OpNotEqualFloat32x16
- OpOrFloat32x16
OpSqrtFloat32x16
OpSubFloat32x16
- OpXorFloat32x16
OpAddFloat32x4
OpAddSubFloat32x4
- OpAndFloat32x4
- OpAndNotFloat32x4
OpApproximateReciprocalFloat32x4
OpApproximateReciprocalOfSqrtFloat32x4
OpCeilFloat32x4
OpLessFloat32x4
OpLessEqualFloat32x4
OpMaskedAddFloat32x4
- OpMaskedAndFloat32x4
- OpMaskedAndNotFloat32x4
OpMaskedApproximateReciprocalFloat32x4
OpMaskedApproximateReciprocalOfSqrtFloat32x4
OpMaskedDivFloat32x4
OpMaskedMulFloat32x4
OpMaskedMulByPowOf2Float32x4
OpMaskedNotEqualFloat32x4
- OpMaskedOrFloat32x4
OpMaskedSqrtFloat32x4
OpMaskedSubFloat32x4
- OpMaskedXorFloat32x4
OpMaxFloat32x4
OpMinFloat32x4
OpMulFloat32x4
OpMulByPowOf2Float32x4
OpNotEqualFloat32x4
- OpOrFloat32x4
OpPairwiseAddFloat32x4
OpPairwiseSubFloat32x4
OpRoundFloat32x4
OpSqrtFloat32x4
OpSubFloat32x4
OpTruncFloat32x4
- OpXorFloat32x4
OpAddFloat32x8
OpAddSubFloat32x8
- OpAndFloat32x8
- OpAndNotFloat32x8
OpApproximateReciprocalFloat32x8
OpApproximateReciprocalOfSqrtFloat32x8
OpCeilFloat32x8
OpLessFloat32x8
OpLessEqualFloat32x8
OpMaskedAddFloat32x8
- OpMaskedAndFloat32x8
- OpMaskedAndNotFloat32x8
OpMaskedApproximateReciprocalFloat32x8
OpMaskedApproximateReciprocalOfSqrtFloat32x8
OpMaskedDivFloat32x8
OpMaskedMulFloat32x8
OpMaskedMulByPowOf2Float32x8
OpMaskedNotEqualFloat32x8
- OpMaskedOrFloat32x8
OpMaskedSqrtFloat32x8
OpMaskedSubFloat32x8
- OpMaskedXorFloat32x8
OpMaxFloat32x8
OpMinFloat32x8
OpMulFloat32x8
OpMulByPowOf2Float32x8
OpNotEqualFloat32x8
- OpOrFloat32x8
OpPairwiseAddFloat32x8
OpPairwiseSubFloat32x8
OpRoundFloat32x8
OpSqrtFloat32x8
OpSubFloat32x8
OpTruncFloat32x8
- OpXorFloat32x8
OpAddFloat64x2
OpAddSubFloat64x2
- OpAndFloat64x2
- OpAndNotFloat64x2
OpApproximateReciprocalFloat64x2
OpApproximateReciprocalOfSqrtFloat64x2
OpCeilFloat64x2
OpLessFloat64x2
OpLessEqualFloat64x2
OpMaskedAddFloat64x2
- OpMaskedAndFloat64x2
- OpMaskedAndNotFloat64x2
OpMaskedApproximateReciprocalFloat64x2
OpMaskedApproximateReciprocalOfSqrtFloat64x2
OpMaskedDivFloat64x2
OpMaskedMulFloat64x2
OpMaskedMulByPowOf2Float64x2
OpMaskedNotEqualFloat64x2
- OpMaskedOrFloat64x2
OpMaskedSqrtFloat64x2
OpMaskedSubFloat64x2
- OpMaskedXorFloat64x2
OpMaxFloat64x2
OpMinFloat64x2
OpMulFloat64x2
OpMulByPowOf2Float64x2
OpNotEqualFloat64x2
- OpOrFloat64x2
OpPairwiseAddFloat64x2
OpPairwiseSubFloat64x2
OpRoundFloat64x2
OpSqrtFloat64x2
OpSubFloat64x2
OpTruncFloat64x2
- OpXorFloat64x2
OpAddFloat64x4
OpAddSubFloat64x4
- OpAndFloat64x4
- OpAndNotFloat64x4
OpApproximateReciprocalFloat64x4
OpApproximateReciprocalOfSqrtFloat64x4
OpCeilFloat64x4
OpLessFloat64x4
OpLessEqualFloat64x4
OpMaskedAddFloat64x4
- OpMaskedAndFloat64x4
- OpMaskedAndNotFloat64x4
OpMaskedApproximateReciprocalFloat64x4
OpMaskedApproximateReciprocalOfSqrtFloat64x4
OpMaskedDivFloat64x4
OpMaskedMulFloat64x4
OpMaskedMulByPowOf2Float64x4
OpMaskedNotEqualFloat64x4
- OpMaskedOrFloat64x4
OpMaskedSqrtFloat64x4
OpMaskedSubFloat64x4
- OpMaskedXorFloat64x4
OpMaxFloat64x4
OpMinFloat64x4
OpMulFloat64x4
OpMulByPowOf2Float64x4
OpNotEqualFloat64x4
- OpOrFloat64x4
OpPairwiseAddFloat64x4
OpPairwiseSubFloat64x4
OpRoundFloat64x4
OpSqrtFloat64x4
OpSubFloat64x4
OpTruncFloat64x4
- OpXorFloat64x4
OpAddFloat64x8
- OpAndFloat64x8
- OpAndNotFloat64x8
OpApproximateReciprocalFloat64x8
OpApproximateReciprocalOfSqrtFloat64x8
OpDivFloat64x8
OpLessFloat64x8
OpLessEqualFloat64x8
OpMaskedAddFloat64x8
- OpMaskedAndFloat64x8
- OpMaskedAndNotFloat64x8
OpMaskedApproximateReciprocalFloat64x8
OpMaskedApproximateReciprocalOfSqrtFloat64x8
OpMaskedDivFloat64x8
OpMaskedMulFloat64x8
OpMaskedMulByPowOf2Float64x8
OpMaskedNotEqualFloat64x8
- OpMaskedOrFloat64x8
OpMaskedSqrtFloat64x8
OpMaskedSubFloat64x8
- OpMaskedXorFloat64x8
OpMaxFloat64x8
OpMinFloat64x8
OpMulFloat64x8
OpMulByPowOf2Float64x8
OpNotEqualFloat64x8
- OpOrFloat64x8
OpSqrtFloat64x8
OpSubFloat64x8
- OpXorFloat64x8
OpAbsoluteInt16x16
OpAddInt16x16
OpAndInt16x16
},
},
},
- {
- name: "VANDPS512",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPS512",
- argLen: 2,
- asm: x86.AVANDNPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PS512",
argLen: 1,
},
},
},
- {
- name: "VANDPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPSMasked512",
- argLen: 3,
- asm: x86.AVANDNPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PSMasked512",
argLen: 2,
},
},
},
- {
- name: "VORPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VSQRTPSMasked512",
argLen: 2,
},
},
},
- {
- name: "VXORPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVXORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VMAXPS512",
argLen: 2,
},
},
},
- {
- name: "VORPS512",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VSQRTPS512",
argLen: 1,
},
},
},
- {
- name: "VXORPS512",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VADDPS128",
argLen: 2,
},
},
},
- {
- name: "VANDPS128",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPS128",
- argLen: 2,
- asm: x86.AVANDNPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PS128",
argLen: 1,
},
},
},
- {
- name: "VANDPSMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPSMasked128",
- argLen: 3,
- asm: x86.AVANDNPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PSMasked128",
argLen: 2,
},
},
},
- {
- name: "VORPSMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VSQRTPSMasked128",
argLen: 2,
},
},
},
- {
- name: "VXORPSMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVXORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VMAXPS128",
argLen: 2,
},
},
},
- {
- name: "VORPS128",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VHADDPS128",
argLen: 2,
},
},
},
- {
- name: "VXORPS128",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VADDPS256",
argLen: 2,
},
},
},
- {
- name: "VANDPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPS256",
- argLen: 2,
- asm: x86.AVANDNPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PS256",
argLen: 1,
},
},
},
- {
- name: "VANDPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPSMasked256",
- argLen: 3,
- asm: x86.AVANDNPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PSMasked256",
argLen: 2,
},
},
},
- {
- name: "VORPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VSQRTPSMasked256",
argLen: 2,
},
},
},
- {
- name: "VXORPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVXORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VMAXPS256",
argLen: 2,
},
},
},
- {
- name: "VORPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VHADDPS256",
argLen: 2,
},
},
},
- {
- name: "VXORPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VADDPD128",
argLen: 2,
},
},
},
- {
- name: "VANDPD128",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPD128",
- argLen: 2,
- asm: x86.AVANDNPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PD128",
argLen: 1,
},
},
},
- {
- name: "VANDPDMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPDMasked128",
- argLen: 3,
- asm: x86.AVANDNPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PDMasked128",
argLen: 2,
},
},
},
- {
- name: "VORPDMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VSQRTPDMasked128",
argLen: 2,
},
},
},
- {
- name: "VXORPDMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVXORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VMAXPD128",
argLen: 2,
},
},
},
- {
- name: "VORPD128",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VHADDPD128",
argLen: 2,
},
},
},
- {
- name: "VXORPD128",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VADDPD256",
argLen: 2,
},
},
},
- {
- name: "VANDPD256",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPD256",
- argLen: 2,
- asm: x86.AVANDNPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PD256",
argLen: 1,
},
},
},
- {
- name: "VANDPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPDMasked256",
- argLen: 3,
- asm: x86.AVANDNPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PDMasked256",
argLen: 2,
},
},
},
- {
- name: "VORPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VSQRTPDMasked256",
argLen: 2,
},
},
},
- {
- name: "VXORPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVXORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VMAXPD256",
argLen: 2,
},
},
},
- {
- name: "VORPD256",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VHADDPD256",
argLen: 2,
},
},
},
- {
- name: "VXORPD256",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VADDPD512",
argLen: 2,
},
},
},
- {
- name: "VANDPD512",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPD512",
- argLen: 2,
- asm: x86.AVANDNPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PD512",
argLen: 1,
},
},
},
- {
- name: "VANDPDMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPDMasked512",
- argLen: 3,
- asm: x86.AVANDNPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VRCP14PDMasked512",
argLen: 2,
},
},
},
- {
- name: "VORPDMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VSQRTPDMasked512",
argLen: 2,
},
},
},
- {
- name: "VXORPDMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVXORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VMAXPD512",
argLen: 2,
},
},
},
- {
- name: "VORPD512",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VSQRTPD512",
argLen: 1,
},
},
},
- {
- name: "VXORPD512",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VPABSW256",
argLen: 1,
commutative: true,
generic: true,
},
- {
- name: "AndFloat32x16",
- argLen: 2,
- commutative: true,
- generic: true,
- },
- {
- name: "AndNotFloat32x16",
- argLen: 2,
- generic: true,
- },
{
name: "ApproximateReciprocalFloat32x16",
argLen: 1,
commutative: true,
generic: true,
},
- {
- name: "MaskedAndFloat32x16",
- argLen: 3,
- commutative: true,
- generic: true,
- },
- {
- name: "MaskedAndNotFloat32x16",
- argLen: 3,
- generic: true,
- },
{
name: "MaskedApproximateReciprocalFloat32x16",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "MaskedOrFloat32x16",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaskedSqrtFloat32x16",
argLen: 2,
argLen: 3,
generic: true,
},
- {
- name: "MaskedXorFloat32x16",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaxFloat32x16",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "OrFloat32x16",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "SqrtFloat32x16",
argLen: 1,
argLen: 2,
generic: true,
},
- {
- name: "XorFloat32x16",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "AddFloat32x4",
argLen: 2,
argLen: 2,
generic: true,
},
- {
- name: "AndFloat32x4",
- argLen: 2,
- commutative: true,
- generic: true,
- },
- {
- name: "AndNotFloat32x4",
- argLen: 2,
- generic: true,
- },
{
name: "ApproximateReciprocalFloat32x4",
argLen: 1,
commutative: true,
generic: true,
},
- {
- name: "MaskedAndFloat32x4",
- argLen: 3,
- commutative: true,
- generic: true,
- },
- {
- name: "MaskedAndNotFloat32x4",
- argLen: 3,
- generic: true,
- },
{
name: "MaskedApproximateReciprocalFloat32x4",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "MaskedOrFloat32x4",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaskedSqrtFloat32x4",
argLen: 2,
argLen: 3,
generic: true,
},
- {
- name: "MaskedXorFloat32x4",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaxFloat32x4",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "OrFloat32x4",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "PairwiseAddFloat32x4",
argLen: 2,
argLen: 1,
generic: true,
},
- {
- name: "XorFloat32x4",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "AddFloat32x8",
argLen: 2,
argLen: 2,
generic: true,
},
- {
- name: "AndFloat32x8",
- argLen: 2,
- commutative: true,
- generic: true,
- },
- {
- name: "AndNotFloat32x8",
- argLen: 2,
- generic: true,
- },
{
name: "ApproximateReciprocalFloat32x8",
argLen: 1,
commutative: true,
generic: true,
},
- {
- name: "MaskedAndFloat32x8",
- argLen: 3,
- commutative: true,
- generic: true,
- },
- {
- name: "MaskedAndNotFloat32x8",
- argLen: 3,
- generic: true,
- },
{
name: "MaskedApproximateReciprocalFloat32x8",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "MaskedOrFloat32x8",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaskedSqrtFloat32x8",
argLen: 2,
argLen: 3,
generic: true,
},
- {
- name: "MaskedXorFloat32x8",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaxFloat32x8",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "OrFloat32x8",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "PairwiseAddFloat32x8",
argLen: 2,
argLen: 1,
generic: true,
},
- {
- name: "XorFloat32x8",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "AddFloat64x2",
argLen: 2,
argLen: 2,
generic: true,
},
- {
- name: "AndFloat64x2",
- argLen: 2,
- commutative: true,
- generic: true,
- },
- {
- name: "AndNotFloat64x2",
- argLen: 2,
- generic: true,
- },
{
name: "ApproximateReciprocalFloat64x2",
argLen: 1,
commutative: true,
generic: true,
},
- {
- name: "MaskedAndFloat64x2",
- argLen: 3,
- commutative: true,
- generic: true,
- },
- {
- name: "MaskedAndNotFloat64x2",
- argLen: 3,
- generic: true,
- },
{
name: "MaskedApproximateReciprocalFloat64x2",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "MaskedOrFloat64x2",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaskedSqrtFloat64x2",
argLen: 2,
argLen: 3,
generic: true,
},
- {
- name: "MaskedXorFloat64x2",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaxFloat64x2",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "OrFloat64x2",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "PairwiseAddFloat64x2",
argLen: 2,
argLen: 1,
generic: true,
},
- {
- name: "XorFloat64x2",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "AddFloat64x4",
argLen: 2,
argLen: 2,
generic: true,
},
- {
- name: "AndFloat64x4",
- argLen: 2,
- commutative: true,
- generic: true,
- },
- {
- name: "AndNotFloat64x4",
- argLen: 2,
- generic: true,
- },
{
name: "ApproximateReciprocalFloat64x4",
argLen: 1,
commutative: true,
generic: true,
},
- {
- name: "MaskedAndFloat64x4",
- argLen: 3,
- commutative: true,
- generic: true,
- },
- {
- name: "MaskedAndNotFloat64x4",
- argLen: 3,
- generic: true,
- },
{
name: "MaskedApproximateReciprocalFloat64x4",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "MaskedOrFloat64x4",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaskedSqrtFloat64x4",
argLen: 2,
argLen: 3,
generic: true,
},
- {
- name: "MaskedXorFloat64x4",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaxFloat64x4",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "OrFloat64x4",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "PairwiseAddFloat64x4",
argLen: 2,
argLen: 1,
generic: true,
},
- {
- name: "XorFloat64x4",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "AddFloat64x8",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "AndFloat64x8",
- argLen: 2,
- commutative: true,
- generic: true,
- },
- {
- name: "AndNotFloat64x8",
- argLen: 2,
- generic: true,
- },
{
name: "ApproximateReciprocalFloat64x8",
argLen: 1,
commutative: true,
generic: true,
},
- {
- name: "MaskedAndFloat64x8",
- argLen: 3,
- commutative: true,
- generic: true,
- },
- {
- name: "MaskedAndNotFloat64x8",
- argLen: 3,
- generic: true,
- },
{
name: "MaskedApproximateReciprocalFloat64x8",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "MaskedOrFloat64x8",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaskedSqrtFloat64x8",
argLen: 2,
argLen: 3,
generic: true,
},
- {
- name: "MaskedXorFloat64x8",
- argLen: 3,
- commutative: true,
- generic: true,
- },
{
name: "MaxFloat64x8",
argLen: 2,
commutative: true,
generic: true,
},
- {
- name: "OrFloat64x8",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "SqrtFloat64x8",
argLen: 1,
argLen: 2,
generic: true,
},
- {
- name: "XorFloat64x8",
- argLen: 2,
- commutative: true,
- generic: true,
- },
{
name: "AbsoluteInt16x16",
argLen: 1,
case OpAndB:
v.Op = OpAMD64ANDL
return true
- case OpAndFloat32x16:
- v.Op = OpAMD64VANDPS512
- return true
- case OpAndFloat32x4:
- v.Op = OpAMD64VANDPS128
- return true
- case OpAndFloat32x8:
- v.Op = OpAMD64VANDPS256
- return true
- case OpAndFloat64x2:
- v.Op = OpAMD64VANDPD128
- return true
- case OpAndFloat64x4:
- v.Op = OpAMD64VANDPD256
- return true
- case OpAndFloat64x8:
- v.Op = OpAMD64VANDPD512
- return true
case OpAndInt16x16:
v.Op = OpAMD64VPAND256
return true
case OpAndInt8x32:
v.Op = OpAMD64VPAND256
return true
- case OpAndNotFloat32x16:
- v.Op = OpAMD64VANDNPS512
- return true
- case OpAndNotFloat32x4:
- v.Op = OpAMD64VANDNPS128
- return true
- case OpAndNotFloat32x8:
- v.Op = OpAMD64VANDNPS256
- return true
- case OpAndNotFloat64x2:
- v.Op = OpAMD64VANDNPD128
- return true
- case OpAndNotFloat64x4:
- v.Op = OpAMD64VANDNPD256
- return true
- case OpAndNotFloat64x8:
- v.Op = OpAMD64VANDNPD512
- return true
case OpAndNotInt16x16:
v.Op = OpAMD64VPANDN256
return true
return rewriteValueAMD64_OpMaskedAddUint8x32(v)
case OpMaskedAddUint8x64:
return rewriteValueAMD64_OpMaskedAddUint8x64(v)
- case OpMaskedAndFloat32x16:
- return rewriteValueAMD64_OpMaskedAndFloat32x16(v)
- case OpMaskedAndFloat32x4:
- return rewriteValueAMD64_OpMaskedAndFloat32x4(v)
- case OpMaskedAndFloat32x8:
- return rewriteValueAMD64_OpMaskedAndFloat32x8(v)
- case OpMaskedAndFloat64x2:
- return rewriteValueAMD64_OpMaskedAndFloat64x2(v)
- case OpMaskedAndFloat64x4:
- return rewriteValueAMD64_OpMaskedAndFloat64x4(v)
- case OpMaskedAndFloat64x8:
- return rewriteValueAMD64_OpMaskedAndFloat64x8(v)
case OpMaskedAndInt32x16:
return rewriteValueAMD64_OpMaskedAndInt32x16(v)
case OpMaskedAndInt32x4:
return rewriteValueAMD64_OpMaskedAndInt64x4(v)
case OpMaskedAndInt64x8:
return rewriteValueAMD64_OpMaskedAndInt64x8(v)
- case OpMaskedAndNotFloat32x16:
- return rewriteValueAMD64_OpMaskedAndNotFloat32x16(v)
- case OpMaskedAndNotFloat32x4:
- return rewriteValueAMD64_OpMaskedAndNotFloat32x4(v)
- case OpMaskedAndNotFloat32x8:
- return rewriteValueAMD64_OpMaskedAndNotFloat32x8(v)
- case OpMaskedAndNotFloat64x2:
- return rewriteValueAMD64_OpMaskedAndNotFloat64x2(v)
- case OpMaskedAndNotFloat64x4:
- return rewriteValueAMD64_OpMaskedAndNotFloat64x4(v)
- case OpMaskedAndNotFloat64x8:
- return rewriteValueAMD64_OpMaskedAndNotFloat64x8(v)
case OpMaskedAndNotInt32x16:
return rewriteValueAMD64_OpMaskedAndNotInt32x16(v)
case OpMaskedAndNotInt32x4:
return rewriteValueAMD64_OpMaskedNotEqualUint8x32(v)
case OpMaskedNotEqualUint8x64:
return rewriteValueAMD64_OpMaskedNotEqualUint8x64(v)
- case OpMaskedOrFloat32x16:
- return rewriteValueAMD64_OpMaskedOrFloat32x16(v)
- case OpMaskedOrFloat32x4:
- return rewriteValueAMD64_OpMaskedOrFloat32x4(v)
- case OpMaskedOrFloat32x8:
- return rewriteValueAMD64_OpMaskedOrFloat32x8(v)
- case OpMaskedOrFloat64x2:
- return rewriteValueAMD64_OpMaskedOrFloat64x2(v)
- case OpMaskedOrFloat64x4:
- return rewriteValueAMD64_OpMaskedOrFloat64x4(v)
- case OpMaskedOrFloat64x8:
- return rewriteValueAMD64_OpMaskedOrFloat64x8(v)
case OpMaskedOrInt32x16:
return rewriteValueAMD64_OpMaskedOrInt32x16(v)
case OpMaskedOrInt32x4:
return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v)
case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8:
return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v)
- case OpMaskedXorFloat32x16:
- return rewriteValueAMD64_OpMaskedXorFloat32x16(v)
- case OpMaskedXorFloat32x4:
- return rewriteValueAMD64_OpMaskedXorFloat32x4(v)
- case OpMaskedXorFloat32x8:
- return rewriteValueAMD64_OpMaskedXorFloat32x8(v)
- case OpMaskedXorFloat64x2:
- return rewriteValueAMD64_OpMaskedXorFloat64x2(v)
- case OpMaskedXorFloat64x4:
- return rewriteValueAMD64_OpMaskedXorFloat64x4(v)
- case OpMaskedXorFloat64x8:
- return rewriteValueAMD64_OpMaskedXorFloat64x8(v)
case OpMaskedXorInt32x16:
return rewriteValueAMD64_OpMaskedXorInt32x16(v)
case OpMaskedXorInt32x4:
case OpOrB:
v.Op = OpAMD64ORL
return true
- case OpOrFloat32x16:
- v.Op = OpAMD64VORPS512
- return true
- case OpOrFloat32x4:
- v.Op = OpAMD64VORPS128
- return true
- case OpOrFloat32x8:
- v.Op = OpAMD64VORPS256
- return true
- case OpOrFloat64x2:
- v.Op = OpAMD64VORPD128
- return true
- case OpOrFloat64x4:
- v.Op = OpAMD64VORPD256
- return true
- case OpOrFloat64x8:
- v.Op = OpAMD64VORPD512
- return true
case OpOrInt16x16:
v.Op = OpAMD64VPOR256
return true
case OpXor8:
v.Op = OpAMD64XORL
return true
- case OpXorFloat32x16:
- v.Op = OpAMD64VXORPS512
- return true
- case OpXorFloat32x4:
- v.Op = OpAMD64VXORPS128
- return true
- case OpXorFloat32x8:
- v.Op = OpAMD64VXORPS256
- return true
- case OpXorFloat64x2:
- v.Op = OpAMD64VXORPD128
- return true
- case OpXorFloat64x4:
- v.Op = OpAMD64VXORPD256
- return true
- case OpXorFloat64x8:
- v.Op = OpAMD64VXORPD512
- return true
case OpXorInt16x16:
v.Op = OpAMD64VPXOR256
return true
return true
}
}
-func rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndFloat32x16 x y mask)
- // result: (VANDPSMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDPSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndFloat32x4 x y mask)
- // result: (VANDPSMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDPSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndFloat32x8 x y mask)
- // result: (VANDPSMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDPSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndFloat64x2 x y mask)
- // result: (VANDPDMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDPDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndFloat64x4 x y mask)
- // result: (VANDPDMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDPDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndFloat64x8 x y mask)
- // result: (VANDPDMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDPDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
return true
}
}
-func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndNotFloat32x16 x y mask)
- // result: (VANDNPSMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDNPSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndNotFloat32x4 x y mask)
- // result: (VANDNPSMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDNPSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndNotFloat32x8 x y mask)
- // result: (VANDNPSMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDNPSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndNotFloat64x2 x y mask)
- // result: (VANDNPDMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDNPDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndNotFloat64x4 x y mask)
- // result: (VANDNPDMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDNPDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedAndNotFloat64x8 x y mask)
- // result: (VANDNPDMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VANDNPDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
return true
}
}
-func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedOrFloat32x16 x y mask)
- // result: (VORPSMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VORPSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedOrFloat32x4 x y mask)
- // result: (VORPSMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VORPSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedOrFloat32x8 x y mask)
- // result: (VORPSMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VORPSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedOrFloat64x2 x y mask)
- // result: (VORPDMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VORPDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedOrFloat64x4 x y mask)
- // result: (VORPDMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VORPDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedOrFloat64x8 x y mask)
- // result: (VORPDMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VORPDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
return true
}
}
-func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedXorFloat32x16 x y mask)
- // result: (VXORPSMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VXORPSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedXorFloat32x4 x y mask)
- // result: (VXORPSMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VXORPSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedXorFloat32x8 x y mask)
- // result: (VXORPSMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VXORPSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedXorFloat64x2 x y mask)
- // result: (VXORPDMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VXORPDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedXorFloat64x4 x y mask)
- // result: (VXORPDMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VXORPDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedXorFloat64x8 x y mask)
- // result: (VXORPDMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- mask := v_2
- v.reset(OpAMD64VXORPDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg3(x, y, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x4.AddSub", opLen2(ssa.OpAddSubFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.And", opLen2(ssa.OpAndFloat32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.And", opLen2(ssa.OpAndFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.And", opLen2(ssa.OpAndFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.AndNot", opLen2(ssa.OpAndNotFloat32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.AndNot", opLen2(ssa.OpAndNotFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int32x16.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint64x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.Or", opLen2(ssa.OpOrFloat32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.Or", opLen2(ssa.OpOrFloat32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.Or", opLen2(ssa.OpOrFloat32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.Or", opLen2(ssa.OpOrFloat64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.Or", opLen2(ssa.OpOrFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.Or", opLen2(ssa.OpOrFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.Xor", opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.Xor", opLen2(ssa.OpXorFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.Xor", opLen2(ssa.OpXorFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64)
gotv = vec0.Add(vec1)
case "AddSub":
gotv = vec0.AddSub(vec1)
- case "And":
- gotv = vec0.And(vec1)
- case "AndNot":
- gotv = vec0.AndNot(vec1)
case "Div":
gotv = vec0.Div(vec1)
case "Max":
gotv = vec0.Mul(vec1)
case "MulByPowOf2":
gotv = vec0.MulByPowOf2(vec1)
- case "Or":
- gotv = vec0.Or(vec1)
case "PairwiseAdd":
gotv = vec0.PairwiseAdd(vec1)
case "PairwiseSub":
gotv = vec0.PairwiseSub(vec1)
case "Sub":
gotv = vec0.Sub(vec1)
- case "Xor":
- gotv = vec0.Xor(vec1)
default:
t.Errorf("Unknown method: Float32x4.%s", which)
switch which {
case "MaskedAdd":
gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4())
- case "MaskedAnd":
- gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4())
- case "MaskedAndNot":
- gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4())
case "MaskedDiv":
gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x4())
case "MaskedMax":
gotv = vec0.MaskedMul(vec1, vec2.AsMask32x4())
case "MaskedMulByPowOf2":
gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x4())
- case "MaskedOr":
- gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4())
case "MaskedSub":
gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4())
- case "MaskedXor":
- gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4())
default:
t.Errorf("Unknown method: Float32x4.%s", which)
gotv = vec0.Add(vec1)
case "AddSub":
gotv = vec0.AddSub(vec1)
- case "And":
- gotv = vec0.And(vec1)
- case "AndNot":
- gotv = vec0.AndNot(vec1)
case "Div":
gotv = vec0.Div(vec1)
case "Max":
gotv = vec0.Mul(vec1)
case "MulByPowOf2":
gotv = vec0.MulByPowOf2(vec1)
- case "Or":
- gotv = vec0.Or(vec1)
case "PairwiseAdd":
gotv = vec0.PairwiseAdd(vec1)
case "PairwiseSub":
gotv = vec0.PairwiseSub(vec1)
case "Sub":
gotv = vec0.Sub(vec1)
- case "Xor":
- gotv = vec0.Xor(vec1)
default:
t.Errorf("Unknown method: Float32x8.%s", which)
switch which {
case "MaskedAdd":
gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8())
- case "MaskedAnd":
- gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8())
- case "MaskedAndNot":
- gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8())
case "MaskedDiv":
gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x8())
case "MaskedMax":
gotv = vec0.MaskedMul(vec1, vec2.AsMask32x8())
case "MaskedMulByPowOf2":
gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x8())
- case "MaskedOr":
- gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8())
case "MaskedSub":
gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8())
- case "MaskedXor":
- gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8())
default:
t.Errorf("Unknown method: Float32x8.%s", which)
switch which {
case "Add":
gotv = vec0.Add(vec1)
- case "And":
- gotv = vec0.And(vec1)
- case "AndNot":
- gotv = vec0.AndNot(vec1)
case "Div":
gotv = vec0.Div(vec1)
case "Max":
gotv = vec0.Mul(vec1)
case "MulByPowOf2":
gotv = vec0.MulByPowOf2(vec1)
- case "Or":
- gotv = vec0.Or(vec1)
case "Sub":
gotv = vec0.Sub(vec1)
- case "Xor":
- gotv = vec0.Xor(vec1)
default:
t.Errorf("Unknown method: Float32x16.%s", which)
switch which {
case "MaskedAdd":
gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16())
- case "MaskedAnd":
- gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16())
- case "MaskedAndNot":
- gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16())
case "MaskedDiv":
gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16())
case "MaskedMax":
gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16())
case "MaskedMulByPowOf2":
gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16())
- case "MaskedOr":
- gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16())
case "MaskedSub":
gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16())
- case "MaskedXor":
- gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16())
default:
t.Errorf("Unknown method: Float32x16.%s", which)
gotv = vec0.Add(vec1)
case "AddSub":
gotv = vec0.AddSub(vec1)
- case "And":
- gotv = vec0.And(vec1)
- case "AndNot":
- gotv = vec0.AndNot(vec1)
case "Div":
gotv = vec0.Div(vec1)
case "DotProdBroadcast":
gotv = vec0.Mul(vec1)
case "MulByPowOf2":
gotv = vec0.MulByPowOf2(vec1)
- case "Or":
- gotv = vec0.Or(vec1)
case "PairwiseAdd":
gotv = vec0.PairwiseAdd(vec1)
case "PairwiseSub":
gotv = vec0.PairwiseSub(vec1)
case "Sub":
gotv = vec0.Sub(vec1)
- case "Xor":
- gotv = vec0.Xor(vec1)
default:
t.Errorf("Unknown method: Float64x2.%s", which)
switch which {
case "MaskedAdd":
gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2())
- case "MaskedAnd":
- gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2())
- case "MaskedAndNot":
- gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2())
case "MaskedDiv":
gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x2())
case "MaskedMax":
gotv = vec0.MaskedMul(vec1, vec2.AsMask64x2())
case "MaskedMulByPowOf2":
gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x2())
- case "MaskedOr":
- gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2())
case "MaskedSub":
gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2())
- case "MaskedXor":
- gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2())
default:
t.Errorf("Unknown method: Float64x2.%s", which)
gotv = vec0.Add(vec1)
case "AddSub":
gotv = vec0.AddSub(vec1)
- case "And":
- gotv = vec0.And(vec1)
- case "AndNot":
- gotv = vec0.AndNot(vec1)
case "Div":
gotv = vec0.Div(vec1)
case "Max":
gotv = vec0.Mul(vec1)
case "MulByPowOf2":
gotv = vec0.MulByPowOf2(vec1)
- case "Or":
- gotv = vec0.Or(vec1)
case "PairwiseAdd":
gotv = vec0.PairwiseAdd(vec1)
case "PairwiseSub":
gotv = vec0.PairwiseSub(vec1)
case "Sub":
gotv = vec0.Sub(vec1)
- case "Xor":
- gotv = vec0.Xor(vec1)
default:
t.Errorf("Unknown method: Float64x4.%s", which)
switch which {
case "MaskedAdd":
gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4())
- case "MaskedAnd":
- gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4())
- case "MaskedAndNot":
- gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4())
case "MaskedDiv":
gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x4())
case "MaskedMax":
gotv = vec0.MaskedMul(vec1, vec2.AsMask64x4())
case "MaskedMulByPowOf2":
gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x4())
- case "MaskedOr":
- gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4())
case "MaskedSub":
gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4())
- case "MaskedXor":
- gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4())
default:
t.Errorf("Unknown method: Float64x4.%s", which)
switch which {
case "Add":
gotv = vec0.Add(vec1)
- case "And":
- gotv = vec0.And(vec1)
- case "AndNot":
- gotv = vec0.AndNot(vec1)
case "Div":
gotv = vec0.Div(vec1)
case "Max":
gotv = vec0.Mul(vec1)
case "MulByPowOf2":
gotv = vec0.MulByPowOf2(vec1)
- case "Or":
- gotv = vec0.Or(vec1)
case "Sub":
gotv = vec0.Sub(vec1)
- case "Xor":
- gotv = vec0.Xor(vec1)
default:
t.Errorf("Unknown method: Float64x8.%s", which)
switch which {
case "MaskedAdd":
gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8())
- case "MaskedAnd":
- gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8())
- case "MaskedAndNot":
- gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8())
case "MaskedDiv":
gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x8())
case "MaskedMax":
gotv = vec0.MaskedMul(vec1, vec2.AsMask64x8())
case "MaskedMulByPowOf2":
gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x8())
- case "MaskedOr":
- gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8())
case "MaskedSub":
gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8())
- case "MaskedXor":
- gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8())
default:
t.Errorf("Unknown method: Float64x8.%s", which)
/* And */
-// And performs a bitwise AND operation between two vectors.
-//
-// Asm: VANDPS, CPU Feature: AVX
-func (x Float32x4) And(y Float32x4) Float32x4
-
-// And performs a bitwise AND operation between two vectors.
-//
-// Asm: VANDPS, CPU Feature: AVX
-func (x Float32x8) And(y Float32x8) Float32x8
-
-// And performs a masked bitwise AND operation between two vectors.
-//
-// Asm: VANDPS, CPU Feature: AVX512EVEX
-func (x Float32x16) And(y Float32x16) Float32x16
-
-// And performs a bitwise AND operation between two vectors.
-//
-// Asm: VANDPD, CPU Feature: AVX
-func (x Float64x2) And(y Float64x2) Float64x2
-
-// And performs a bitwise AND operation between two vectors.
-//
-// Asm: VANDPD, CPU Feature: AVX
-func (x Float64x4) And(y Float64x4) Float64x4
-
-// And performs a masked bitwise AND operation between two vectors.
-//
-// Asm: VANDPD, CPU Feature: AVX512EVEX
-func (x Float64x8) And(y Float64x8) Float64x8
-
// And performs a bitwise AND operation between two vectors.
//
// Asm: VPAND, CPU Feature: AVX
/* AndNot */
-// AndNot performs a bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPS, CPU Feature: AVX
-func (x Float32x4) AndNot(y Float32x4) Float32x4
-
-// AndNot performs a bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPS, CPU Feature: AVX
-func (x Float32x8) AndNot(y Float32x8) Float32x8
-
-// AndNot performs a masked bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPS, CPU Feature: AVX512EVEX
-func (x Float32x16) AndNot(y Float32x16) Float32x16
-
-// AndNot performs a bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPD, CPU Feature: AVX
-func (x Float64x2) AndNot(y Float64x2) Float64x2
-
-// AndNot performs a bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPD, CPU Feature: AVX
-func (x Float64x4) AndNot(y Float64x4) Float64x4
-
-// AndNot performs a masked bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPD, CPU Feature: AVX512EVEX
-func (x Float64x8) AndNot(y Float64x8) Float64x8
-
// AndNot performs a bitwise AND NOT operation between two vectors.
//
// Asm: VPANDN, CPU Feature: AVX
/* MaskedAnd */
-// And performs a masked bitwise AND operation between two vectors.
-//
-// Asm: VANDPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4
-
-// And performs a masked bitwise AND operation between two vectors.
-//
-// Asm: VANDPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8
-
-// And performs a masked bitwise AND operation between two vectors.
-//
-// Asm: VANDPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16
-
-// And performs a masked bitwise AND operation between two vectors.
-//
-// Asm: VANDPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2
-
-// And performs a masked bitwise AND operation between two vectors.
-//
-// Asm: VANDPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4
-
-// And performs a masked bitwise AND operation between two vectors.
-//
-// Asm: VANDPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8
-
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDD, CPU Feature: AVX512EVEX
/* MaskedAndNot */
-// AndNot performs a masked bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4
-
-// AndNot performs a masked bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8
-
-// AndNot performs a masked bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedAndNot(y Float32x16, z Mask32x16) Float32x16
-
-// AndNot performs a masked bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2
-
-// AndNot performs a masked bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4
-
-// AndNot performs a masked bitwise AND NOT operation between two vectors.
-//
-// Asm: VANDNPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8
-
// AndNot performs a masked bitwise AND NOT operation between two vectors.
//
// Asm: VPANDND, CPU Feature: AVX512EVEX
/* MaskedOr */
-// Or performs a masked bitwise OR operation between two vectors.
-//
-// Asm: VORPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4
-
-// Or performs a masked bitwise OR operation between two vectors.
-//
-// Asm: VORPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8
-
-// Or performs a masked bitwise OR operation between two vectors.
-//
-// Asm: VORPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedOr(y Float32x16, z Mask32x16) Float32x16
-
-// Or performs a masked bitwise OR operation between two vectors.
-//
-// Asm: VORPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2
-
-// Or performs a masked bitwise OR operation between two vectors.
-//
-// Asm: VORPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4
-
-// Or performs a masked bitwise OR operation between two vectors.
-//
-// Asm: VORPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8
-
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORD, CPU Feature: AVX512EVEX
/* MaskedXor */
-// Xor performs a masked bitwise XOR operation between two vectors.
-//
-// Asm: VXORPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4
-
-// Xor performs a masked bitwise XOR operation between two vectors.
-//
-// Asm: VXORPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8
-
-// Xor performs a masked bitwise XOR operation between two vectors.
-//
-// Asm: VXORPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16
-
-// Xor performs a masked bitwise XOR operation between two vectors.
-//
-// Asm: VXORPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2
-
-// Xor performs a masked bitwise XOR operation between two vectors.
-//
-// Asm: VXORPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4
-
-// Xor performs a masked bitwise XOR operation between two vectors.
-//
-// Asm: VXORPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8
-
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORD, CPU Feature: AVX512EVEX
/* Or */
-// Or performs a bitwise OR operation between two vectors.
-//
-// Asm: VORPS, CPU Feature: AVX
-func (x Float32x4) Or(y Float32x4) Float32x4
-
-// Or performs a bitwise OR operation between two vectors.
-//
-// Asm: VORPS, CPU Feature: AVX
-func (x Float32x8) Or(y Float32x8) Float32x8
-
-// Or performs a masked bitwise OR operation between two vectors.
-//
-// Asm: VORPS, CPU Feature: AVX512EVEX
-func (x Float32x16) Or(y Float32x16) Float32x16
-
-// Or performs a bitwise OR operation between two vectors.
-//
-// Asm: VORPD, CPU Feature: AVX
-func (x Float64x2) Or(y Float64x2) Float64x2
-
-// Or performs a bitwise OR operation between two vectors.
-//
-// Asm: VORPD, CPU Feature: AVX
-func (x Float64x4) Or(y Float64x4) Float64x4
-
-// Or performs a masked bitwise OR operation between two vectors.
-//
-// Asm: VORPD, CPU Feature: AVX512EVEX
-func (x Float64x8) Or(y Float64x8) Float64x8
-
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX
/* Xor */
-// Xor performs a bitwise XOR operation between two vectors.
-//
-// Asm: VXORPS, CPU Feature: AVX
-func (x Float32x4) Xor(y Float32x4) Float32x4
-
-// Xor performs a bitwise XOR operation between two vectors.
-//
-// Asm: VXORPS, CPU Feature: AVX
-func (x Float32x8) Xor(y Float32x8) Float32x8
-
-// Xor performs a masked bitwise XOR operation between two vectors.
-//
-// Asm: VXORPS, CPU Feature: AVX512EVEX
-func (x Float32x16) Xor(y Float32x16) Float32x16
-
-// Xor performs a bitwise XOR operation between two vectors.
-//
-// Asm: VXORPD, CPU Feature: AVX
-func (x Float64x2) Xor(y Float64x2) Float64x2
-
-// Xor performs a bitwise XOR operation between two vectors.
-//
-// Asm: VXORPD, CPU Feature: AVX
-func (x Float64x4) Xor(y Float64x4) Float64x4
-
-// Xor performs a masked bitwise XOR operation between two vectors.
-//
-// Asm: VXORPD, CPU Feature: AVX512EVEX
-func (x Float64x8) Xor(y Float64x8) Float64x8
-
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX