This CL is generated by CL 680595.
Change-Id: I5e06ea9bc6a62593fc3b00fd44c119a5ed0d9e90
Reviewed-on: https://go-review.googlesource.com/c/go/+/681299
Reviewed-by: David Chase <drchase@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
ssa.OpAMD64VPCMPBMasked512:
p = simdFp2k1k1Imm8(s, v)
- case ssa.OpAMD64VPDPWSSD128,
+ case ssa.OpAMD64VFMADD132PS512,
+ ssa.OpAMD64VFMADD132PS128,
+ ssa.OpAMD64VFMADD132PS256,
+ ssa.OpAMD64VFMADD132PD128,
+ ssa.OpAMD64VFMADD132PD256,
+ ssa.OpAMD64VFMADD132PD512,
+ ssa.OpAMD64VFMADD213PS512,
+ ssa.OpAMD64VFMADD213PS128,
+ ssa.OpAMD64VFMADD213PS256,
+ ssa.OpAMD64VFMADD213PD128,
+ ssa.OpAMD64VFMADD213PD256,
+ ssa.OpAMD64VFMADD213PD512,
+ ssa.OpAMD64VFMADD231PS512,
+ ssa.OpAMD64VFMADD231PS128,
+ ssa.OpAMD64VFMADD231PS256,
+ ssa.OpAMD64VFMADD231PD128,
+ ssa.OpAMD64VFMADD231PD256,
+ ssa.OpAMD64VFMADD231PD512,
+ ssa.OpAMD64VFMADDSUB132PS512,
+ ssa.OpAMD64VFMADDSUB132PS128,
+ ssa.OpAMD64VFMADDSUB132PS256,
+ ssa.OpAMD64VFMADDSUB132PD128,
+ ssa.OpAMD64VFMADDSUB132PD256,
+ ssa.OpAMD64VFMADDSUB132PD512,
+ ssa.OpAMD64VFMADDSUB213PS512,
+ ssa.OpAMD64VFMADDSUB213PS128,
+ ssa.OpAMD64VFMADDSUB213PS256,
+ ssa.OpAMD64VFMADDSUB213PD128,
+ ssa.OpAMD64VFMADDSUB213PD256,
+ ssa.OpAMD64VFMADDSUB213PD512,
+ ssa.OpAMD64VFMADDSUB231PS512,
+ ssa.OpAMD64VFMADDSUB231PS128,
+ ssa.OpAMD64VFMADDSUB231PS256,
+ ssa.OpAMD64VFMADDSUB231PD128,
+ ssa.OpAMD64VFMADDSUB231PD256,
+ ssa.OpAMD64VFMADDSUB231PD512,
+ ssa.OpAMD64VFMSUB132PS512,
+ ssa.OpAMD64VFMSUB132PS128,
+ ssa.OpAMD64VFMSUB132PS256,
+ ssa.OpAMD64VFMSUB132PD128,
+ ssa.OpAMD64VFMSUB132PD256,
+ ssa.OpAMD64VFMSUB132PD512,
+ ssa.OpAMD64VFMSUB213PS512,
+ ssa.OpAMD64VFMSUB213PS128,
+ ssa.OpAMD64VFMSUB213PS256,
+ ssa.OpAMD64VFMSUB213PD128,
+ ssa.OpAMD64VFMSUB213PD256,
+ ssa.OpAMD64VFMSUB213PD512,
+ ssa.OpAMD64VFMSUB231PS512,
+ ssa.OpAMD64VFMSUB231PS128,
+ ssa.OpAMD64VFMSUB231PS256,
+ ssa.OpAMD64VFMSUB231PD128,
+ ssa.OpAMD64VFMSUB231PD256,
+ ssa.OpAMD64VFMSUB231PD512,
+ ssa.OpAMD64VFMSUBADD132PS512,
+ ssa.OpAMD64VFMSUBADD132PS128,
+ ssa.OpAMD64VFMSUBADD132PS256,
+ ssa.OpAMD64VFMSUBADD132PD128,
+ ssa.OpAMD64VFMSUBADD132PD256,
+ ssa.OpAMD64VFMSUBADD132PD512,
+ ssa.OpAMD64VFMSUBADD213PS512,
+ ssa.OpAMD64VFMSUBADD213PS128,
+ ssa.OpAMD64VFMSUBADD213PS256,
+ ssa.OpAMD64VFMSUBADD213PD128,
+ ssa.OpAMD64VFMSUBADD213PD256,
+ ssa.OpAMD64VFMSUBADD213PD512,
+ ssa.OpAMD64VFMSUBADD231PS512,
+ ssa.OpAMD64VFMSUBADD231PS128,
+ ssa.OpAMD64VFMSUBADD231PS256,
+ ssa.OpAMD64VFMSUBADD231PD128,
+ ssa.OpAMD64VFMSUBADD231PD256,
+ ssa.OpAMD64VFMSUBADD231PD512,
+ ssa.OpAMD64VFNMADD132PS512,
+ ssa.OpAMD64VFNMADD132PS128,
+ ssa.OpAMD64VFNMADD132PS256,
+ ssa.OpAMD64VFNMADD132PD128,
+ ssa.OpAMD64VFNMADD132PD256,
+ ssa.OpAMD64VFNMADD132PD512,
+ ssa.OpAMD64VFNMADD213PS512,
+ ssa.OpAMD64VFNMADD213PS128,
+ ssa.OpAMD64VFNMADD213PS256,
+ ssa.OpAMD64VFNMADD213PD128,
+ ssa.OpAMD64VFNMADD213PD256,
+ ssa.OpAMD64VFNMADD213PD512,
+ ssa.OpAMD64VFNMADD231PS512,
+ ssa.OpAMD64VFNMADD231PS128,
+ ssa.OpAMD64VFNMADD231PS256,
+ ssa.OpAMD64VFNMADD231PD128,
+ ssa.OpAMD64VFNMADD231PD256,
+ ssa.OpAMD64VFNMADD231PD512,
+ ssa.OpAMD64VFNMSUB132PS512,
+ ssa.OpAMD64VFNMSUB132PS128,
+ ssa.OpAMD64VFNMSUB132PS256,
+ ssa.OpAMD64VFNMSUB132PD128,
+ ssa.OpAMD64VFNMSUB132PD256,
+ ssa.OpAMD64VFNMSUB132PD512,
+ ssa.OpAMD64VFNMSUB213PS512,
+ ssa.OpAMD64VFNMSUB213PS128,
+ ssa.OpAMD64VFNMSUB213PS256,
+ ssa.OpAMD64VFNMSUB213PD128,
+ ssa.OpAMD64VFNMSUB213PD256,
+ ssa.OpAMD64VFNMSUB213PD512,
+ ssa.OpAMD64VFNMSUB231PS512,
+ ssa.OpAMD64VFNMSUB231PS128,
+ ssa.OpAMD64VFNMSUB231PS256,
+ ssa.OpAMD64VFNMSUB231PD128,
+ ssa.OpAMD64VFNMSUB231PD256,
+ ssa.OpAMD64VFNMSUB231PD512,
+ ssa.OpAMD64VPDPWSSD128,
ssa.OpAMD64VPDPWSSD256,
ssa.OpAMD64VPDPWSSD512,
ssa.OpAMD64VPDPWSSDS128,
ssa.OpAMD64VPDPBUSD512:
p = simdFp31ResultInArg0(s, v)
- case ssa.OpAMD64VPDPWSSDMasked512,
+ case ssa.OpAMD64VFMADD132PSMasked512,
+ ssa.OpAMD64VFMADD132PSMasked128,
+ ssa.OpAMD64VFMADD132PSMasked256,
+ ssa.OpAMD64VFMADD132PDMasked128,
+ ssa.OpAMD64VFMADD132PDMasked256,
+ ssa.OpAMD64VFMADD132PDMasked512,
+ ssa.OpAMD64VFMADD213PSMasked512,
+ ssa.OpAMD64VFMADD213PSMasked128,
+ ssa.OpAMD64VFMADD213PSMasked256,
+ ssa.OpAMD64VFMADD213PDMasked128,
+ ssa.OpAMD64VFMADD213PDMasked256,
+ ssa.OpAMD64VFMADD213PDMasked512,
+ ssa.OpAMD64VFMADD231PSMasked512,
+ ssa.OpAMD64VFMADD231PSMasked128,
+ ssa.OpAMD64VFMADD231PSMasked256,
+ ssa.OpAMD64VFMADD231PDMasked128,
+ ssa.OpAMD64VFMADD231PDMasked256,
+ ssa.OpAMD64VFMADD231PDMasked512,
+ ssa.OpAMD64VFMADDSUB132PSMasked512,
+ ssa.OpAMD64VFMADDSUB132PSMasked128,
+ ssa.OpAMD64VFMADDSUB132PSMasked256,
+ ssa.OpAMD64VFMADDSUB132PDMasked128,
+ ssa.OpAMD64VFMADDSUB132PDMasked256,
+ ssa.OpAMD64VFMADDSUB132PDMasked512,
+ ssa.OpAMD64VFMADDSUB213PSMasked512,
+ ssa.OpAMD64VFMADDSUB213PSMasked128,
+ ssa.OpAMD64VFMADDSUB213PSMasked256,
+ ssa.OpAMD64VFMADDSUB213PDMasked128,
+ ssa.OpAMD64VFMADDSUB213PDMasked256,
+ ssa.OpAMD64VFMADDSUB213PDMasked512,
+ ssa.OpAMD64VFMADDSUB231PSMasked512,
+ ssa.OpAMD64VFMADDSUB231PSMasked128,
+ ssa.OpAMD64VFMADDSUB231PSMasked256,
+ ssa.OpAMD64VFMADDSUB231PDMasked128,
+ ssa.OpAMD64VFMADDSUB231PDMasked256,
+ ssa.OpAMD64VFMADDSUB231PDMasked512,
+ ssa.OpAMD64VFMSUB132PSMasked512,
+ ssa.OpAMD64VFMSUB132PSMasked128,
+ ssa.OpAMD64VFMSUB132PSMasked256,
+ ssa.OpAMD64VFMSUB132PDMasked128,
+ ssa.OpAMD64VFMSUB132PDMasked256,
+ ssa.OpAMD64VFMSUB132PDMasked512,
+ ssa.OpAMD64VFMSUB213PSMasked512,
+ ssa.OpAMD64VFMSUB213PSMasked128,
+ ssa.OpAMD64VFMSUB213PSMasked256,
+ ssa.OpAMD64VFMSUB213PDMasked128,
+ ssa.OpAMD64VFMSUB213PDMasked256,
+ ssa.OpAMD64VFMSUB213PDMasked512,
+ ssa.OpAMD64VFMSUB231PSMasked512,
+ ssa.OpAMD64VFMSUB231PSMasked128,
+ ssa.OpAMD64VFMSUB231PSMasked256,
+ ssa.OpAMD64VFMSUB231PDMasked128,
+ ssa.OpAMD64VFMSUB231PDMasked256,
+ ssa.OpAMD64VFMSUB231PDMasked512,
+ ssa.OpAMD64VFMSUBADD132PSMasked512,
+ ssa.OpAMD64VFMSUBADD132PSMasked128,
+ ssa.OpAMD64VFMSUBADD132PSMasked256,
+ ssa.OpAMD64VFMSUBADD132PDMasked128,
+ ssa.OpAMD64VFMSUBADD132PDMasked256,
+ ssa.OpAMD64VFMSUBADD132PDMasked512,
+ ssa.OpAMD64VFMSUBADD213PSMasked512,
+ ssa.OpAMD64VFMSUBADD213PSMasked128,
+ ssa.OpAMD64VFMSUBADD213PSMasked256,
+ ssa.OpAMD64VFMSUBADD213PDMasked128,
+ ssa.OpAMD64VFMSUBADD213PDMasked256,
+ ssa.OpAMD64VFMSUBADD213PDMasked512,
+ ssa.OpAMD64VFMSUBADD231PSMasked512,
+ ssa.OpAMD64VFMSUBADD231PSMasked128,
+ ssa.OpAMD64VFMSUBADD231PSMasked256,
+ ssa.OpAMD64VFMSUBADD231PDMasked128,
+ ssa.OpAMD64VFMSUBADD231PDMasked256,
+ ssa.OpAMD64VFMSUBADD231PDMasked512,
+ ssa.OpAMD64VFNMADD132PSMasked512,
+ ssa.OpAMD64VFNMADD132PSMasked128,
+ ssa.OpAMD64VFNMADD132PSMasked256,
+ ssa.OpAMD64VFNMADD132PDMasked128,
+ ssa.OpAMD64VFNMADD132PDMasked256,
+ ssa.OpAMD64VFNMADD132PDMasked512,
+ ssa.OpAMD64VFNMADD213PSMasked512,
+ ssa.OpAMD64VFNMADD213PSMasked128,
+ ssa.OpAMD64VFNMADD213PSMasked256,
+ ssa.OpAMD64VFNMADD213PDMasked128,
+ ssa.OpAMD64VFNMADD213PDMasked256,
+ ssa.OpAMD64VFNMADD213PDMasked512,
+ ssa.OpAMD64VFNMADD231PSMasked512,
+ ssa.OpAMD64VFNMADD231PSMasked128,
+ ssa.OpAMD64VFNMADD231PSMasked256,
+ ssa.OpAMD64VFNMADD231PDMasked128,
+ ssa.OpAMD64VFNMADD231PDMasked256,
+ ssa.OpAMD64VFNMADD231PDMasked512,
+ ssa.OpAMD64VFNMSUB132PSMasked512,
+ ssa.OpAMD64VFNMSUB132PSMasked128,
+ ssa.OpAMD64VFNMSUB132PSMasked256,
+ ssa.OpAMD64VFNMSUB132PDMasked128,
+ ssa.OpAMD64VFNMSUB132PDMasked256,
+ ssa.OpAMD64VFNMSUB132PDMasked512,
+ ssa.OpAMD64VFNMSUB213PSMasked512,
+ ssa.OpAMD64VFNMSUB213PSMasked128,
+ ssa.OpAMD64VFNMSUB213PSMasked256,
+ ssa.OpAMD64VFNMSUB213PDMasked128,
+ ssa.OpAMD64VFNMSUB213PDMasked256,
+ ssa.OpAMD64VFNMSUB213PDMasked512,
+ ssa.OpAMD64VFNMSUB231PSMasked512,
+ ssa.OpAMD64VFNMSUB231PSMasked128,
+ ssa.OpAMD64VFNMSUB231PSMasked256,
+ ssa.OpAMD64VFNMSUB231PDMasked128,
+ ssa.OpAMD64VFNMSUB231PDMasked256,
+ ssa.OpAMD64VFNMSUB231PDMasked512,
+ ssa.OpAMD64VPDPWSSDMasked512,
ssa.OpAMD64VPDPWSSDMasked128,
ssa.OpAMD64VPDPWSSDMasked256,
ssa.OpAMD64VPDPWSSDSMasked512,
		ssa.OpAMD64VDIVPDMasked128,
		ssa.OpAMD64VDIVPDMasked256,
		ssa.OpAMD64VDIVPDMasked512,
		ssa.OpAMD64VMAXPSMasked512,
ssa.OpAMD64VMAXPSMasked128,
ssa.OpAMD64VMAXPSMasked256,
(FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x)
(FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x)
(FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x)
+(FusedMultiplyAdd132Float32x16 ...) => (VFMADD132PS512 ...)
+(FusedMultiplyAdd132Float32x4 ...) => (VFMADD132PS128 ...)
+(FusedMultiplyAdd132Float32x8 ...) => (VFMADD132PS256 ...)
+(FusedMultiplyAdd132Float64x2 ...) => (VFMADD132PD128 ...)
+(FusedMultiplyAdd132Float64x4 ...) => (VFMADD132PD256 ...)
+(FusedMultiplyAdd132Float64x8 ...) => (VFMADD132PD512 ...)
+(FusedMultiplyAdd213Float32x16 ...) => (VFMADD213PS512 ...)
+(FusedMultiplyAdd213Float32x4 ...) => (VFMADD213PS128 ...)
+(FusedMultiplyAdd213Float32x8 ...) => (VFMADD213PS256 ...)
+(FusedMultiplyAdd213Float64x2 ...) => (VFMADD213PD128 ...)
+(FusedMultiplyAdd213Float64x4 ...) => (VFMADD213PD256 ...)
+(FusedMultiplyAdd213Float64x8 ...) => (VFMADD213PD512 ...)
+(FusedMultiplyAdd231Float32x16 ...) => (VFMADD231PS512 ...)
+(FusedMultiplyAdd231Float32x4 ...) => (VFMADD231PS128 ...)
+(FusedMultiplyAdd231Float32x8 ...) => (VFMADD231PS256 ...)
+(FusedMultiplyAdd231Float64x2 ...) => (VFMADD231PD128 ...)
+(FusedMultiplyAdd231Float64x4 ...) => (VFMADD231PD256 ...)
+(FusedMultiplyAdd231Float64x8 ...) => (VFMADD231PD512 ...)
+(FusedMultiplyAddSub132Float32x16 ...) => (VFMADDSUB132PS512 ...)
+(FusedMultiplyAddSub132Float32x4 ...) => (VFMADDSUB132PS128 ...)
+(FusedMultiplyAddSub132Float32x8 ...) => (VFMADDSUB132PS256 ...)
+(FusedMultiplyAddSub132Float64x2 ...) => (VFMADDSUB132PD128 ...)
+(FusedMultiplyAddSub132Float64x4 ...) => (VFMADDSUB132PD256 ...)
+(FusedMultiplyAddSub132Float64x8 ...) => (VFMADDSUB132PD512 ...)
+(FusedMultiplyAddSub213Float32x16 ...) => (VFMADDSUB213PS512 ...)
+(FusedMultiplyAddSub213Float32x4 ...) => (VFMADDSUB213PS128 ...)
+(FusedMultiplyAddSub213Float32x8 ...) => (VFMADDSUB213PS256 ...)
+(FusedMultiplyAddSub213Float64x2 ...) => (VFMADDSUB213PD128 ...)
+(FusedMultiplyAddSub213Float64x4 ...) => (VFMADDSUB213PD256 ...)
+(FusedMultiplyAddSub213Float64x8 ...) => (VFMADDSUB213PD512 ...)
+(FusedMultiplyAddSub231Float32x16 ...) => (VFMADDSUB231PS512 ...)
+(FusedMultiplyAddSub231Float32x4 ...) => (VFMADDSUB231PS128 ...)
+(FusedMultiplyAddSub231Float32x8 ...) => (VFMADDSUB231PS256 ...)
+(FusedMultiplyAddSub231Float64x2 ...) => (VFMADDSUB231PD128 ...)
+(FusedMultiplyAddSub231Float64x4 ...) => (VFMADDSUB231PD256 ...)
+(FusedMultiplyAddSub231Float64x8 ...) => (VFMADDSUB231PD512 ...)
+(FusedMultiplySub132Float32x16 ...) => (VFMSUB132PS512 ...)
+(FusedMultiplySub132Float32x4 ...) => (VFMSUB132PS128 ...)
+(FusedMultiplySub132Float32x8 ...) => (VFMSUB132PS256 ...)
+(FusedMultiplySub132Float64x2 ...) => (VFMSUB132PD128 ...)
+(FusedMultiplySub132Float64x4 ...) => (VFMSUB132PD256 ...)
+(FusedMultiplySub132Float64x8 ...) => (VFMSUB132PD512 ...)
+(FusedMultiplySub213Float32x16 ...) => (VFMSUB213PS512 ...)
+(FusedMultiplySub213Float32x4 ...) => (VFMSUB213PS128 ...)
+(FusedMultiplySub213Float32x8 ...) => (VFMSUB213PS256 ...)
+(FusedMultiplySub213Float64x2 ...) => (VFMSUB213PD128 ...)
+(FusedMultiplySub213Float64x4 ...) => (VFMSUB213PD256 ...)
+(FusedMultiplySub213Float64x8 ...) => (VFMSUB213PD512 ...)
+(FusedMultiplySub231Float32x16 ...) => (VFMSUB231PS512 ...)
+(FusedMultiplySub231Float32x4 ...) => (VFMSUB231PS128 ...)
+(FusedMultiplySub231Float32x8 ...) => (VFMSUB231PS256 ...)
+(FusedMultiplySub231Float64x2 ...) => (VFMSUB231PD128 ...)
+(FusedMultiplySub231Float64x4 ...) => (VFMSUB231PD256 ...)
+(FusedMultiplySub231Float64x8 ...) => (VFMSUB231PD512 ...)
+(FusedMultiplySubAdd132Float32x16 ...) => (VFMSUBADD132PS512 ...)
+(FusedMultiplySubAdd132Float32x4 ...) => (VFMSUBADD132PS128 ...)
+(FusedMultiplySubAdd132Float32x8 ...) => (VFMSUBADD132PS256 ...)
+(FusedMultiplySubAdd132Float64x2 ...) => (VFMSUBADD132PD128 ...)
+(FusedMultiplySubAdd132Float64x4 ...) => (VFMSUBADD132PD256 ...)
+(FusedMultiplySubAdd132Float64x8 ...) => (VFMSUBADD132PD512 ...)
+(FusedMultiplySubAdd213Float32x16 ...) => (VFMSUBADD213PS512 ...)
+(FusedMultiplySubAdd213Float32x4 ...) => (VFMSUBADD213PS128 ...)
+(FusedMultiplySubAdd213Float32x8 ...) => (VFMSUBADD213PS256 ...)
+(FusedMultiplySubAdd213Float64x2 ...) => (VFMSUBADD213PD128 ...)
+(FusedMultiplySubAdd213Float64x4 ...) => (VFMSUBADD213PD256 ...)
+(FusedMultiplySubAdd213Float64x8 ...) => (VFMSUBADD213PD512 ...)
+(FusedMultiplySubAdd231Float32x16 ...) => (VFMSUBADD231PS512 ...)
+(FusedMultiplySubAdd231Float32x4 ...) => (VFMSUBADD231PS128 ...)
+(FusedMultiplySubAdd231Float32x8 ...) => (VFMSUBADD231PS256 ...)
+(FusedMultiplySubAdd231Float64x2 ...) => (VFMSUBADD231PD128 ...)
+(FusedMultiplySubAdd231Float64x4 ...) => (VFMSUBADD231PD256 ...)
+(FusedMultiplySubAdd231Float64x8 ...) => (VFMSUBADD231PD512 ...)
+(FusedNegativeMultiplyAdd132Float32x16 ...) => (VFNMADD132PS512 ...)
+(FusedNegativeMultiplyAdd132Float32x4 ...) => (VFNMADD132PS128 ...)
+(FusedNegativeMultiplyAdd132Float32x8 ...) => (VFNMADD132PS256 ...)
+(FusedNegativeMultiplyAdd132Float64x2 ...) => (VFNMADD132PD128 ...)
+(FusedNegativeMultiplyAdd132Float64x4 ...) => (VFNMADD132PD256 ...)
+(FusedNegativeMultiplyAdd132Float64x8 ...) => (VFNMADD132PD512 ...)
+(FusedNegativeMultiplyAdd213Float32x16 ...) => (VFNMADD213PS512 ...)
+(FusedNegativeMultiplyAdd213Float32x4 ...) => (VFNMADD213PS128 ...)
+(FusedNegativeMultiplyAdd213Float32x8 ...) => (VFNMADD213PS256 ...)
+(FusedNegativeMultiplyAdd213Float64x2 ...) => (VFNMADD213PD128 ...)
+(FusedNegativeMultiplyAdd213Float64x4 ...) => (VFNMADD213PD256 ...)
+(FusedNegativeMultiplyAdd213Float64x8 ...) => (VFNMADD213PD512 ...)
+(FusedNegativeMultiplyAdd231Float32x16 ...) => (VFNMADD231PS512 ...)
+(FusedNegativeMultiplyAdd231Float32x4 ...) => (VFNMADD231PS128 ...)
+(FusedNegativeMultiplyAdd231Float32x8 ...) => (VFNMADD231PS256 ...)
+(FusedNegativeMultiplyAdd231Float64x2 ...) => (VFNMADD231PD128 ...)
+(FusedNegativeMultiplyAdd231Float64x4 ...) => (VFNMADD231PD256 ...)
+(FusedNegativeMultiplyAdd231Float64x8 ...) => (VFNMADD231PD512 ...)
+(FusedNegativeMultiplySub132Float32x16 ...) => (VFNMSUB132PS512 ...)
+(FusedNegativeMultiplySub132Float32x4 ...) => (VFNMSUB132PS128 ...)
+(FusedNegativeMultiplySub132Float32x8 ...) => (VFNMSUB132PS256 ...)
+(FusedNegativeMultiplySub132Float64x2 ...) => (VFNMSUB132PD128 ...)
+(FusedNegativeMultiplySub132Float64x4 ...) => (VFNMSUB132PD256 ...)
+(FusedNegativeMultiplySub132Float64x8 ...) => (VFNMSUB132PD512 ...)
+(FusedNegativeMultiplySub213Float32x16 ...) => (VFNMSUB213PS512 ...)
+(FusedNegativeMultiplySub213Float32x4 ...) => (VFNMSUB213PS128 ...)
+(FusedNegativeMultiplySub213Float32x8 ...) => (VFNMSUB213PS256 ...)
+(FusedNegativeMultiplySub213Float64x2 ...) => (VFNMSUB213PD128 ...)
+(FusedNegativeMultiplySub213Float64x4 ...) => (VFNMSUB213PD256 ...)
+(FusedNegativeMultiplySub213Float64x8 ...) => (VFNMSUB213PD512 ...)
+(FusedNegativeMultiplySub231Float32x16 ...) => (VFNMSUB231PS512 ...)
+(FusedNegativeMultiplySub231Float32x4 ...) => (VFNMSUB231PS128 ...)
+(FusedNegativeMultiplySub231Float32x8 ...) => (VFNMSUB231PS256 ...)
+(FusedNegativeMultiplySub231Float64x2 ...) => (VFNMSUB231PD128 ...)
+(FusedNegativeMultiplySub231Float64x4 ...) => (VFNMSUB231PD256 ...)
+(FusedNegativeMultiplySub231Float64x8 ...) => (VFNMSUB231PD512 ...)
(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y))
(GreaterFloat32x4 x y) => (VCMPPS128 [6] x y)
(GreaterFloat32x8 x y) => (VCMPPS256 [6] x y)
(MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd132Float32x16 x y z mask) => (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd132Float32x4 x y z mask) => (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd132Float32x8 x y z mask) => (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd132Float64x2 x y z mask) => (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd132Float64x4 x y z mask) => (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd132Float64x8 x y z mask) => (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd213Float32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd213Float32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd213Float32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd213Float64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd213Float64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd213Float64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd231Float32x16 x y z mask) => (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd231Float32x4 x y z mask) => (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd231Float32x8 x y z mask) => (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd231Float64x2 x y z mask) => (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd231Float64x4 x y z mask) => (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAdd231Float64x8 x y z mask) => (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub132Float32x16 x y z mask) => (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub132Float32x4 x y z mask) => (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub132Float32x8 x y z mask) => (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub132Float64x2 x y z mask) => (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub132Float64x4 x y z mask) => (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub132Float64x8 x y z mask) => (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub213Float32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub213Float32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub213Float32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub213Float64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub213Float64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub213Float64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub231Float32x16 x y z mask) => (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub231Float32x4 x y z mask) => (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub231Float32x8 x y z mask) => (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub231Float64x2 x y z mask) => (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub231Float64x4 x y z mask) => (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSub231Float64x8 x y z mask) => (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub132Float32x16 x y z mask) => (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub132Float32x4 x y z mask) => (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub132Float32x8 x y z mask) => (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub132Float64x2 x y z mask) => (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub132Float64x4 x y z mask) => (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub132Float64x8 x y z mask) => (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub213Float32x16 x y z mask) => (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub213Float32x4 x y z mask) => (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub213Float32x8 x y z mask) => (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub213Float64x2 x y z mask) => (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub213Float64x4 x y z mask) => (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub213Float64x8 x y z mask) => (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub231Float32x16 x y z mask) => (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub231Float32x4 x y z mask) => (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub231Float32x8 x y z mask) => (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub231Float64x2 x y z mask) => (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub231Float64x4 x y z mask) => (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySub231Float64x8 x y z mask) => (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd132Float32x16 x y z mask) => (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd132Float32x4 x y z mask) => (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd132Float32x8 x y z mask) => (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd132Float64x2 x y z mask) => (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd132Float64x4 x y z mask) => (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd132Float64x8 x y z mask) => (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd213Float32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd213Float32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd213Float32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd213Float64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd213Float64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd213Float64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd231Float32x16 x y z mask) => (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd231Float32x4 x y z mask) => (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd231Float32x8 x y z mask) => (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd231Float64x2 x y z mask) => (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd231Float64x4 x y z mask) => (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAdd231Float64x8 x y z mask) => (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask) => (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask) => (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask) => (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask) => (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask) => (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask) => (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask) => (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask) => (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask) => (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask) => (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask) => (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask) => (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask) => (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask) => (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask) => (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask) => (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask) => (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask) => (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub132Float32x16 x y z mask) => (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub132Float32x4 x y z mask) => (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub132Float32x8 x y z mask) => (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub132Float64x2 x y z mask) => (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub132Float64x4 x y z mask) => (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub132Float64x8 x y z mask) => (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub213Float32x16 x y z mask) => (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub213Float32x4 x y z mask) => (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub213Float32x8 x y z mask) => (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub213Float64x2 x y z mask) => (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub213Float64x4 x y z mask) => (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub213Float64x8 x y z mask) => (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub231Float32x16 x y z mask) => (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub231Float32x4 x y z mask) => (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub231Float32x8 x y z mask) => (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub231Float64x2 x y z mask) => (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub231Float64x4 x y z mask) => (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedNegativeMultiplySub231Float64x8 x y z mask) => (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
{name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VFMADD132PS512", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADD231PS512", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB132PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB231PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB132PS512", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB213PS512", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB231PS512", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD132PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD231PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD132PS512", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD213PS512", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD231PS512", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB132PS512", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB213PS512", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB231PS512", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VFMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VFMADD132PS128", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADD231PS128", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB132PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB231PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB132PS128", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB213PS128", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB231PS128", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD132PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD231PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD132PS128", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD213PS128", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD231PS128", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB132PS128", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB213PS128", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB231PS128", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VFMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VFMADD132PS256", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADD231PS256", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB132PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB231PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB132PS256", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB213PS256", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB231PS256", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD132PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD231PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD132PS256", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD213PS256", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD231PS256", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB132PS256", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB213PS256", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB231PS256", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VFMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VMAXPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VFMADD132PD128", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADD231PD128", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB132PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB231PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB132PD128", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB213PD128", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB231PD128", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD132PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD231PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD132PD128", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD213PD128", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD231PD128", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB132PD128", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB213PD128", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB231PD128", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VFMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMADDSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFMSUBADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
+ {name: "VFNMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VFMADD132PD256", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADD231PD256", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB132PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB231PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB132PD256", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB213PD256", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB231PD256", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD132PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD231PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD132PD256", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD213PD256", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD231PD256", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB132PD256", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB213PD256", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB231PD256", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VFMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMADDSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFMSUBADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
+ {name: "VFNMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VFMADD132PD512", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADD231PD512", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB132PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB231PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB132PD512", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB213PD512", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB231PD512", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD132PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD231PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD132PD512", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD213PD512", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD231PD512", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB132PD512", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB213PD512", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB231PD512", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VFMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMADDSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFMSUBADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
+ {name: "VFNMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false},
{name: "DivFloat32x16", argLength: 2, commutative: false},
{name: "EqualFloat32x16", argLength: 2, commutative: true},
+ {name: "FusedMultiplyAdd132Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd213Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd231Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub132Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub213Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub231Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub132Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub213Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub231Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd132Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd213Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd231Float32x16", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd132Float32x16", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd213Float32x16", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd231Float32x16", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub132Float32x16", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub213Float32x16", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub231Float32x16", argLength: 3, commutative: false},
{name: "GreaterFloat32x16", argLength: 2, commutative: false},
{name: "GreaterEqualFloat32x16", argLength: 2, commutative: false},
{name: "IsNanFloat32x16", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false},
{name: "MaskedDivFloat32x16", argLength: 3, commutative: false},
{name: "MaskedEqualFloat32x16", argLength: 3, commutative: true},
+ {name: "MaskedFusedMultiplyAdd132Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd213Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd231Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub132Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub213Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub231Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub132Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub213Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub231Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd132Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd213Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd231Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd132Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd213Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd231Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub132Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub213Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub231Float32x16", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat32x16", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat32x16", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat32x16", argLength: 3, commutative: true},
{name: "DivFloat32x4", argLength: 2, commutative: false},
{name: "EqualFloat32x4", argLength: 2, commutative: true},
{name: "FloorFloat32x4", argLength: 1, commutative: false},
+ {name: "FusedMultiplyAdd132Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd213Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd231Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub132Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub213Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub231Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub132Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub213Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub231Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd132Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd213Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd231Float32x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd132Float32x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd213Float32x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd231Float32x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub132Float32x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub213Float32x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub231Float32x4", argLength: 3, commutative: false},
{name: "GreaterFloat32x4", argLength: 2, commutative: false},
{name: "GreaterEqualFloat32x4", argLength: 2, commutative: false},
{name: "IsNanFloat32x4", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false},
{name: "MaskedDivFloat32x4", argLength: 3, commutative: false},
{name: "MaskedEqualFloat32x4", argLength: 3, commutative: true},
+ {name: "MaskedFusedMultiplyAdd132Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd213Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd231Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub132Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub213Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub231Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub132Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub213Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub231Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd132Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd213Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd231Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd132Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd213Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd231Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub132Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub213Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub231Float32x4", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat32x4", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat32x4", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat32x4", argLength: 3, commutative: true},
{name: "DivFloat32x8", argLength: 2, commutative: false},
{name: "EqualFloat32x8", argLength: 2, commutative: true},
{name: "FloorFloat32x8", argLength: 1, commutative: false},
+ {name: "FusedMultiplyAdd132Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd213Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd231Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub132Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub213Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub231Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub132Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub213Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub231Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd132Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd213Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd231Float32x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd132Float32x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd213Float32x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd231Float32x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub132Float32x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub213Float32x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub231Float32x8", argLength: 3, commutative: false},
{name: "GreaterFloat32x8", argLength: 2, commutative: false},
{name: "GreaterEqualFloat32x8", argLength: 2, commutative: false},
{name: "IsNanFloat32x8", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false},
{name: "MaskedDivFloat32x8", argLength: 3, commutative: false},
{name: "MaskedEqualFloat32x8", argLength: 3, commutative: true},
+ {name: "MaskedFusedMultiplyAdd132Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd213Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd231Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub132Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub213Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub231Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub132Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub213Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub231Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd132Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd213Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd231Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd132Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd213Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd231Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub132Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub213Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub231Float32x8", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat32x8", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat32x8", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat32x8", argLength: 3, commutative: true},
{name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true},
{name: "EqualFloat64x2", argLength: 2, commutative: true},
{name: "FloorFloat64x2", argLength: 1, commutative: false},
+ {name: "FusedMultiplyAdd132Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd213Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd231Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub132Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub213Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub231Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub132Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub213Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub231Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd132Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd213Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd231Float64x2", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd132Float64x2", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd213Float64x2", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd231Float64x2", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub132Float64x2", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub213Float64x2", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub231Float64x2", argLength: 3, commutative: false},
{name: "GreaterFloat64x2", argLength: 2, commutative: false},
{name: "GreaterEqualFloat64x2", argLength: 2, commutative: false},
{name: "IsNanFloat64x2", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false},
{name: "MaskedDivFloat64x2", argLength: 3, commutative: false},
{name: "MaskedEqualFloat64x2", argLength: 3, commutative: true},
+ {name: "MaskedFusedMultiplyAdd132Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd213Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd231Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub132Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub213Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub231Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub132Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub213Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub231Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd132Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd213Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd231Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd132Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd213Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd231Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub132Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub213Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub231Float64x2", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat64x2", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat64x2", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat64x2", argLength: 3, commutative: true},
{name: "DivFloat64x4", argLength: 2, commutative: false},
{name: "EqualFloat64x4", argLength: 2, commutative: true},
{name: "FloorFloat64x4", argLength: 1, commutative: false},
+ {name: "FusedMultiplyAdd132Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd213Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd231Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub132Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub213Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub231Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub132Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub213Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub231Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd132Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd213Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd231Float64x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd132Float64x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd213Float64x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd231Float64x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub132Float64x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub213Float64x4", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub231Float64x4", argLength: 3, commutative: false},
{name: "GreaterFloat64x4", argLength: 2, commutative: false},
{name: "GreaterEqualFloat64x4", argLength: 2, commutative: false},
{name: "IsNanFloat64x4", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false},
{name: "MaskedDivFloat64x4", argLength: 3, commutative: false},
{name: "MaskedEqualFloat64x4", argLength: 3, commutative: true},
+ {name: "MaskedFusedMultiplyAdd132Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd213Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd231Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub132Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub213Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub231Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub132Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub213Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub231Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd132Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd213Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd231Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd132Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd213Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd231Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub132Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub213Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub231Float64x4", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat64x4", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat64x4", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat64x4", argLength: 3, commutative: true},
{name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false},
{name: "DivFloat64x8", argLength: 2, commutative: false},
{name: "EqualFloat64x8", argLength: 2, commutative: true},
+ {name: "FusedMultiplyAdd132Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd213Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAdd231Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub132Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub213Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSub231Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub132Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub213Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySub231Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd132Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd213Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAdd231Float64x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd132Float64x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd213Float64x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplyAdd231Float64x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub132Float64x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub213Float64x8", argLength: 3, commutative: false},
+ {name: "FusedNegativeMultiplySub231Float64x8", argLength: 3, commutative: false},
{name: "GreaterFloat64x8", argLength: 2, commutative: false},
{name: "GreaterEqualFloat64x8", argLength: 2, commutative: false},
{name: "IsNanFloat64x8", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false},
{name: "MaskedDivFloat64x8", argLength: 3, commutative: false},
{name: "MaskedEqualFloat64x8", argLength: 3, commutative: true},
+ {name: "MaskedFusedMultiplyAdd132Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd213Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAdd231Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub132Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub213Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSub231Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub132Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub213Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySub231Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd132Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd213Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAdd231Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd132Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd213Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplyAdd231Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub132Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub213Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedNegativeMultiplySub231Float64x8", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat64x8", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat64x8", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat64x8", argLength: 3, commutative: true},
OpAMD64VRCP14PS512
OpAMD64VRSQRT14PS512
OpAMD64VDIVPS512
+ OpAMD64VFMADD132PS512
+ OpAMD64VFMADD213PS512
+ OpAMD64VFMADD231PS512
+ OpAMD64VFMADDSUB132PS512
+ OpAMD64VFMADDSUB213PS512
+ OpAMD64VFMADDSUB231PS512
+ OpAMD64VFMSUB132PS512
+ OpAMD64VFMSUB213PS512
+ OpAMD64VFMSUB231PS512
+ OpAMD64VFMSUBADD132PS512
+ OpAMD64VFMSUBADD213PS512
+ OpAMD64VFMSUBADD231PS512
+ OpAMD64VFNMADD132PS512
+ OpAMD64VFNMADD213PS512
+ OpAMD64VFNMADD231PS512
+ OpAMD64VFNMSUB132PS512
+ OpAMD64VFNMSUB213PS512
+ OpAMD64VFNMSUB231PS512
OpAMD64VADDPSMasked512
OpAMD64VANDPSMasked512
OpAMD64VANDNPSMasked512
OpAMD64VRCP14PSMasked512
OpAMD64VRSQRT14PSMasked512
OpAMD64VDIVPSMasked512
+ OpAMD64VFMADD132PSMasked512
+ OpAMD64VFMADD213PSMasked512
+ OpAMD64VFMADD231PSMasked512
+ OpAMD64VFMADDSUB132PSMasked512
+ OpAMD64VFMADDSUB213PSMasked512
+ OpAMD64VFMADDSUB231PSMasked512
+ OpAMD64VFMSUB132PSMasked512
+ OpAMD64VFMSUB213PSMasked512
+ OpAMD64VFMSUB231PSMasked512
+ OpAMD64VFMSUBADD132PSMasked512
+ OpAMD64VFMSUBADD213PSMasked512
+ OpAMD64VFMSUBADD231PSMasked512
+ OpAMD64VFNMADD132PSMasked512
+ OpAMD64VFNMADD213PSMasked512
+ OpAMD64VFNMADD231PSMasked512
+ OpAMD64VFNMSUB132PSMasked512
+ OpAMD64VFNMSUB213PSMasked512
+ OpAMD64VFNMSUB231PSMasked512
OpAMD64VMAXPSMasked512
OpAMD64VMINPSMasked512
OpAMD64VMULPSMasked512
OpAMD64VRCP14PS128
OpAMD64VRSQRTPS128
OpAMD64VDIVPS128
+ OpAMD64VFMADD132PS128
+ OpAMD64VFMADD213PS128
+ OpAMD64VFMADD231PS128
+ OpAMD64VFMADDSUB132PS128
+ OpAMD64VFMADDSUB213PS128
+ OpAMD64VFMADDSUB231PS128
+ OpAMD64VFMSUB132PS128
+ OpAMD64VFMSUB213PS128
+ OpAMD64VFMSUB231PS128
+ OpAMD64VFMSUBADD132PS128
+ OpAMD64VFMSUBADD213PS128
+ OpAMD64VFMSUBADD231PS128
+ OpAMD64VFNMADD132PS128
+ OpAMD64VFNMADD213PS128
+ OpAMD64VFNMADD231PS128
+ OpAMD64VFNMSUB132PS128
+ OpAMD64VFNMSUB213PS128
+ OpAMD64VFNMSUB231PS128
OpAMD64VADDPSMasked128
OpAMD64VANDPSMasked128
OpAMD64VANDNPSMasked128
OpAMD64VRCP14PSMasked128
OpAMD64VRSQRT14PSMasked128
OpAMD64VDIVPSMasked128
+ OpAMD64VFMADD132PSMasked128
+ OpAMD64VFMADD213PSMasked128
+ OpAMD64VFMADD231PSMasked128
+ OpAMD64VFMADDSUB132PSMasked128
+ OpAMD64VFMADDSUB213PSMasked128
+ OpAMD64VFMADDSUB231PSMasked128
+ OpAMD64VFMSUB132PSMasked128
+ OpAMD64VFMSUB213PSMasked128
+ OpAMD64VFMSUB231PSMasked128
+ OpAMD64VFMSUBADD132PSMasked128
+ OpAMD64VFMSUBADD213PSMasked128
+ OpAMD64VFMSUBADD231PSMasked128
+ OpAMD64VFNMADD132PSMasked128
+ OpAMD64VFNMADD213PSMasked128
+ OpAMD64VFNMADD231PSMasked128
+ OpAMD64VFNMSUB132PSMasked128
+ OpAMD64VFNMSUB213PSMasked128
+ OpAMD64VFNMSUB231PSMasked128
OpAMD64VMAXPSMasked128
OpAMD64VMINPSMasked128
OpAMD64VMULPSMasked128
OpAMD64VRCP14PS256
OpAMD64VRSQRTPS256
OpAMD64VDIVPS256
+ OpAMD64VFMADD132PS256
+ OpAMD64VFMADD213PS256
+ OpAMD64VFMADD231PS256
+ OpAMD64VFMADDSUB132PS256
+ OpAMD64VFMADDSUB213PS256
+ OpAMD64VFMADDSUB231PS256
+ OpAMD64VFMSUB132PS256
+ OpAMD64VFMSUB213PS256
+ OpAMD64VFMSUB231PS256
+ OpAMD64VFMSUBADD132PS256
+ OpAMD64VFMSUBADD213PS256
+ OpAMD64VFMSUBADD231PS256
+ OpAMD64VFNMADD132PS256
+ OpAMD64VFNMADD213PS256
+ OpAMD64VFNMADD231PS256
+ OpAMD64VFNMSUB132PS256
+ OpAMD64VFNMSUB213PS256
+ OpAMD64VFNMSUB231PS256
OpAMD64VADDPSMasked256
OpAMD64VANDPSMasked256
OpAMD64VANDNPSMasked256
OpAMD64VRCP14PSMasked256
OpAMD64VRSQRT14PSMasked256
OpAMD64VDIVPSMasked256
+ OpAMD64VFMADD132PSMasked256
+ OpAMD64VFMADD213PSMasked256
+ OpAMD64VFMADD231PSMasked256
+ OpAMD64VFMADDSUB132PSMasked256
+ OpAMD64VFMADDSUB213PSMasked256
+ OpAMD64VFMADDSUB231PSMasked256
+ OpAMD64VFMSUB132PSMasked256
+ OpAMD64VFMSUB213PSMasked256
+ OpAMD64VFMSUB231PSMasked256
+ OpAMD64VFMSUBADD132PSMasked256
+ OpAMD64VFMSUBADD213PSMasked256
+ OpAMD64VFMSUBADD231PSMasked256
+ OpAMD64VFNMADD132PSMasked256
+ OpAMD64VFNMADD213PSMasked256
+ OpAMD64VFNMADD231PSMasked256
+ OpAMD64VFNMSUB132PSMasked256
+ OpAMD64VFNMSUB213PSMasked256
+ OpAMD64VFNMSUB231PSMasked256
OpAMD64VMAXPSMasked256
OpAMD64VMINPSMasked256
OpAMD64VMULPSMasked256
OpAMD64VRCP14PD128
OpAMD64VRSQRT14PD128
OpAMD64VDIVPD128
+ OpAMD64VFMADD132PD128
+ OpAMD64VFMADD213PD128
+ OpAMD64VFMADD231PD128
+ OpAMD64VFMADDSUB132PD128
+ OpAMD64VFMADDSUB213PD128
+ OpAMD64VFMADDSUB231PD128
+ OpAMD64VFMSUB132PD128
+ OpAMD64VFMSUB213PD128
+ OpAMD64VFMSUB231PD128
+ OpAMD64VFMSUBADD132PD128
+ OpAMD64VFMSUBADD213PD128
+ OpAMD64VFMSUBADD231PD128
+ OpAMD64VFNMADD132PD128
+ OpAMD64VFNMADD213PD128
+ OpAMD64VFNMADD231PD128
+ OpAMD64VFNMSUB132PD128
+ OpAMD64VFNMSUB213PD128
+ OpAMD64VFNMSUB231PD128
OpAMD64VADDPDMasked128
OpAMD64VANDPDMasked128
OpAMD64VANDNPDMasked128
OpAMD64VRCP14PDMasked128
OpAMD64VRSQRT14PDMasked128
OpAMD64VDIVPDMasked128
+ OpAMD64VFMADD132PDMasked128
+ OpAMD64VFMADD213PDMasked128
+ OpAMD64VFMADD231PDMasked128
+ OpAMD64VFMADDSUB132PDMasked128
+ OpAMD64VFMADDSUB213PDMasked128
+ OpAMD64VFMADDSUB231PDMasked128
+ OpAMD64VFMSUB132PDMasked128
+ OpAMD64VFMSUB213PDMasked128
+ OpAMD64VFMSUB231PDMasked128
+ OpAMD64VFMSUBADD132PDMasked128
+ OpAMD64VFMSUBADD213PDMasked128
+ OpAMD64VFMSUBADD231PDMasked128
+ OpAMD64VFNMADD132PDMasked128
+ OpAMD64VFNMADD213PDMasked128
+ OpAMD64VFNMADD231PDMasked128
+ OpAMD64VFNMSUB132PDMasked128
+ OpAMD64VFNMSUB213PDMasked128
+ OpAMD64VFNMSUB231PDMasked128
OpAMD64VMAXPDMasked128
OpAMD64VMINPDMasked128
OpAMD64VMULPDMasked128
OpAMD64VRCP14PD256
OpAMD64VRSQRT14PD256
OpAMD64VDIVPD256
+ OpAMD64VFMADD132PD256
+ OpAMD64VFMADD213PD256
+ OpAMD64VFMADD231PD256
+ OpAMD64VFMADDSUB132PD256
+ OpAMD64VFMADDSUB213PD256
+ OpAMD64VFMADDSUB231PD256
+ OpAMD64VFMSUB132PD256
+ OpAMD64VFMSUB213PD256
+ OpAMD64VFMSUB231PD256
+ OpAMD64VFMSUBADD132PD256
+ OpAMD64VFMSUBADD213PD256
+ OpAMD64VFMSUBADD231PD256
+ OpAMD64VFNMADD132PD256
+ OpAMD64VFNMADD213PD256
+ OpAMD64VFNMADD231PD256
+ OpAMD64VFNMSUB132PD256
+ OpAMD64VFNMSUB213PD256
+ OpAMD64VFNMSUB231PD256
OpAMD64VADDPDMasked256
OpAMD64VANDPDMasked256
OpAMD64VANDNPDMasked256
OpAMD64VRCP14PDMasked256
OpAMD64VRSQRT14PDMasked256
OpAMD64VDIVPDMasked256
+ OpAMD64VFMADD132PDMasked256
+ OpAMD64VFMADD213PDMasked256
+ OpAMD64VFMADD231PDMasked256
+ OpAMD64VFMADDSUB132PDMasked256
+ OpAMD64VFMADDSUB213PDMasked256
+ OpAMD64VFMADDSUB231PDMasked256
+ OpAMD64VFMSUB132PDMasked256
+ OpAMD64VFMSUB213PDMasked256
+ OpAMD64VFMSUB231PDMasked256
+ OpAMD64VFMSUBADD132PDMasked256
+ OpAMD64VFMSUBADD213PDMasked256
+ OpAMD64VFMSUBADD231PDMasked256
+ OpAMD64VFNMADD132PDMasked256
+ OpAMD64VFNMADD213PDMasked256
+ OpAMD64VFNMADD231PDMasked256
+ OpAMD64VFNMSUB132PDMasked256
+ OpAMD64VFNMSUB213PDMasked256
+ OpAMD64VFNMSUB231PDMasked256
OpAMD64VMAXPDMasked256
OpAMD64VMINPDMasked256
OpAMD64VMULPDMasked256
OpAMD64VRCP14PD512
OpAMD64VRSQRT14PD512
OpAMD64VDIVPD512
+ OpAMD64VFMADD132PD512
+ OpAMD64VFMADD213PD512
+ OpAMD64VFMADD231PD512
+ OpAMD64VFMADDSUB132PD512
+ OpAMD64VFMADDSUB213PD512
+ OpAMD64VFMADDSUB231PD512
+ OpAMD64VFMSUB132PD512
+ OpAMD64VFMSUB213PD512
+ OpAMD64VFMSUB231PD512
+ OpAMD64VFMSUBADD132PD512
+ OpAMD64VFMSUBADD213PD512
+ OpAMD64VFMSUBADD231PD512
+ OpAMD64VFNMADD132PD512
+ OpAMD64VFNMADD213PD512
+ OpAMD64VFNMADD231PD512
+ OpAMD64VFNMSUB132PD512
+ OpAMD64VFNMSUB213PD512
+ OpAMD64VFNMSUB231PD512
OpAMD64VADDPDMasked512
OpAMD64VANDPDMasked512
OpAMD64VANDNPDMasked512
OpAMD64VRCP14PDMasked512
OpAMD64VRSQRT14PDMasked512
OpAMD64VDIVPDMasked512
+ OpAMD64VFMADD132PDMasked512
+ OpAMD64VFMADD213PDMasked512
+ OpAMD64VFMADD231PDMasked512
+ OpAMD64VFMADDSUB132PDMasked512
+ OpAMD64VFMADDSUB213PDMasked512
+ OpAMD64VFMADDSUB231PDMasked512
+ OpAMD64VFMSUB132PDMasked512
+ OpAMD64VFMSUB213PDMasked512
+ OpAMD64VFMSUB231PDMasked512
+ OpAMD64VFMSUBADD132PDMasked512
+ OpAMD64VFMSUBADD213PDMasked512
+ OpAMD64VFMSUBADD231PDMasked512
+ OpAMD64VFNMADD132PDMasked512
+ OpAMD64VFNMADD213PDMasked512
+ OpAMD64VFNMADD231PDMasked512
+ OpAMD64VFNMSUB132PDMasked512
+ OpAMD64VFNMSUB213PDMasked512
+ OpAMD64VFNMSUB231PDMasked512
OpAMD64VMAXPDMasked512
OpAMD64VMINPDMasked512
OpAMD64VMULPDMasked512
OpApproximateReciprocalOfSqrtFloat32x16
OpDivFloat32x16
OpEqualFloat32x16
+ OpFusedMultiplyAdd132Float32x16
+ OpFusedMultiplyAdd213Float32x16
+ OpFusedMultiplyAdd231Float32x16
+ OpFusedMultiplyAddSub132Float32x16
+ OpFusedMultiplyAddSub213Float32x16
+ OpFusedMultiplyAddSub231Float32x16
+ OpFusedMultiplySub132Float32x16
+ OpFusedMultiplySub213Float32x16
+ OpFusedMultiplySub231Float32x16
+ OpFusedMultiplySubAdd132Float32x16
+ OpFusedMultiplySubAdd213Float32x16
+ OpFusedMultiplySubAdd231Float32x16
+ OpFusedNegativeMultiplyAdd132Float32x16
+ OpFusedNegativeMultiplyAdd213Float32x16
+ OpFusedNegativeMultiplyAdd231Float32x16
+ OpFusedNegativeMultiplySub132Float32x16
+ OpFusedNegativeMultiplySub213Float32x16
+ OpFusedNegativeMultiplySub231Float32x16
OpGreaterFloat32x16
OpGreaterEqualFloat32x16
OpIsNanFloat32x16
OpMaskedApproximateReciprocalOfSqrtFloat32x16
OpMaskedDivFloat32x16
OpMaskedEqualFloat32x16
+ OpMaskedFusedMultiplyAdd132Float32x16
+ OpMaskedFusedMultiplyAdd213Float32x16
+ OpMaskedFusedMultiplyAdd231Float32x16
+ OpMaskedFusedMultiplyAddSub132Float32x16
+ OpMaskedFusedMultiplyAddSub213Float32x16
+ OpMaskedFusedMultiplyAddSub231Float32x16
+ OpMaskedFusedMultiplySub132Float32x16
+ OpMaskedFusedMultiplySub213Float32x16
+ OpMaskedFusedMultiplySub231Float32x16
+ OpMaskedFusedMultiplySubAdd132Float32x16
+ OpMaskedFusedMultiplySubAdd213Float32x16
+ OpMaskedFusedMultiplySubAdd231Float32x16
+ OpMaskedFusedNegativeMultiplyAdd132Float32x16
+ OpMaskedFusedNegativeMultiplyAdd213Float32x16
+ OpMaskedFusedNegativeMultiplyAdd231Float32x16
+ OpMaskedFusedNegativeMultiplySub132Float32x16
+ OpMaskedFusedNegativeMultiplySub213Float32x16
+ OpMaskedFusedNegativeMultiplySub231Float32x16
OpMaskedGreaterFloat32x16
OpMaskedGreaterEqualFloat32x16
OpMaskedIsNanFloat32x16
OpDivFloat32x4
OpEqualFloat32x4
OpFloorFloat32x4
+ OpFusedMultiplyAdd132Float32x4
+ OpFusedMultiplyAdd213Float32x4
+ OpFusedMultiplyAdd231Float32x4
+ OpFusedMultiplyAddSub132Float32x4
+ OpFusedMultiplyAddSub213Float32x4
+ OpFusedMultiplyAddSub231Float32x4
+ OpFusedMultiplySub132Float32x4
+ OpFusedMultiplySub213Float32x4
+ OpFusedMultiplySub231Float32x4
+ OpFusedMultiplySubAdd132Float32x4
+ OpFusedMultiplySubAdd213Float32x4
+ OpFusedMultiplySubAdd231Float32x4
+ OpFusedNegativeMultiplyAdd132Float32x4
+ OpFusedNegativeMultiplyAdd213Float32x4
+ OpFusedNegativeMultiplyAdd231Float32x4
+ OpFusedNegativeMultiplySub132Float32x4
+ OpFusedNegativeMultiplySub213Float32x4
+ OpFusedNegativeMultiplySub231Float32x4
OpGreaterFloat32x4
OpGreaterEqualFloat32x4
OpIsNanFloat32x4
OpMaskedApproximateReciprocalOfSqrtFloat32x4
OpMaskedDivFloat32x4
OpMaskedEqualFloat32x4
+ OpMaskedFusedMultiplyAdd132Float32x4
+ OpMaskedFusedMultiplyAdd213Float32x4
+ OpMaskedFusedMultiplyAdd231Float32x4
+ OpMaskedFusedMultiplyAddSub132Float32x4
+ OpMaskedFusedMultiplyAddSub213Float32x4
+ OpMaskedFusedMultiplyAddSub231Float32x4
+ OpMaskedFusedMultiplySub132Float32x4
+ OpMaskedFusedMultiplySub213Float32x4
+ OpMaskedFusedMultiplySub231Float32x4
+ OpMaskedFusedMultiplySubAdd132Float32x4
+ OpMaskedFusedMultiplySubAdd213Float32x4
+ OpMaskedFusedMultiplySubAdd231Float32x4
+ OpMaskedFusedNegativeMultiplyAdd132Float32x4
+ OpMaskedFusedNegativeMultiplyAdd213Float32x4
+ OpMaskedFusedNegativeMultiplyAdd231Float32x4
+ OpMaskedFusedNegativeMultiplySub132Float32x4
+ OpMaskedFusedNegativeMultiplySub213Float32x4
+ OpMaskedFusedNegativeMultiplySub231Float32x4
OpMaskedGreaterFloat32x4
OpMaskedGreaterEqualFloat32x4
OpMaskedIsNanFloat32x4
OpDivFloat32x8
OpEqualFloat32x8
OpFloorFloat32x8
+ OpFusedMultiplyAdd132Float32x8
+ OpFusedMultiplyAdd213Float32x8
+ OpFusedMultiplyAdd231Float32x8
+ OpFusedMultiplyAddSub132Float32x8
+ OpFusedMultiplyAddSub213Float32x8
+ OpFusedMultiplyAddSub231Float32x8
+ OpFusedMultiplySub132Float32x8
+ OpFusedMultiplySub213Float32x8
+ OpFusedMultiplySub231Float32x8
+ OpFusedMultiplySubAdd132Float32x8
+ OpFusedMultiplySubAdd213Float32x8
+ OpFusedMultiplySubAdd231Float32x8
+ OpFusedNegativeMultiplyAdd132Float32x8
+ OpFusedNegativeMultiplyAdd213Float32x8
+ OpFusedNegativeMultiplyAdd231Float32x8
+ OpFusedNegativeMultiplySub132Float32x8
+ OpFusedNegativeMultiplySub213Float32x8
+ OpFusedNegativeMultiplySub231Float32x8
OpGreaterFloat32x8
OpGreaterEqualFloat32x8
OpIsNanFloat32x8
OpMaskedApproximateReciprocalOfSqrtFloat32x8
OpMaskedDivFloat32x8
OpMaskedEqualFloat32x8
+ OpMaskedFusedMultiplyAdd132Float32x8
+ OpMaskedFusedMultiplyAdd213Float32x8
+ OpMaskedFusedMultiplyAdd231Float32x8
+ OpMaskedFusedMultiplyAddSub132Float32x8
+ OpMaskedFusedMultiplyAddSub213Float32x8
+ OpMaskedFusedMultiplyAddSub231Float32x8
+ OpMaskedFusedMultiplySub132Float32x8
+ OpMaskedFusedMultiplySub213Float32x8
+ OpMaskedFusedMultiplySub231Float32x8
+ OpMaskedFusedMultiplySubAdd132Float32x8
+ OpMaskedFusedMultiplySubAdd213Float32x8
+ OpMaskedFusedMultiplySubAdd231Float32x8
+ OpMaskedFusedNegativeMultiplyAdd132Float32x8
+ OpMaskedFusedNegativeMultiplyAdd213Float32x8
+ OpMaskedFusedNegativeMultiplyAdd231Float32x8
+ OpMaskedFusedNegativeMultiplySub132Float32x8
+ OpMaskedFusedNegativeMultiplySub213Float32x8
+ OpMaskedFusedNegativeMultiplySub231Float32x8
OpMaskedGreaterFloat32x8
OpMaskedGreaterEqualFloat32x8
OpMaskedIsNanFloat32x8
OpDotProdBroadcastFloat64x2
OpEqualFloat64x2
OpFloorFloat64x2
+ OpFusedMultiplyAdd132Float64x2
+ OpFusedMultiplyAdd213Float64x2
+ OpFusedMultiplyAdd231Float64x2
+ OpFusedMultiplyAddSub132Float64x2
+ OpFusedMultiplyAddSub213Float64x2
+ OpFusedMultiplyAddSub231Float64x2
+ OpFusedMultiplySub132Float64x2
+ OpFusedMultiplySub213Float64x2
+ OpFusedMultiplySub231Float64x2
+ OpFusedMultiplySubAdd132Float64x2
+ OpFusedMultiplySubAdd213Float64x2
+ OpFusedMultiplySubAdd231Float64x2
+ OpFusedNegativeMultiplyAdd132Float64x2
+ OpFusedNegativeMultiplyAdd213Float64x2
+ OpFusedNegativeMultiplyAdd231Float64x2
+ OpFusedNegativeMultiplySub132Float64x2
+ OpFusedNegativeMultiplySub213Float64x2
+ OpFusedNegativeMultiplySub231Float64x2
OpGreaterFloat64x2
OpGreaterEqualFloat64x2
OpIsNanFloat64x2
OpMaskedApproximateReciprocalOfSqrtFloat64x2
OpMaskedDivFloat64x2
OpMaskedEqualFloat64x2
+ OpMaskedFusedMultiplyAdd132Float64x2
+ OpMaskedFusedMultiplyAdd213Float64x2
+ OpMaskedFusedMultiplyAdd231Float64x2
+ OpMaskedFusedMultiplyAddSub132Float64x2
+ OpMaskedFusedMultiplyAddSub213Float64x2
+ OpMaskedFusedMultiplyAddSub231Float64x2
+ OpMaskedFusedMultiplySub132Float64x2
+ OpMaskedFusedMultiplySub213Float64x2
+ OpMaskedFusedMultiplySub231Float64x2
+ OpMaskedFusedMultiplySubAdd132Float64x2
+ OpMaskedFusedMultiplySubAdd213Float64x2
+ OpMaskedFusedMultiplySubAdd231Float64x2
+ OpMaskedFusedNegativeMultiplyAdd132Float64x2
+ OpMaskedFusedNegativeMultiplyAdd213Float64x2
+ OpMaskedFusedNegativeMultiplyAdd231Float64x2
+ OpMaskedFusedNegativeMultiplySub132Float64x2
+ OpMaskedFusedNegativeMultiplySub213Float64x2
+ OpMaskedFusedNegativeMultiplySub231Float64x2
OpMaskedGreaterFloat64x2
OpMaskedGreaterEqualFloat64x2
OpMaskedIsNanFloat64x2
OpDivFloat64x4
OpEqualFloat64x4
OpFloorFloat64x4
+ OpFusedMultiplyAdd132Float64x4
+ OpFusedMultiplyAdd213Float64x4
+ OpFusedMultiplyAdd231Float64x4
+ OpFusedMultiplyAddSub132Float64x4
+ OpFusedMultiplyAddSub213Float64x4
+ OpFusedMultiplyAddSub231Float64x4
+ OpFusedMultiplySub132Float64x4
+ OpFusedMultiplySub213Float64x4
+ OpFusedMultiplySub231Float64x4
+ OpFusedMultiplySubAdd132Float64x4
+ OpFusedMultiplySubAdd213Float64x4
+ OpFusedMultiplySubAdd231Float64x4
+ OpFusedNegativeMultiplyAdd132Float64x4
+ OpFusedNegativeMultiplyAdd213Float64x4
+ OpFusedNegativeMultiplyAdd231Float64x4
+ OpFusedNegativeMultiplySub132Float64x4
+ OpFusedNegativeMultiplySub213Float64x4
+ OpFusedNegativeMultiplySub231Float64x4
OpGreaterFloat64x4
OpGreaterEqualFloat64x4
OpIsNanFloat64x4
OpMaskedApproximateReciprocalOfSqrtFloat64x4
OpMaskedDivFloat64x4
OpMaskedEqualFloat64x4
+ OpMaskedFusedMultiplyAdd132Float64x4
+ OpMaskedFusedMultiplyAdd213Float64x4
+ OpMaskedFusedMultiplyAdd231Float64x4
+ OpMaskedFusedMultiplyAddSub132Float64x4
+ OpMaskedFusedMultiplyAddSub213Float64x4
+ OpMaskedFusedMultiplyAddSub231Float64x4
+ OpMaskedFusedMultiplySub132Float64x4
+ OpMaskedFusedMultiplySub213Float64x4
+ OpMaskedFusedMultiplySub231Float64x4
+ OpMaskedFusedMultiplySubAdd132Float64x4
+ OpMaskedFusedMultiplySubAdd213Float64x4
+ OpMaskedFusedMultiplySubAdd231Float64x4
+ OpMaskedFusedNegativeMultiplyAdd132Float64x4
+ OpMaskedFusedNegativeMultiplyAdd213Float64x4
+ OpMaskedFusedNegativeMultiplyAdd231Float64x4
+ OpMaskedFusedNegativeMultiplySub132Float64x4
+ OpMaskedFusedNegativeMultiplySub213Float64x4
+ OpMaskedFusedNegativeMultiplySub231Float64x4
OpMaskedGreaterFloat64x4
OpMaskedGreaterEqualFloat64x4
OpMaskedIsNanFloat64x4
OpApproximateReciprocalOfSqrtFloat64x8
OpDivFloat64x8
OpEqualFloat64x8
+ OpFusedMultiplyAdd132Float64x8
+ OpFusedMultiplyAdd213Float64x8
+ OpFusedMultiplyAdd231Float64x8
+ OpFusedMultiplyAddSub132Float64x8
+ OpFusedMultiplyAddSub213Float64x8
+ OpFusedMultiplyAddSub231Float64x8
+ OpFusedMultiplySub132Float64x8
+ OpFusedMultiplySub213Float64x8
+ OpFusedMultiplySub231Float64x8
+ OpFusedMultiplySubAdd132Float64x8
+ OpFusedMultiplySubAdd213Float64x8
+ OpFusedMultiplySubAdd231Float64x8
+ OpFusedNegativeMultiplyAdd132Float64x8
+ OpFusedNegativeMultiplyAdd213Float64x8
+ OpFusedNegativeMultiplyAdd231Float64x8
+ OpFusedNegativeMultiplySub132Float64x8
+ OpFusedNegativeMultiplySub213Float64x8
+ OpFusedNegativeMultiplySub231Float64x8
OpGreaterFloat64x8
OpGreaterEqualFloat64x8
OpIsNanFloat64x8
OpMaskedApproximateReciprocalOfSqrtFloat64x8
OpMaskedDivFloat64x8
OpMaskedEqualFloat64x8
+ OpMaskedFusedMultiplyAdd132Float64x8
+ OpMaskedFusedMultiplyAdd213Float64x8
+ OpMaskedFusedMultiplyAdd231Float64x8
+ OpMaskedFusedMultiplyAddSub132Float64x8
+ OpMaskedFusedMultiplyAddSub213Float64x8
+ OpMaskedFusedMultiplyAddSub231Float64x8
+ OpMaskedFusedMultiplySub132Float64x8
+ OpMaskedFusedMultiplySub213Float64x8
+ OpMaskedFusedMultiplySub231Float64x8
+ OpMaskedFusedMultiplySubAdd132Float64x8
+ OpMaskedFusedMultiplySubAdd213Float64x8
+ OpMaskedFusedMultiplySubAdd231Float64x8
+ OpMaskedFusedNegativeMultiplyAdd132Float64x8
+ OpMaskedFusedNegativeMultiplyAdd213Float64x8
+ OpMaskedFusedNegativeMultiplyAdd231Float64x8
+ OpMaskedFusedNegativeMultiplySub132Float64x8
+ OpMaskedFusedNegativeMultiplySub213Float64x8
+ OpMaskedFusedNegativeMultiplySub231Float64x8
OpMaskedGreaterFloat64x8
OpMaskedGreaterEqualFloat64x8
OpMaskedIsNanFloat64x8
},
},
{
- name: "VADDPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVADDPS,
+ name: "VFMADD132PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPS,
+ name: "VFMADD213PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDNPS,
+ name: "VFMADD231PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PSMasked512",
- argLen: 2,
- asm: x86.AVRCP14PS,
+ name: "VFMADDSUB132PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PS,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRSQRT14PSMasked512",
- argLen: 2,
- asm: x86.AVRSQRT14PS,
+ name: "VFMADDSUB213PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PS,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VDIVPSMasked512",
- argLen: 3,
- asm: x86.AVDIVPS,
+ name: "VFMADDSUB231PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMAXPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVMAXPS,
+ name: "VFMSUB132PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMINPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVMINPS,
+ name: "VFMSUB213PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMULPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVMULPS,
+ name: "VFMSUB231PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSCALEFPSMasked512",
- argLen: 3,
- asm: x86.AVSCALEFPS,
+ name: "VFMSUBADD132PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VORPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVORPS,
+ name: "VFMSUBADD213PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSQRTPSMasked512",
- argLen: 2,
- asm: x86.AVSQRTPS,
+ name: "VFMSUBADD231PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PS,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VXORPSMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVXORPS,
+ name: "VFNMADD132PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMAXPS512",
- argLen: 2,
- commutative: true,
- asm: x86.AVMAXPS,
+ name: "VFNMADD213PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMINPS512",
- argLen: 2,
- commutative: true,
- asm: x86.AVMINPS,
+ name: "VFNMADD231PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMULPS512",
- argLen: 2,
- commutative: true,
- asm: x86.AVMULPS,
+ name: "VFNMSUB132PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSCALEFPS512",
- argLen: 2,
- asm: x86.AVSCALEFPS,
+ name: "VFNMSUB213PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VORPS512",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPS,
+ name: "VFNMSUB231PS512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSQRTPS512",
- argLen: 1,
- asm: x86.AVSQRTPS,
+ name: "VADDPSMasked512",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVADDPS,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VXORPS512",
- argLen: 2,
+ name: "VANDPSMasked512",
+ argLen: 3,
commutative: true,
- asm: x86.AVXORPS,
+ asm: x86.AVANDPS,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPS128",
- argLen: 2,
+ name: "VANDNPSMasked512",
+ argLen: 3,
commutative: true,
- asm: x86.AVADDPS,
+ asm: x86.AVANDNPS,
reg: regInfo{
inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRCP14PSMasked512",
+ argLen: 2,
+ asm: x86.AVRCP14PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDSUBPS128",
+ name: "VRSQRT14PSMasked512",
argLen: 2,
- asm: x86.AVADDSUBPS,
+ asm: x86.AVRSQRT14PS,
reg: regInfo{
inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VDIVPSMasked512",
+ argLen: 3,
+ asm: x86.AVDIVPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPS128",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPS,
+ name: "VFMADD132PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD213PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PS,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD231PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPS128",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDNPS,
+ name: "VFMADDSUB132PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PS,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB213PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PS128",
- argLen: 1,
- asm: x86.AVRCP14PS,
+ name: "VFMADDSUB231PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PS,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
+ },
+ },
+ {
+ name: "VFMSUB132PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
{
- name: "VRSQRTPS128",
- argLen: 1,
- asm: x86.AVRSQRTPS,
+ name: "VFMSUB213PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PS,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
+ },
+ },
+ {
+ name: "VFMSUB231PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
{
- name: "VDIVPS128",
- argLen: 2,
- asm: x86.AVDIVPS,
+ name: "VFMSUBADD132PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PS,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD213PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPSMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVADDPS,
+ name: "VFMSUBADD231PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPSMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPS,
+ name: "VFNMADD132PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPSMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDNPS,
+ name: "VFNMADD213PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PSMasked128",
- argLen: 2,
- asm: x86.AVRCP14PS,
+ name: "VFNMADD231PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PS,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRSQRT14PSMasked128",
- argLen: 2,
- asm: x86.AVRSQRT14PS,
+ name: "VFNMSUB132PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PS,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VDIVPSMasked128",
- argLen: 3,
- asm: x86.AVDIVPS,
+ name: "VFNMSUB213PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMAXPSMasked128",
+ name: "VFNMSUB231PSMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMAXPSMasked512",
argLen: 3,
commutative: true,
asm: x86.AVMAXPS,
},
},
{
- name: "VMINPSMasked128",
+ name: "VMINPSMasked512",
argLen: 3,
commutative: true,
asm: x86.AVMINPS,
},
},
{
- name: "VMULPSMasked128",
+ name: "VMULPSMasked512",
argLen: 3,
commutative: true,
asm: x86.AVMULPS,
},
},
{
- name: "VSCALEFPSMasked128",
+ name: "VSCALEFPSMasked512",
argLen: 3,
asm: x86.AVSCALEFPS,
reg: regInfo{
},
},
{
- name: "VORPSMasked128",
+ name: "VORPSMasked512",
argLen: 3,
commutative: true,
asm: x86.AVORPS,
},
},
{
- name: "VSQRTPSMasked128",
+ name: "VSQRTPSMasked512",
argLen: 2,
asm: x86.AVSQRTPS,
reg: regInfo{
},
},
{
- name: "VXORPSMasked128",
+ name: "VXORPSMasked512",
argLen: 3,
commutative: true,
asm: x86.AVXORPS,
},
},
{
- name: "VMAXPS128",
+ name: "VMAXPS512",
argLen: 2,
commutative: true,
asm: x86.AVMAXPS,
},
},
{
- name: "VMINPS128",
+ name: "VMINPS512",
argLen: 2,
commutative: true,
asm: x86.AVMINPS,
},
},
{
- name: "VMULPS128",
+ name: "VMULPS512",
argLen: 2,
commutative: true,
asm: x86.AVMULPS,
},
},
{
- name: "VSCALEFPS128",
+ name: "VSCALEFPS512",
argLen: 2,
asm: x86.AVSCALEFPS,
reg: regInfo{
},
},
{
- name: "VORPS128",
+ name: "VORPS512",
argLen: 2,
commutative: true,
asm: x86.AVORPS,
},
},
{
- name: "VHADDPS128",
- argLen: 2,
- asm: x86.AVHADDPS,
+ name: "VSQRTPS512",
+ argLen: 1,
+ asm: x86.AVSQRTPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VHSUBPS128",
- argLen: 2,
- asm: x86.AVHSUBPS,
+ name: "VXORPS512",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVXORPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSQRTPS128",
- argLen: 1,
- asm: x86.AVSQRTPS,
+ name: "VADDPS128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVADDPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VXORPS128",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPS,
+ name: "VADDSUBPS128",
+ argLen: 2,
+ asm: x86.AVADDSUBPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPS256",
+ name: "VANDPS128",
argLen: 2,
commutative: true,
- asm: x86.AVADDPS,
+ asm: x86.AVANDPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDSUBPS256",
- argLen: 2,
- asm: x86.AVADDSUBPS,
- reg: regInfo{
+ name: "VANDNPS128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVANDNPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRCP14PS128",
+ argLen: 1,
+ asm: x86.AVRCP14PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRSQRTPS128",
+ argLen: 1,
+ asm: x86.AVRSQRTPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VDIVPS128",
+ argLen: 2,
+ asm: x86.AVDIVPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD132PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD213PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD231PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB132PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB213PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB231PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB132PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB213PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB231PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD132PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD213PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD231PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD132PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD213PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD231PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB132PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB213PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB231PS128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VADDPSMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVADDPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDPSMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDNPSMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDNPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRCP14PSMasked128",
+ argLen: 2,
+ asm: x86.AVRCP14PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRSQRT14PSMasked128",
+ argLen: 2,
+ asm: x86.AVRSQRT14PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VDIVPSMasked128",
+ argLen: 3,
+ asm: x86.AVDIVPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD132PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD213PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD231PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB132PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB213PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB231PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB132PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB213PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB231PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD132PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD213PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD231PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD132PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD213PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD231PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB132PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB213PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB231PSMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMAXPSMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMAXPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMINPSMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMINPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMULPSMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMULPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSCALEFPSMasked128",
+ argLen: 3,
+ asm: x86.AVSCALEFPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VORPSMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVORPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSQRTPSMasked128",
+ argLen: 2,
+ asm: x86.AVSQRTPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VXORPSMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVXORPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMAXPS128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMAXPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMINPS128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMINPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMULPS128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMULPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSCALEFPS128",
+ argLen: 2,
+ asm: x86.AVSCALEFPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VORPS128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVORPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VHADDPS128",
+ argLen: 2,
+ asm: x86.AVHADDPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VHSUBPS128",
+ argLen: 2,
+ asm: x86.AVHSUBPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSQRTPS128",
+ argLen: 1,
+ asm: x86.AVSQRTPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VXORPS128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVXORPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VADDPS256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVADDPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VADDSUBPS256",
+ argLen: 2,
+ asm: x86.AVADDSUBPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDPS256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVANDPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDNPS256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVANDNPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRCP14PS256",
+ argLen: 1,
+ asm: x86.AVRCP14PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRSQRTPS256",
+ argLen: 1,
+ asm: x86.AVRSQRTPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VDIVPS256",
+ argLen: 2,
+ asm: x86.AVDIVPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD132PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD213PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD231PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB132PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB213PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB231PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB132PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB213PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB231PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD132PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD213PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD231PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD132PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD213PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD231PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB132PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB213PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB231PS256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VADDPSMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVADDPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDPSMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDNPSMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDNPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRCP14PSMasked256",
+ argLen: 2,
+ asm: x86.AVRCP14PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRSQRT14PSMasked256",
+ argLen: 2,
+ asm: x86.AVRSQRT14PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VDIVPSMasked256",
+ argLen: 3,
+ asm: x86.AVDIVPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD132PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD213PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD231PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB132PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB213PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB231PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB132PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB213PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB231PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD132PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD213PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD231PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD132PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD213PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD231PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB132PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB213PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB231PSMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMAXPSMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMAXPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMINPSMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMINPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMULPSMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMULPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSCALEFPSMasked256",
+ argLen: 3,
+ asm: x86.AVSCALEFPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VORPSMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVORPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSQRTPSMasked256",
+ argLen: 2,
+ asm: x86.AVSQRTPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VXORPSMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVXORPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMAXPS256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMAXPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMINPS256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMINPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMULPS256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMULPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSCALEFPS256",
+ argLen: 2,
+ asm: x86.AVSCALEFPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VORPS256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVORPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VHADDPS256",
+ argLen: 2,
+ asm: x86.AVHADDPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VHSUBPS256",
+ argLen: 2,
+ asm: x86.AVHSUBPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSQRTPS256",
+ argLen: 1,
+ asm: x86.AVSQRTPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VXORPS256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVXORPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VADDPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVADDPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VADDSUBPD128",
+ argLen: 2,
+ asm: x86.AVADDSUBPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVANDPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDNPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVANDNPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRCP14PD128",
+ argLen: 1,
+ asm: x86.AVRCP14PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRSQRT14PD128",
+ argLen: 1,
+ asm: x86.AVRSQRT14PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VDIVPD128",
+ argLen: 2,
+ asm: x86.AVDIVPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD132PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD213PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD231PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB132PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB213PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB231PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB132PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB213PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB231PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD132PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD213PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD231PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD132PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD213PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD231PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB132PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB213PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB231PD128",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VADDPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVADDPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDNPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDNPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRCP14PDMasked128",
+ argLen: 2,
+ asm: x86.AVRCP14PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRSQRT14PDMasked128",
+ argLen: 2,
+ asm: x86.AVRSQRT14PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VDIVPDMasked128",
+ argLen: 3,
+ asm: x86.AVDIVPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD132PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD213PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD231PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB132PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB213PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB231PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB132PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB213PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB231PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD132PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD213PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD231PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD132PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD213PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMADD231PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB132PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB213PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB231PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMAXPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMAXPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMINPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMINPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMULPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMULPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSCALEFPDMasked128",
+ argLen: 3,
+ asm: x86.AVSCALEFPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VORPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVORPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSQRTPDMasked128",
+ argLen: 2,
+ asm: x86.AVSQRTPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VXORPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVXORPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMAXPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMAXPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMINPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMINPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMULPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMULPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSCALEFPD128",
+ argLen: 2,
+ asm: x86.AVSCALEFPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VORPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVORPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VHADDPD128",
+ argLen: 2,
+ asm: x86.AVHADDPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VHSUBPD128",
+ argLen: 2,
+ asm: x86.AVHSUBPD,
+ reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPS,
+ name: "VSQRTPD128",
+ argLen: 1,
+ asm: x86.AVSQRTPD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPS256",
+ name: "VXORPD128",
argLen: 2,
commutative: true,
- asm: x86.AVANDNPS,
+ asm: x86.AVXORPD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PS256",
- argLen: 1,
- asm: x86.AVRCP14PS,
+ name: "VADDPD256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVADDPD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRSQRTPS256",
- argLen: 1,
- asm: x86.AVRSQRTPS,
+ name: "VADDSUBPD256",
+ argLen: 2,
+ asm: x86.AVADDSUBPD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VDIVPS256",
- argLen: 2,
- asm: x86.AVDIVPS,
+ name: "VANDPD256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVANDPD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPSMasked256",
- argLen: 3,
+ name: "VANDNPD256",
+ argLen: 2,
commutative: true,
- asm: x86.AVADDPS,
+ asm: x86.AVANDNPD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPS,
+ name: "VRCP14PD256",
+ argLen: 1,
+ asm: x86.AVRCP14PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDNPS,
+ name: "VRSQRT14PD256",
+ argLen: 1,
+ asm: x86.AVRSQRT14PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PSMasked256",
+ name: "VDIVPD256",
argLen: 2,
- asm: x86.AVRCP14PS,
+ asm: x86.AVDIVPD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRSQRT14PSMasked256",
- argLen: 2,
- asm: x86.AVRSQRT14PS,
+ name: "VFMADD132PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VDIVPSMasked256",
- argLen: 3,
- asm: x86.AVDIVPS,
+ name: "VFMADD213PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMAXPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVMAXPS,
+ name: "VFMADD231PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMINPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVMINPS,
+ name: "VFMADDSUB132PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMULPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVMULPS,
+ name: "VFMADDSUB213PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSCALEFPSMasked256",
- argLen: 3,
- asm: x86.AVSCALEFPS,
+ name: "VFMADDSUB231PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VORPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVORPS,
+ name: "VFMSUB132PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSQRTPSMasked256",
- argLen: 2,
- asm: x86.AVSQRTPS,
+ name: "VFMSUB213PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VXORPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVXORPS,
+ name: "VFMSUB231PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMAXPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVMAXPS,
+ name: "VFMSUBADD132PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMINPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVMINPS,
+ name: "VFMSUBADD213PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMULPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVMULPS,
+ name: "VFMSUBADD231PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSCALEFPS256",
- argLen: 2,
- asm: x86.AVSCALEFPS,
+ name: "VFNMADD132PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VORPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPS,
+ name: "VFNMADD213PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VHADDPS256",
- argLen: 2,
- asm: x86.AVHADDPS,
+ name: "VFNMADD231PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VHSUBPS256",
- argLen: 2,
- asm: x86.AVHSUBPS,
+ name: "VFNMSUB132PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSQRTPS256",
- argLen: 1,
- asm: x86.AVSQRTPS,
+ name: "VFNMSUB213PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VXORPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPS,
+ name: "VFNMSUB231PD256",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPD128",
- argLen: 2,
+ name: "VADDPDMasked256",
+ argLen: 3,
commutative: true,
asm: x86.AVADDPD,
reg: regInfo{
inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDPDMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDSUBPD128",
+ name: "VANDNPDMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDNPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRCP14PDMasked256",
argLen: 2,
- asm: x86.AVADDSUBPD,
+ asm: x86.AVRCP14PD,
reg: regInfo{
inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRSQRT14PDMasked256",
+ argLen: 2,
+ asm: x86.AVRSQRT14PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPD128",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPD,
+ name: "VDIVPDMasked256",
+ argLen: 3,
+ asm: x86.AVDIVPD,
reg: regInfo{
inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD132PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPD128",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDNPD,
+ name: "VFMADD213PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD231PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PD128",
- argLen: 1,
- asm: x86.AVRCP14PD,
+ name: "VFMADDSUB132PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
+ },
+ },
+ {
+ name: "VFMADDSUB213PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
{
- name: "VRSQRT14PD128",
- argLen: 1,
- asm: x86.AVRSQRT14PD,
+ name: "VFMADDSUB231PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUB132PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
+ },
+ },
+ {
+ name: "VFMSUB213PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
{
- name: "VDIVPD128",
- argLen: 2,
- asm: x86.AVDIVPD,
+ name: "VFMSUB231PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD132PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPDMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVADDPD,
+ name: "VFMSUBADD213PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPDMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPD,
+ name: "VFMSUBADD231PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPDMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDNPD,
+ name: "VFNMADD132PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PDMasked128",
- argLen: 2,
- asm: x86.AVRCP14PD,
+ name: "VFNMADD213PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRSQRT14PDMasked128",
- argLen: 2,
- asm: x86.AVRSQRT14PD,
+ name: "VFNMADD231PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VDIVPDMasked128",
- argLen: 3,
- asm: x86.AVDIVPD,
+ name: "VFNMSUB132PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB213PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFNMSUB231PDMasked256",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMAXPDMasked128",
+ name: "VMAXPDMasked256",
argLen: 3,
commutative: true,
asm: x86.AVMAXPD,
},
},
{
- name: "VMINPDMasked128",
+ name: "VMINPDMasked256",
argLen: 3,
commutative: true,
asm: x86.AVMINPD,
},
},
{
- name: "VMULPDMasked128",
+ name: "VMULPDMasked256",
argLen: 3,
commutative: true,
asm: x86.AVMULPD,
},
},
{
- name: "VSCALEFPDMasked128",
+ name: "VSCALEFPDMasked256",
argLen: 3,
asm: x86.AVSCALEFPD,
reg: regInfo{
},
},
{
- name: "VORPDMasked128",
+ name: "VORPDMasked256",
argLen: 3,
commutative: true,
asm: x86.AVORPD,
},
},
{
- name: "VSQRTPDMasked128",
+ name: "VSQRTPDMasked256",
argLen: 2,
asm: x86.AVSQRTPD,
reg: regInfo{
},
},
{
- name: "VXORPDMasked128",
+ name: "VXORPDMasked256",
argLen: 3,
commutative: true,
asm: x86.AVXORPD,
},
},
{
- name: "VMAXPD128",
+ name: "VMAXPD256",
argLen: 2,
commutative: true,
asm: x86.AVMAXPD,
},
},
{
- name: "VMINPD128",
+ name: "VMINPD256",
argLen: 2,
commutative: true,
asm: x86.AVMINPD,
},
},
{
- name: "VMULPD128",
+ name: "VMULPD256",
argLen: 2,
commutative: true,
asm: x86.AVMULPD,
},
},
{
- name: "VSCALEFPD128",
+ name: "VSCALEFPD256",
argLen: 2,
asm: x86.AVSCALEFPD,
reg: regInfo{
},
},
{
- name: "VORPD128",
+ name: "VORPD256",
argLen: 2,
commutative: true,
asm: x86.AVORPD,
},
},
{
- name: "VHADDPD128",
+ name: "VHADDPD256",
argLen: 2,
asm: x86.AVHADDPD,
reg: regInfo{
},
},
{
- name: "VHSUBPD128",
+ name: "VHSUBPD256",
argLen: 2,
asm: x86.AVHSUBPD,
reg: regInfo{
},
},
{
- name: "VSQRTPD128",
+ name: "VSQRTPD256",
argLen: 1,
asm: x86.AVSQRTPD,
reg: regInfo{
},
},
{
- name: "VXORPD128",
+ name: "VXORPD256",
argLen: 2,
commutative: true,
asm: x86.AVXORPD,
},
},
{
- name: "VADDPD256",
+ name: "VADDPD512",
argLen: 2,
commutative: true,
asm: x86.AVADDPD,
},
},
{
- name: "VADDSUBPD256",
- argLen: 2,
- asm: x86.AVADDSUBPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDPD256",
+ name: "VANDPD512",
argLen: 2,
commutative: true,
asm: x86.AVANDPD,
},
},
{
- name: "VANDNPD256",
+ name: "VANDNPD512",
argLen: 2,
commutative: true,
asm: x86.AVANDNPD,
},
},
{
- name: "VRCP14PD256",
+ name: "VRCP14PD512",
argLen: 1,
asm: x86.AVRCP14PD,
reg: regInfo{
},
},
{
- name: "VRSQRT14PD256",
+ name: "VRSQRT14PD512",
argLen: 1,
asm: x86.AVRSQRT14PD,
reg: regInfo{
},
},
{
- name: "VDIVPD256",
+ name: "VDIVPD512",
argLen: 2,
asm: x86.AVDIVPD,
reg: regInfo{
},
},
{
- name: "VADDPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVADDPD,
+ name: "VFMADD132PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPD,
+ name: "VFMADD213PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDNPD,
+ name: "VFMADD231PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PDMasked256",
- argLen: 2,
- asm: x86.AVRCP14PD,
+ name: "VFMADDSUB132PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRSQRT14PDMasked256",
- argLen: 2,
- asm: x86.AVRSQRT14PD,
+ name: "VFMADDSUB213PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VDIVPDMasked256",
- argLen: 3,
- asm: x86.AVDIVPD,
+ name: "VFMADDSUB231PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMAXPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVMAXPD,
+ name: "VFMSUB132PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMINPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVMINPD,
+ name: "VFMSUB213PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMULPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVMULPD,
+ name: "VFMSUB231PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSCALEFPDMasked256",
- argLen: 3,
- asm: x86.AVSCALEFPD,
+ name: "VFMSUBADD132PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VORPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVORPD,
+ name: "VFMSUBADD213PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSQRTPDMasked256",
- argLen: 2,
- asm: x86.AVSQRTPD,
+ name: "VFMSUBADD231PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VXORPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVXORPD,
+ name: "VFNMADD132PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMAXPD256",
- argLen: 2,
- commutative: true,
- asm: x86.AVMAXPD,
+ name: "VFNMADD213PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMINPD256",
- argLen: 2,
- commutative: true,
- asm: x86.AVMINPD,
+ name: "VFNMADD231PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMULPD256",
- argLen: 2,
- commutative: true,
- asm: x86.AVMULPD,
+ name: "VFNMSUB132PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSCALEFPD256",
- argLen: 2,
- asm: x86.AVSCALEFPD,
+ name: "VFNMSUB213PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VORPD256",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPD,
+ name: "VFNMSUB231PD512",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VHADDPD256",
- argLen: 2,
- asm: x86.AVHADDPD,
+ name: "VADDPDMasked512",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVADDPD,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VHSUBPD256",
- argLen: 2,
- asm: x86.AVHSUBPD,
+ name: "VANDPDMasked512",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDPD,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSQRTPD256",
- argLen: 1,
- asm: x86.AVSQRTPD,
+ name: "VANDNPDMasked512",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDNPD,
reg: regInfo{
inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
+ },
+ },
+ {
+ name: "VRCP14PDMasked512",
+ argLen: 2,
+ asm: x86.AVRCP14PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
{
- name: "VXORPD256",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPD,
+ name: "VRSQRT14PDMasked512",
+ argLen: 2,
+ asm: x86.AVRSQRT14PD,
reg: regInfo{
inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VDIVPDMasked512",
+ argLen: 3,
+ asm: x86.AVDIVPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPD512",
- argLen: 2,
- commutative: true,
- asm: x86.AVADDPD,
+ name: "VFMADD132PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD132PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD213PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPD512",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPD,
+ name: "VFMADD231PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADD231PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB132PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPD512",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDNPD,
+ name: "VFMADDSUB213PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB231PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PD512",
- argLen: 1,
- asm: x86.AVRCP14PD,
+ name: "VFMSUB132PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB132PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
+ },
+ },
+ {
+ name: "VFMSUB213PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
{
- name: "VRSQRT14PD512",
- argLen: 1,
- asm: x86.AVRSQRT14PD,
+ name: "VFMSUB231PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUB231PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
+ },
+ },
+ {
+ name: "VFMSUBADD132PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD132PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
{
- name: "VDIVPD512",
- argLen: 2,
- asm: x86.AVDIVPD,
+ name: "VFMSUBADD213PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD213PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMSUBADD231PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMSUBADD231PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPDMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVADDPD,
+ name: "VFNMADD132PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD132PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPDMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPD,
+ name: "VFNMADD213PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD213PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPDMasked512",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDNPD,
+ name: "VFNMADD231PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMADD231PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PDMasked512",
- argLen: 2,
- asm: x86.AVRCP14PD,
+ name: "VFNMSUB132PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB132PD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRSQRT14PDMasked512",
- argLen: 2,
- asm: x86.AVRSQRT14PD,
+ name: "VFNMSUB213PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB213PD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VDIVPDMasked512",
- argLen: 3,
- asm: x86.AVDIVPD,
+ name: "VFNMSUB231PDMasked512",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFNMSUB231PD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
commutative: true,
generic: true,
},
+ {
+ name: "FusedMultiplyAdd132Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd213Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd231Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub132Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub213Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub231Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub132Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub213Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub231Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd132Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd213Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd231Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd132Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd213Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd231Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub132Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub213Float32x16",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub231Float32x16",
+ argLen: 3,
+ generic: true,
+ },
{
name: "GreaterFloat32x16",
argLen: 2,
commutative: true,
generic: true,
},
+ {
+ name: "MaskedFusedMultiplyAdd132Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd213Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd231Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub132Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub213Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub231Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub132Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub213Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub231Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd132Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd213Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd231Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd132Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd213Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd231Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub132Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub213Float32x16",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub231Float32x16",
+ argLen: 4,
+ generic: true,
+ },
{
name: "MaskedGreaterFloat32x16",
argLen: 3,
argLen: 1,
generic: true,
},
+ {
+ name: "FusedMultiplyAdd132Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd213Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd231Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub132Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub213Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub231Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub132Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub213Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub231Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd132Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd213Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd231Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd132Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd213Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd231Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub132Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub213Float32x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub231Float32x4",
+ argLen: 3,
+ generic: true,
+ },
{
name: "GreaterFloat32x4",
argLen: 2,
commutative: true,
generic: true,
},
+ {
+ name: "MaskedFusedMultiplyAdd132Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd213Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd231Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub132Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub213Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub231Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub132Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub213Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub231Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd132Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd213Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd231Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd132Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd213Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd231Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub132Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub213Float32x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub231Float32x4",
+ argLen: 4,
+ generic: true,
+ },
{
name: "MaskedGreaterFloat32x4",
argLen: 3,
generic: true,
},
{
- name: "FloorFloat32x8",
- argLen: 1,
+ name: "FloorFloat32x8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd132Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd213Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd231Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub132Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub213Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub231Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub132Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub213Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub231Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd132Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd213Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd231Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd132Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd213Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd231Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub132Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub213Float32x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub231Float32x8",
+ argLen: 3,
generic: true,
},
{
commutative: true,
generic: true,
},
+ {
+ name: "MaskedFusedMultiplyAdd132Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd213Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd231Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub132Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub213Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub231Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub132Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub213Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub231Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd132Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd213Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd231Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd132Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd213Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd231Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub132Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub213Float32x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub231Float32x8",
+ argLen: 4,
+ generic: true,
+ },
{
name: "MaskedGreaterFloat32x8",
argLen: 3,
argLen: 1,
generic: true,
},
+ {
+ name: "FusedMultiplyAdd132Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd213Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd231Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub132Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub213Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub231Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub132Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub213Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub231Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd132Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd213Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd231Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd132Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd213Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd231Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub132Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub213Float64x2",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub231Float64x2",
+ argLen: 3,
+ generic: true,
+ },
{
name: "GreaterFloat64x2",
argLen: 2,
commutative: true,
generic: true,
},
+ {
+ name: "MaskedFusedMultiplyAdd132Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd213Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd231Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub132Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub213Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub231Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub132Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub213Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub231Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd132Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd213Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd231Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd132Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd213Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd231Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub132Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub213Float64x2",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub231Float64x2",
+ argLen: 4,
+ generic: true,
+ },
{
name: "MaskedGreaterFloat64x2",
argLen: 3,
argLen: 1,
generic: true,
},
+ {
+ name: "FusedMultiplyAdd132Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd213Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd231Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub132Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub213Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub231Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub132Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub213Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub231Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd132Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd213Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd231Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd132Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd213Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd231Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub132Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub213Float64x4",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub231Float64x4",
+ argLen: 3,
+ generic: true,
+ },
{
name: "GreaterFloat64x4",
argLen: 2,
commutative: true,
generic: true,
},
+ {
+ name: "MaskedFusedMultiplyAdd132Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd213Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd231Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub132Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub213Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub231Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub132Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub213Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub231Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd132Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd213Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd231Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd132Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd213Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd231Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub132Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub213Float64x4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub231Float64x4",
+ argLen: 4,
+ generic: true,
+ },
{
name: "MaskedGreaterFloat64x4",
argLen: 3,
commutative: true,
generic: true,
},
+ {
+ name: "FusedMultiplyAdd132Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd213Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAdd231Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub132Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub213Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplyAddSub231Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub132Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub213Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySub231Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd132Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd213Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedMultiplySubAdd231Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd132Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd213Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplyAdd231Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub132Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub213Float64x8",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "FusedNegativeMultiplySub231Float64x8",
+ argLen: 3,
+ generic: true,
+ },
{
name: "GreaterFloat64x8",
argLen: 2,
commutative: true,
generic: true,
},
+ {
+ name: "MaskedFusedMultiplyAdd132Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd213Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAdd231Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub132Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub213Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplyAddSub231Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub132Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub213Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySub231Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd132Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd213Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedMultiplySubAdd231Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd132Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd213Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplyAdd231Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub132Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub213Float64x8",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "MaskedFusedNegativeMultiplySub231Float64x8",
+ argLen: 4,
+ generic: true,
+ },
{
name: "MaskedGreaterFloat64x8",
argLen: 3,
return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v)
case OpFloorWithPrecisionFloat64x8:
return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v)
+ case OpFusedMultiplyAdd132Float32x16:
+ v.Op = OpAMD64VFMADD132PS512
+ return true
+ case OpFusedMultiplyAdd132Float32x4:
+ v.Op = OpAMD64VFMADD132PS128
+ return true
+ case OpFusedMultiplyAdd132Float32x8:
+ v.Op = OpAMD64VFMADD132PS256
+ return true
+ case OpFusedMultiplyAdd132Float64x2:
+ v.Op = OpAMD64VFMADD132PD128
+ return true
+ case OpFusedMultiplyAdd132Float64x4:
+ v.Op = OpAMD64VFMADD132PD256
+ return true
+ case OpFusedMultiplyAdd132Float64x8:
+ v.Op = OpAMD64VFMADD132PD512
+ return true
+ case OpFusedMultiplyAdd213Float32x16:
+ v.Op = OpAMD64VFMADD213PS512
+ return true
+ case OpFusedMultiplyAdd213Float32x4:
+ v.Op = OpAMD64VFMADD213PS128
+ return true
+ case OpFusedMultiplyAdd213Float32x8:
+ v.Op = OpAMD64VFMADD213PS256
+ return true
+ case OpFusedMultiplyAdd213Float64x2:
+ v.Op = OpAMD64VFMADD213PD128
+ return true
+ case OpFusedMultiplyAdd213Float64x4:
+ v.Op = OpAMD64VFMADD213PD256
+ return true
+ case OpFusedMultiplyAdd213Float64x8:
+ v.Op = OpAMD64VFMADD213PD512
+ return true
+ case OpFusedMultiplyAdd231Float32x16:
+ v.Op = OpAMD64VFMADD231PS512
+ return true
+ case OpFusedMultiplyAdd231Float32x4:
+ v.Op = OpAMD64VFMADD231PS128
+ return true
+ case OpFusedMultiplyAdd231Float32x8:
+ v.Op = OpAMD64VFMADD231PS256
+ return true
+ case OpFusedMultiplyAdd231Float64x2:
+ v.Op = OpAMD64VFMADD231PD128
+ return true
+ case OpFusedMultiplyAdd231Float64x4:
+ v.Op = OpAMD64VFMADD231PD256
+ return true
+ case OpFusedMultiplyAdd231Float64x8:
+ v.Op = OpAMD64VFMADD231PD512
+ return true
+ case OpFusedMultiplyAddSub132Float32x16:
+ v.Op = OpAMD64VFMADDSUB132PS512
+ return true
+ case OpFusedMultiplyAddSub132Float32x4:
+ v.Op = OpAMD64VFMADDSUB132PS128
+ return true
+ case OpFusedMultiplyAddSub132Float32x8:
+ v.Op = OpAMD64VFMADDSUB132PS256
+ return true
+ case OpFusedMultiplyAddSub132Float64x2:
+ v.Op = OpAMD64VFMADDSUB132PD128
+ return true
+ case OpFusedMultiplyAddSub132Float64x4:
+ v.Op = OpAMD64VFMADDSUB132PD256
+ return true
+ case OpFusedMultiplyAddSub132Float64x8:
+ v.Op = OpAMD64VFMADDSUB132PD512
+ return true
+ case OpFusedMultiplyAddSub213Float32x16:
+ v.Op = OpAMD64VFMADDSUB213PS512
+ return true
+ case OpFusedMultiplyAddSub213Float32x4:
+ v.Op = OpAMD64VFMADDSUB213PS128
+ return true
+ case OpFusedMultiplyAddSub213Float32x8:
+ v.Op = OpAMD64VFMADDSUB213PS256
+ return true
+ case OpFusedMultiplyAddSub213Float64x2:
+ v.Op = OpAMD64VFMADDSUB213PD128
+ return true
+ case OpFusedMultiplyAddSub213Float64x4:
+ v.Op = OpAMD64VFMADDSUB213PD256
+ return true
+ case OpFusedMultiplyAddSub213Float64x8:
+ v.Op = OpAMD64VFMADDSUB213PD512
+ return true
+ case OpFusedMultiplyAddSub231Float32x16:
+ v.Op = OpAMD64VFMADDSUB231PS512
+ return true
+ case OpFusedMultiplyAddSub231Float32x4:
+ v.Op = OpAMD64VFMADDSUB231PS128
+ return true
+ case OpFusedMultiplyAddSub231Float32x8:
+ v.Op = OpAMD64VFMADDSUB231PS256
+ return true
+ case OpFusedMultiplyAddSub231Float64x2:
+ v.Op = OpAMD64VFMADDSUB231PD128
+ return true
+ case OpFusedMultiplyAddSub231Float64x4:
+ v.Op = OpAMD64VFMADDSUB231PD256
+ return true
+ case OpFusedMultiplyAddSub231Float64x8:
+ v.Op = OpAMD64VFMADDSUB231PD512
+ return true
+ case OpFusedMultiplySub132Float32x16:
+ v.Op = OpAMD64VFMSUB132PS512
+ return true
+ case OpFusedMultiplySub132Float32x4:
+ v.Op = OpAMD64VFMSUB132PS128
+ return true
+ case OpFusedMultiplySub132Float32x8:
+ v.Op = OpAMD64VFMSUB132PS256
+ return true
+ case OpFusedMultiplySub132Float64x2:
+ v.Op = OpAMD64VFMSUB132PD128
+ return true
+ case OpFusedMultiplySub132Float64x4:
+ v.Op = OpAMD64VFMSUB132PD256
+ return true
+ case OpFusedMultiplySub132Float64x8:
+ v.Op = OpAMD64VFMSUB132PD512
+ return true
+ case OpFusedMultiplySub213Float32x16:
+ v.Op = OpAMD64VFMSUB213PS512
+ return true
+ case OpFusedMultiplySub213Float32x4:
+ v.Op = OpAMD64VFMSUB213PS128
+ return true
+ case OpFusedMultiplySub213Float32x8:
+ v.Op = OpAMD64VFMSUB213PS256
+ return true
+ case OpFusedMultiplySub213Float64x2:
+ v.Op = OpAMD64VFMSUB213PD128
+ return true
+ case OpFusedMultiplySub213Float64x4:
+ v.Op = OpAMD64VFMSUB213PD256
+ return true
+ case OpFusedMultiplySub213Float64x8:
+ v.Op = OpAMD64VFMSUB213PD512
+ return true
+ case OpFusedMultiplySub231Float32x16:
+ v.Op = OpAMD64VFMSUB231PS512
+ return true
+ case OpFusedMultiplySub231Float32x4:
+ v.Op = OpAMD64VFMSUB231PS128
+ return true
+ case OpFusedMultiplySub231Float32x8:
+ v.Op = OpAMD64VFMSUB231PS256
+ return true
+ case OpFusedMultiplySub231Float64x2:
+ v.Op = OpAMD64VFMSUB231PD128
+ return true
+ case OpFusedMultiplySub231Float64x4:
+ v.Op = OpAMD64VFMSUB231PD256
+ return true
+ case OpFusedMultiplySub231Float64x8:
+ v.Op = OpAMD64VFMSUB231PD512
+ return true
+ case OpFusedMultiplySubAdd132Float32x16:
+ v.Op = OpAMD64VFMSUBADD132PS512
+ return true
+ case OpFusedMultiplySubAdd132Float32x4:
+ v.Op = OpAMD64VFMSUBADD132PS128
+ return true
+ case OpFusedMultiplySubAdd132Float32x8:
+ v.Op = OpAMD64VFMSUBADD132PS256
+ return true
+ case OpFusedMultiplySubAdd132Float64x2:
+ v.Op = OpAMD64VFMSUBADD132PD128
+ return true
+ case OpFusedMultiplySubAdd132Float64x4:
+ v.Op = OpAMD64VFMSUBADD132PD256
+ return true
+ case OpFusedMultiplySubAdd132Float64x8:
+ v.Op = OpAMD64VFMSUBADD132PD512
+ return true
+ case OpFusedMultiplySubAdd213Float32x16:
+ v.Op = OpAMD64VFMSUBADD213PS512
+ return true
+ case OpFusedMultiplySubAdd213Float32x4:
+ v.Op = OpAMD64VFMSUBADD213PS128
+ return true
+ case OpFusedMultiplySubAdd213Float32x8:
+ v.Op = OpAMD64VFMSUBADD213PS256
+ return true
+ case OpFusedMultiplySubAdd213Float64x2:
+ v.Op = OpAMD64VFMSUBADD213PD128
+ return true
+ case OpFusedMultiplySubAdd213Float64x4:
+ v.Op = OpAMD64VFMSUBADD213PD256
+ return true
+ case OpFusedMultiplySubAdd213Float64x8:
+ v.Op = OpAMD64VFMSUBADD213PD512
+ return true
+ case OpFusedMultiplySubAdd231Float32x16:
+ v.Op = OpAMD64VFMSUBADD231PS512
+ return true
+ case OpFusedMultiplySubAdd231Float32x4:
+ v.Op = OpAMD64VFMSUBADD231PS128
+ return true
+ case OpFusedMultiplySubAdd231Float32x8:
+ v.Op = OpAMD64VFMSUBADD231PS256
+ return true
+ case OpFusedMultiplySubAdd231Float64x2:
+ v.Op = OpAMD64VFMSUBADD231PD128
+ return true
+ case OpFusedMultiplySubAdd231Float64x4:
+ v.Op = OpAMD64VFMSUBADD231PD256
+ return true
+ case OpFusedMultiplySubAdd231Float64x8:
+ v.Op = OpAMD64VFMSUBADD231PD512
+ return true
+ case OpFusedNegativeMultiplyAdd132Float32x16:
+ v.Op = OpAMD64VFNMADD132PS512
+ return true
+ case OpFusedNegativeMultiplyAdd132Float32x4:
+ v.Op = OpAMD64VFNMADD132PS128
+ return true
+ case OpFusedNegativeMultiplyAdd132Float32x8:
+ v.Op = OpAMD64VFNMADD132PS256
+ return true
+ case OpFusedNegativeMultiplyAdd132Float64x2:
+ v.Op = OpAMD64VFNMADD132PD128
+ return true
+ case OpFusedNegativeMultiplyAdd132Float64x4:
+ v.Op = OpAMD64VFNMADD132PD256
+ return true
+ case OpFusedNegativeMultiplyAdd132Float64x8:
+ v.Op = OpAMD64VFNMADD132PD512
+ return true
+ case OpFusedNegativeMultiplyAdd213Float32x16:
+ v.Op = OpAMD64VFNMADD213PS512
+ return true
+ case OpFusedNegativeMultiplyAdd213Float32x4:
+ v.Op = OpAMD64VFNMADD213PS128
+ return true
+ case OpFusedNegativeMultiplyAdd213Float32x8:
+ v.Op = OpAMD64VFNMADD213PS256
+ return true
+ case OpFusedNegativeMultiplyAdd213Float64x2:
+ v.Op = OpAMD64VFNMADD213PD128
+ return true
+ case OpFusedNegativeMultiplyAdd213Float64x4:
+ v.Op = OpAMD64VFNMADD213PD256
+ return true
+ case OpFusedNegativeMultiplyAdd213Float64x8:
+ v.Op = OpAMD64VFNMADD213PD512
+ return true
+ case OpFusedNegativeMultiplyAdd231Float32x16:
+ v.Op = OpAMD64VFNMADD231PS512
+ return true
+ case OpFusedNegativeMultiplyAdd231Float32x4:
+ v.Op = OpAMD64VFNMADD231PS128
+ return true
+ case OpFusedNegativeMultiplyAdd231Float32x8:
+ v.Op = OpAMD64VFNMADD231PS256
+ return true
+ case OpFusedNegativeMultiplyAdd231Float64x2:
+ v.Op = OpAMD64VFNMADD231PD128
+ return true
+ case OpFusedNegativeMultiplyAdd231Float64x4:
+ v.Op = OpAMD64VFNMADD231PD256
+ return true
+ case OpFusedNegativeMultiplyAdd231Float64x8:
+ v.Op = OpAMD64VFNMADD231PD512
+ return true
+ case OpFusedNegativeMultiplySub132Float32x16:
+ v.Op = OpAMD64VFNMSUB132PS512
+ return true
+ case OpFusedNegativeMultiplySub132Float32x4:
+ v.Op = OpAMD64VFNMSUB132PS128
+ return true
+ case OpFusedNegativeMultiplySub132Float32x8:
+ v.Op = OpAMD64VFNMSUB132PS256
+ return true
+ case OpFusedNegativeMultiplySub132Float64x2:
+ v.Op = OpAMD64VFNMSUB132PD128
+ return true
+ case OpFusedNegativeMultiplySub132Float64x4:
+ v.Op = OpAMD64VFNMSUB132PD256
+ return true
+ case OpFusedNegativeMultiplySub132Float64x8:
+ v.Op = OpAMD64VFNMSUB132PD512
+ return true
+ case OpFusedNegativeMultiplySub213Float32x16:
+ v.Op = OpAMD64VFNMSUB213PS512
+ return true
+ case OpFusedNegativeMultiplySub213Float32x4:
+ v.Op = OpAMD64VFNMSUB213PS128
+ return true
+ case OpFusedNegativeMultiplySub213Float32x8:
+ v.Op = OpAMD64VFNMSUB213PS256
+ return true
+ case OpFusedNegativeMultiplySub213Float64x2:
+ v.Op = OpAMD64VFNMSUB213PD128
+ return true
+ case OpFusedNegativeMultiplySub213Float64x4:
+ v.Op = OpAMD64VFNMSUB213PD256
+ return true
+ case OpFusedNegativeMultiplySub213Float64x8:
+ v.Op = OpAMD64VFNMSUB213PD512
+ return true
+ case OpFusedNegativeMultiplySub231Float32x16:
+ v.Op = OpAMD64VFNMSUB231PS512
+ return true
+ case OpFusedNegativeMultiplySub231Float32x4:
+ v.Op = OpAMD64VFNMSUB231PS128
+ return true
+ case OpFusedNegativeMultiplySub231Float32x8:
+ v.Op = OpAMD64VFNMSUB231PS256
+ return true
+ case OpFusedNegativeMultiplySub231Float64x2:
+ v.Op = OpAMD64VFNMSUB231PD128
+ return true
+ case OpFusedNegativeMultiplySub231Float64x4:
+ v.Op = OpAMD64VFNMSUB231PD256
+ return true
+ case OpFusedNegativeMultiplySub231Float64x8:
+ v.Op = OpAMD64VFNMSUB231PD512
+ return true
case OpGetCallerPC:
v.Op = OpAMD64LoweredGetCallerPC
return true
return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v)
case OpMaskedFloorWithPrecisionFloat64x8:
return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v)
+ case OpMaskedFusedMultiplyAdd132Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v)
+ case OpMaskedFusedMultiplyAdd132Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v)
+ case OpMaskedFusedMultiplyAdd132Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v)
+ case OpMaskedFusedMultiplyAdd132Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v)
+ case OpMaskedFusedMultiplyAdd132Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v)
+ case OpMaskedFusedMultiplyAdd132Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v)
+ case OpMaskedFusedMultiplyAdd213Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v)
+ case OpMaskedFusedMultiplyAdd213Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v)
+ case OpMaskedFusedMultiplyAdd213Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v)
+ case OpMaskedFusedMultiplyAdd213Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v)
+ case OpMaskedFusedMultiplyAdd213Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v)
+ case OpMaskedFusedMultiplyAdd213Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v)
+ case OpMaskedFusedMultiplyAdd231Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v)
+ case OpMaskedFusedMultiplyAdd231Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v)
+ case OpMaskedFusedMultiplyAdd231Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v)
+ case OpMaskedFusedMultiplyAdd231Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v)
+ case OpMaskedFusedMultiplyAdd231Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v)
+ case OpMaskedFusedMultiplyAdd231Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v)
+ case OpMaskedFusedMultiplyAddSub132Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v)
+ case OpMaskedFusedMultiplyAddSub132Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v)
+ case OpMaskedFusedMultiplyAddSub132Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v)
+ case OpMaskedFusedMultiplyAddSub132Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v)
+ case OpMaskedFusedMultiplyAddSub132Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v)
+ case OpMaskedFusedMultiplyAddSub132Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v)
+ case OpMaskedFusedMultiplyAddSub213Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v)
+ case OpMaskedFusedMultiplyAddSub213Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v)
+ case OpMaskedFusedMultiplyAddSub213Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v)
+ case OpMaskedFusedMultiplyAddSub213Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v)
+ case OpMaskedFusedMultiplyAddSub213Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v)
+ case OpMaskedFusedMultiplyAddSub213Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v)
+ case OpMaskedFusedMultiplyAddSub231Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v)
+ case OpMaskedFusedMultiplyAddSub231Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v)
+ case OpMaskedFusedMultiplyAddSub231Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v)
+ case OpMaskedFusedMultiplyAddSub231Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v)
+ case OpMaskedFusedMultiplyAddSub231Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v)
+ case OpMaskedFusedMultiplyAddSub231Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v)
+ case OpMaskedFusedMultiplySub132Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v)
+ case OpMaskedFusedMultiplySub132Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v)
+ case OpMaskedFusedMultiplySub132Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v)
+ case OpMaskedFusedMultiplySub132Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v)
+ case OpMaskedFusedMultiplySub132Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v)
+ case OpMaskedFusedMultiplySub132Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v)
+ case OpMaskedFusedMultiplySub213Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v)
+ case OpMaskedFusedMultiplySub213Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v)
+ case OpMaskedFusedMultiplySub213Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v)
+ case OpMaskedFusedMultiplySub213Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v)
+ case OpMaskedFusedMultiplySub213Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v)
+ case OpMaskedFusedMultiplySub213Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v)
+ case OpMaskedFusedMultiplySub231Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v)
+ case OpMaskedFusedMultiplySub231Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v)
+ case OpMaskedFusedMultiplySub231Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v)
+ case OpMaskedFusedMultiplySub231Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v)
+ case OpMaskedFusedMultiplySub231Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v)
+ case OpMaskedFusedMultiplySub231Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v)
+ case OpMaskedFusedMultiplySubAdd132Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v)
+ case OpMaskedFusedMultiplySubAdd132Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v)
+ case OpMaskedFusedMultiplySubAdd132Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v)
+ case OpMaskedFusedMultiplySubAdd132Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v)
+ case OpMaskedFusedMultiplySubAdd132Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v)
+ case OpMaskedFusedMultiplySubAdd132Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v)
+ case OpMaskedFusedMultiplySubAdd213Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v)
+ case OpMaskedFusedMultiplySubAdd213Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v)
+ case OpMaskedFusedMultiplySubAdd213Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v)
+ case OpMaskedFusedMultiplySubAdd213Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v)
+ case OpMaskedFusedMultiplySubAdd213Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v)
+ case OpMaskedFusedMultiplySubAdd213Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v)
+ case OpMaskedFusedMultiplySubAdd231Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v)
+ case OpMaskedFusedMultiplySubAdd231Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v)
+ case OpMaskedFusedMultiplySubAdd231Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v)
+ case OpMaskedFusedMultiplySubAdd231Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v)
+ case OpMaskedFusedMultiplySubAdd231Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v)
+ case OpMaskedFusedMultiplySubAdd231Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v)
+ case OpMaskedFusedNegativeMultiplyAdd132Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v)
+ case OpMaskedFusedNegativeMultiplyAdd132Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v)
+ case OpMaskedFusedNegativeMultiplyAdd132Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v)
+ case OpMaskedFusedNegativeMultiplyAdd132Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v)
+ case OpMaskedFusedNegativeMultiplyAdd132Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v)
+ case OpMaskedFusedNegativeMultiplyAdd132Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v)
+ case OpMaskedFusedNegativeMultiplyAdd213Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v)
+ case OpMaskedFusedNegativeMultiplyAdd213Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v)
+ case OpMaskedFusedNegativeMultiplyAdd213Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v)
+ case OpMaskedFusedNegativeMultiplyAdd213Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v)
+ case OpMaskedFusedNegativeMultiplyAdd213Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v)
+ case OpMaskedFusedNegativeMultiplyAdd213Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v)
+ case OpMaskedFusedNegativeMultiplyAdd231Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v)
+ case OpMaskedFusedNegativeMultiplyAdd231Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v)
+ case OpMaskedFusedNegativeMultiplyAdd231Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v)
+ case OpMaskedFusedNegativeMultiplyAdd231Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v)
+ case OpMaskedFusedNegativeMultiplyAdd231Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v)
+ case OpMaskedFusedNegativeMultiplyAdd231Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v)
+ case OpMaskedFusedNegativeMultiplySub132Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v)
+ case OpMaskedFusedNegativeMultiplySub132Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v)
+ case OpMaskedFusedNegativeMultiplySub132Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v)
+ case OpMaskedFusedNegativeMultiplySub132Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v)
+ case OpMaskedFusedNegativeMultiplySub132Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v)
+ case OpMaskedFusedNegativeMultiplySub132Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v)
+ case OpMaskedFusedNegativeMultiplySub213Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v)
+ case OpMaskedFusedNegativeMultiplySub213Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v)
+ case OpMaskedFusedNegativeMultiplySub213Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v)
+ case OpMaskedFusedNegativeMultiplySub213Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v)
+ case OpMaskedFusedNegativeMultiplySub213Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v)
+ case OpMaskedFusedNegativeMultiplySub213Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v)
+ case OpMaskedFusedNegativeMultiplySub231Float32x16:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v)
+ case OpMaskedFusedNegativeMultiplySub231Float32x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v)
+ case OpMaskedFusedNegativeMultiplySub231Float32x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v)
+ case OpMaskedFusedNegativeMultiplySub231Float64x2:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v)
+ case OpMaskedFusedNegativeMultiplySub231Float64x4:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v)
+ case OpMaskedFusedNegativeMultiplySub231Float64x8:
+ return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v)
case OpMaskedGreaterEqualFloat32x16:
return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v)
case OpMaskedGreaterEqualFloat32x4:
return true
}
}
+// Lowerings for masked FMA-132 (x*z + y element order per VFMADD132):
+// each generic MaskedFusedMultiplyAdd132 op becomes the width-matched
+// VFMADD132P[SD]Masked op with the vector mask converted to a K-mask.
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd132Float32x16 x y z mask)
+	// result: (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD132PSMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd132Float32x4 x y z mask)
+	// result: (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD132PSMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd132Float32x8 x y z mask)
+	// result: (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD132PSMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd132Float64x2 x y z mask)
+	// result: (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD132PDMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd132Float64x4 x y z mask)
+	// result: (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD132PDMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd132Float64x8 x y z mask)
+	// result: (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD132PDMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+// Lowerings for masked FMA-213: each generic MaskedFusedMultiplyAdd213 op
+// becomes the width-matched VFMADD213P[SD]Masked op, with the vector mask
+// converted to a K-mask operand.
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd213Float32x16 x y z mask)
+	// result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD213PSMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd213Float32x4 x y z mask)
+	// result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD213PSMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd213Float32x8 x y z mask)
+	// result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD213PSMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd213Float64x2 x y z mask)
+	// result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD213PDMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd213Float64x4 x y z mask)
+	// result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD213PDMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd213Float64x8 x y z mask)
+	// result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD213PDMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+// Lowerings for masked FMA-231: each generic MaskedFusedMultiplyAdd231 op
+// becomes the width-matched VFMADD231P[SD]Masked op, with the vector mask
+// converted to a K-mask operand.
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd231Float32x16 x y z mask)
+	// result: (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD231PSMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd231Float32x4 x y z mask)
+	// result: (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD231PSMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd231Float32x8 x y z mask)
+	// result: (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD231PSMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd231Float64x2 x y z mask)
+	// result: (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD231PDMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd231Float64x4 x y z mask)
+	// result: (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD231PDMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAdd231Float64x8 x y z mask)
+	// result: (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADD231PDMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+// Lowerings for masked FMADDSUB-132: each generic MaskedFusedMultiplyAddSub132
+// op becomes the width-matched VFMADDSUB132P[SD]Masked op, with the vector
+// mask converted to a K-mask operand.
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub132Float32x16 x y z mask)
+	// result: (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB132PSMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub132Float32x4 x y z mask)
+	// result: (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB132PSMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub132Float32x8 x y z mask)
+	// result: (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB132PSMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub132Float64x2 x y z mask)
+	// result: (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB132PDMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub132Float64x4 x y z mask)
+	// result: (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB132PDMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub132Float64x8 x y z mask)
+	// result: (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB132PDMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+// Lowerings for masked FMADDSUB-213: each generic MaskedFusedMultiplyAddSub213
+// op becomes the width-matched VFMADDSUB213P[SD]Masked op, with the vector
+// mask converted to a K-mask operand.
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub213Float32x16 x y z mask)
+	// result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB213PSMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub213Float32x4 x y z mask)
+	// result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB213PSMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub213Float32x8 x y z mask)
+	// result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB213PSMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub213Float64x2 x y z mask)
+	// result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB213PDMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub213Float64x4 x y z mask)
+	// result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB213PDMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub213Float64x8 x y z mask)
+	// result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB213PDMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+// Lowerings for masked FMADDSUB-231: each generic MaskedFusedMultiplyAddSub231
+// op becomes the width-matched VFMADDSUB231P[SD]Masked op, with the vector
+// mask converted to a K-mask operand.
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub231Float32x16 x y z mask)
+	// result: (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB231PSMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub231Float32x4 x y z mask)
+	// result: (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB231PSMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub231Float32x8 x y z mask)
+	// result: (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB231PSMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub231Float64x2 x y z mask)
+	// result: (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB231PDMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub231Float64x4 x y z mask)
+	// result: (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB231PDMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v *Value) bool {
+	// match: (MaskedFusedMultiplyAddSub231Float64x8 x y z mask)
+	// result: (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMADDSUB231PDMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+// Lowerings for masked FMSUB-132: each generic MaskedFusedMultiplySub132 op
+// becomes the width-matched VFMSUB132P[SD]Masked op, with the vector mask
+// converted to a K-mask operand.
+func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v *Value) bool {
+	// match: (MaskedFusedMultiplySub132Float32x16 x y z mask)
+	// result: (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB132PSMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v *Value) bool {
+	// match: (MaskedFusedMultiplySub132Float32x4 x y z mask)
+	// result: (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB132PSMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v *Value) bool {
+	// match: (MaskedFusedMultiplySub132Float32x8 x y z mask)
+	// result: (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB132PSMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v *Value) bool {
+	// match: (MaskedFusedMultiplySub132Float64x2 x y z mask)
+	// result: (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB132PDMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v *Value) bool {
+	// match: (MaskedFusedMultiplySub132Float64x4 x y z mask)
+	// result: (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB132PDMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v *Value) bool {
+	// match: (MaskedFusedMultiplySub132Float64x8 x y z mask)
+	// result: (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB132PDMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+// Lowerings for masked FMSUB-213: each generic MaskedFusedMultiplySub213 op
+// becomes the width-matched VFMSUB213P[SD]Masked op, with the vector mask
+// converted to a K-mask operand.
+func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v *Value) bool {
+	// match: (MaskedFusedMultiplySub213Float32x16 x y z mask)
+	// result: (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB213PSMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v *Value) bool {
+	// match: (MaskedFusedMultiplySub213Float32x4 x y z mask)
+	// result: (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB213PSMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v *Value) bool {
+	// match: (MaskedFusedMultiplySub213Float32x8 x y z mask)
+	// result: (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB213PSMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v *Value) bool {
+	// match: (MaskedFusedMultiplySub213Float64x2 x y z mask)
+	// result: (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB213PDMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v *Value) bool {
+	// match: (MaskedFusedMultiplySub213Float64x4 x y z mask)
+	// result: (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB213PDMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v *Value) bool {
+	// match: (MaskedFusedMultiplySub213Float64x8 x y z mask)
+	// result: (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB213PDMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+// Lowerings for masked FMSUB-231: each generic MaskedFusedMultiplySub231 op
+// becomes the width-matched VFMSUB231P[SD]Masked op, with the vector mask
+// converted to a K-mask operand.
+func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v *Value) bool {
+	// match: (MaskedFusedMultiplySub231Float32x16 x y z mask)
+	// result: (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB231PSMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v *Value) bool {
+	// match: (MaskedFusedMultiplySub231Float32x4 x y z mask)
+	// result: (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB231PSMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v *Value) bool {
+	// match: (MaskedFusedMultiplySub231Float32x8 x y z mask)
+	// result: (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB231PSMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v *Value) bool {
+	// match: (MaskedFusedMultiplySub231Float64x2 x y z mask)
+	// result: (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB231PDMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v *Value) bool {
+	// match: (MaskedFusedMultiplySub231Float64x4 x y z mask)
+	// result: (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB231PDMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v *Value) bool {
+	// match: (MaskedFusedMultiplySub231Float64x8 x y z mask)
+	// result: (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUB231PDMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+// Lowerings for masked FMSUBADD-132: each generic MaskedFusedMultiplySubAdd132
+// op becomes the width-matched VFMSUBADD132P[SD]Masked op, with the vector
+// mask converted to a K-mask operand.
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v *Value) bool {
+	// match: (MaskedFusedMultiplySubAdd132Float32x16 x y z mask)
+	// result: (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUBADD132PSMasked512)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v *Value) bool {
+	// match: (MaskedFusedMultiplySubAdd132Float32x4 x y z mask)
+	// result: (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUBADD132PSMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v *Value) bool {
+	// match: (MaskedFusedMultiplySubAdd132Float32x8 x y z mask)
+	// result: (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUBADD132PSMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v *Value) bool {
+	// match: (MaskedFusedMultiplySubAdd132Float64x2 x y z mask)
+	// result: (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUBADD132PDMasked128)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v *Value) bool {
+	// match: (MaskedFusedMultiplySubAdd132Float64x4 x y z mask)
+	// result: (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+	x, y, z, mask := v.Args[0], v.Args[1], v.Args[2], v.Args[3]
+	v.reset(OpAMD64VFMSUBADD132PDMasked256)
+	m := v.Block.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+	m.AddArg(mask)
+	v.AddArg4(x, y, z, m)
+	return true
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd132Float64x8 x y z mask)
+ // result: (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD132PDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd213Float32x16 x y z mask)
+ // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD213PSMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd213Float32x4 x y z mask)
+ // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD213PSMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd213Float32x8 x y z mask)
+ // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD213PSMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd213Float64x2 x y z mask)
+ // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD213PDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd213Float64x4 x y z mask)
+ // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD213PDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd213Float64x8 x y z mask)
+ // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD213PDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd231Float32x16 x y z mask)
+ // result: (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD231PSMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd231Float32x4 x y z mask)
+ // result: (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD231PSMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd231Float32x8 x y z mask)
+ // result: (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD231PSMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd231Float64x2 x y z mask)
+ // result: (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD231PDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd231Float64x4 x y z mask)
+ // result: (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD231PDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedMultiplySubAdd231Float64x8 x y z mask)
+ // result: (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFMSUBADD231PDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask)
+ // result: (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD132PSMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask)
+ // result: (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD132PSMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask)
+ // result: (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD132PSMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask)
+ // result: (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD132PDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask)
+ // result: (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD132PDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask)
+ // result: (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD132PDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask)
+ // result: (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD213PSMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask)
+ // result: (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD213PSMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask)
+ // result: (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD213PSMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask)
+ // result: (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD213PDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask)
+ // result: (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD213PDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask)
+ // result: (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD213PDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask)
+ // result: (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD231PSMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask)
+ // result: (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD231PSMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask)
+ // result: (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD231PSMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask)
+ // result: (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD231PDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask)
+ // result: (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD231PDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask)
+ // result: (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMADD231PDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub132Float32x16 x y z mask)
+ // result: (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB132PSMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub132Float32x4 x y z mask)
+ // result: (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB132PSMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub132Float32x8 x y z mask)
+ // result: (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB132PSMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub132Float64x2 x y z mask)
+ // result: (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB132PDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub132Float64x4 x y z mask)
+ // result: (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB132PDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub132Float64x8 x y z mask)
+ // result: (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB132PDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub213Float32x16 x y z mask)
+ // result: (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB213PSMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub213Float32x4 x y z mask)
+ // result: (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB213PSMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub213Float32x8 x y z mask)
+ // result: (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB213PSMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub213Float64x2 x y z mask)
+ // result: (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB213PDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub213Float64x4 x y z mask)
+ // result: (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB213PDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub213Float64x8 x y z mask)
+ // result: (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB213PDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub231Float32x16 x y z mask)
+ // result: (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB231PSMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub231Float32x4 x y z mask)
+ // result: (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB231PSMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub231Float32x8 x y z mask)
+ // result: (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB231PSMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub231Float64x2 x y z mask)
+ // result: (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB231PDMasked128)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub231Float64x4 x y z mask)
+ // result: (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB231PDMasked256)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MaskedFusedNegativeMultiplySub231Float64x8 x y z mask)
+ // result: (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ mask := v_3
+ v.reset(OpAMD64VFNMSUB231PDMasked512)
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+ v0.AddArg(mask)
+ v.AddArg4(x, y, z, v0)
+ return true
+ }
+}
func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Uint8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Uint8x64.Sub", opLen2(ssa.OpSubUint8x64, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float64x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64)
// Asm: VPSUBB, CPU Feature: AVX512EVEX
func (x Uint8x64) Sub(y Uint8x64) Uint8x64
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplyAdd132(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplyAdd213(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplyAdd231(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplyAddSub132(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplyAddSub213(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplyAddSub231(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplySub132(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplySub213(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplySub231(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplySubAdd132(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplySubAdd213(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplySubAdd231(y Float32x16, z Float32x16) Float32x16
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedNegativeMultiplyAdd132(y Float32x16, z Float32x16) Float32x16
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedNegativeMultiplyAdd213(y Float32x16, z Float32x16) Float32x16
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedNegativeMultiplyAdd231(y Float32x16, z Float32x16) Float32x16
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedNegativeMultiplySub132(y Float32x16, z Float32x16) Float32x16
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedNegativeMultiplySub213(y Float32x16, z Float32x16) Float32x16
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedNegativeMultiplySub231(y Float32x16, z Float32x16) Float32x16
+
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPS, CPU Feature: AVX512EVEX
// Asm: VXORPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplyAdd132(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplyAdd213(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplyAdd231(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplyAddSub132(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplyAddSub213(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplyAddSub231(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplySub132(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplySub213(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplySub231(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplySubAdd132(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplySubAdd213(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplySubAdd231(y Float32x4, z Float32x4) Float32x4
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedNegativeMultiplyAdd132(y Float32x4, z Float32x4) Float32x4
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedNegativeMultiplyAdd213(y Float32x4, z Float32x4) Float32x4
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedNegativeMultiplyAdd231(y Float32x4, z Float32x4) Float32x4
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedNegativeMultiplySub132(y Float32x4, z Float32x4) Float32x4
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedNegativeMultiplySub213(y Float32x4, z Float32x4) Float32x4
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedNegativeMultiplySub231(y Float32x4, z Float32x4) Float32x4
+
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPS, CPU Feature: AVX512EVEX
// Asm: VXORPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplyAdd132(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplyAdd213(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplyAdd231(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplyAddSub132(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplyAddSub213(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplyAddSub231(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplySub132(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplySub213(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplySub231(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplySubAdd132(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplySubAdd213(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplySubAdd231(y Float32x8, z Float32x8) Float32x8
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedNegativeMultiplyAdd132(y Float32x8, z Float32x8) Float32x8
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedNegativeMultiplyAdd213(y Float32x8, z Float32x8) Float32x8
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedNegativeMultiplyAdd231(y Float32x8, z Float32x8) Float32x8
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedNegativeMultiplySub132(y Float32x8, z Float32x8) Float32x8
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedNegativeMultiplySub213(y Float32x8, z Float32x8) Float32x8
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedNegativeMultiplySub231(y Float32x8, z Float32x8) Float32x8
+
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPS, CPU Feature: AVX512EVEX
// Asm: VXORPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplyAdd132(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplyAdd213(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplyAdd231(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplyAddSub132(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplyAddSub213(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplyAddSub231(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplySub132(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplySub213(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplySub231(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplySubAdd132(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplySubAdd213(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplySubAdd231(y Float64x2, z Float64x2) Float64x2
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedNegativeMultiplyAdd132(y Float64x2, z Float64x2) Float64x2
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedNegativeMultiplyAdd213(y Float64x2, z Float64x2) Float64x2
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedNegativeMultiplyAdd231(y Float64x2, z Float64x2) Float64x2
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedNegativeMultiplySub132(y Float64x2, z Float64x2) Float64x2
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedNegativeMultiplySub213(y Float64x2, z Float64x2) Float64x2
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedNegativeMultiplySub231(y Float64x2, z Float64x2) Float64x2
+
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPD, CPU Feature: AVX512EVEX
// Asm: VXORPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplyAdd132(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplyAdd213(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplyAdd231(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplyAddSub132(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplyAddSub213(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplyAddSub231(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplySub132(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplySub213(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplySub231(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplySubAdd132(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplySubAdd213(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplySubAdd231(y Float64x4, z Float64x4) Float64x4
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedNegativeMultiplyAdd132(y Float64x4, z Float64x4) Float64x4
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedNegativeMultiplyAdd213(y Float64x4, z Float64x4) Float64x4
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedNegativeMultiplyAdd231(y Float64x4, z Float64x4) Float64x4
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedNegativeMultiplySub132(y Float64x4, z Float64x4) Float64x4
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedNegativeMultiplySub213(y Float64x4, z Float64x4) Float64x4
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedNegativeMultiplySub231(y Float64x4, z Float64x4) Float64x4
+
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPD, CPU Feature: AVX512EVEX
// Asm: VXORPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplyAdd132(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplyAdd213(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplyAdd231(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplyAddSub132(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplyAddSub213(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplyAddSub231(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplySub132(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplySub213(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplySub231(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplySubAdd132(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplySubAdd213(y Float64x8, z Float64x8) Float64x8
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplySubAdd231(y Float64x8, z Float64x8) Float64x8
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedNegativeMultiplyAdd132(y Float64x8, z Float64x8) Float64x8
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedNegativeMultiplyAdd213(y Float64x8, z Float64x8) Float64x8
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedNegativeMultiplyAdd231(y Float64x8, z Float64x8) Float64x8
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedNegativeMultiplySub132(y Float64x8, z Float64x8) Float64x8
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedNegativeMultiplySub213(y Float64x8, z Float64x8) Float64x8
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedNegativeMultiplySub231(y Float64x8, z Float64x8) Float64x8
+
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPD, CPU Feature: AVX512EVEX
// Asm: VPSUBB, CPU Feature: AVX512EVEX
func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplyAddSub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplyAddSub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplyAddSub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplySubAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplySubAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplySubAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedNegativeMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedNegativeMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedNegativeMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedNegativeMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedNegativeMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedNegativeMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplyAddSub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplyAddSub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplyAddSub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplySubAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplySubAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplySubAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedNegativeMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedNegativeMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedNegativeMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedNegativeMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedNegativeMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedNegativeMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplyAddSub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplyAddSub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplyAddSub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplySubAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplySubAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplySubAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedNegativeMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedNegativeMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedNegativeMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedNegativeMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedNegativeMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedNegativeMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplyAddSub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplyAddSub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplyAddSub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplySubAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplySubAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplySubAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedNegativeMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedNegativeMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedNegativeMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedNegativeMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedNegativeMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedNegativeMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplyAddSub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplyAddSub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplyAddSub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplySubAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplySubAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplySubAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedNegativeMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedNegativeMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedNegativeMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedNegativeMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedNegativeMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedNegativeMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
+//
+// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+//
+// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
+//
+// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplyAddSub132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
+//
+// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplyAddSub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplyAddSub213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplyAddSub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplyAddSub231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
+//
+// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplyAddSub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplySub132 performs `(v1 * v3) - v2`.
+//
+// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplySub213 performs `(v2 * v1) - v3`.
+//
+// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplySub231 performs `(v2 * v3) - v1`.
+//
+// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplySubAdd132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
+//
+// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplySubAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplySubAdd213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplySubAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedMultiplySubAdd231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
+//
+// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplySubAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
+//
+// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedNegativeMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
+//
+// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedNegativeMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
+//
+// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedNegativeMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
+//
+// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedNegativeMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
+//
+// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedNegativeMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
+//
+// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedNegativeMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSD, CPU Feature: AVX512EVEX