ssa.OpAMD64VPCMPUQMasked512:
p = simdFp2k1k1Imm8(s, v)
- case ssa.OpAMD64VFMADD132PS128,
- ssa.OpAMD64VFMADD132PS256,
- ssa.OpAMD64VFMADD132PS512,
- ssa.OpAMD64VFMADD132PD128,
- ssa.OpAMD64VFMADD132PD256,
- ssa.OpAMD64VFMADD132PD512,
- ssa.OpAMD64VFMADD213PS128,
+ case ssa.OpAMD64VFMADD213PS128,
ssa.OpAMD64VFMADD213PS256,
ssa.OpAMD64VFMADD213PS512,
ssa.OpAMD64VFMADD213PD128,
ssa.OpAMD64VFMADD213PD256,
ssa.OpAMD64VFMADD213PD512,
- ssa.OpAMD64VFMADD231PS128,
- ssa.OpAMD64VFMADD231PS256,
- ssa.OpAMD64VFMADD231PS512,
- ssa.OpAMD64VFMADD231PD128,
- ssa.OpAMD64VFMADD231PD256,
- ssa.OpAMD64VFMADD231PD512,
- ssa.OpAMD64VFMADDSUB132PS128,
- ssa.OpAMD64VFMADDSUB132PS256,
- ssa.OpAMD64VFMADDSUB132PS512,
- ssa.OpAMD64VFMADDSUB132PD128,
- ssa.OpAMD64VFMADDSUB132PD256,
- ssa.OpAMD64VFMADDSUB132PD512,
ssa.OpAMD64VFMADDSUB213PS128,
ssa.OpAMD64VFMADDSUB213PS256,
ssa.OpAMD64VFMADDSUB213PS512,
ssa.OpAMD64VFMADDSUB213PD128,
ssa.OpAMD64VFMADDSUB213PD256,
ssa.OpAMD64VFMADDSUB213PD512,
- ssa.OpAMD64VFMADDSUB231PS128,
- ssa.OpAMD64VFMADDSUB231PS256,
- ssa.OpAMD64VFMADDSUB231PS512,
- ssa.OpAMD64VFMADDSUB231PD128,
- ssa.OpAMD64VFMADDSUB231PD256,
- ssa.OpAMD64VFMADDSUB231PD512,
- ssa.OpAMD64VFMSUB132PS128,
- ssa.OpAMD64VFMSUB132PS256,
- ssa.OpAMD64VFMSUB132PS512,
- ssa.OpAMD64VFMSUB132PD128,
- ssa.OpAMD64VFMSUB132PD256,
- ssa.OpAMD64VFMSUB132PD512,
- ssa.OpAMD64VFMSUB213PS128,
- ssa.OpAMD64VFMSUB213PS256,
- ssa.OpAMD64VFMSUB213PS512,
- ssa.OpAMD64VFMSUB213PD128,
- ssa.OpAMD64VFMSUB213PD256,
- ssa.OpAMD64VFMSUB213PD512,
- ssa.OpAMD64VFMSUB231PS128,
- ssa.OpAMD64VFMSUB231PS256,
- ssa.OpAMD64VFMSUB231PS512,
- ssa.OpAMD64VFMSUB231PD128,
- ssa.OpAMD64VFMSUB231PD256,
- ssa.OpAMD64VFMSUB231PD512,
- ssa.OpAMD64VFMSUBADD132PS128,
- ssa.OpAMD64VFMSUBADD132PS256,
- ssa.OpAMD64VFMSUBADD132PS512,
- ssa.OpAMD64VFMSUBADD132PD128,
- ssa.OpAMD64VFMSUBADD132PD256,
- ssa.OpAMD64VFMSUBADD132PD512,
ssa.OpAMD64VFMSUBADD213PS128,
ssa.OpAMD64VFMSUBADD213PS256,
ssa.OpAMD64VFMSUBADD213PS512,
ssa.OpAMD64VFMSUBADD213PD128,
ssa.OpAMD64VFMSUBADD213PD256,
ssa.OpAMD64VFMSUBADD213PD512,
- ssa.OpAMD64VFMSUBADD231PS128,
- ssa.OpAMD64VFMSUBADD231PS256,
- ssa.OpAMD64VFMSUBADD231PS512,
- ssa.OpAMD64VFMSUBADD231PD128,
- ssa.OpAMD64VFMSUBADD231PD256,
- ssa.OpAMD64VFMSUBADD231PD512,
- ssa.OpAMD64VFNMADD132PS128,
- ssa.OpAMD64VFNMADD132PS256,
- ssa.OpAMD64VFNMADD132PS512,
- ssa.OpAMD64VFNMADD132PD128,
- ssa.OpAMD64VFNMADD132PD256,
- ssa.OpAMD64VFNMADD132PD512,
- ssa.OpAMD64VFNMADD213PS128,
- ssa.OpAMD64VFNMADD213PS256,
- ssa.OpAMD64VFNMADD213PS512,
- ssa.OpAMD64VFNMADD213PD128,
- ssa.OpAMD64VFNMADD213PD256,
- ssa.OpAMD64VFNMADD213PD512,
- ssa.OpAMD64VFNMADD231PS128,
- ssa.OpAMD64VFNMADD231PS256,
- ssa.OpAMD64VFNMADD231PS512,
- ssa.OpAMD64VFNMADD231PD128,
- ssa.OpAMD64VFNMADD231PD256,
- ssa.OpAMD64VFNMADD231PD512,
- ssa.OpAMD64VFNMSUB132PS128,
- ssa.OpAMD64VFNMSUB132PS256,
- ssa.OpAMD64VFNMSUB132PS512,
- ssa.OpAMD64VFNMSUB132PD128,
- ssa.OpAMD64VFNMSUB132PD256,
- ssa.OpAMD64VFNMSUB132PD512,
- ssa.OpAMD64VFNMSUB213PS128,
- ssa.OpAMD64VFNMSUB213PS256,
- ssa.OpAMD64VFNMSUB213PS512,
- ssa.OpAMD64VFNMSUB213PD128,
- ssa.OpAMD64VFNMSUB213PD256,
- ssa.OpAMD64VFNMSUB213PD512,
- ssa.OpAMD64VFNMSUB231PS128,
- ssa.OpAMD64VFNMSUB231PS256,
- ssa.OpAMD64VFNMSUB231PS512,
- ssa.OpAMD64VFNMSUB231PD128,
- ssa.OpAMD64VFNMSUB231PD256,
- ssa.OpAMD64VFNMSUB231PD512,
ssa.OpAMD64VPDPWSSD128,
ssa.OpAMD64VPDPWSSD256,
ssa.OpAMD64VPDPWSSD512,
ssa.OpAMD64VPDPBUSD512:
p = simdFp31ResultInArg0(s, v)
- case ssa.OpAMD64VFMADD132PSMasked128,
- ssa.OpAMD64VFMADD132PSMasked256,
- ssa.OpAMD64VFMADD132PSMasked512,
- ssa.OpAMD64VFMADD132PDMasked128,
- ssa.OpAMD64VFMADD132PDMasked256,
- ssa.OpAMD64VFMADD132PDMasked512,
- ssa.OpAMD64VFMADD213PSMasked128,
+ case ssa.OpAMD64VFMADD213PSMasked128,
ssa.OpAMD64VFMADD213PSMasked256,
ssa.OpAMD64VFMADD213PSMasked512,
ssa.OpAMD64VFMADD213PDMasked128,
ssa.OpAMD64VFMADD213PDMasked256,
ssa.OpAMD64VFMADD213PDMasked512,
- ssa.OpAMD64VFMADD231PSMasked128,
- ssa.OpAMD64VFMADD231PSMasked256,
- ssa.OpAMD64VFMADD231PSMasked512,
- ssa.OpAMD64VFMADD231PDMasked128,
- ssa.OpAMD64VFMADD231PDMasked256,
- ssa.OpAMD64VFMADD231PDMasked512,
- ssa.OpAMD64VFMADDSUB132PSMasked128,
- ssa.OpAMD64VFMADDSUB132PSMasked256,
- ssa.OpAMD64VFMADDSUB132PSMasked512,
- ssa.OpAMD64VFMADDSUB132PDMasked128,
- ssa.OpAMD64VFMADDSUB132PDMasked256,
- ssa.OpAMD64VFMADDSUB132PDMasked512,
ssa.OpAMD64VFMADDSUB213PSMasked128,
ssa.OpAMD64VFMADDSUB213PSMasked256,
ssa.OpAMD64VFMADDSUB213PSMasked512,
ssa.OpAMD64VFMADDSUB213PDMasked128,
ssa.OpAMD64VFMADDSUB213PDMasked256,
ssa.OpAMD64VFMADDSUB213PDMasked512,
- ssa.OpAMD64VFMADDSUB231PSMasked128,
- ssa.OpAMD64VFMADDSUB231PSMasked256,
- ssa.OpAMD64VFMADDSUB231PSMasked512,
- ssa.OpAMD64VFMADDSUB231PDMasked128,
- ssa.OpAMD64VFMADDSUB231PDMasked256,
- ssa.OpAMD64VFMADDSUB231PDMasked512,
- ssa.OpAMD64VFMSUB132PSMasked128,
- ssa.OpAMD64VFMSUB132PSMasked256,
- ssa.OpAMD64VFMSUB132PSMasked512,
- ssa.OpAMD64VFMSUB132PDMasked128,
- ssa.OpAMD64VFMSUB132PDMasked256,
- ssa.OpAMD64VFMSUB132PDMasked512,
- ssa.OpAMD64VFMSUB213PSMasked128,
- ssa.OpAMD64VFMSUB213PSMasked256,
- ssa.OpAMD64VFMSUB213PSMasked512,
- ssa.OpAMD64VFMSUB213PDMasked128,
- ssa.OpAMD64VFMSUB213PDMasked256,
- ssa.OpAMD64VFMSUB213PDMasked512,
- ssa.OpAMD64VFMSUB231PSMasked128,
- ssa.OpAMD64VFMSUB231PSMasked256,
- ssa.OpAMD64VFMSUB231PSMasked512,
- ssa.OpAMD64VFMSUB231PDMasked128,
- ssa.OpAMD64VFMSUB231PDMasked256,
- ssa.OpAMD64VFMSUB231PDMasked512,
- ssa.OpAMD64VFMSUBADD132PSMasked128,
- ssa.OpAMD64VFMSUBADD132PSMasked256,
- ssa.OpAMD64VFMSUBADD132PSMasked512,
- ssa.OpAMD64VFMSUBADD132PDMasked128,
- ssa.OpAMD64VFMSUBADD132PDMasked256,
- ssa.OpAMD64VFMSUBADD132PDMasked512,
ssa.OpAMD64VFMSUBADD213PSMasked128,
ssa.OpAMD64VFMSUBADD213PSMasked256,
ssa.OpAMD64VFMSUBADD213PSMasked512,
ssa.OpAMD64VFMSUBADD213PDMasked128,
ssa.OpAMD64VFMSUBADD213PDMasked256,
ssa.OpAMD64VFMSUBADD213PDMasked512,
- ssa.OpAMD64VFMSUBADD231PSMasked128,
- ssa.OpAMD64VFMSUBADD231PSMasked256,
- ssa.OpAMD64VFMSUBADD231PSMasked512,
- ssa.OpAMD64VFMSUBADD231PDMasked128,
- ssa.OpAMD64VFMSUBADD231PDMasked256,
- ssa.OpAMD64VFMSUBADD231PDMasked512,
- ssa.OpAMD64VFNMADD132PSMasked128,
- ssa.OpAMD64VFNMADD132PSMasked256,
- ssa.OpAMD64VFNMADD132PSMasked512,
- ssa.OpAMD64VFNMADD132PDMasked128,
- ssa.OpAMD64VFNMADD132PDMasked256,
- ssa.OpAMD64VFNMADD132PDMasked512,
- ssa.OpAMD64VFNMADD213PSMasked128,
- ssa.OpAMD64VFNMADD213PSMasked256,
- ssa.OpAMD64VFNMADD213PSMasked512,
- ssa.OpAMD64VFNMADD213PDMasked128,
- ssa.OpAMD64VFNMADD213PDMasked256,
- ssa.OpAMD64VFNMADD213PDMasked512,
- ssa.OpAMD64VFNMADD231PSMasked128,
- ssa.OpAMD64VFNMADD231PSMasked256,
- ssa.OpAMD64VFNMADD231PSMasked512,
- ssa.OpAMD64VFNMADD231PDMasked128,
- ssa.OpAMD64VFNMADD231PDMasked256,
- ssa.OpAMD64VFNMADD231PDMasked512,
- ssa.OpAMD64VFNMSUB132PSMasked128,
- ssa.OpAMD64VFNMSUB132PSMasked256,
- ssa.OpAMD64VFNMSUB132PSMasked512,
- ssa.OpAMD64VFNMSUB132PDMasked128,
- ssa.OpAMD64VFNMSUB132PDMasked256,
- ssa.OpAMD64VFNMSUB132PDMasked512,
- ssa.OpAMD64VFNMSUB213PSMasked128,
- ssa.OpAMD64VFNMSUB213PSMasked256,
- ssa.OpAMD64VFNMSUB213PSMasked512,
- ssa.OpAMD64VFNMSUB213PDMasked128,
- ssa.OpAMD64VFNMSUB213PDMasked256,
- ssa.OpAMD64VFNMSUB213PDMasked512,
- ssa.OpAMD64VFNMSUB231PSMasked128,
- ssa.OpAMD64VFNMSUB231PSMasked256,
- ssa.OpAMD64VFNMSUB231PSMasked512,
- ssa.OpAMD64VFNMSUB231PDMasked128,
- ssa.OpAMD64VFNMSUB231PDMasked256,
- ssa.OpAMD64VFNMSUB231PDMasked512,
ssa.OpAMD64VPDPWSSDMasked128,
ssa.OpAMD64VPDPWSSDMasked256,
ssa.OpAMD64VPDPWSSDMasked512,
ssa.OpAMD64VDIVPDMasked128,
ssa.OpAMD64VDIVPDMasked256,
ssa.OpAMD64VDIVPDMasked512,
- ssa.OpAMD64VFMADD132PSMasked128,
- ssa.OpAMD64VFMADD132PSMasked256,
- ssa.OpAMD64VFMADD132PSMasked512,
- ssa.OpAMD64VFMADD132PDMasked128,
- ssa.OpAMD64VFMADD132PDMasked256,
- ssa.OpAMD64VFMADD132PDMasked512,
ssa.OpAMD64VFMADD213PSMasked128,
ssa.OpAMD64VFMADD213PSMasked256,
ssa.OpAMD64VFMADD213PSMasked512,
ssa.OpAMD64VFMADD213PDMasked128,
ssa.OpAMD64VFMADD213PDMasked256,
ssa.OpAMD64VFMADD213PDMasked512,
- ssa.OpAMD64VFMADD231PSMasked128,
- ssa.OpAMD64VFMADD231PSMasked256,
- ssa.OpAMD64VFMADD231PSMasked512,
- ssa.OpAMD64VFMADD231PDMasked128,
- ssa.OpAMD64VFMADD231PDMasked256,
- ssa.OpAMD64VFMADD231PDMasked512,
- ssa.OpAMD64VFMADDSUB132PSMasked128,
- ssa.OpAMD64VFMADDSUB132PSMasked256,
- ssa.OpAMD64VFMADDSUB132PSMasked512,
- ssa.OpAMD64VFMADDSUB132PDMasked128,
- ssa.OpAMD64VFMADDSUB132PDMasked256,
- ssa.OpAMD64VFMADDSUB132PDMasked512,
ssa.OpAMD64VFMADDSUB213PSMasked128,
ssa.OpAMD64VFMADDSUB213PSMasked256,
ssa.OpAMD64VFMADDSUB213PSMasked512,
ssa.OpAMD64VFMADDSUB213PDMasked128,
ssa.OpAMD64VFMADDSUB213PDMasked256,
ssa.OpAMD64VFMADDSUB213PDMasked512,
- ssa.OpAMD64VFMADDSUB231PSMasked128,
- ssa.OpAMD64VFMADDSUB231PSMasked256,
- ssa.OpAMD64VFMADDSUB231PSMasked512,
- ssa.OpAMD64VFMADDSUB231PDMasked128,
- ssa.OpAMD64VFMADDSUB231PDMasked256,
- ssa.OpAMD64VFMADDSUB231PDMasked512,
- ssa.OpAMD64VFMSUB132PSMasked128,
- ssa.OpAMD64VFMSUB132PSMasked256,
- ssa.OpAMD64VFMSUB132PSMasked512,
- ssa.OpAMD64VFMSUB132PDMasked128,
- ssa.OpAMD64VFMSUB132PDMasked256,
- ssa.OpAMD64VFMSUB132PDMasked512,
- ssa.OpAMD64VFMSUB213PSMasked128,
- ssa.OpAMD64VFMSUB213PSMasked256,
- ssa.OpAMD64VFMSUB213PSMasked512,
- ssa.OpAMD64VFMSUB213PDMasked128,
- ssa.OpAMD64VFMSUB213PDMasked256,
- ssa.OpAMD64VFMSUB213PDMasked512,
- ssa.OpAMD64VFMSUB231PSMasked128,
- ssa.OpAMD64VFMSUB231PSMasked256,
- ssa.OpAMD64VFMSUB231PSMasked512,
- ssa.OpAMD64VFMSUB231PDMasked128,
- ssa.OpAMD64VFMSUB231PDMasked256,
- ssa.OpAMD64VFMSUB231PDMasked512,
- ssa.OpAMD64VFMSUBADD132PSMasked128,
- ssa.OpAMD64VFMSUBADD132PSMasked256,
- ssa.OpAMD64VFMSUBADD132PSMasked512,
- ssa.OpAMD64VFMSUBADD132PDMasked128,
- ssa.OpAMD64VFMSUBADD132PDMasked256,
- ssa.OpAMD64VFMSUBADD132PDMasked512,
ssa.OpAMD64VFMSUBADD213PSMasked128,
ssa.OpAMD64VFMSUBADD213PSMasked256,
ssa.OpAMD64VFMSUBADD213PSMasked512,
ssa.OpAMD64VFMSUBADD213PDMasked128,
ssa.OpAMD64VFMSUBADD213PDMasked256,
ssa.OpAMD64VFMSUBADD213PDMasked512,
- ssa.OpAMD64VFMSUBADD231PSMasked128,
- ssa.OpAMD64VFMSUBADD231PSMasked256,
- ssa.OpAMD64VFMSUBADD231PSMasked512,
- ssa.OpAMD64VFMSUBADD231PDMasked128,
- ssa.OpAMD64VFMSUBADD231PDMasked256,
- ssa.OpAMD64VFMSUBADD231PDMasked512,
- ssa.OpAMD64VFNMADD132PSMasked128,
- ssa.OpAMD64VFNMADD132PSMasked256,
- ssa.OpAMD64VFNMADD132PSMasked512,
- ssa.OpAMD64VFNMADD132PDMasked128,
- ssa.OpAMD64VFNMADD132PDMasked256,
- ssa.OpAMD64VFNMADD132PDMasked512,
- ssa.OpAMD64VFNMADD213PSMasked128,
- ssa.OpAMD64VFNMADD213PSMasked256,
- ssa.OpAMD64VFNMADD213PSMasked512,
- ssa.OpAMD64VFNMADD213PDMasked128,
- ssa.OpAMD64VFNMADD213PDMasked256,
- ssa.OpAMD64VFNMADD213PDMasked512,
- ssa.OpAMD64VFNMADD231PSMasked128,
- ssa.OpAMD64VFNMADD231PSMasked256,
- ssa.OpAMD64VFNMADD231PSMasked512,
- ssa.OpAMD64VFNMADD231PDMasked128,
- ssa.OpAMD64VFNMADD231PDMasked256,
- ssa.OpAMD64VFNMADD231PDMasked512,
- ssa.OpAMD64VFNMSUB132PSMasked128,
- ssa.OpAMD64VFNMSUB132PSMasked256,
- ssa.OpAMD64VFNMSUB132PSMasked512,
- ssa.OpAMD64VFNMSUB132PDMasked128,
- ssa.OpAMD64VFNMSUB132PDMasked256,
- ssa.OpAMD64VFNMSUB132PDMasked512,
- ssa.OpAMD64VFNMSUB213PSMasked128,
- ssa.OpAMD64VFNMSUB213PSMasked256,
- ssa.OpAMD64VFNMSUB213PSMasked512,
- ssa.OpAMD64VFNMSUB213PDMasked128,
- ssa.OpAMD64VFNMSUB213PDMasked256,
- ssa.OpAMD64VFNMSUB213PDMasked512,
- ssa.OpAMD64VFNMSUB231PSMasked128,
- ssa.OpAMD64VFNMSUB231PSMasked256,
- ssa.OpAMD64VFNMSUB231PSMasked512,
- ssa.OpAMD64VFNMSUB231PDMasked128,
- ssa.OpAMD64VFNMSUB231PDMasked256,
- ssa.OpAMD64VFNMSUB231PDMasked512,
ssa.OpAMD64VMAXPSMasked128,
ssa.OpAMD64VMAXPSMasked256,
ssa.OpAMD64VMAXPSMasked512,
(FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x)
(FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x)
(FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x)
-(FusedMultiplyAdd132Float32x16 ...) => (VFMADD132PS512 ...)
-(FusedMultiplyAdd132Float32x4 ...) => (VFMADD132PS128 ...)
-(FusedMultiplyAdd132Float32x8 ...) => (VFMADD132PS256 ...)
-(FusedMultiplyAdd132Float64x2 ...) => (VFMADD132PD128 ...)
-(FusedMultiplyAdd132Float64x4 ...) => (VFMADD132PD256 ...)
-(FusedMultiplyAdd132Float64x8 ...) => (VFMADD132PD512 ...)
-(FusedMultiplyAdd213Float32x16 ...) => (VFMADD213PS512 ...)
-(FusedMultiplyAdd213Float32x4 ...) => (VFMADD213PS128 ...)
-(FusedMultiplyAdd213Float32x8 ...) => (VFMADD213PS256 ...)
-(FusedMultiplyAdd213Float64x2 ...) => (VFMADD213PD128 ...)
-(FusedMultiplyAdd213Float64x4 ...) => (VFMADD213PD256 ...)
-(FusedMultiplyAdd213Float64x8 ...) => (VFMADD213PD512 ...)
-(FusedMultiplyAdd231Float32x16 ...) => (VFMADD231PS512 ...)
-(FusedMultiplyAdd231Float32x4 ...) => (VFMADD231PS128 ...)
-(FusedMultiplyAdd231Float32x8 ...) => (VFMADD231PS256 ...)
-(FusedMultiplyAdd231Float64x2 ...) => (VFMADD231PD128 ...)
-(FusedMultiplyAdd231Float64x4 ...) => (VFMADD231PD256 ...)
-(FusedMultiplyAdd231Float64x8 ...) => (VFMADD231PD512 ...)
-(FusedMultiplyAddSub132Float32x16 ...) => (VFMADDSUB132PS512 ...)
-(FusedMultiplyAddSub132Float32x4 ...) => (VFMADDSUB132PS128 ...)
-(FusedMultiplyAddSub132Float32x8 ...) => (VFMADDSUB132PS256 ...)
-(FusedMultiplyAddSub132Float64x2 ...) => (VFMADDSUB132PD128 ...)
-(FusedMultiplyAddSub132Float64x4 ...) => (VFMADDSUB132PD256 ...)
-(FusedMultiplyAddSub132Float64x8 ...) => (VFMADDSUB132PD512 ...)
-(FusedMultiplyAddSub213Float32x16 ...) => (VFMADDSUB213PS512 ...)
-(FusedMultiplyAddSub213Float32x4 ...) => (VFMADDSUB213PS128 ...)
-(FusedMultiplyAddSub213Float32x8 ...) => (VFMADDSUB213PS256 ...)
-(FusedMultiplyAddSub213Float64x2 ...) => (VFMADDSUB213PD128 ...)
-(FusedMultiplyAddSub213Float64x4 ...) => (VFMADDSUB213PD256 ...)
-(FusedMultiplyAddSub213Float64x8 ...) => (VFMADDSUB213PD512 ...)
-(FusedMultiplyAddSub231Float32x16 ...) => (VFMADDSUB231PS512 ...)
-(FusedMultiplyAddSub231Float32x4 ...) => (VFMADDSUB231PS128 ...)
-(FusedMultiplyAddSub231Float32x8 ...) => (VFMADDSUB231PS256 ...)
-(FusedMultiplyAddSub231Float64x2 ...) => (VFMADDSUB231PD128 ...)
-(FusedMultiplyAddSub231Float64x4 ...) => (VFMADDSUB231PD256 ...)
-(FusedMultiplyAddSub231Float64x8 ...) => (VFMADDSUB231PD512 ...)
-(FusedMultiplySub132Float32x16 ...) => (VFMSUB132PS512 ...)
-(FusedMultiplySub132Float32x4 ...) => (VFMSUB132PS128 ...)
-(FusedMultiplySub132Float32x8 ...) => (VFMSUB132PS256 ...)
-(FusedMultiplySub132Float64x2 ...) => (VFMSUB132PD128 ...)
-(FusedMultiplySub132Float64x4 ...) => (VFMSUB132PD256 ...)
-(FusedMultiplySub132Float64x8 ...) => (VFMSUB132PD512 ...)
-(FusedMultiplySub213Float32x16 ...) => (VFMSUB213PS512 ...)
-(FusedMultiplySub213Float32x4 ...) => (VFMSUB213PS128 ...)
-(FusedMultiplySub213Float32x8 ...) => (VFMSUB213PS256 ...)
-(FusedMultiplySub213Float64x2 ...) => (VFMSUB213PD128 ...)
-(FusedMultiplySub213Float64x4 ...) => (VFMSUB213PD256 ...)
-(FusedMultiplySub213Float64x8 ...) => (VFMSUB213PD512 ...)
-(FusedMultiplySub231Float32x16 ...) => (VFMSUB231PS512 ...)
-(FusedMultiplySub231Float32x4 ...) => (VFMSUB231PS128 ...)
-(FusedMultiplySub231Float32x8 ...) => (VFMSUB231PS256 ...)
-(FusedMultiplySub231Float64x2 ...) => (VFMSUB231PD128 ...)
-(FusedMultiplySub231Float64x4 ...) => (VFMSUB231PD256 ...)
-(FusedMultiplySub231Float64x8 ...) => (VFMSUB231PD512 ...)
-(FusedMultiplySubAdd132Float32x16 ...) => (VFMSUBADD132PS512 ...)
-(FusedMultiplySubAdd132Float32x4 ...) => (VFMSUBADD132PS128 ...)
-(FusedMultiplySubAdd132Float32x8 ...) => (VFMSUBADD132PS256 ...)
-(FusedMultiplySubAdd132Float64x2 ...) => (VFMSUBADD132PD128 ...)
-(FusedMultiplySubAdd132Float64x4 ...) => (VFMSUBADD132PD256 ...)
-(FusedMultiplySubAdd132Float64x8 ...) => (VFMSUBADD132PD512 ...)
-(FusedMultiplySubAdd213Float32x16 ...) => (VFMSUBADD213PS512 ...)
-(FusedMultiplySubAdd213Float32x4 ...) => (VFMSUBADD213PS128 ...)
-(FusedMultiplySubAdd213Float32x8 ...) => (VFMSUBADD213PS256 ...)
-(FusedMultiplySubAdd213Float64x2 ...) => (VFMSUBADD213PD128 ...)
-(FusedMultiplySubAdd213Float64x4 ...) => (VFMSUBADD213PD256 ...)
-(FusedMultiplySubAdd213Float64x8 ...) => (VFMSUBADD213PD512 ...)
-(FusedMultiplySubAdd231Float32x16 ...) => (VFMSUBADD231PS512 ...)
-(FusedMultiplySubAdd231Float32x4 ...) => (VFMSUBADD231PS128 ...)
-(FusedMultiplySubAdd231Float32x8 ...) => (VFMSUBADD231PS256 ...)
-(FusedMultiplySubAdd231Float64x2 ...) => (VFMSUBADD231PD128 ...)
-(FusedMultiplySubAdd231Float64x4 ...) => (VFMSUBADD231PD256 ...)
-(FusedMultiplySubAdd231Float64x8 ...) => (VFMSUBADD231PD512 ...)
-(FusedNegativeMultiplyAdd132Float32x16 ...) => (VFNMADD132PS512 ...)
-(FusedNegativeMultiplyAdd132Float32x4 ...) => (VFNMADD132PS128 ...)
-(FusedNegativeMultiplyAdd132Float32x8 ...) => (VFNMADD132PS256 ...)
-(FusedNegativeMultiplyAdd132Float64x2 ...) => (VFNMADD132PD128 ...)
-(FusedNegativeMultiplyAdd132Float64x4 ...) => (VFNMADD132PD256 ...)
-(FusedNegativeMultiplyAdd132Float64x8 ...) => (VFNMADD132PD512 ...)
-(FusedNegativeMultiplyAdd213Float32x16 ...) => (VFNMADD213PS512 ...)
-(FusedNegativeMultiplyAdd213Float32x4 ...) => (VFNMADD213PS128 ...)
-(FusedNegativeMultiplyAdd213Float32x8 ...) => (VFNMADD213PS256 ...)
-(FusedNegativeMultiplyAdd213Float64x2 ...) => (VFNMADD213PD128 ...)
-(FusedNegativeMultiplyAdd213Float64x4 ...) => (VFNMADD213PD256 ...)
-(FusedNegativeMultiplyAdd213Float64x8 ...) => (VFNMADD213PD512 ...)
-(FusedNegativeMultiplyAdd231Float32x16 ...) => (VFNMADD231PS512 ...)
-(FusedNegativeMultiplyAdd231Float32x4 ...) => (VFNMADD231PS128 ...)
-(FusedNegativeMultiplyAdd231Float32x8 ...) => (VFNMADD231PS256 ...)
-(FusedNegativeMultiplyAdd231Float64x2 ...) => (VFNMADD231PD128 ...)
-(FusedNegativeMultiplyAdd231Float64x4 ...) => (VFNMADD231PD256 ...)
-(FusedNegativeMultiplyAdd231Float64x8 ...) => (VFNMADD231PD512 ...)
-(FusedNegativeMultiplySub132Float32x16 ...) => (VFNMSUB132PS512 ...)
-(FusedNegativeMultiplySub132Float32x4 ...) => (VFNMSUB132PS128 ...)
-(FusedNegativeMultiplySub132Float32x8 ...) => (VFNMSUB132PS256 ...)
-(FusedNegativeMultiplySub132Float64x2 ...) => (VFNMSUB132PD128 ...)
-(FusedNegativeMultiplySub132Float64x4 ...) => (VFNMSUB132PD256 ...)
-(FusedNegativeMultiplySub132Float64x8 ...) => (VFNMSUB132PD512 ...)
-(FusedNegativeMultiplySub213Float32x16 ...) => (VFNMSUB213PS512 ...)
-(FusedNegativeMultiplySub213Float32x4 ...) => (VFNMSUB213PS128 ...)
-(FusedNegativeMultiplySub213Float32x8 ...) => (VFNMSUB213PS256 ...)
-(FusedNegativeMultiplySub213Float64x2 ...) => (VFNMSUB213PD128 ...)
-(FusedNegativeMultiplySub213Float64x4 ...) => (VFNMSUB213PD256 ...)
-(FusedNegativeMultiplySub213Float64x8 ...) => (VFNMSUB213PD512 ...)
-(FusedNegativeMultiplySub231Float32x16 ...) => (VFNMSUB231PS512 ...)
-(FusedNegativeMultiplySub231Float32x4 ...) => (VFNMSUB231PS128 ...)
-(FusedNegativeMultiplySub231Float32x8 ...) => (VFNMSUB231PS256 ...)
-(FusedNegativeMultiplySub231Float64x2 ...) => (VFNMSUB231PD128 ...)
-(FusedNegativeMultiplySub231Float64x4 ...) => (VFNMSUB231PD256 ...)
-(FusedNegativeMultiplySub231Float64x8 ...) => (VFNMSUB231PD512 ...)
+(FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...)
+(FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...)
+(FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...)
+(FusedMultiplyAddFloat64x2 ...) => (VFMADD213PD128 ...)
+(FusedMultiplyAddFloat64x4 ...) => (VFMADD213PD256 ...)
+(FusedMultiplyAddFloat64x8 ...) => (VFMADD213PD512 ...)
+(FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...)
+(FusedMultiplyAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...)
+(FusedMultiplyAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...)
+(FusedMultiplyAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...)
+(FusedMultiplyAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...)
+(FusedMultiplyAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...)
+(FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...)
+(FusedMultiplySubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...)
+(FusedMultiplySubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...)
+(FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...)
+(FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...)
+(FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...)
(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y))
(GreaterFloat32x4 x y) => (VCMPPS128 [6] x y)
(GreaterFloat32x8 x y) => (VCMPPS256 [6] x y)
(MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd132Float32x16 x y z mask) => (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd132Float32x4 x y z mask) => (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd132Float32x8 x y z mask) => (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd132Float64x2 x y z mask) => (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd132Float64x4 x y z mask) => (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd132Float64x8 x y z mask) => (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd213Float32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd213Float32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd213Float32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd213Float64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd213Float64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd213Float64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd231Float32x16 x y z mask) => (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd231Float32x4 x y z mask) => (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd231Float32x8 x y z mask) => (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd231Float64x2 x y z mask) => (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd231Float64x4 x y z mask) => (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAdd231Float64x8 x y z mask) => (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub132Float32x16 x y z mask) => (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub132Float32x4 x y z mask) => (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub132Float32x8 x y z mask) => (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub132Float64x2 x y z mask) => (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub132Float64x4 x y z mask) => (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub132Float64x8 x y z mask) => (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub213Float32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub213Float32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub213Float32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub213Float64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub213Float64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub213Float64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub231Float32x16 x y z mask) => (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub231Float32x4 x y z mask) => (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub231Float32x8 x y z mask) => (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub231Float64x2 x y z mask) => (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub231Float64x4 x y z mask) => (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplyAddSub231Float64x8 x y z mask) => (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub132Float32x16 x y z mask) => (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub132Float32x4 x y z mask) => (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub132Float32x8 x y z mask) => (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub132Float64x2 x y z mask) => (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub132Float64x4 x y z mask) => (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub132Float64x8 x y z mask) => (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub213Float32x16 x y z mask) => (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub213Float32x4 x y z mask) => (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub213Float32x8 x y z mask) => (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub213Float64x2 x y z mask) => (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub213Float64x4 x y z mask) => (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub213Float64x8 x y z mask) => (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub231Float32x16 x y z mask) => (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub231Float32x4 x y z mask) => (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub231Float32x8 x y z mask) => (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub231Float64x2 x y z mask) => (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub231Float64x4 x y z mask) => (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySub231Float64x8 x y z mask) => (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd132Float32x16 x y z mask) => (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd132Float32x4 x y z mask) => (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd132Float32x8 x y z mask) => (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd132Float64x2 x y z mask) => (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd132Float64x4 x y z mask) => (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd132Float64x8 x y z mask) => (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd213Float32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd213Float32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd213Float32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd213Float64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd213Float64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd213Float64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd231Float32x16 x y z mask) => (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd231Float32x4 x y z mask) => (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd231Float32x8 x y z mask) => (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd231Float64x2 x y z mask) => (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd231Float64x4 x y z mask) => (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedMultiplySubAdd231Float64x8 x y z mask) => (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask) => (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask) => (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask) => (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask) => (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask) => (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask) => (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask) => (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask) => (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask) => (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask) => (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask) => (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask) => (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask) => (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask) => (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask) => (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask) => (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask) => (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask) => (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub132Float32x16 x y z mask) => (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub132Float32x4 x y z mask) => (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub132Float32x8 x y z mask) => (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub132Float64x2 x y z mask) => (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub132Float64x4 x y z mask) => (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub132Float64x8 x y z mask) => (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub213Float32x16 x y z mask) => (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub213Float32x4 x y z mask) => (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub213Float32x8 x y z mask) => (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub213Float64x2 x y z mask) => (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub213Float64x4 x y z mask) => (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub213Float64x8 x y z mask) => (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub231Float32x16 x y z mask) => (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub231Float32x4 x y z mask) => (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub231Float32x8 x y z mask) => (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub231Float64x2 x y z mask) => (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub231Float64x4 x y z mask) => (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFusedNegativeMultiplySub231Float64x8 x y z mask) => (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSubFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSubFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSubFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSubFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSubFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplyAddSubFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAddFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAddFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAddFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAddFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAddFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
+(MaskedFusedMultiplySubAddFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
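For reference only (not part of this diff): the renamed FusedMultiplyAdd*, FusedMultiplyAddSub*, and FusedMultiplySubAdd* rules above all lower to the 213-form instructions, so per lane they compute x*y + z, with the AddSub/SubAdd forms alternating the sign of z across even/odd lanes. A rough scalar sketch of that arithmetic, using hypothetical helper names and assuming the usual ADDSUB lane pairing (even lanes subtract for FMADDSUB, add for FMSUBADD):

package main

import (
	"fmt"
	"math"
)

// fmadd213: every lane computes x*y + z (VFMADD213 semantics).
func fmadd213(x, y, z []float64) []float64 {
	r := make([]float64, len(x))
	for i := range r {
		r[i] = math.FMA(x[i], y[i], z[i])
	}
	return r
}

// fmaddsub213: even-indexed lanes compute x*y - z, odd-indexed lanes x*y + z
// (assumed VFMADDSUB213 pairing, matching the older ADDSUBPS convention).
func fmaddsub213(x, y, z []float64) []float64 {
	r := make([]float64, len(x))
	for i := range r {
		if i%2 == 0 {
			r[i] = math.FMA(x[i], y[i], -z[i])
		} else {
			r[i] = math.FMA(x[i], y[i], z[i])
		}
	}
	return r
}

// fmsubadd213: the opposite pairing, even lanes add z, odd lanes subtract z
// (assumed VFMSUBADD213 pairing).
func fmsubadd213(x, y, z []float64) []float64 {
	r := make([]float64, len(x))
	for i := range r {
		if i%2 == 0 {
			r[i] = math.FMA(x[i], y[i], z[i])
		} else {
			r[i] = math.FMA(x[i], y[i], -z[i])
		}
	}
	return r
}

func main() {
	x := []float64{1, 2, 3, 4}
	y := []float64{10, 10, 10, 10}
	z := []float64{1, 1, 1, 1}
	fmt.Println(fmadd213(x, y, z))    // [11 21 31 41]
	fmt.Println(fmaddsub213(x, y, z)) // [9 21 29 41]
	fmt.Println(fmsubadd213(x, y, z)) // [11 19 31 39]
}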
{name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VFMADD132PS512", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADD231PS512", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADDSUB132PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADDSUB231PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB132PS512", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB213PS512", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB231PS512", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUBADD132PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUBADD231PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD132PS512", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD213PS512", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD231PS512", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB132PS512", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB213PS512", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB231PS512", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VFMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADDSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADDSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUBADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUBADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VFMADD132PS128", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADD231PS128", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADDSUB132PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADDSUB231PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB132PS128", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB213PS128", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB231PS128", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUBADD132PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUBADD231PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD132PS128", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD213PS128", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD231PS128", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB132PS128", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB213PS128", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB231PS128", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VFMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADDSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADDSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUBADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUBADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VFMADD132PS256", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADD231PS256", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADDSUB132PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADDSUB231PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB132PS256", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB213PS256", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB231PS256", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUBADD132PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUBADD231PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD132PS256", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD213PS256", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD231PS256", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB132PS256", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB213PS256", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB231PS256", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VFMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADDSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADDSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUBADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUBADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VMAXPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VFMADD132PD128", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADD231PD128", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADDSUB132PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADDSUB231PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB132PD128", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB213PD128", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB231PD128", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUBADD132PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUBADD231PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD132PD128", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD213PD128", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD231PD128", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB132PD128", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB213PD128", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB231PD128", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VFMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADDSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMADDSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUBADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFMSUBADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true},
- {name: "VFNMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true},
{name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
{name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VFMADD132PD256", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADD231PD256", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADDSUB132PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADDSUB231PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB132PD256", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB213PD256", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB231PD256", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUBADD132PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUBADD231PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD132PD256", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD213PD256", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD231PD256", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB132PD256", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB213PD256", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB231PD256", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VFMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADDSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMADDSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUBADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFMSUBADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true},
- {name: "VFNMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true},
{name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
{name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VFMADD132PD512", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADD231PD512", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADDSUB132PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADDSUB231PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB132PD512", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB213PD512", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB231PD512", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUBADD132PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUBADD231PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD132PD512", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD213PD512", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD231PD512", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB132PD512", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB213PD512", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB231PD512", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false},
{name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VFMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADDSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMADDSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUBADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFMSUBADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true},
- {name: "VFNMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true},
{name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false},
{name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false},
{name: "DivFloat32x16", argLength: 2, commutative: false},
{name: "EqualFloat32x16", argLength: 2, commutative: true},
- {name: "FusedMultiplyAdd132Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd213Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd231Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub132Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub213Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub231Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplySub132Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplySub213Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplySub231Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd132Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd213Float32x16", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd231Float32x16", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd132Float32x16", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd213Float32x16", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd231Float32x16", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub132Float32x16", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub213Float32x16", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub231Float32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false},
{name: "GreaterFloat32x16", argLength: 2, commutative: false},
{name: "GreaterEqualFloat32x16", argLength: 2, commutative: false},
{name: "IsNanFloat32x16", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false},
{name: "MaskedDivFloat32x16", argLength: 3, commutative: false},
{name: "MaskedEqualFloat32x16", argLength: 3, commutative: true},
- {name: "MaskedFusedMultiplyAdd132Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd213Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd231Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub132Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub213Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub231Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub132Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub213Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub231Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd132Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd213Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd231Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd132Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd213Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd231Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub132Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub213Float32x16", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub231Float32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddFloat32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSubFloat32x16", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAddFloat32x16", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat32x16", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat32x16", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat32x16", argLength: 3, commutative: true},
{name: "DivFloat32x4", argLength: 2, commutative: false},
{name: "EqualFloat32x4", argLength: 2, commutative: true},
{name: "FloorFloat32x4", argLength: 1, commutative: false},
- {name: "FusedMultiplyAdd132Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd213Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd231Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub132Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub213Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub231Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySub132Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySub213Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySub231Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd132Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd213Float32x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd231Float32x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd132Float32x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd213Float32x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd231Float32x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub132Float32x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub213Float32x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub231Float32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddFloat32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSubFloat32x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAddFloat32x4", argLength: 3, commutative: false},
{name: "GreaterFloat32x4", argLength: 2, commutative: false},
{name: "GreaterEqualFloat32x4", argLength: 2, commutative: false},
{name: "IsNanFloat32x4", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false},
{name: "MaskedDivFloat32x4", argLength: 3, commutative: false},
{name: "MaskedEqualFloat32x4", argLength: 3, commutative: true},
- {name: "MaskedFusedMultiplyAdd132Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd213Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd231Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub132Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub213Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub231Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub132Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub213Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub231Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd132Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd213Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd231Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd132Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd213Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd231Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub132Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub213Float32x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub231Float32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddFloat32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSubFloat32x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAddFloat32x4", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat32x4", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat32x4", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat32x4", argLength: 3, commutative: true},
{name: "DivFloat32x8", argLength: 2, commutative: false},
{name: "EqualFloat32x8", argLength: 2, commutative: true},
{name: "FloorFloat32x8", argLength: 1, commutative: false},
- {name: "FusedMultiplyAdd132Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd213Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd231Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub132Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub213Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub231Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySub132Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySub213Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySub231Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd132Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd213Float32x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd231Float32x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd132Float32x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd213Float32x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd231Float32x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub132Float32x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub213Float32x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub231Float32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false},
{name: "GreaterFloat32x8", argLength: 2, commutative: false},
{name: "GreaterEqualFloat32x8", argLength: 2, commutative: false},
{name: "IsNanFloat32x8", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false},
{name: "MaskedDivFloat32x8", argLength: 3, commutative: false},
{name: "MaskedEqualFloat32x8", argLength: 3, commutative: true},
- {name: "MaskedFusedMultiplyAdd132Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd213Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd231Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub132Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub213Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub231Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub132Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub213Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub231Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd132Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd213Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd231Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd132Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd213Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd231Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub132Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub213Float32x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub231Float32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddFloat32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSubFloat32x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAddFloat32x8", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat32x8", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat32x8", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat32x8", argLength: 3, commutative: true},
{name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true},
{name: "EqualFloat64x2", argLength: 2, commutative: true},
{name: "FloorFloat64x2", argLength: 1, commutative: false},
- {name: "FusedMultiplyAdd132Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd213Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd231Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub132Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub213Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub231Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplySub132Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplySub213Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplySub231Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd132Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd213Float64x2", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd231Float64x2", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd132Float64x2", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd213Float64x2", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd231Float64x2", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub132Float64x2", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub213Float64x2", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub231Float64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false},
{name: "GreaterFloat64x2", argLength: 2, commutative: false},
{name: "GreaterEqualFloat64x2", argLength: 2, commutative: false},
{name: "IsNanFloat64x2", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false},
{name: "MaskedDivFloat64x2", argLength: 3, commutative: false},
{name: "MaskedEqualFloat64x2", argLength: 3, commutative: true},
- {name: "MaskedFusedMultiplyAdd132Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd213Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd231Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub132Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub213Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub231Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub132Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub213Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub231Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd132Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd213Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd231Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd132Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd213Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd231Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub132Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub213Float64x2", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub231Float64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddFloat64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSubFloat64x2", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAddFloat64x2", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat64x2", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat64x2", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat64x2", argLength: 3, commutative: true},
{name: "DivFloat64x4", argLength: 2, commutative: false},
{name: "EqualFloat64x4", argLength: 2, commutative: true},
{name: "FloorFloat64x4", argLength: 1, commutative: false},
- {name: "FusedMultiplyAdd132Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd213Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd231Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub132Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub213Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub231Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySub132Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySub213Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySub231Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd132Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd213Float64x4", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd231Float64x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd132Float64x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd213Float64x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd231Float64x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub132Float64x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub213Float64x4", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub231Float64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddFloat64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false},
{name: "GreaterFloat64x4", argLength: 2, commutative: false},
{name: "GreaterEqualFloat64x4", argLength: 2, commutative: false},
{name: "IsNanFloat64x4", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false},
{name: "MaskedDivFloat64x4", argLength: 3, commutative: false},
{name: "MaskedEqualFloat64x4", argLength: 3, commutative: true},
- {name: "MaskedFusedMultiplyAdd132Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd213Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd231Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub132Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub213Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub231Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub132Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub213Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub231Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd132Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd213Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd231Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd132Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd213Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd231Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub132Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub213Float64x4", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub231Float64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddFloat64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSubFloat64x4", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAddFloat64x4", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat64x4", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat64x4", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat64x4", argLength: 3, commutative: true},
{name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false},
{name: "DivFloat64x8", argLength: 2, commutative: false},
{name: "EqualFloat64x8", argLength: 2, commutative: true},
- {name: "FusedMultiplyAdd132Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd213Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplyAdd231Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub132Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub213Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplyAddSub231Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySub132Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySub213Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySub231Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd132Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd213Float64x8", argLength: 3, commutative: false},
- {name: "FusedMultiplySubAdd231Float64x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd132Float64x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd213Float64x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplyAdd231Float64x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub132Float64x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub213Float64x8", argLength: 3, commutative: false},
- {name: "FusedNegativeMultiplySub231Float64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false},
+ {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false},
{name: "GreaterFloat64x8", argLength: 2, commutative: false},
{name: "GreaterEqualFloat64x8", argLength: 2, commutative: false},
{name: "IsNanFloat64x8", argLength: 2, commutative: true},
{name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false},
{name: "MaskedDivFloat64x8", argLength: 3, commutative: false},
{name: "MaskedEqualFloat64x8", argLength: 3, commutative: true},
- {name: "MaskedFusedMultiplyAdd132Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd213Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAdd231Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub132Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub213Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplyAddSub231Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub132Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub213Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySub231Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd132Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd213Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedMultiplySubAdd231Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd132Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd213Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplyAdd231Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub132Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub213Float64x8", argLength: 4, commutative: false},
- {name: "MaskedFusedNegativeMultiplySub231Float64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddFloat64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplyAddSubFloat64x8", argLength: 4, commutative: false},
+ {name: "MaskedFusedMultiplySubAddFloat64x8", argLength: 4, commutative: false},
{name: "MaskedGreaterFloat64x8", argLength: 3, commutative: false},
{name: "MaskedGreaterEqualFloat64x8", argLength: 3, commutative: false},
{name: "MaskedIsNanFloat64x8", argLength: 3, commutative: true},
OpAMD64VRCP14PS512
OpAMD64VRSQRT14PS512
OpAMD64VDIVPS512
- OpAMD64VFMADD132PS512
OpAMD64VFMADD213PS512
- OpAMD64VFMADD231PS512
- OpAMD64VFMADDSUB132PS512
OpAMD64VFMADDSUB213PS512
- OpAMD64VFMADDSUB231PS512
- OpAMD64VFMSUB132PS512
- OpAMD64VFMSUB213PS512
- OpAMD64VFMSUB231PS512
- OpAMD64VFMSUBADD132PS512
OpAMD64VFMSUBADD213PS512
- OpAMD64VFMSUBADD231PS512
- OpAMD64VFNMADD132PS512
- OpAMD64VFNMADD213PS512
- OpAMD64VFNMADD231PS512
- OpAMD64VFNMSUB132PS512
- OpAMD64VFNMSUB213PS512
- OpAMD64VFNMSUB231PS512
OpAMD64VADDPSMasked512
OpAMD64VANDPSMasked512
OpAMD64VANDNPSMasked512
OpAMD64VRCP14PSMasked512
OpAMD64VRSQRT14PSMasked512
OpAMD64VDIVPSMasked512
- OpAMD64VFMADD132PSMasked512
OpAMD64VFMADD213PSMasked512
- OpAMD64VFMADD231PSMasked512
- OpAMD64VFMADDSUB132PSMasked512
OpAMD64VFMADDSUB213PSMasked512
- OpAMD64VFMADDSUB231PSMasked512
- OpAMD64VFMSUB132PSMasked512
- OpAMD64VFMSUB213PSMasked512
- OpAMD64VFMSUB231PSMasked512
- OpAMD64VFMSUBADD132PSMasked512
OpAMD64VFMSUBADD213PSMasked512
- OpAMD64VFMSUBADD231PSMasked512
- OpAMD64VFNMADD132PSMasked512
- OpAMD64VFNMADD213PSMasked512
- OpAMD64VFNMADD231PSMasked512
- OpAMD64VFNMSUB132PSMasked512
- OpAMD64VFNMSUB213PSMasked512
- OpAMD64VFNMSUB231PSMasked512
OpAMD64VMAXPSMasked512
OpAMD64VMINPSMasked512
OpAMD64VMULPSMasked512
OpAMD64VRCP14PS128
OpAMD64VRSQRTPS128
OpAMD64VDIVPS128
- OpAMD64VFMADD132PS128
OpAMD64VFMADD213PS128
- OpAMD64VFMADD231PS128
- OpAMD64VFMADDSUB132PS128
OpAMD64VFMADDSUB213PS128
- OpAMD64VFMADDSUB231PS128
- OpAMD64VFMSUB132PS128
- OpAMD64VFMSUB213PS128
- OpAMD64VFMSUB231PS128
- OpAMD64VFMSUBADD132PS128
OpAMD64VFMSUBADD213PS128
- OpAMD64VFMSUBADD231PS128
- OpAMD64VFNMADD132PS128
- OpAMD64VFNMADD213PS128
- OpAMD64VFNMADD231PS128
- OpAMD64VFNMSUB132PS128
- OpAMD64VFNMSUB213PS128
- OpAMD64VFNMSUB231PS128
OpAMD64VADDPSMasked128
OpAMD64VANDPSMasked128
OpAMD64VANDNPSMasked128
OpAMD64VRCP14PSMasked128
OpAMD64VRSQRT14PSMasked128
OpAMD64VDIVPSMasked128
- OpAMD64VFMADD132PSMasked128
OpAMD64VFMADD213PSMasked128
- OpAMD64VFMADD231PSMasked128
- OpAMD64VFMADDSUB132PSMasked128
OpAMD64VFMADDSUB213PSMasked128
- OpAMD64VFMADDSUB231PSMasked128
- OpAMD64VFMSUB132PSMasked128
- OpAMD64VFMSUB213PSMasked128
- OpAMD64VFMSUB231PSMasked128
- OpAMD64VFMSUBADD132PSMasked128
OpAMD64VFMSUBADD213PSMasked128
- OpAMD64VFMSUBADD231PSMasked128
- OpAMD64VFNMADD132PSMasked128
- OpAMD64VFNMADD213PSMasked128
- OpAMD64VFNMADD231PSMasked128
- OpAMD64VFNMSUB132PSMasked128
- OpAMD64VFNMSUB213PSMasked128
- OpAMD64VFNMSUB231PSMasked128
OpAMD64VMAXPSMasked128
OpAMD64VMINPSMasked128
OpAMD64VMULPSMasked128
OpAMD64VRCP14PS256
OpAMD64VRSQRTPS256
OpAMD64VDIVPS256
- OpAMD64VFMADD132PS256
OpAMD64VFMADD213PS256
- OpAMD64VFMADD231PS256
- OpAMD64VFMADDSUB132PS256
OpAMD64VFMADDSUB213PS256
- OpAMD64VFMADDSUB231PS256
- OpAMD64VFMSUB132PS256
- OpAMD64VFMSUB213PS256
- OpAMD64VFMSUB231PS256
- OpAMD64VFMSUBADD132PS256
OpAMD64VFMSUBADD213PS256
- OpAMD64VFMSUBADD231PS256
- OpAMD64VFNMADD132PS256
- OpAMD64VFNMADD213PS256
- OpAMD64VFNMADD231PS256
- OpAMD64VFNMSUB132PS256
- OpAMD64VFNMSUB213PS256
- OpAMD64VFNMSUB231PS256
OpAMD64VADDPSMasked256
OpAMD64VANDPSMasked256
OpAMD64VANDNPSMasked256
OpAMD64VRCP14PSMasked256
OpAMD64VRSQRT14PSMasked256
OpAMD64VDIVPSMasked256
- OpAMD64VFMADD132PSMasked256
OpAMD64VFMADD213PSMasked256
- OpAMD64VFMADD231PSMasked256
- OpAMD64VFMADDSUB132PSMasked256
OpAMD64VFMADDSUB213PSMasked256
- OpAMD64VFMADDSUB231PSMasked256
- OpAMD64VFMSUB132PSMasked256
- OpAMD64VFMSUB213PSMasked256
- OpAMD64VFMSUB231PSMasked256
- OpAMD64VFMSUBADD132PSMasked256
OpAMD64VFMSUBADD213PSMasked256
- OpAMD64VFMSUBADD231PSMasked256
- OpAMD64VFNMADD132PSMasked256
- OpAMD64VFNMADD213PSMasked256
- OpAMD64VFNMADD231PSMasked256
- OpAMD64VFNMSUB132PSMasked256
- OpAMD64VFNMSUB213PSMasked256
- OpAMD64VFNMSUB231PSMasked256
OpAMD64VMAXPSMasked256
OpAMD64VMINPSMasked256
OpAMD64VMULPSMasked256
OpAMD64VRCP14PD128
OpAMD64VRSQRT14PD128
OpAMD64VDIVPD128
- OpAMD64VFMADD132PD128
OpAMD64VFMADD213PD128
- OpAMD64VFMADD231PD128
- OpAMD64VFMADDSUB132PD128
OpAMD64VFMADDSUB213PD128
- OpAMD64VFMADDSUB231PD128
- OpAMD64VFMSUB132PD128
- OpAMD64VFMSUB213PD128
- OpAMD64VFMSUB231PD128
- OpAMD64VFMSUBADD132PD128
OpAMD64VFMSUBADD213PD128
- OpAMD64VFMSUBADD231PD128
- OpAMD64VFNMADD132PD128
- OpAMD64VFNMADD213PD128
- OpAMD64VFNMADD231PD128
- OpAMD64VFNMSUB132PD128
- OpAMD64VFNMSUB213PD128
- OpAMD64VFNMSUB231PD128
OpAMD64VADDPDMasked128
OpAMD64VANDPDMasked128
OpAMD64VANDNPDMasked128
OpAMD64VRCP14PDMasked128
OpAMD64VRSQRT14PDMasked128
OpAMD64VDIVPDMasked128
- OpAMD64VFMADD132PDMasked128
OpAMD64VFMADD213PDMasked128
- OpAMD64VFMADD231PDMasked128
- OpAMD64VFMADDSUB132PDMasked128
OpAMD64VFMADDSUB213PDMasked128
- OpAMD64VFMADDSUB231PDMasked128
- OpAMD64VFMSUB132PDMasked128
- OpAMD64VFMSUB213PDMasked128
- OpAMD64VFMSUB231PDMasked128
- OpAMD64VFMSUBADD132PDMasked128
OpAMD64VFMSUBADD213PDMasked128
- OpAMD64VFMSUBADD231PDMasked128
- OpAMD64VFNMADD132PDMasked128
- OpAMD64VFNMADD213PDMasked128
- OpAMD64VFNMADD231PDMasked128
- OpAMD64VFNMSUB132PDMasked128
- OpAMD64VFNMSUB213PDMasked128
- OpAMD64VFNMSUB231PDMasked128
OpAMD64VMAXPDMasked128
OpAMD64VMINPDMasked128
OpAMD64VMULPDMasked128
OpAMD64VRCP14PD256
OpAMD64VRSQRT14PD256
OpAMD64VDIVPD256
- OpAMD64VFMADD132PD256
OpAMD64VFMADD213PD256
- OpAMD64VFMADD231PD256
- OpAMD64VFMADDSUB132PD256
OpAMD64VFMADDSUB213PD256
- OpAMD64VFMADDSUB231PD256
- OpAMD64VFMSUB132PD256
- OpAMD64VFMSUB213PD256
- OpAMD64VFMSUB231PD256
- OpAMD64VFMSUBADD132PD256
OpAMD64VFMSUBADD213PD256
- OpAMD64VFMSUBADD231PD256
- OpAMD64VFNMADD132PD256
- OpAMD64VFNMADD213PD256
- OpAMD64VFNMADD231PD256
- OpAMD64VFNMSUB132PD256
- OpAMD64VFNMSUB213PD256
- OpAMD64VFNMSUB231PD256
OpAMD64VADDPDMasked256
OpAMD64VANDPDMasked256
OpAMD64VANDNPDMasked256
OpAMD64VRCP14PDMasked256
OpAMD64VRSQRT14PDMasked256
OpAMD64VDIVPDMasked256
- OpAMD64VFMADD132PDMasked256
OpAMD64VFMADD213PDMasked256
- OpAMD64VFMADD231PDMasked256
- OpAMD64VFMADDSUB132PDMasked256
OpAMD64VFMADDSUB213PDMasked256
- OpAMD64VFMADDSUB231PDMasked256
- OpAMD64VFMSUB132PDMasked256
- OpAMD64VFMSUB213PDMasked256
- OpAMD64VFMSUB231PDMasked256
- OpAMD64VFMSUBADD132PDMasked256
OpAMD64VFMSUBADD213PDMasked256
- OpAMD64VFMSUBADD231PDMasked256
- OpAMD64VFNMADD132PDMasked256
- OpAMD64VFNMADD213PDMasked256
- OpAMD64VFNMADD231PDMasked256
- OpAMD64VFNMSUB132PDMasked256
- OpAMD64VFNMSUB213PDMasked256
- OpAMD64VFNMSUB231PDMasked256
OpAMD64VMAXPDMasked256
OpAMD64VMINPDMasked256
OpAMD64VMULPDMasked256
OpAMD64VRCP14PD512
OpAMD64VRSQRT14PD512
OpAMD64VDIVPD512
- OpAMD64VFMADD132PD512
OpAMD64VFMADD213PD512
- OpAMD64VFMADD231PD512
- OpAMD64VFMADDSUB132PD512
OpAMD64VFMADDSUB213PD512
- OpAMD64VFMADDSUB231PD512
- OpAMD64VFMSUB132PD512
- OpAMD64VFMSUB213PD512
- OpAMD64VFMSUB231PD512
- OpAMD64VFMSUBADD132PD512
OpAMD64VFMSUBADD213PD512
- OpAMD64VFMSUBADD231PD512
- OpAMD64VFNMADD132PD512
- OpAMD64VFNMADD213PD512
- OpAMD64VFNMADD231PD512
- OpAMD64VFNMSUB132PD512
- OpAMD64VFNMSUB213PD512
- OpAMD64VFNMSUB231PD512
OpAMD64VADDPDMasked512
OpAMD64VANDPDMasked512
OpAMD64VANDNPDMasked512
OpAMD64VRCP14PDMasked512
OpAMD64VRSQRT14PDMasked512
OpAMD64VDIVPDMasked512
- OpAMD64VFMADD132PDMasked512
OpAMD64VFMADD213PDMasked512
- OpAMD64VFMADD231PDMasked512
- OpAMD64VFMADDSUB132PDMasked512
OpAMD64VFMADDSUB213PDMasked512
- OpAMD64VFMADDSUB231PDMasked512
- OpAMD64VFMSUB132PDMasked512
- OpAMD64VFMSUB213PDMasked512
- OpAMD64VFMSUB231PDMasked512
- OpAMD64VFMSUBADD132PDMasked512
OpAMD64VFMSUBADD213PDMasked512
- OpAMD64VFMSUBADD231PDMasked512
- OpAMD64VFNMADD132PDMasked512
- OpAMD64VFNMADD213PDMasked512
- OpAMD64VFNMADD231PDMasked512
- OpAMD64VFNMSUB132PDMasked512
- OpAMD64VFNMSUB213PDMasked512
- OpAMD64VFNMSUB231PDMasked512
OpAMD64VMAXPDMasked512
OpAMD64VMINPDMasked512
OpAMD64VMULPDMasked512
OpApproximateReciprocalOfSqrtFloat32x16
OpDivFloat32x16
OpEqualFloat32x16
- OpFusedMultiplyAdd132Float32x16
- OpFusedMultiplyAdd213Float32x16
- OpFusedMultiplyAdd231Float32x16
- OpFusedMultiplyAddSub132Float32x16
- OpFusedMultiplyAddSub213Float32x16
- OpFusedMultiplyAddSub231Float32x16
- OpFusedMultiplySub132Float32x16
- OpFusedMultiplySub213Float32x16
- OpFusedMultiplySub231Float32x16
- OpFusedMultiplySubAdd132Float32x16
- OpFusedMultiplySubAdd213Float32x16
- OpFusedMultiplySubAdd231Float32x16
- OpFusedNegativeMultiplyAdd132Float32x16
- OpFusedNegativeMultiplyAdd213Float32x16
- OpFusedNegativeMultiplyAdd231Float32x16
- OpFusedNegativeMultiplySub132Float32x16
- OpFusedNegativeMultiplySub213Float32x16
- OpFusedNegativeMultiplySub231Float32x16
+ OpFusedMultiplyAddFloat32x16
+ OpFusedMultiplyAddSubFloat32x16
+ OpFusedMultiplySubAddFloat32x16
OpGreaterFloat32x16
OpGreaterEqualFloat32x16
OpIsNanFloat32x16
OpMaskedApproximateReciprocalOfSqrtFloat32x16
OpMaskedDivFloat32x16
OpMaskedEqualFloat32x16
- OpMaskedFusedMultiplyAdd132Float32x16
- OpMaskedFusedMultiplyAdd213Float32x16
- OpMaskedFusedMultiplyAdd231Float32x16
- OpMaskedFusedMultiplyAddSub132Float32x16
- OpMaskedFusedMultiplyAddSub213Float32x16
- OpMaskedFusedMultiplyAddSub231Float32x16
- OpMaskedFusedMultiplySub132Float32x16
- OpMaskedFusedMultiplySub213Float32x16
- OpMaskedFusedMultiplySub231Float32x16
- OpMaskedFusedMultiplySubAdd132Float32x16
- OpMaskedFusedMultiplySubAdd213Float32x16
- OpMaskedFusedMultiplySubAdd231Float32x16
- OpMaskedFusedNegativeMultiplyAdd132Float32x16
- OpMaskedFusedNegativeMultiplyAdd213Float32x16
- OpMaskedFusedNegativeMultiplyAdd231Float32x16
- OpMaskedFusedNegativeMultiplySub132Float32x16
- OpMaskedFusedNegativeMultiplySub213Float32x16
- OpMaskedFusedNegativeMultiplySub231Float32x16
+ OpMaskedFusedMultiplyAddFloat32x16
+ OpMaskedFusedMultiplyAddSubFloat32x16
+ OpMaskedFusedMultiplySubAddFloat32x16
OpMaskedGreaterFloat32x16
OpMaskedGreaterEqualFloat32x16
OpMaskedIsNanFloat32x16
OpDivFloat32x4
OpEqualFloat32x4
OpFloorFloat32x4
- OpFusedMultiplyAdd132Float32x4
- OpFusedMultiplyAdd213Float32x4
- OpFusedMultiplyAdd231Float32x4
- OpFusedMultiplyAddSub132Float32x4
- OpFusedMultiplyAddSub213Float32x4
- OpFusedMultiplyAddSub231Float32x4
- OpFusedMultiplySub132Float32x4
- OpFusedMultiplySub213Float32x4
- OpFusedMultiplySub231Float32x4
- OpFusedMultiplySubAdd132Float32x4
- OpFusedMultiplySubAdd213Float32x4
- OpFusedMultiplySubAdd231Float32x4
- OpFusedNegativeMultiplyAdd132Float32x4
- OpFusedNegativeMultiplyAdd213Float32x4
- OpFusedNegativeMultiplyAdd231Float32x4
- OpFusedNegativeMultiplySub132Float32x4
- OpFusedNegativeMultiplySub213Float32x4
- OpFusedNegativeMultiplySub231Float32x4
+ OpFusedMultiplyAddFloat32x4
+ OpFusedMultiplyAddSubFloat32x4
+ OpFusedMultiplySubAddFloat32x4
OpGreaterFloat32x4
OpGreaterEqualFloat32x4
OpIsNanFloat32x4
OpMaskedApproximateReciprocalOfSqrtFloat32x4
OpMaskedDivFloat32x4
OpMaskedEqualFloat32x4
- OpMaskedFusedMultiplyAdd132Float32x4
- OpMaskedFusedMultiplyAdd213Float32x4
- OpMaskedFusedMultiplyAdd231Float32x4
- OpMaskedFusedMultiplyAddSub132Float32x4
- OpMaskedFusedMultiplyAddSub213Float32x4
- OpMaskedFusedMultiplyAddSub231Float32x4
- OpMaskedFusedMultiplySub132Float32x4
- OpMaskedFusedMultiplySub213Float32x4
- OpMaskedFusedMultiplySub231Float32x4
- OpMaskedFusedMultiplySubAdd132Float32x4
- OpMaskedFusedMultiplySubAdd213Float32x4
- OpMaskedFusedMultiplySubAdd231Float32x4
- OpMaskedFusedNegativeMultiplyAdd132Float32x4
- OpMaskedFusedNegativeMultiplyAdd213Float32x4
- OpMaskedFusedNegativeMultiplyAdd231Float32x4
- OpMaskedFusedNegativeMultiplySub132Float32x4
- OpMaskedFusedNegativeMultiplySub213Float32x4
- OpMaskedFusedNegativeMultiplySub231Float32x4
+ OpMaskedFusedMultiplyAddFloat32x4
+ OpMaskedFusedMultiplyAddSubFloat32x4
+ OpMaskedFusedMultiplySubAddFloat32x4
OpMaskedGreaterFloat32x4
OpMaskedGreaterEqualFloat32x4
OpMaskedIsNanFloat32x4
OpDivFloat32x8
OpEqualFloat32x8
OpFloorFloat32x8
- OpFusedMultiplyAdd132Float32x8
- OpFusedMultiplyAdd213Float32x8
- OpFusedMultiplyAdd231Float32x8
- OpFusedMultiplyAddSub132Float32x8
- OpFusedMultiplyAddSub213Float32x8
- OpFusedMultiplyAddSub231Float32x8
- OpFusedMultiplySub132Float32x8
- OpFusedMultiplySub213Float32x8
- OpFusedMultiplySub231Float32x8
- OpFusedMultiplySubAdd132Float32x8
- OpFusedMultiplySubAdd213Float32x8
- OpFusedMultiplySubAdd231Float32x8
- OpFusedNegativeMultiplyAdd132Float32x8
- OpFusedNegativeMultiplyAdd213Float32x8
- OpFusedNegativeMultiplyAdd231Float32x8
- OpFusedNegativeMultiplySub132Float32x8
- OpFusedNegativeMultiplySub213Float32x8
- OpFusedNegativeMultiplySub231Float32x8
+ OpFusedMultiplyAddFloat32x8
+ OpFusedMultiplyAddSubFloat32x8
+ OpFusedMultiplySubAddFloat32x8
OpGreaterFloat32x8
OpGreaterEqualFloat32x8
OpIsNanFloat32x8
OpMaskedApproximateReciprocalOfSqrtFloat32x8
OpMaskedDivFloat32x8
OpMaskedEqualFloat32x8
- OpMaskedFusedMultiplyAdd132Float32x8
- OpMaskedFusedMultiplyAdd213Float32x8
- OpMaskedFusedMultiplyAdd231Float32x8
- OpMaskedFusedMultiplyAddSub132Float32x8
- OpMaskedFusedMultiplyAddSub213Float32x8
- OpMaskedFusedMultiplyAddSub231Float32x8
- OpMaskedFusedMultiplySub132Float32x8
- OpMaskedFusedMultiplySub213Float32x8
- OpMaskedFusedMultiplySub231Float32x8
- OpMaskedFusedMultiplySubAdd132Float32x8
- OpMaskedFusedMultiplySubAdd213Float32x8
- OpMaskedFusedMultiplySubAdd231Float32x8
- OpMaskedFusedNegativeMultiplyAdd132Float32x8
- OpMaskedFusedNegativeMultiplyAdd213Float32x8
- OpMaskedFusedNegativeMultiplyAdd231Float32x8
- OpMaskedFusedNegativeMultiplySub132Float32x8
- OpMaskedFusedNegativeMultiplySub213Float32x8
- OpMaskedFusedNegativeMultiplySub231Float32x8
+ OpMaskedFusedMultiplyAddFloat32x8
+ OpMaskedFusedMultiplyAddSubFloat32x8
+ OpMaskedFusedMultiplySubAddFloat32x8
OpMaskedGreaterFloat32x8
OpMaskedGreaterEqualFloat32x8
OpMaskedIsNanFloat32x8
OpDotProdBroadcastFloat64x2
OpEqualFloat64x2
OpFloorFloat64x2
- OpFusedMultiplyAdd132Float64x2
- OpFusedMultiplyAdd213Float64x2
- OpFusedMultiplyAdd231Float64x2
- OpFusedMultiplyAddSub132Float64x2
- OpFusedMultiplyAddSub213Float64x2
- OpFusedMultiplyAddSub231Float64x2
- OpFusedMultiplySub132Float64x2
- OpFusedMultiplySub213Float64x2
- OpFusedMultiplySub231Float64x2
- OpFusedMultiplySubAdd132Float64x2
- OpFusedMultiplySubAdd213Float64x2
- OpFusedMultiplySubAdd231Float64x2
- OpFusedNegativeMultiplyAdd132Float64x2
- OpFusedNegativeMultiplyAdd213Float64x2
- OpFusedNegativeMultiplyAdd231Float64x2
- OpFusedNegativeMultiplySub132Float64x2
- OpFusedNegativeMultiplySub213Float64x2
- OpFusedNegativeMultiplySub231Float64x2
+ OpFusedMultiplyAddFloat64x2
+ OpFusedMultiplyAddSubFloat64x2
+ OpFusedMultiplySubAddFloat64x2
OpGreaterFloat64x2
OpGreaterEqualFloat64x2
OpIsNanFloat64x2
OpMaskedApproximateReciprocalOfSqrtFloat64x2
OpMaskedDivFloat64x2
OpMaskedEqualFloat64x2
- OpMaskedFusedMultiplyAdd132Float64x2
- OpMaskedFusedMultiplyAdd213Float64x2
- OpMaskedFusedMultiplyAdd231Float64x2
- OpMaskedFusedMultiplyAddSub132Float64x2
- OpMaskedFusedMultiplyAddSub213Float64x2
- OpMaskedFusedMultiplyAddSub231Float64x2
- OpMaskedFusedMultiplySub132Float64x2
- OpMaskedFusedMultiplySub213Float64x2
- OpMaskedFusedMultiplySub231Float64x2
- OpMaskedFusedMultiplySubAdd132Float64x2
- OpMaskedFusedMultiplySubAdd213Float64x2
- OpMaskedFusedMultiplySubAdd231Float64x2
- OpMaskedFusedNegativeMultiplyAdd132Float64x2
- OpMaskedFusedNegativeMultiplyAdd213Float64x2
- OpMaskedFusedNegativeMultiplyAdd231Float64x2
- OpMaskedFusedNegativeMultiplySub132Float64x2
- OpMaskedFusedNegativeMultiplySub213Float64x2
- OpMaskedFusedNegativeMultiplySub231Float64x2
+ OpMaskedFusedMultiplyAddFloat64x2
+ OpMaskedFusedMultiplyAddSubFloat64x2
+ OpMaskedFusedMultiplySubAddFloat64x2
OpMaskedGreaterFloat64x2
OpMaskedGreaterEqualFloat64x2
OpMaskedIsNanFloat64x2
OpDivFloat64x4
OpEqualFloat64x4
OpFloorFloat64x4
- OpFusedMultiplyAdd132Float64x4
- OpFusedMultiplyAdd213Float64x4
- OpFusedMultiplyAdd231Float64x4
- OpFusedMultiplyAddSub132Float64x4
- OpFusedMultiplyAddSub213Float64x4
- OpFusedMultiplyAddSub231Float64x4
- OpFusedMultiplySub132Float64x4
- OpFusedMultiplySub213Float64x4
- OpFusedMultiplySub231Float64x4
- OpFusedMultiplySubAdd132Float64x4
- OpFusedMultiplySubAdd213Float64x4
- OpFusedMultiplySubAdd231Float64x4
- OpFusedNegativeMultiplyAdd132Float64x4
- OpFusedNegativeMultiplyAdd213Float64x4
- OpFusedNegativeMultiplyAdd231Float64x4
- OpFusedNegativeMultiplySub132Float64x4
- OpFusedNegativeMultiplySub213Float64x4
- OpFusedNegativeMultiplySub231Float64x4
+ OpFusedMultiplyAddFloat64x4
+ OpFusedMultiplyAddSubFloat64x4
+ OpFusedMultiplySubAddFloat64x4
OpGreaterFloat64x4
OpGreaterEqualFloat64x4
OpIsNanFloat64x4
OpMaskedApproximateReciprocalOfSqrtFloat64x4
OpMaskedDivFloat64x4
OpMaskedEqualFloat64x4
- OpMaskedFusedMultiplyAdd132Float64x4
- OpMaskedFusedMultiplyAdd213Float64x4
- OpMaskedFusedMultiplyAdd231Float64x4
- OpMaskedFusedMultiplyAddSub132Float64x4
- OpMaskedFusedMultiplyAddSub213Float64x4
- OpMaskedFusedMultiplyAddSub231Float64x4
- OpMaskedFusedMultiplySub132Float64x4
- OpMaskedFusedMultiplySub213Float64x4
- OpMaskedFusedMultiplySub231Float64x4
- OpMaskedFusedMultiplySubAdd132Float64x4
- OpMaskedFusedMultiplySubAdd213Float64x4
- OpMaskedFusedMultiplySubAdd231Float64x4
- OpMaskedFusedNegativeMultiplyAdd132Float64x4
- OpMaskedFusedNegativeMultiplyAdd213Float64x4
- OpMaskedFusedNegativeMultiplyAdd231Float64x4
- OpMaskedFusedNegativeMultiplySub132Float64x4
- OpMaskedFusedNegativeMultiplySub213Float64x4
- OpMaskedFusedNegativeMultiplySub231Float64x4
+ OpMaskedFusedMultiplyAddFloat64x4
+ OpMaskedFusedMultiplyAddSubFloat64x4
+ OpMaskedFusedMultiplySubAddFloat64x4
OpMaskedGreaterFloat64x4
OpMaskedGreaterEqualFloat64x4
OpMaskedIsNanFloat64x4
OpApproximateReciprocalOfSqrtFloat64x8
OpDivFloat64x8
OpEqualFloat64x8
- OpFusedMultiplyAdd132Float64x8
- OpFusedMultiplyAdd213Float64x8
- OpFusedMultiplyAdd231Float64x8
- OpFusedMultiplyAddSub132Float64x8
- OpFusedMultiplyAddSub213Float64x8
- OpFusedMultiplyAddSub231Float64x8
- OpFusedMultiplySub132Float64x8
- OpFusedMultiplySub213Float64x8
- OpFusedMultiplySub231Float64x8
- OpFusedMultiplySubAdd132Float64x8
- OpFusedMultiplySubAdd213Float64x8
- OpFusedMultiplySubAdd231Float64x8
- OpFusedNegativeMultiplyAdd132Float64x8
- OpFusedNegativeMultiplyAdd213Float64x8
- OpFusedNegativeMultiplyAdd231Float64x8
- OpFusedNegativeMultiplySub132Float64x8
- OpFusedNegativeMultiplySub213Float64x8
- OpFusedNegativeMultiplySub231Float64x8
+ OpFusedMultiplyAddFloat64x8
+ OpFusedMultiplyAddSubFloat64x8
+ OpFusedMultiplySubAddFloat64x8
OpGreaterFloat64x8
OpGreaterEqualFloat64x8
OpIsNanFloat64x8
OpMaskedApproximateReciprocalOfSqrtFloat64x8
OpMaskedDivFloat64x8
OpMaskedEqualFloat64x8
- OpMaskedFusedMultiplyAdd132Float64x8
- OpMaskedFusedMultiplyAdd213Float64x8
- OpMaskedFusedMultiplyAdd231Float64x8
- OpMaskedFusedMultiplyAddSub132Float64x8
- OpMaskedFusedMultiplyAddSub213Float64x8
- OpMaskedFusedMultiplyAddSub231Float64x8
- OpMaskedFusedMultiplySub132Float64x8
- OpMaskedFusedMultiplySub213Float64x8
- OpMaskedFusedMultiplySub231Float64x8
- OpMaskedFusedMultiplySubAdd132Float64x8
- OpMaskedFusedMultiplySubAdd213Float64x8
- OpMaskedFusedMultiplySubAdd231Float64x8
- OpMaskedFusedNegativeMultiplyAdd132Float64x8
- OpMaskedFusedNegativeMultiplyAdd213Float64x8
- OpMaskedFusedNegativeMultiplyAdd231Float64x8
- OpMaskedFusedNegativeMultiplySub132Float64x8
- OpMaskedFusedNegativeMultiplySub213Float64x8
- OpMaskedFusedNegativeMultiplySub231Float64x8
+ OpMaskedFusedMultiplyAddFloat64x8
+ OpMaskedFusedMultiplyAddSubFloat64x8
+ OpMaskedFusedMultiplySubAddFloat64x8
OpMaskedGreaterFloat64x8
OpMaskedGreaterEqualFloat64x8
OpMaskedIsNanFloat64x8
},
},
},
- {
- name: "VFMADD132PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMADD213PS512",
argLen: 3,
},
},
},
- {
- name: "VFMADD231PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB132PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMADDSUB213PS512",
argLen: 3,
},
},
},
- {
- name: "VFMADDSUB231PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB132PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB213PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB231PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD132PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMSUBADD213PS512",
argLen: 3,
},
},
},
- {
- name: "VFMSUBADD231PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD132PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD213PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD231PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB132PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB213PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB231PS512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VADDPSMasked512",
argLen: 3,
},
},
},
- {
- name: "VFMADD132PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMADD213PSMasked512",
argLen: 4,
},
},
},
- {
- name: "VFMADD231PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB132PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMADDSUB213PSMasked512",
argLen: 4,
},
},
},
- {
- name: "VFMADDSUB231PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB132PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB213PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB231PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD132PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMSUBADD213PSMasked512",
argLen: 4,
},
},
},
- {
- name: "VFMSUBADD231PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD132PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD213PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD231PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB132PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB213PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB231PSMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VMAXPSMasked512",
argLen: 3,
},
},
{
- name: "VFMADD132PS128",
+ name: "VFMADD213PS128",
argLen: 3,
resultInArg0: true,
- asm: x86.AVFMADD132PS,
+ asm: x86.AVFMADD213PS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADD213PS128",
+ name: "VFMADDSUB213PS128",
argLen: 3,
resultInArg0: true,
- asm: x86.AVFMADD213PS,
+ asm: x86.AVFMADDSUB213PS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADD231PS128",
+ name: "VFMSUBADD213PS128",
argLen: 3,
resultInArg0: true,
- asm: x86.AVFMADD231PS,
+ asm: x86.AVFMSUBADD213PS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB132PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PS,
+ name: "VADDPSMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVADDPS,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB213PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB213PS,
+ name: "VANDPSMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDPS,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB231PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PS,
+ name: "VANDNPSMasked128",
+ argLen: 3,
+ asm: x86.AVANDNPS,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB132PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB132PS,
+ name: "VRCP14PSMasked128",
+ argLen: 2,
+ asm: x86.AVRCP14PS,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB213PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB213PS,
+ name: "VRSQRT14PSMasked128",
+ argLen: 2,
+ asm: x86.AVRSQRT14PS,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB231PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB231PS,
+ name: "VDIVPSMasked128",
+ argLen: 3,
+ asm: x86.AVDIVPS,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD132PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD213PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD231PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD132PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD213PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD231PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB132PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB213PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB231PS128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VADDPSMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVADDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDPSMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPSMasked128",
- argLen: 3,
- asm: x86.AVANDNPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VRCP14PSMasked128",
- argLen: 2,
- asm: x86.AVRCP14PS,
- reg: regInfo{
- inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VRSQRT14PSMasked128",
- argLen: 2,
- asm: x86.AVRSQRT14PS,
- reg: regInfo{
- inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VDIVPSMasked128",
- argLen: 3,
- asm: x86.AVDIVPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADD132PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
- {
- name: "VFMADD231PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB132PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMADDSUB213PSMasked128",
argLen: 4,
},
},
},
- {
- name: "VFMADDSUB231PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB132PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB213PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB231PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD132PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMSUBADD213PSMasked128",
argLen: 4,
},
},
},
- {
- name: "VFMSUBADD231PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD132PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD213PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD231PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB132PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB213PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB231PSMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VMAXPSMasked128",
argLen: 3,
},
},
},
- {
- name: "VMULPS128",
- argLen: 2,
- commutative: true,
- asm: x86.AVMULPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VSCALEFPS128",
- argLen: 2,
- asm: x86.AVSCALEFPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VORPS128",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VHADDPS128",
- argLen: 2,
- asm: x86.AVHADDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VHSUBPS128",
- argLen: 2,
- asm: x86.AVHSUBPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VSQRTPS128",
- argLen: 1,
- asm: x86.AVSQRTPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VSUBPS128",
- argLen: 2,
- asm: x86.AVSUBPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VXORPS128",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VADDPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVADDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VADDSUBPS256",
- argLen: 2,
- asm: x86.AVADDSUBPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPS256",
- argLen: 2,
- asm: x86.AVANDNPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VRCP14PS256",
- argLen: 1,
- asm: x86.AVRCP14PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VRSQRTPS256",
- argLen: 1,
- asm: x86.AVRSQRTPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VDIVPS256",
- argLen: 2,
- asm: x86.AVDIVPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADD132PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADD213PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADD231PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB132PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB213PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB231PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB132PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB213PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB231PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD132PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD213PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD231PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD132PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD213PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD231PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB132PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB213PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB231PS256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VADDPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVADDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPSMasked256",
- argLen: 3,
- asm: x86.AVANDNPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VRCP14PSMasked256",
- argLen: 2,
- asm: x86.AVRCP14PS,
- reg: regInfo{
- inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VRSQRT14PSMasked256",
- argLen: 2,
- asm: x86.AVRSQRT14PS,
- reg: regInfo{
- inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VDIVPSMasked256",
- argLen: 3,
- asm: x86.AVDIVPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADD132PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADD213PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADD231PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB132PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB213PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB231PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB132PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB213PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB231PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD132PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD213PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD231PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD132PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD213PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD231PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB132PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB132PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB213PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB213PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB231PSMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB231PS,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VMAXPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVMAXPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VMINPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVMINPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VMULPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVMULPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VSCALEFPSMasked256",
- argLen: 3,
- asm: x86.AVSCALEFPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VORPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VSQRTPSMasked256",
- argLen: 2,
- asm: x86.AVSQRTPS,
- reg: regInfo{
- inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VSUBPSMasked256",
- argLen: 3,
- asm: x86.AVSUBPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VXORPSMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVXORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VMAXPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVMAXPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VMINPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVMINPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VMULPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVMULPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VSCALEFPS256",
- argLen: 2,
- asm: x86.AVSCALEFPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VORPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VHADDPS256",
- argLen: 2,
- asm: x86.AVHADDPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VHSUBPS256",
- argLen: 2,
- asm: x86.AVHSUBPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VSQRTPS256",
- argLen: 1,
- asm: x86.AVSQRTPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VSUBPS256",
- argLen: 2,
- asm: x86.AVSUBPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VXORPS256",
- argLen: 2,
- commutative: true,
- asm: x86.AVXORPS,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VADDPD128",
- argLen: 2,
- commutative: true,
- asm: x86.AVADDPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VADDSUBPD128",
- argLen: 2,
- asm: x86.AVADDSUBPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDPD128",
- argLen: 2,
- commutative: true,
- asm: x86.AVANDPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VANDNPD128",
- argLen: 2,
- asm: x86.AVANDNPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VRCP14PD128",
- argLen: 1,
- asm: x86.AVRCP14PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VRSQRT14PD128",
- argLen: 1,
- asm: x86.AVRSQRT14PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VDIVPD128",
- argLen: 2,
- asm: x86.AVDIVPD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADD132PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADD132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADD213PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADD213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADD231PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADD231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB132PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB213PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB231PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB132PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB213PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB231PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD132PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD213PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD231PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD132PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD213PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD231PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB132PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB132PD,
+ {
+ name: "VMULPS128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMULPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMSUB213PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB213PD,
+ name: "VSCALEFPS128",
+ argLen: 2,
+ asm: x86.AVSCALEFPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMSUB231PD128",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB231PD,
+ name: "VORPS128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVORPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPDMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVADDPD,
+ name: "VHADDPS128",
+ argLen: 2,
+ asm: x86.AVHADDPS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPDMasked128",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPD,
+ name: "VHSUBPS128",
+ argLen: 2,
+ asm: x86.AVHSUBPS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPDMasked128",
- argLen: 3,
- asm: x86.AVANDNPD,
+ name: "VSQRTPS128",
+ argLen: 1,
+ asm: x86.AVSQRTPS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PDMasked128",
+ name: "VSUBPS128",
argLen: 2,
- asm: x86.AVRCP14PD,
+ asm: x86.AVSUBPS,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRSQRT14PDMasked128",
- argLen: 2,
- asm: x86.AVRSQRT14PD,
+ name: "VXORPS128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVXORPS,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VDIVPDMasked128",
- argLen: 3,
- asm: x86.AVDIVPD,
+ name: "VADDPS256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVADDPS,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADD132PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD132PD,
+ name: "VADDSUBPS256",
+ argLen: 2,
+ asm: x86.AVADDSUBPS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADD213PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD213PD,
+ name: "VANDPS256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVANDPS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADD231PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD231PD,
+ name: "VANDNPS256",
+ argLen: 2,
+ asm: x86.AVANDNPS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB132PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PD,
+ name: "VRCP14PS256",
+ argLen: 1,
+ asm: x86.AVRCP14PS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB213PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB213PD,
+ name: "VRSQRTPS256",
+ argLen: 1,
+ asm: x86.AVRSQRTPS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB231PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PD,
+ name: "VDIVPS256",
+ argLen: 2,
+ asm: x86.AVDIVPS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB132PDMasked128",
- argLen: 4,
+ name: "VFMADD213PS256",
+ argLen: 3,
resultInArg0: true,
- asm: x86.AVFMSUB132PD,
+ asm: x86.AVFMADD213PS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB213PDMasked128",
- argLen: 4,
+ name: "VFMADDSUB213PS256",
+ argLen: 3,
resultInArg0: true,
- asm: x86.AVFMSUB213PD,
+ asm: x86.AVFMADDSUB213PS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB231PDMasked128",
- argLen: 4,
+ name: "VFMSUBADD213PS256",
+ argLen: 3,
resultInArg0: true,
- asm: x86.AVFMSUB231PD,
+ asm: x86.AVFMSUBADD213PS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUBADD132PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PD,
+ name: "VADDPSMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVADDPS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUBADD213PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD213PD,
+ name: "VANDPSMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDPS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUBADD231PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PD,
+ name: "VANDNPSMasked256",
+ argLen: 3,
+ asm: x86.AVANDNPS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMADD132PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD132PD,
+ name: "VRCP14PSMasked256",
+ argLen: 2,
+ asm: x86.AVRCP14PS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMADD213PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD213PD,
+ name: "VRSQRT14PSMasked256",
+ argLen: 2,
+ asm: x86.AVRSQRT14PS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMADD231PDMasked128",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD231PD,
+ name: "VDIVPSMasked256",
+ argLen: 3,
+ asm: x86.AVDIVPS,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMSUB132PDMasked128",
+ name: "VFMADD213PSMasked256",
argLen: 4,
resultInArg0: true,
- asm: x86.AVFNMSUB132PD,
+ asm: x86.AVFMADD213PS,
reg: regInfo{
inputs: []inputInfo{
{3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VFNMSUB213PDMasked128",
+ name: "VFMADDSUB213PSMasked256",
argLen: 4,
resultInArg0: true,
- asm: x86.AVFNMSUB213PD,
+ asm: x86.AVFMADDSUB213PS,
reg: regInfo{
inputs: []inputInfo{
{3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VFNMSUB231PDMasked128",
+ name: "VFMSUBADD213PSMasked256",
argLen: 4,
resultInArg0: true,
- asm: x86.AVFNMSUB231PD,
+ asm: x86.AVFMSUBADD213PS,
reg: regInfo{
inputs: []inputInfo{
{3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VMAXPDMasked128",
+ name: "VMAXPSMasked256",
argLen: 3,
commutative: true,
- asm: x86.AVMAXPD,
+ asm: x86.AVMAXPS,
reg: regInfo{
inputs: []inputInfo{
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VMINPDMasked128",
+ name: "VMINPSMasked256",
argLen: 3,
commutative: true,
- asm: x86.AVMINPD,
+ asm: x86.AVMINPS,
reg: regInfo{
inputs: []inputInfo{
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VMULPDMasked128",
+ name: "VMULPSMasked256",
argLen: 3,
commutative: true,
- asm: x86.AVMULPD,
+ asm: x86.AVMULPS,
reg: regInfo{
inputs: []inputInfo{
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VSCALEFPDMasked128",
+ name: "VSCALEFPSMasked256",
argLen: 3,
- asm: x86.AVSCALEFPD,
+ asm: x86.AVSCALEFPS,
reg: regInfo{
inputs: []inputInfo{
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VORPDMasked128",
+ name: "VORPSMasked256",
argLen: 3,
commutative: true,
- asm: x86.AVORPD,
+ asm: x86.AVORPS,
reg: regInfo{
inputs: []inputInfo{
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VSQRTPDMasked128",
+ name: "VSQRTPSMasked256",
argLen: 2,
- asm: x86.AVSQRTPD,
+ asm: x86.AVSQRTPS,
reg: regInfo{
inputs: []inputInfo{
{1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VSUBPDMasked128",
+ name: "VSUBPSMasked256",
argLen: 3,
- asm: x86.AVSUBPD,
+ asm: x86.AVSUBPS,
reg: regInfo{
inputs: []inputInfo{
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VXORPDMasked128",
+ name: "VXORPSMasked256",
argLen: 3,
commutative: true,
- asm: x86.AVXORPD,
+ asm: x86.AVXORPS,
reg: regInfo{
inputs: []inputInfo{
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VMAXPD128",
+ name: "VMAXPS256",
argLen: 2,
commutative: true,
- asm: x86.AVMAXPD,
+ asm: x86.AVMAXPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMINPD128",
+ name: "VMINPS256",
argLen: 2,
commutative: true,
- asm: x86.AVMINPD,
+ asm: x86.AVMINPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VMULPD128",
+ name: "VMULPS256",
argLen: 2,
commutative: true,
- asm: x86.AVMULPD,
+ asm: x86.AVMULPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSCALEFPD128",
+ name: "VSCALEFPS256",
argLen: 2,
- asm: x86.AVSCALEFPD,
+ asm: x86.AVSCALEFPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VORPD128",
+ name: "VORPS256",
argLen: 2,
commutative: true,
- asm: x86.AVORPD,
+ asm: x86.AVORPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VHADDPD128",
+ name: "VHADDPS256",
argLen: 2,
- asm: x86.AVHADDPD,
+ asm: x86.AVHADDPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VHSUBPD128",
+ name: "VHSUBPS256",
argLen: 2,
- asm: x86.AVHSUBPD,
+ asm: x86.AVHSUBPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSQRTPD128",
+ name: "VSQRTPS256",
argLen: 1,
- asm: x86.AVSQRTPD,
+ asm: x86.AVSQRTPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VSUBPD128",
+ name: "VSUBPS256",
argLen: 2,
- asm: x86.AVSUBPD,
+ asm: x86.AVSUBPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VXORPD128",
+ name: "VXORPS256",
argLen: 2,
commutative: true,
- asm: x86.AVXORPD,
+ asm: x86.AVXORPS,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPD256",
+ name: "VADDPD128",
argLen: 2,
commutative: true,
asm: x86.AVADDPD,
},
},
{
- name: "VADDSUBPD256",
+ name: "VADDSUBPD128",
argLen: 2,
asm: x86.AVADDSUBPD,
reg: regInfo{
},
},
{
- name: "VANDPD256",
+ name: "VANDPD128",
argLen: 2,
commutative: true,
asm: x86.AVANDPD,
},
},
{
- name: "VANDNPD256",
+ name: "VANDNPD128",
argLen: 2,
asm: x86.AVANDNPD,
reg: regInfo{
},
},
{
- name: "VRCP14PD256",
+ name: "VRCP14PD128",
argLen: 1,
asm: x86.AVRCP14PD,
reg: regInfo{
},
},
{
- name: "VRSQRT14PD256",
+ name: "VRSQRT14PD128",
argLen: 1,
asm: x86.AVRSQRT14PD,
reg: regInfo{
},
},
{
- name: "VDIVPD256",
+ name: "VDIVPD128",
argLen: 2,
asm: x86.AVDIVPD,
reg: regInfo{
},
},
{
- name: "VFMADD132PD256",
+ name: "VFMADD213PD128",
argLen: 3,
resultInArg0: true,
- asm: x86.AVFMADD132PD,
+ asm: x86.AVFMADD213PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADD213PD256",
+ name: "VFMADDSUB213PD128",
argLen: 3,
resultInArg0: true,
- asm: x86.AVFMADD213PD,
+ asm: x86.AVFMADDSUB213PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADD231PD256",
+ name: "VFMSUBADD213PD128",
argLen: 3,
resultInArg0: true,
- asm: x86.AVFMADD231PD,
+ asm: x86.AVFMSUBADD213PD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB132PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PD,
+ name: "VADDPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVADDPD,
reg: regInfo{
inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VANDPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB213PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB213PD,
+ name: "VANDNPDMasked128",
+ argLen: 3,
+ asm: x86.AVANDNPD,
reg: regInfo{
inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VRCP14PDMasked128",
+ argLen: 2,
+ asm: x86.AVRCP14PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB231PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PD,
+ name: "VRSQRT14PDMasked128",
+ argLen: 2,
+ asm: x86.AVRSQRT14PD,
reg: regInfo{
inputs: []inputInfo{
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VDIVPDMasked128",
+ argLen: 3,
+ asm: x86.AVDIVPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB132PD256",
- argLen: 3,
+ name: "VFMADD213PDMasked128",
+ argLen: 4,
resultInArg0: true,
- asm: x86.AVFMSUB132PD,
+ asm: x86.AVFMADD213PD,
reg: regInfo{
inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADDSUB213PDMasked128",
+ argLen: 4,
+ resultInArg0: true,
+ asm: x86.AVFMADDSUB213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB213PD256",
- argLen: 3,
+ name: "VFMSUBADD213PDMasked128",
+ argLen: 4,
resultInArg0: true,
- asm: x86.AVFMSUB213PD,
+ asm: x86.AVFMSUBADD213PD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMAXPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMAXPD,
reg: regInfo{
inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VMINPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMINPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB231PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB231PD,
+ name: "VMULPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVMULPD,
reg: regInfo{
inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VSCALEFPDMasked128",
+ argLen: 3,
+ asm: x86.AVSCALEFPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUBADD132PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PD,
+ name: "VORPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVORPD,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUBADD213PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD213PD,
+ name: "VSQRTPDMasked128",
+ argLen: 2,
+ asm: x86.AVSQRTPD,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUBADD231PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PD,
+ name: "VSUBPDMasked128",
+ argLen: 3,
+ asm: x86.AVSUBPD,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMADD132PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD132PD,
+ name: "VXORPDMasked128",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVXORPD,
reg: regInfo{
inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMADD213PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD213PD,
+ name: "VMAXPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMAXPD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMADD231PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD231PD,
+ name: "VMINPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMINPD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMSUB132PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB132PD,
+ name: "VMULPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVMULPD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMSUB213PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB213PD,
+ name: "VSCALEFPD128",
+ argLen: 2,
+ asm: x86.AVSCALEFPD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMSUB231PD256",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB231PD,
+ name: "VORPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVORPD,
reg: regInfo{
inputs: []inputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VADDPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVADDPD,
+ name: "VHADDPD128",
+ argLen: 2,
+ asm: x86.AVHADDPD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDPDMasked256",
- argLen: 3,
- commutative: true,
- asm: x86.AVANDPD,
+ name: "VHSUBPD128",
+ argLen: 2,
+ asm: x86.AVHSUBPD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VANDNPDMasked256",
- argLen: 3,
- asm: x86.AVANDNPD,
+ name: "VSQRTPD128",
+ argLen: 1,
+ asm: x86.AVSQRTPD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRCP14PDMasked256",
+ name: "VSUBPD128",
argLen: 2,
- asm: x86.AVRCP14PD,
+ asm: x86.AVSUBPD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VRSQRT14PDMasked256",
- argLen: 2,
- asm: x86.AVRSQRT14PD,
+ name: "VXORPD128",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVXORPD,
reg: regInfo{
inputs: []inputInfo{
- {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VDIVPDMasked256",
- argLen: 3,
- asm: x86.AVDIVPD,
+ name: "VADDPD256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVADDPD,
reg: regInfo{
inputs: []inputInfo{
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADD132PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD132PD,
+ name: "VADDSUBPD256",
+ argLen: 2,
+ asm: x86.AVADDSUBPD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADD213PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD213PD,
+ name: "VANDPD256",
+ argLen: 2,
+ commutative: true,
+ asm: x86.AVANDPD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADD231PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD231PD,
+ name: "VANDNPD256",
+ argLen: 2,
+ asm: x86.AVANDNPD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB132PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PD,
+ name: "VRCP14PD256",
+ argLen: 1,
+ asm: x86.AVRCP14PD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB213PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB213PD,
+ name: "VRSQRT14PD256",
+ argLen: 1,
+ asm: x86.AVRSQRT14PD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMADDSUB231PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PD,
+ name: "VDIVPD256",
+ argLen: 2,
+ asm: x86.AVDIVPD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB132PDMasked256",
- argLen: 4,
+ name: "VFMADD213PD256",
+ argLen: 3,
resultInArg0: true,
- asm: x86.AVFMSUB132PD,
+ asm: x86.AVFMADD213PD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB213PDMasked256",
- argLen: 4,
+ name: "VFMADDSUB213PD256",
+ argLen: 3,
resultInArg0: true,
- asm: x86.AVFMSUB213PD,
+ asm: x86.AVFMADDSUB213PD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUB231PDMasked256",
- argLen: 4,
+ name: "VFMSUBADD213PD256",
+ argLen: 3,
resultInArg0: true,
- asm: x86.AVFMSUB231PD,
+ asm: x86.AVFMSUBADD213PD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUBADD132PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PD,
+ name: "VADDPDMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVADDPD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUBADD213PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD213PD,
+ name: "VANDPDMasked256",
+ argLen: 3,
+ commutative: true,
+ asm: x86.AVANDPD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFMSUBADD231PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PD,
+ name: "VANDNPDMasked256",
+ argLen: 3,
+ asm: x86.AVANDNPD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMADD132PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD132PD,
+ name: "VRCP14PDMasked256",
+ argLen: 2,
+ asm: x86.AVRCP14PD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMADD213PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD213PD,
+ name: "VRSQRT14PDMasked256",
+ argLen: 2,
+ asm: x86.AVRSQRT14PD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMADD231PDMasked256",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD231PD,
+ name: "VDIVPDMasked256",
+ argLen: 3,
+ asm: x86.AVDIVPD,
reg: regInfo{
inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
outputs: []outputInfo{
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
{
- name: "VFNMSUB132PDMasked256",
+ name: "VFMADD213PDMasked256",
argLen: 4,
resultInArg0: true,
- asm: x86.AVFNMSUB132PD,
+ asm: x86.AVFMADD213PD,
reg: regInfo{
inputs: []inputInfo{
{3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VFNMSUB213PDMasked256",
+ name: "VFMADDSUB213PDMasked256",
argLen: 4,
resultInArg0: true,
- asm: x86.AVFNMSUB213PD,
+ asm: x86.AVFMADDSUB213PD,
reg: regInfo{
inputs: []inputInfo{
{3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
{
- name: "VFNMSUB231PDMasked256",
+ name: "VFMSUBADD213PDMasked256",
argLen: 4,
resultInArg0: true,
- asm: x86.AVFNMSUB231PD,
+ asm: x86.AVFMSUBADD213PD,
reg: regInfo{
inputs: []inputInfo{
{3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
},
},
},
- {
- name: "VFMADD132PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADD132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMADD213PD512",
argLen: 3,
},
},
},
- {
- name: "VFMADD231PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADD231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB132PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMADDSUB213PD512",
argLen: 3,
},
},
},
- {
- name: "VFMADDSUB231PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB132PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB213PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB231PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUB231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD132PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMSUBADD213PD512",
argLen: 3,
},
},
},
- {
- name: "VFMSUBADD231PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD132PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD213PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD231PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMADD231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB132PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB213PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB231PD512",
- argLen: 3,
- resultInArg0: true,
- asm: x86.AVFNMSUB231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VADDPDMasked512",
argLen: 3,
},
},
},
- {
- name: "VFMADD132PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMADD213PDMasked512",
argLen: 4,
},
},
},
- {
- name: "VFMADD231PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADD231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMADDSUB132PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMADDSUB213PDMasked512",
argLen: 4,
},
},
},
- {
- name: "VFMADDSUB231PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMADDSUB231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB132PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB213PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUB231PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUB231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFMSUBADD132PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VFMSUBADD213PDMasked512",
argLen: 4,
},
},
},
- {
- name: "VFMSUBADD231PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFMSUBADD231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD132PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD213PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMADD231PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMADD231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB132PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB132PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB213PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB213PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
- {
- name: "VFNMSUB231PDMasked512",
- argLen: 4,
- resultInArg0: true,
- asm: x86.AVFNMSUB231PD,
- reg: regInfo{
- inputs: []inputInfo{
- {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- outputs: []outputInfo{
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
- },
- },
- },
{
name: "VMAXPDMasked512",
argLen: 3,
generic: true,
},
{
- name: "FusedMultiplyAdd132Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAdd213Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAdd231Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub132Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub213Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub231Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub132Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub213Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub231Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd132Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd213Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd231Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd132Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd213Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd231Float32x16",
+ name: "FusedMultiplyAddFloat32x16",
argLen: 3,
generic: true,
},
{
- name: "FusedNegativeMultiplySub132Float32x16",
+ name: "FusedMultiplyAddSubFloat32x16",
argLen: 3,
generic: true,
},
{
- name: "FusedNegativeMultiplySub213Float32x16",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub231Float32x16",
+ name: "FusedMultiplySubAddFloat32x16",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "MaskedFusedMultiplyAdd132Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAdd213Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAdd231Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub132Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub213Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub231Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub132Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub213Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub231Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd132Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd213Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd231Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd132Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd213Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd231Float32x16",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub132Float32x16",
+ name: "MaskedFusedMultiplyAddFloat32x16",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedNegativeMultiplySub213Float32x16",
+ name: "MaskedFusedMultiplyAddSubFloat32x16",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedNegativeMultiplySub231Float32x16",
+ name: "MaskedFusedMultiplySubAddFloat32x16",
argLen: 4,
generic: true,
},
generic: true,
},
{
- name: "FusedMultiplyAdd132Float32x4",
+ name: "FusedMultiplyAddFloat32x4",
argLen: 3,
generic: true,
},
{
- name: "FusedMultiplyAdd213Float32x4",
+ name: "FusedMultiplyAddSubFloat32x4",
argLen: 3,
generic: true,
},
{
- name: "FusedMultiplyAdd231Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub132Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub213Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub231Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub132Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub213Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub231Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd132Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd213Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd231Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd132Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd213Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd231Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub132Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub213Float32x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub231Float32x4",
+ name: "FusedMultiplySubAddFloat32x4",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "MaskedFusedMultiplyAdd132Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAdd213Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAdd231Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub132Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub213Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub231Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub132Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub213Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub231Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd132Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd213Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd231Float32x4",
+ name: "MaskedFusedMultiplyAddFloat32x4",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedNegativeMultiplyAdd132Float32x4",
+ name: "MaskedFusedMultiplyAddSubFloat32x4",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedNegativeMultiplyAdd213Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd231Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub132Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub213Float32x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub231Float32x4",
+ name: "MaskedFusedMultiplySubAddFloat32x4",
argLen: 4,
generic: true,
},
generic: true,
},
{
- name: "FusedMultiplyAdd132Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAdd213Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAdd231Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub132Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub213Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub231Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub132Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub213Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub231Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd132Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd213Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd231Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd132Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd213Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd231Float32x8",
+ name: "FusedMultiplyAddFloat32x8",
argLen: 3,
generic: true,
},
{
- name: "FusedNegativeMultiplySub132Float32x8",
+ name: "FusedMultiplyAddSubFloat32x8",
argLen: 3,
generic: true,
},
{
- name: "FusedNegativeMultiplySub213Float32x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub231Float32x8",
+ name: "FusedMultiplySubAddFloat32x8",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "MaskedFusedMultiplyAdd132Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAdd213Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAdd231Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub132Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub213Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub231Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub132Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub213Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub231Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd132Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd213Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd231Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd132Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd213Float32x8",
+ name: "MaskedFusedMultiplyAddFloat32x8",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedNegativeMultiplyAdd231Float32x8",
+ name: "MaskedFusedMultiplyAddSubFloat32x8",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedNegativeMultiplySub132Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub213Float32x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub231Float32x8",
+ name: "MaskedFusedMultiplySubAddFloat32x8",
argLen: 4,
generic: true,
},
generic: true,
},
{
- name: "FusedMultiplyAdd132Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAdd213Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAdd231Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub132Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub213Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub231Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub132Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub213Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub231Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd132Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd213Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd231Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd132Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd213Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd231Float64x2",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub132Float64x2",
+ name: "FusedMultiplyAddFloat64x2",
argLen: 3,
generic: true,
},
{
- name: "FusedNegativeMultiplySub213Float64x2",
+ name: "FusedMultiplyAddSubFloat64x2",
argLen: 3,
generic: true,
},
{
- name: "FusedNegativeMultiplySub231Float64x2",
+ name: "FusedMultiplySubAddFloat64x2",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "MaskedFusedMultiplyAdd132Float64x2",
+ name: "MaskedFusedMultiplyAddFloat64x2",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedMultiplyAdd213Float64x2",
+ name: "MaskedFusedMultiplyAddSubFloat64x2",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedMultiplyAdd231Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub132Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub213Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub231Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub132Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub213Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub231Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd132Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd213Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd231Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd132Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd213Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd231Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub132Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub213Float64x2",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub231Float64x2",
+ name: "MaskedFusedMultiplySubAddFloat64x2",
argLen: 4,
generic: true,
},
generic: true,
},
{
- name: "FusedMultiplyAdd132Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAdd213Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAdd231Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub132Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub213Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub231Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub132Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub213Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub231Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd132Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd213Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd231Float64x4",
+ name: "FusedMultiplyAddFloat64x4",
argLen: 3,
generic: true,
},
{
- name: "FusedNegativeMultiplyAdd132Float64x4",
+ name: "FusedMultiplyAddSubFloat64x4",
argLen: 3,
generic: true,
},
{
- name: "FusedNegativeMultiplyAdd213Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd231Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub132Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub213Float64x4",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub231Float64x4",
+ name: "FusedMultiplySubAddFloat64x4",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "MaskedFusedMultiplyAdd132Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAdd213Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAdd231Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub132Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub213Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub231Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub132Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub213Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub231Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd132Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd213Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd231Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd132Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd213Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd231Float64x4",
+ name: "MaskedFusedMultiplyAddFloat64x4",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedNegativeMultiplySub132Float64x4",
+ name: "MaskedFusedMultiplyAddSubFloat64x4",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedNegativeMultiplySub213Float64x4",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub231Float64x4",
+ name: "MaskedFusedMultiplySubAddFloat64x4",
argLen: 4,
generic: true,
},
generic: true,
},
{
- name: "FusedMultiplyAdd132Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAdd213Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAdd231Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub132Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub213Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplyAddSub231Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub132Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub213Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySub231Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd132Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd213Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedMultiplySubAdd231Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd132Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplyAdd213Float64x8",
+ name: "FusedMultiplyAddFloat64x8",
argLen: 3,
generic: true,
},
{
- name: "FusedNegativeMultiplyAdd231Float64x8",
+ name: "FusedMultiplyAddSubFloat64x8",
argLen: 3,
generic: true,
},
{
- name: "FusedNegativeMultiplySub132Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub213Float64x8",
- argLen: 3,
- generic: true,
- },
- {
- name: "FusedNegativeMultiplySub231Float64x8",
+ name: "FusedMultiplySubAddFloat64x8",
argLen: 3,
generic: true,
},
generic: true,
},
{
- name: "MaskedFusedMultiplyAdd132Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAdd213Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAdd231Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub132Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub213Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplyAddSub231Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub132Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub213Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySub231Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd132Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd213Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedMultiplySubAdd231Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd132Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd213Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplyAdd231Float64x8",
- argLen: 4,
- generic: true,
- },
- {
- name: "MaskedFusedNegativeMultiplySub132Float64x8",
+ name: "MaskedFusedMultiplyAddFloat64x8",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedNegativeMultiplySub213Float64x8",
+ name: "MaskedFusedMultiplyAddSubFloat64x8",
argLen: 4,
generic: true,
},
{
- name: "MaskedFusedNegativeMultiplySub231Float64x8",
+ name: "MaskedFusedMultiplySubAddFloat64x8",
argLen: 4,
generic: true,
},
return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v)
case OpFloorWithPrecisionFloat64x8:
return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v)
- case OpFusedMultiplyAdd132Float32x16:
- v.Op = OpAMD64VFMADD132PS512
- return true
- case OpFusedMultiplyAdd132Float32x4:
- v.Op = OpAMD64VFMADD132PS128
- return true
- case OpFusedMultiplyAdd132Float32x8:
- v.Op = OpAMD64VFMADD132PS256
- return true
- case OpFusedMultiplyAdd132Float64x2:
- v.Op = OpAMD64VFMADD132PD128
- return true
- case OpFusedMultiplyAdd132Float64x4:
- v.Op = OpAMD64VFMADD132PD256
- return true
- case OpFusedMultiplyAdd132Float64x8:
- v.Op = OpAMD64VFMADD132PD512
- return true
- case OpFusedMultiplyAdd213Float32x16:
+ case OpFusedMultiplyAddFloat32x16:
v.Op = OpAMD64VFMADD213PS512
return true
- case OpFusedMultiplyAdd213Float32x4:
+ case OpFusedMultiplyAddFloat32x4:
v.Op = OpAMD64VFMADD213PS128
return true
- case OpFusedMultiplyAdd213Float32x8:
+ case OpFusedMultiplyAddFloat32x8:
v.Op = OpAMD64VFMADD213PS256
return true
- case OpFusedMultiplyAdd213Float64x2:
+ case OpFusedMultiplyAddFloat64x2:
v.Op = OpAMD64VFMADD213PD128
return true
- case OpFusedMultiplyAdd213Float64x4:
+ case OpFusedMultiplyAddFloat64x4:
v.Op = OpAMD64VFMADD213PD256
return true
- case OpFusedMultiplyAdd213Float64x8:
+ case OpFusedMultiplyAddFloat64x8:
v.Op = OpAMD64VFMADD213PD512
return true
- case OpFusedMultiplyAdd231Float32x16:
- v.Op = OpAMD64VFMADD231PS512
- return true
- case OpFusedMultiplyAdd231Float32x4:
- v.Op = OpAMD64VFMADD231PS128
- return true
- case OpFusedMultiplyAdd231Float32x8:
- v.Op = OpAMD64VFMADD231PS256
- return true
- case OpFusedMultiplyAdd231Float64x2:
- v.Op = OpAMD64VFMADD231PD128
- return true
- case OpFusedMultiplyAdd231Float64x4:
- v.Op = OpAMD64VFMADD231PD256
- return true
- case OpFusedMultiplyAdd231Float64x8:
- v.Op = OpAMD64VFMADD231PD512
- return true
- case OpFusedMultiplyAddSub132Float32x16:
- v.Op = OpAMD64VFMADDSUB132PS512
- return true
- case OpFusedMultiplyAddSub132Float32x4:
- v.Op = OpAMD64VFMADDSUB132PS128
- return true
- case OpFusedMultiplyAddSub132Float32x8:
- v.Op = OpAMD64VFMADDSUB132PS256
- return true
- case OpFusedMultiplyAddSub132Float64x2:
- v.Op = OpAMD64VFMADDSUB132PD128
- return true
- case OpFusedMultiplyAddSub132Float64x4:
- v.Op = OpAMD64VFMADDSUB132PD256
- return true
- case OpFusedMultiplyAddSub132Float64x8:
- v.Op = OpAMD64VFMADDSUB132PD512
- return true
- case OpFusedMultiplyAddSub213Float32x16:
+ case OpFusedMultiplyAddSubFloat32x16:
v.Op = OpAMD64VFMADDSUB213PS512
return true
- case OpFusedMultiplyAddSub213Float32x4:
+ case OpFusedMultiplyAddSubFloat32x4:
v.Op = OpAMD64VFMADDSUB213PS128
return true
- case OpFusedMultiplyAddSub213Float32x8:
+ case OpFusedMultiplyAddSubFloat32x8:
v.Op = OpAMD64VFMADDSUB213PS256
return true
- case OpFusedMultiplyAddSub213Float64x2:
+ case OpFusedMultiplyAddSubFloat64x2:
v.Op = OpAMD64VFMADDSUB213PD128
return true
- case OpFusedMultiplyAddSub213Float64x4:
+ case OpFusedMultiplyAddSubFloat64x4:
v.Op = OpAMD64VFMADDSUB213PD256
return true
- case OpFusedMultiplyAddSub213Float64x8:
+ case OpFusedMultiplyAddSubFloat64x8:
v.Op = OpAMD64VFMADDSUB213PD512
return true
- case OpFusedMultiplyAddSub231Float32x16:
- v.Op = OpAMD64VFMADDSUB231PS512
- return true
- case OpFusedMultiplyAddSub231Float32x4:
- v.Op = OpAMD64VFMADDSUB231PS128
- return true
- case OpFusedMultiplyAddSub231Float32x8:
- v.Op = OpAMD64VFMADDSUB231PS256
- return true
- case OpFusedMultiplyAddSub231Float64x2:
- v.Op = OpAMD64VFMADDSUB231PD128
- return true
- case OpFusedMultiplyAddSub231Float64x4:
- v.Op = OpAMD64VFMADDSUB231PD256
- return true
- case OpFusedMultiplyAddSub231Float64x8:
- v.Op = OpAMD64VFMADDSUB231PD512
- return true
- case OpFusedMultiplySub132Float32x16:
- v.Op = OpAMD64VFMSUB132PS512
- return true
- case OpFusedMultiplySub132Float32x4:
- v.Op = OpAMD64VFMSUB132PS128
- return true
- case OpFusedMultiplySub132Float32x8:
- v.Op = OpAMD64VFMSUB132PS256
- return true
- case OpFusedMultiplySub132Float64x2:
- v.Op = OpAMD64VFMSUB132PD128
- return true
- case OpFusedMultiplySub132Float64x4:
- v.Op = OpAMD64VFMSUB132PD256
- return true
- case OpFusedMultiplySub132Float64x8:
- v.Op = OpAMD64VFMSUB132PD512
- return true
- case OpFusedMultiplySub213Float32x16:
- v.Op = OpAMD64VFMSUB213PS512
- return true
- case OpFusedMultiplySub213Float32x4:
- v.Op = OpAMD64VFMSUB213PS128
- return true
- case OpFusedMultiplySub213Float32x8:
- v.Op = OpAMD64VFMSUB213PS256
- return true
- case OpFusedMultiplySub213Float64x2:
- v.Op = OpAMD64VFMSUB213PD128
- return true
- case OpFusedMultiplySub213Float64x4:
- v.Op = OpAMD64VFMSUB213PD256
- return true
- case OpFusedMultiplySub213Float64x8:
- v.Op = OpAMD64VFMSUB213PD512
- return true
- case OpFusedMultiplySub231Float32x16:
- v.Op = OpAMD64VFMSUB231PS512
- return true
- case OpFusedMultiplySub231Float32x4:
- v.Op = OpAMD64VFMSUB231PS128
- return true
- case OpFusedMultiplySub231Float32x8:
- v.Op = OpAMD64VFMSUB231PS256
- return true
- case OpFusedMultiplySub231Float64x2:
- v.Op = OpAMD64VFMSUB231PD128
- return true
- case OpFusedMultiplySub231Float64x4:
- v.Op = OpAMD64VFMSUB231PD256
- return true
- case OpFusedMultiplySub231Float64x8:
- v.Op = OpAMD64VFMSUB231PD512
- return true
- case OpFusedMultiplySubAdd132Float32x16:
- v.Op = OpAMD64VFMSUBADD132PS512
- return true
- case OpFusedMultiplySubAdd132Float32x4:
- v.Op = OpAMD64VFMSUBADD132PS128
- return true
- case OpFusedMultiplySubAdd132Float32x8:
- v.Op = OpAMD64VFMSUBADD132PS256
- return true
- case OpFusedMultiplySubAdd132Float64x2:
- v.Op = OpAMD64VFMSUBADD132PD128
- return true
- case OpFusedMultiplySubAdd132Float64x4:
- v.Op = OpAMD64VFMSUBADD132PD256
- return true
- case OpFusedMultiplySubAdd132Float64x8:
- v.Op = OpAMD64VFMSUBADD132PD512
- return true
- case OpFusedMultiplySubAdd213Float32x16:
+ case OpFusedMultiplySubAddFloat32x16:
v.Op = OpAMD64VFMSUBADD213PS512
return true
- case OpFusedMultiplySubAdd213Float32x4:
+ case OpFusedMultiplySubAddFloat32x4:
v.Op = OpAMD64VFMSUBADD213PS128
return true
- case OpFusedMultiplySubAdd213Float32x8:
+ case OpFusedMultiplySubAddFloat32x8:
v.Op = OpAMD64VFMSUBADD213PS256
return true
- case OpFusedMultiplySubAdd213Float64x2:
+ case OpFusedMultiplySubAddFloat64x2:
v.Op = OpAMD64VFMSUBADD213PD128
return true
- case OpFusedMultiplySubAdd213Float64x4:
+ case OpFusedMultiplySubAddFloat64x4:
v.Op = OpAMD64VFMSUBADD213PD256
return true
- case OpFusedMultiplySubAdd213Float64x8:
+ case OpFusedMultiplySubAddFloat64x8:
v.Op = OpAMD64VFMSUBADD213PD512
return true
- case OpFusedMultiplySubAdd231Float32x16:
- v.Op = OpAMD64VFMSUBADD231PS512
- return true
- case OpFusedMultiplySubAdd231Float32x4:
- v.Op = OpAMD64VFMSUBADD231PS128
- return true
- case OpFusedMultiplySubAdd231Float32x8:
- v.Op = OpAMD64VFMSUBADD231PS256
- return true
- case OpFusedMultiplySubAdd231Float64x2:
- v.Op = OpAMD64VFMSUBADD231PD128
- return true
- case OpFusedMultiplySubAdd231Float64x4:
- v.Op = OpAMD64VFMSUBADD231PD256
- return true
- case OpFusedMultiplySubAdd231Float64x8:
- v.Op = OpAMD64VFMSUBADD231PD512
- return true
- case OpFusedNegativeMultiplyAdd132Float32x16:
- v.Op = OpAMD64VFNMADD132PS512
- return true
- case OpFusedNegativeMultiplyAdd132Float32x4:
- v.Op = OpAMD64VFNMADD132PS128
- return true
- case OpFusedNegativeMultiplyAdd132Float32x8:
- v.Op = OpAMD64VFNMADD132PS256
- return true
- case OpFusedNegativeMultiplyAdd132Float64x2:
- v.Op = OpAMD64VFNMADD132PD128
- return true
- case OpFusedNegativeMultiplyAdd132Float64x4:
- v.Op = OpAMD64VFNMADD132PD256
- return true
- case OpFusedNegativeMultiplyAdd132Float64x8:
- v.Op = OpAMD64VFNMADD132PD512
- return true
- case OpFusedNegativeMultiplyAdd213Float32x16:
- v.Op = OpAMD64VFNMADD213PS512
- return true
- case OpFusedNegativeMultiplyAdd213Float32x4:
- v.Op = OpAMD64VFNMADD213PS128
- return true
- case OpFusedNegativeMultiplyAdd213Float32x8:
- v.Op = OpAMD64VFNMADD213PS256
- return true
- case OpFusedNegativeMultiplyAdd213Float64x2:
- v.Op = OpAMD64VFNMADD213PD128
- return true
- case OpFusedNegativeMultiplyAdd213Float64x4:
- v.Op = OpAMD64VFNMADD213PD256
- return true
- case OpFusedNegativeMultiplyAdd213Float64x8:
- v.Op = OpAMD64VFNMADD213PD512
- return true
- case OpFusedNegativeMultiplyAdd231Float32x16:
- v.Op = OpAMD64VFNMADD231PS512
- return true
- case OpFusedNegativeMultiplyAdd231Float32x4:
- v.Op = OpAMD64VFNMADD231PS128
- return true
- case OpFusedNegativeMultiplyAdd231Float32x8:
- v.Op = OpAMD64VFNMADD231PS256
- return true
- case OpFusedNegativeMultiplyAdd231Float64x2:
- v.Op = OpAMD64VFNMADD231PD128
- return true
- case OpFusedNegativeMultiplyAdd231Float64x4:
- v.Op = OpAMD64VFNMADD231PD256
- return true
- case OpFusedNegativeMultiplyAdd231Float64x8:
- v.Op = OpAMD64VFNMADD231PD512
- return true
- case OpFusedNegativeMultiplySub132Float32x16:
- v.Op = OpAMD64VFNMSUB132PS512
- return true
- case OpFusedNegativeMultiplySub132Float32x4:
- v.Op = OpAMD64VFNMSUB132PS128
- return true
- case OpFusedNegativeMultiplySub132Float32x8:
- v.Op = OpAMD64VFNMSUB132PS256
- return true
- case OpFusedNegativeMultiplySub132Float64x2:
- v.Op = OpAMD64VFNMSUB132PD128
- return true
- case OpFusedNegativeMultiplySub132Float64x4:
- v.Op = OpAMD64VFNMSUB132PD256
- return true
- case OpFusedNegativeMultiplySub132Float64x8:
- v.Op = OpAMD64VFNMSUB132PD512
- return true
- case OpFusedNegativeMultiplySub213Float32x16:
- v.Op = OpAMD64VFNMSUB213PS512
- return true
- case OpFusedNegativeMultiplySub213Float32x4:
- v.Op = OpAMD64VFNMSUB213PS128
- return true
- case OpFusedNegativeMultiplySub213Float32x8:
- v.Op = OpAMD64VFNMSUB213PS256
- return true
- case OpFusedNegativeMultiplySub213Float64x2:
- v.Op = OpAMD64VFNMSUB213PD128
- return true
- case OpFusedNegativeMultiplySub213Float64x4:
- v.Op = OpAMD64VFNMSUB213PD256
- return true
- case OpFusedNegativeMultiplySub213Float64x8:
- v.Op = OpAMD64VFNMSUB213PD512
- return true
- case OpFusedNegativeMultiplySub231Float32x16:
- v.Op = OpAMD64VFNMSUB231PS512
- return true
- case OpFusedNegativeMultiplySub231Float32x4:
- v.Op = OpAMD64VFNMSUB231PS128
- return true
- case OpFusedNegativeMultiplySub231Float32x8:
- v.Op = OpAMD64VFNMSUB231PS256
- return true
- case OpFusedNegativeMultiplySub231Float64x2:
- v.Op = OpAMD64VFNMSUB231PD128
- return true
- case OpFusedNegativeMultiplySub231Float64x4:
- v.Op = OpAMD64VFNMSUB231PD256
- return true
- case OpFusedNegativeMultiplySub231Float64x8:
- v.Op = OpAMD64VFNMSUB231PD512
- return true
case OpGetCallerPC:
v.Op = OpAMD64LoweredGetCallerPC
return true
return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v)
case OpMaskedFloorWithPrecisionFloat64x8:
return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v)
- case OpMaskedFusedMultiplyAdd132Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v)
- case OpMaskedFusedMultiplyAdd132Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v)
- case OpMaskedFusedMultiplyAdd132Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v)
- case OpMaskedFusedMultiplyAdd132Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v)
- case OpMaskedFusedMultiplyAdd132Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v)
- case OpMaskedFusedMultiplyAdd132Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v)
- case OpMaskedFusedMultiplyAdd213Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v)
- case OpMaskedFusedMultiplyAdd213Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v)
- case OpMaskedFusedMultiplyAdd213Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v)
- case OpMaskedFusedMultiplyAdd213Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v)
- case OpMaskedFusedMultiplyAdd213Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v)
- case OpMaskedFusedMultiplyAdd213Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v)
- case OpMaskedFusedMultiplyAdd231Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v)
- case OpMaskedFusedMultiplyAdd231Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v)
- case OpMaskedFusedMultiplyAdd231Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v)
- case OpMaskedFusedMultiplyAdd231Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v)
- case OpMaskedFusedMultiplyAdd231Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v)
- case OpMaskedFusedMultiplyAdd231Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v)
- case OpMaskedFusedMultiplyAddSub132Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v)
- case OpMaskedFusedMultiplyAddSub132Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v)
- case OpMaskedFusedMultiplyAddSub132Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v)
- case OpMaskedFusedMultiplyAddSub132Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v)
- case OpMaskedFusedMultiplyAddSub132Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v)
- case OpMaskedFusedMultiplyAddSub132Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v)
- case OpMaskedFusedMultiplyAddSub213Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v)
- case OpMaskedFusedMultiplyAddSub213Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v)
- case OpMaskedFusedMultiplyAddSub213Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v)
- case OpMaskedFusedMultiplyAddSub213Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v)
- case OpMaskedFusedMultiplyAddSub213Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v)
- case OpMaskedFusedMultiplyAddSub213Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v)
- case OpMaskedFusedMultiplyAddSub231Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v)
- case OpMaskedFusedMultiplyAddSub231Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v)
- case OpMaskedFusedMultiplyAddSub231Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v)
- case OpMaskedFusedMultiplyAddSub231Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v)
- case OpMaskedFusedMultiplyAddSub231Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v)
- case OpMaskedFusedMultiplyAddSub231Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v)
- case OpMaskedFusedMultiplySub132Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v)
- case OpMaskedFusedMultiplySub132Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v)
- case OpMaskedFusedMultiplySub132Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v)
- case OpMaskedFusedMultiplySub132Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v)
- case OpMaskedFusedMultiplySub132Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v)
- case OpMaskedFusedMultiplySub132Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v)
- case OpMaskedFusedMultiplySub213Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v)
- case OpMaskedFusedMultiplySub213Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v)
- case OpMaskedFusedMultiplySub213Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v)
- case OpMaskedFusedMultiplySub213Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v)
- case OpMaskedFusedMultiplySub213Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v)
- case OpMaskedFusedMultiplySub213Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v)
- case OpMaskedFusedMultiplySub231Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v)
- case OpMaskedFusedMultiplySub231Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v)
- case OpMaskedFusedMultiplySub231Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v)
- case OpMaskedFusedMultiplySub231Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v)
- case OpMaskedFusedMultiplySub231Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v)
- case OpMaskedFusedMultiplySub231Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v)
- case OpMaskedFusedMultiplySubAdd132Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v)
- case OpMaskedFusedMultiplySubAdd132Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v)
- case OpMaskedFusedMultiplySubAdd132Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v)
- case OpMaskedFusedMultiplySubAdd132Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v)
- case OpMaskedFusedMultiplySubAdd132Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v)
- case OpMaskedFusedMultiplySubAdd132Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v)
- case OpMaskedFusedMultiplySubAdd213Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v)
- case OpMaskedFusedMultiplySubAdd213Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v)
- case OpMaskedFusedMultiplySubAdd213Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v)
- case OpMaskedFusedMultiplySubAdd213Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v)
- case OpMaskedFusedMultiplySubAdd213Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v)
- case OpMaskedFusedMultiplySubAdd213Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v)
- case OpMaskedFusedMultiplySubAdd231Float32x16:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v)
- case OpMaskedFusedMultiplySubAdd231Float32x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v)
- case OpMaskedFusedMultiplySubAdd231Float32x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v)
- case OpMaskedFusedMultiplySubAdd231Float64x2:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v)
- case OpMaskedFusedMultiplySubAdd231Float64x4:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v)
- case OpMaskedFusedMultiplySubAdd231Float64x8:
- return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v)
- case OpMaskedFusedNegativeMultiplyAdd132Float32x16:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v)
- case OpMaskedFusedNegativeMultiplyAdd132Float32x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v)
- case OpMaskedFusedNegativeMultiplyAdd132Float32x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v)
- case OpMaskedFusedNegativeMultiplyAdd132Float64x2:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v)
- case OpMaskedFusedNegativeMultiplyAdd132Float64x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v)
- case OpMaskedFusedNegativeMultiplyAdd132Float64x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v)
- case OpMaskedFusedNegativeMultiplyAdd213Float32x16:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v)
- case OpMaskedFusedNegativeMultiplyAdd213Float32x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v)
- case OpMaskedFusedNegativeMultiplyAdd213Float32x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v)
- case OpMaskedFusedNegativeMultiplyAdd213Float64x2:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v)
- case OpMaskedFusedNegativeMultiplyAdd213Float64x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v)
- case OpMaskedFusedNegativeMultiplyAdd213Float64x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v)
- case OpMaskedFusedNegativeMultiplyAdd231Float32x16:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v)
- case OpMaskedFusedNegativeMultiplyAdd231Float32x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v)
- case OpMaskedFusedNegativeMultiplyAdd231Float32x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v)
- case OpMaskedFusedNegativeMultiplyAdd231Float64x2:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v)
- case OpMaskedFusedNegativeMultiplyAdd231Float64x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v)
- case OpMaskedFusedNegativeMultiplyAdd231Float64x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v)
- case OpMaskedFusedNegativeMultiplySub132Float32x16:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v)
- case OpMaskedFusedNegativeMultiplySub132Float32x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v)
- case OpMaskedFusedNegativeMultiplySub132Float32x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v)
- case OpMaskedFusedNegativeMultiplySub132Float64x2:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v)
- case OpMaskedFusedNegativeMultiplySub132Float64x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v)
- case OpMaskedFusedNegativeMultiplySub132Float64x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v)
- case OpMaskedFusedNegativeMultiplySub213Float32x16:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v)
- case OpMaskedFusedNegativeMultiplySub213Float32x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v)
- case OpMaskedFusedNegativeMultiplySub213Float32x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v)
- case OpMaskedFusedNegativeMultiplySub213Float64x2:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v)
- case OpMaskedFusedNegativeMultiplySub213Float64x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v)
- case OpMaskedFusedNegativeMultiplySub213Float64x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v)
- case OpMaskedFusedNegativeMultiplySub231Float32x16:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v)
- case OpMaskedFusedNegativeMultiplySub231Float32x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v)
- case OpMaskedFusedNegativeMultiplySub231Float32x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v)
- case OpMaskedFusedNegativeMultiplySub231Float64x2:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v)
- case OpMaskedFusedNegativeMultiplySub231Float64x4:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v)
- case OpMaskedFusedNegativeMultiplySub231Float64x8:
- return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v)
+ case OpMaskedFusedMultiplyAddFloat32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x16(v)
+ case OpMaskedFusedMultiplyAddFloat32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x4(v)
+ case OpMaskedFusedMultiplyAddFloat32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x8(v)
+ case OpMaskedFusedMultiplyAddFloat64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x2(v)
+ case OpMaskedFusedMultiplyAddFloat64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x4(v)
+ case OpMaskedFusedMultiplyAddFloat64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x8(v)
+ case OpMaskedFusedMultiplyAddSubFloat32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x16(v)
+ case OpMaskedFusedMultiplyAddSubFloat32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x4(v)
+ case OpMaskedFusedMultiplyAddSubFloat32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x8(v)
+ case OpMaskedFusedMultiplyAddSubFloat64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x2(v)
+ case OpMaskedFusedMultiplyAddSubFloat64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x4(v)
+ case OpMaskedFusedMultiplyAddSubFloat64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x8(v)
+ case OpMaskedFusedMultiplySubAddFloat32x16:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x16(v)
+ case OpMaskedFusedMultiplySubAddFloat32x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x4(v)
+ case OpMaskedFusedMultiplySubAddFloat32x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x8(v)
+ case OpMaskedFusedMultiplySubAddFloat64x2:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x2(v)
+ case OpMaskedFusedMultiplySubAddFloat64x4:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v)
+ case OpMaskedFusedMultiplySubAddFloat64x8:
+ return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v)
case OpMaskedGreaterEqualFloat32x16:
return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v)
case OpMaskedGreaterEqualFloat32x4:
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x16(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAdd132Float32x16 x y z mask)
- // result: (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD132PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd132Float32x4 x y z mask)
- // result: (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD132PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd132Float32x8 x y z mask)
- // result: (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD132PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd132Float64x2 x y z mask)
- // result: (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD132PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd132Float64x4 x y z mask)
- // result: (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD132PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd132Float64x8 x y z mask)
- // result: (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD132PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd213Float32x16 x y z mask)
+ // match: (MaskedFusedMultiplyAddFloat32x16 x y z mask)
// result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x4(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAdd213Float32x4 x y z mask)
+ // match: (MaskedFusedMultiplyAddFloat32x4 x y z mask)
// result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x8(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAdd213Float32x8 x y z mask)
+ // match: (MaskedFusedMultiplyAddFloat32x8 x y z mask)
// result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x2(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAdd213Float64x2 x y z mask)
+ // match: (MaskedFusedMultiplyAddFloat64x2 x y z mask)
// result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x4(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAdd213Float64x4 x y z mask)
+ // match: (MaskedFusedMultiplyAddFloat64x4 x y z mask)
// result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x8(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAdd213Float64x8 x y z mask)
+ // match: (MaskedFusedMultiplyAddFloat64x8 x y z mask)
// result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd231Float32x16 x y z mask)
- // result: (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD231PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd231Float32x4 x y z mask)
- // result: (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD231PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd231Float32x8 x y z mask)
- // result: (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD231PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd231Float64x2 x y z mask)
- // result: (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD231PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd231Float64x4 x y z mask)
- // result: (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD231PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAdd231Float64x8 x y z mask)
- // result: (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADD231PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub132Float32x16 x y z mask)
- // result: (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB132PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub132Float32x4 x y z mask)
- // result: (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB132PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub132Float32x8 x y z mask)
- // result: (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB132PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub132Float64x2 x y z mask)
- // result: (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB132PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub132Float64x4 x y z mask)
- // result: (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB132PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub132Float64x8 x y z mask)
- // result: (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB132PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x16(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAddSub213Float32x16 x y z mask)
+ // match: (MaskedFusedMultiplyAddSubFloat32x16 x y z mask)
// result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x4(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAddSub213Float32x4 x y z mask)
+ // match: (MaskedFusedMultiplyAddSubFloat32x4 x y z mask)
// result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x8(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAddSub213Float32x8 x y z mask)
+ // match: (MaskedFusedMultiplyAddSubFloat32x8 x y z mask)
// result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x2(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAddSub213Float64x2 x y z mask)
+ // match: (MaskedFusedMultiplyAddSubFloat64x2 x y z mask)
// result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x4(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAddSub213Float64x4 x y z mask)
+ // match: (MaskedFusedMultiplyAddSubFloat64x4 x y z mask)
// result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x8(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAddSub213Float64x8 x y z mask)
+ // match: (MaskedFusedMultiplyAddSubFloat64x8 x y z mask)
// result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x16(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplyAddSub231Float32x16 x y z mask)
- // result: (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB231PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub231Float32x4 x y z mask)
- // result: (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB231PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub231Float32x8 x y z mask)
- // result: (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB231PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub231Float64x2 x y z mask)
- // result: (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB231PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub231Float64x4 x y z mask)
- // result: (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB231PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplyAddSub231Float64x8 x y z mask)
- // result: (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMADDSUB231PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub132Float32x16 x y z mask)
- // result: (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB132PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub132Float32x4 x y z mask)
- // result: (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB132PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub132Float32x8 x y z mask)
- // result: (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB132PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub132Float64x2 x y z mask)
- // result: (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB132PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub132Float64x4 x y z mask)
- // result: (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB132PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub132Float64x8 x y z mask)
- // result: (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB132PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub213Float32x16 x y z mask)
- // result: (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB213PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub213Float32x4 x y z mask)
- // result: (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB213PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub213Float32x8 x y z mask)
- // result: (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB213PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub213Float64x2 x y z mask)
- // result: (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB213PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub213Float64x4 x y z mask)
- // result: (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB213PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub213Float64x8 x y z mask)
- // result: (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB213PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub231Float32x16 x y z mask)
- // result: (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB231PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub231Float32x4 x y z mask)
- // result: (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB231PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub231Float32x8 x y z mask)
- // result: (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB231PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub231Float64x2 x y z mask)
- // result: (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB231PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub231Float64x4 x y z mask)
- // result: (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB231PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySub231Float64x8 x y z mask)
- // result: (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUB231PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd132Float32x16 x y z mask)
- // result: (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD132PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd132Float32x4 x y z mask)
- // result: (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD132PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd132Float32x8 x y z mask)
- // result: (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD132PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd132Float64x2 x y z mask)
- // result: (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD132PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd132Float64x4 x y z mask)
- // result: (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD132PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd132Float64x8 x y z mask)
- // result: (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD132PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd213Float32x16 x y z mask)
+ // match: (MaskedFusedMultiplySubAddFloat32x16 x y z mask)
// result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x4(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplySubAdd213Float32x4 x y z mask)
+ // match: (MaskedFusedMultiplySubAddFloat32x4 x y z mask)
// result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x8(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplySubAdd213Float32x8 x y z mask)
+ // match: (MaskedFusedMultiplySubAddFloat32x8 x y z mask)
// result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x2(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplySubAdd213Float64x2 x y z mask)
+ // match: (MaskedFusedMultiplySubAddFloat64x2 x y z mask)
// result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplySubAdd213Float64x4 x y z mask)
+ // match: (MaskedFusedMultiplySubAddFloat64x4 x y z mask)
// result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MaskedFusedMultiplySubAdd213Float64x8 x y z mask)
+ // match: (MaskedFusedMultiplySubAddFloat64x8 x y z mask)
// result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
x := v_0
return true
}
}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd231Float32x16 x y z mask)
- // result: (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD231PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd231Float32x4 x y z mask)
- // result: (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD231PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd231Float32x8 x y z mask)
- // result: (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD231PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd231Float64x2 x y z mask)
- // result: (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD231PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd231Float64x4 x y z mask)
- // result: (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD231PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedMultiplySubAdd231Float64x8 x y z mask)
- // result: (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFMSUBADD231PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask)
- // result: (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD132PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask)
- // result: (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD132PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask)
- // result: (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD132PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask)
- // result: (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD132PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask)
- // result: (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD132PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask)
- // result: (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD132PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask)
- // result: (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD213PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask)
- // result: (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD213PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask)
- // result: (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD213PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask)
- // result: (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD213PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask)
- // result: (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD213PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask)
- // result: (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD213PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask)
- // result: (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD231PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask)
- // result: (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD231PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask)
- // result: (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD231PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask)
- // result: (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD231PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask)
- // result: (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD231PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask)
- // result: (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMADD231PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub132Float32x16 x y z mask)
- // result: (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB132PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub132Float32x4 x y z mask)
- // result: (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB132PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub132Float32x8 x y z mask)
- // result: (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB132PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub132Float64x2 x y z mask)
- // result: (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB132PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub132Float64x4 x y z mask)
- // result: (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB132PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub132Float64x8 x y z mask)
- // result: (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB132PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub213Float32x16 x y z mask)
- // result: (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB213PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub213Float32x4 x y z mask)
- // result: (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB213PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub213Float32x8 x y z mask)
- // result: (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB213PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub213Float64x2 x y z mask)
- // result: (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB213PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub213Float64x4 x y z mask)
- // result: (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB213PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub213Float64x8 x y z mask)
- // result: (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB213PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub231Float32x16 x y z mask)
- // result: (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB231PSMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub231Float32x4 x y z mask)
- // result: (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB231PSMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub231Float32x8 x y z mask)
- // result: (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB231PSMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub231Float64x2 x y z mask)
- // result: (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB231PDMasked128)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub231Float64x4 x y z mask)
- // result: (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB231PDMasked256)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFusedNegativeMultiplySub231Float64x8 x y z mask)
- // result: (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- x := v_0
- y := v_1
- z := v_2
- mask := v_3
- v.reset(OpAMD64VFNMSUB231PDMasked512)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg4(x, y, z, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x16.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64)
+ addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64)
+ addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64)
+ addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64)
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8

-/* FusedMultiplyAdd132 */
+/* FusedMultiplyAdd */

-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplyAdd132(y Float32x4, z Float32x4) Float32x4
-
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplyAdd132(y Float32x8, z Float32x8) Float32x8
-
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplyAdd132(y Float32x16, z Float32x16) Float32x16
-
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplyAdd132(y Float64x2, z Float64x2) Float64x2
-
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplyAdd132(y Float64x4, z Float64x4) Float64x4
-
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplyAdd132(y Float64x8, z Float64x8) Float64x8
-
-/* FusedMultiplyAdd213 */
-
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplyAdd213(y Float32x4, z Float32x4) Float32x4
+func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplyAdd213(y Float32x8, z Float32x8) Float32x8
+func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplyAdd213(y Float32x16, z Float32x16) Float32x16
+func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplyAdd213(y Float64x2, z Float64x2) Float64x2
+func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplyAdd213(y Float64x4, z Float64x4) Float64x4
+func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplyAdd213(y Float64x8, z Float64x8) Float64x8
+func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8
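// Minimal usage sketch for the renamed method (hypothetical caller; the
// semantics are those stated in the doc comments above):
//
//	func fma64(x, y, z simd.Float64x8) simd.Float64x8 {
//		// Per element: (x * y) + z with a single rounding, via VFMADD213PD.
//		return x.FusedMultiplyAdd(y, z)
//	}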
-/* FusedMultiplyAdd231 */
+/* FusedMultiplyAddSub */
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplyAdd231(y Float32x4, z Float32x4) Float32x4
-
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplyAdd231(y Float32x8, z Float32x8) Float32x8
-
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplyAdd231(y Float32x16, z Float32x16) Float32x16
-
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplyAdd231(y Float64x2, z Float64x2) Float64x2
-
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplyAdd231(y Float64x4, z Float64x4) Float64x4
-
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplyAdd231(y Float64x8, z Float64x8) Float64x8
-
-/* FusedMultiplyAddSub132 */
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplyAddSub132(y Float32x4, z Float32x4) Float32x4
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplyAddSub132(y Float32x8, z Float32x8) Float32x8
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplyAddSub132(y Float32x16, z Float32x16) Float32x16
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplyAddSub132(y Float64x2, z Float64x2) Float64x2
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplyAddSub132(y Float64x4, z Float64x4) Float64x4
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplyAddSub132(y Float64x8, z Float64x8) Float64x8
-
-/* FusedMultiplyAddSub213 */
-
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplyAddSub213(y Float32x4, z Float32x4) Float32x4
+func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplyAddSub213(y Float32x8, z Float32x8) Float32x8
+func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplyAddSub213(y Float32x16, z Float32x16) Float32x16
+func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplyAddSub213(y Float64x2, z Float64x2) Float64x2
+func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplyAddSub213(y Float64x4, z Float64x4) Float64x4
+func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplyAddSub213(y Float64x8, z Float64x8) Float64x8
-
-/* FusedMultiplyAddSub231 */
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplyAddSub231(y Float32x4, z Float32x4) Float32x4
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplyAddSub231(y Float32x8, z Float32x8) Float32x8
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplyAddSub231(y Float32x16, z Float32x16) Float32x16
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplyAddSub231(y Float64x2, z Float64x2) Float64x2
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplyAddSub231(y Float64x4, z Float64x4) Float64x4
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplyAddSub231(y Float64x8, z Float64x8) Float64x8
-
-/* FusedMultiplySub132 */
+func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8
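// Hypothetical sketch of the alternating behavior described in the doc
// comments above (odd-indexed lanes subtract z, even-indexed lanes add z):
//
//	func fmaddsub(x, y, z simd.Float32x8) simd.Float32x8 {
//		// result[i] = x[i]*y[i] - z[i] for odd i, x[i]*y[i] + z[i] for even i
//		// (VFMADDSUB213PS).
//		return x.FusedMultiplyAddSub(y, z)
//	}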
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplySub132(y Float32x4, z Float32x4) Float32x4
-
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplySub132(y Float32x8, z Float32x8) Float32x8
-
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplySub132(y Float32x16, z Float32x16) Float32x16
-
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplySub132(y Float64x2, z Float64x2) Float64x2
-
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplySub132(y Float64x4, z Float64x4) Float64x4
+/* FusedMultiplySubAdd */
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplySub132(y Float64x8, z Float64x8) Float64x8
-
-/* FusedMultiplySub213 */
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplySub213(y Float32x4, z Float32x4) Float32x4
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplySub213(y Float32x8, z Float32x8) Float32x8
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplySub213(y Float32x16, z Float32x16) Float32x16
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplySub213(y Float64x2, z Float64x2) Float64x2
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplySub213(y Float64x4, z Float64x4) Float64x4
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplySub213(y Float64x8, z Float64x8) Float64x8
-
-/* FusedMultiplySub231 */
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplySub231(y Float32x4, z Float32x4) Float32x4
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplySub231(y Float32x8, z Float32x8) Float32x8
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplySub231(y Float32x16, z Float32x16) Float32x16
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplySub231(y Float64x2, z Float64x2) Float64x2
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplySub231(y Float64x4, z Float64x4) Float64x4
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplySub231(y Float64x8, z Float64x8) Float64x8
-
-/* FusedMultiplySubAdd132 */
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplySubAdd132(y Float32x4, z Float32x4) Float32x4
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplySubAdd132(y Float32x8, z Float32x8) Float32x8
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplySubAdd132(y Float32x16, z Float32x16) Float32x16
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplySubAdd132(y Float64x2, z Float64x2) Float64x2
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplySubAdd132(y Float64x4, z Float64x4) Float64x4
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplySubAdd132(y Float64x8, z Float64x8) Float64x8
-
-/* FusedMultiplySubAdd213 */
-
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplySubAdd213(y Float32x4, z Float32x4) Float32x4
+func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplySubAdd213(y Float32x8, z Float32x8) Float32x8
+func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplySubAdd213(y Float32x16, z Float32x16) Float32x16
+func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplySubAdd213(y Float64x2, z Float64x2) Float64x2
+func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplySubAdd213(y Float64x4, z Float64x4) Float64x4
+func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplySubAdd213(y Float64x8, z Float64x8) Float64x8
-
-/* FusedMultiplySubAdd231 */
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedMultiplySubAdd231(y Float32x4, z Float32x4) Float32x4
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedMultiplySubAdd231(y Float32x8, z Float32x8) Float32x8
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedMultiplySubAdd231(y Float32x16, z Float32x16) Float32x16
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedMultiplySubAdd231(y Float64x2, z Float64x2) Float64x2
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedMultiplySubAdd231(y Float64x4, z Float64x4) Float64x4
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedMultiplySubAdd231(y Float64x8, z Float64x8) Float64x8
-
-/* FusedNegativeMultiplyAdd132 */
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedNegativeMultiplyAdd132(y Float32x4, z Float32x4) Float32x4
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedNegativeMultiplyAdd132(y Float32x8, z Float32x8) Float32x8
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedNegativeMultiplyAdd132(y Float32x16, z Float32x16) Float32x16
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedNegativeMultiplyAdd132(y Float64x2, z Float64x2) Float64x2
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedNegativeMultiplyAdd132(y Float64x4, z Float64x4) Float64x4
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedNegativeMultiplyAdd132(y Float64x8, z Float64x8) Float64x8
-
-/* FusedNegativeMultiplyAdd213 */
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedNegativeMultiplyAdd213(y Float32x4, z Float32x4) Float32x4
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedNegativeMultiplyAdd213(y Float32x8, z Float32x8) Float32x8
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedNegativeMultiplyAdd213(y Float32x16, z Float32x16) Float32x16
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedNegativeMultiplyAdd213(y Float64x2, z Float64x2) Float64x2
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedNegativeMultiplyAdd213(y Float64x4, z Float64x4) Float64x4
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedNegativeMultiplyAdd213(y Float64x8, z Float64x8) Float64x8
-
-/* FusedNegativeMultiplyAdd231 */
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedNegativeMultiplyAdd231(y Float32x4, z Float32x4) Float32x4
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedNegativeMultiplyAdd231(y Float32x8, z Float32x8) Float32x8
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedNegativeMultiplyAdd231(y Float32x16, z Float32x16) Float32x16
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedNegativeMultiplyAdd231(y Float64x2, z Float64x2) Float64x2
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedNegativeMultiplyAdd231(y Float64x4, z Float64x4) Float64x4
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedNegativeMultiplyAdd231(y Float64x8, z Float64x8) Float64x8
-
-/* FusedNegativeMultiplySub132 */
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedNegativeMultiplySub132(y Float32x4, z Float32x4) Float32x4
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedNegativeMultiplySub132(y Float32x8, z Float32x8) Float32x8
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedNegativeMultiplySub132(y Float32x16, z Float32x16) Float32x16
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedNegativeMultiplySub132(y Float64x2, z Float64x2) Float64x2
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedNegativeMultiplySub132(y Float64x4, z Float64x4) Float64x4
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedNegativeMultiplySub132(y Float64x8, z Float64x8) Float64x8
-
-/* FusedNegativeMultiplySub213 */
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedNegativeMultiplySub213(y Float32x4, z Float32x4) Float32x4
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedNegativeMultiplySub213(y Float32x8, z Float32x8) Float32x8
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedNegativeMultiplySub213(y Float32x16, z Float32x16) Float32x16
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedNegativeMultiplySub213(y Float64x2, z Float64x2) Float64x2
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedNegativeMultiplySub213(y Float64x4, z Float64x4) Float64x4
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedNegativeMultiplySub213(y Float64x8, z Float64x8) Float64x8
-
-/* FusedNegativeMultiplySub231 */
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) FusedNegativeMultiplySub231(y Float32x4, z Float32x4) Float32x4
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) FusedNegativeMultiplySub231(y Float32x8, z Float32x8) Float32x8
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) FusedNegativeMultiplySub231(y Float32x16, z Float32x16) Float32x16
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) FusedNegativeMultiplySub231(y Float64x2, z Float64x2) Float64x2
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) FusedNegativeMultiplySub231(y Float64x4, z Float64x4) Float64x4
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) FusedNegativeMultiplySub231(y Float64x8, z Float64x8) Float64x8
+func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8
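// Hypothetical sketch mirroring the doc comments above: FusedMultiplySubAdd
// adds z on odd-indexed lanes and subtracts it on even-indexed lanes:
//
//	func fmsubadd(x, y, z simd.Float64x2) simd.Float64x2 {
//		// result[i] = x[i]*y[i] + z[i] for odd i, x[i]*y[i] - z[i] for even i
//		// (VFMSUBADD213PD).
//		return x.FusedMultiplySubAdd(y, z)
//	}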
/* Greater */
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8
-/* MaskedFusedMultiplyAdd132 */
+/* MaskedFusedMultiplyAdd */
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedMultiplyAdd132 performs `(v1 * v3) + v2`.
-//
-// Asm: VFMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedMultiplyAdd213 */
-
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+func (x Float32x4) MaskedFusedMultiplyAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+func (x Float32x8) MaskedFusedMultiplyAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+func (x Float32x16) MaskedFusedMultiplyAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+func (x Float64x2) MaskedFusedMultiplyAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+func (x Float64x4) MaskedFusedMultiplyAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-// FusedMultiplyAdd213 performs `(v2 * v1) + v3`.
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+func (x Float64x8) MaskedFusedMultiplyAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8
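// Hypothetical caller for the masked variant above; the extra Mask argument
// selects which elements the fused operation applies to (the treatment of
// unselected elements follows the package's masked-op convention):
//
//	func maskedFMA64(x, y, z simd.Float64x8, m simd.Mask64x8) simd.Float64x8 {
//		return x.MaskedFusedMultiplyAdd(y, z, m) // masked VFMADD213PD: x*y + z
//	}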
-/* MaskedFusedMultiplyAdd231 */
+/* MaskedFusedMultiplyAddSub */
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedMultiplyAdd231 performs `(v2 * v3) + v1`.
-//
-// Asm: VFMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedMultiplyAddSub132 */
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplyAddSub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplyAddSub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplyAddSub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplyAddSub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplyAddSub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements.
-//
-// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplyAddSub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedMultiplyAddSub213 */
-
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplyAddSub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+func (x Float32x4) MaskedFusedMultiplyAddSub(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplyAddSub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+func (x Float32x8) MaskedFusedMultiplyAddSub(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplyAddSub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+func (x Float32x16) MaskedFusedMultiplyAddSub(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplyAddSub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+func (x Float64x2) MaskedFusedMultiplyAddSub(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplyAddSub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+func (x Float64x4) MaskedFusedMultiplyAddSub(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements.
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplyAddSub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedMultiplyAddSub231 */
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplyAddSub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplyAddSub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplyAddSub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplyAddSub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplyAddSub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements.
-//
-// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplyAddSub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedMultiplySub132 */
+func (x Float64x8) MaskedFusedMultiplyAddSub(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+/* MaskedFusedMultiplySubAdd */
-// FusedMultiplySub132 performs `(v1 * v3) - v2`.
-//
-// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedMultiplySub213 */
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedMultiplySub213 performs `(v2 * v1) - v3`.
-//
-// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedMultiplySub231 */
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedMultiplySub231 performs `(v2 * v3) - v1`.
-//
-// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedMultiplySubAdd132 */
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplySubAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplySubAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplySubAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplySubAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplySubAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements.
-//
-// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplySubAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedMultiplySubAdd213 */
-
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplySubAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+func (x Float32x4) MaskedFusedMultiplySubAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplySubAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+func (x Float32x8) MaskedFusedMultiplySubAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplySubAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+func (x Float32x16) MaskedFusedMultiplySubAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplySubAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+func (x Float64x2) MaskedFusedMultiplySubAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplySubAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+func (x Float64x4) MaskedFusedMultiplySubAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements.
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplySubAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedMultiplySubAdd231 */
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedMultiplySubAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedMultiplySubAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedMultiplySubAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedMultiplySubAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedMultiplySubAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements.
-//
-// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedMultiplySubAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedNegativeMultiplyAdd132 */
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedNegativeMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedNegativeMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedNegativeMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedNegativeMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedNegativeMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`.
-//
-// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedNegativeMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedNegativeMultiplyAdd213 */
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedNegativeMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedNegativeMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedNegativeMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedNegativeMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedNegativeMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`.
-//
-// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedNegativeMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedNegativeMultiplyAdd231 */
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedNegativeMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedNegativeMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedNegativeMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedNegativeMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedNegativeMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`.
-//
-// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedNegativeMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedNegativeMultiplySub132 */
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedNegativeMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedNegativeMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedNegativeMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedNegativeMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedNegativeMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`.
-//
-// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedNegativeMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedNegativeMultiplySub213 */
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedNegativeMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedNegativeMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedNegativeMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedNegativeMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedNegativeMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`.
-//
-// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedNegativeMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8
-
-/* MaskedFusedNegativeMultiplySub231 */
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFusedNegativeMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFusedNegativeMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFusedNegativeMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFusedNegativeMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFusedNegativeMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4
-
-// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`.
-//
-// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFusedNegativeMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8
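// Hypothetical sketch for the masked alternating form above (odd lanes add z,
// even lanes subtract z, per the doc comments), gated by a Mask32x16:
//
//	func maskedFMSubAdd(x, y, z simd.Float32x16, m simd.Mask32x16) simd.Float32x16 {
//		return x.MaskedFusedMultiplySubAdd(y, z, m) // masked VFMSUBADD213PS
//	}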
/* MaskedGreater */