This CL is generated by CL 685035.
Change-Id: Ic3a043e83e62d0be77de97ef63a20d34bf1e2dc0
Reviewed-on: https://go-review.googlesource.com/c/go/+/685055
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: David Chase <drchase@google.com>
(CeilFloat32x8 x) => (VROUNDPS256 [2] x)
(CeilFloat64x2 x) => (VROUNDPD128 [2] x)
(CeilFloat64x4 x) => (VROUNDPD256 [2] x)
-(CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+10] x)
-(CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+10] x)
-(CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+10] x)
-(CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+10] x)
-(CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+10] x)
-(CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+10] x)
(CeilWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x)
(CeilWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x)
(CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x)
(CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x)
(CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x)
(CeilWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x)
-(DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+10] x)
-(DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+10] x)
-(DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+10] x)
-(DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+10] x)
-(DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+10] x)
-(DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+10] x)
(DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x)
(DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x)
(DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x)
(DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x)
(DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x)
(DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x)
-(DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+9] x)
-(DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+9] x)
-(DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+9] x)
-(DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+9] x)
-(DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+9] x)
-(DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+9] x)
(DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x)
(DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x)
(DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x)
(DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x)
(DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x)
(DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x)
-(DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+8] x)
-(DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+8] x)
-(DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+8] x)
-(DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+8] x)
-(DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+8] x)
-(DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+8] x)
(DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x)
(DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x)
(DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x)
(DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x)
(DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x)
(DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x)
-(DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+11] x)
-(DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+11] x)
-(DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+11] x)
-(DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+11] x)
-(DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+11] x)
-(DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+11] x)
(DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x)
(DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x)
(DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x)
(FloorFloat32x8 x) => (VROUNDPS256 [1] x)
(FloorFloat64x2 x) => (VROUNDPD128 [1] x)
(FloorFloat64x4 x) => (VROUNDPD256 [1] x)
-(FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+9] x)
-(FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+9] x)
-(FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+9] x)
-(FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+9] x)
-(FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+9] x)
-(FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+9] x)
(FloorWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] x)
(FloorWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x)
(FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x)
(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM <types.TypeMask> mask))
(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM <types.TypeMask> mask))
(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM <types.TypeMask> mask))
-(MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedCeilWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedCeilWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(MaskedCeilWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(MaskedCeilWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(MaskedCeilWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
-(MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedFloorWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedFloorWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(MaskedRotateRightUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
(MaskedRotateRightUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
(MaskedRotateRightUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedRoundWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedRoundWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
-(MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM <types.TypeMask> mask))
-(MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM <types.TypeMask> mask))
-(MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM <types.TypeMask> mask))
-(MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM <types.TypeMask> mask))
-(MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(MaskedTruncWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(MaskedTruncWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(RoundFloat32x8 x) => (VROUNDPS256 [0] x)
(RoundFloat64x2 x) => (VROUNDPD128 [0] x)
(RoundFloat64x4 x) => (VROUNDPD256 [0] x)
-(RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+8] x)
-(RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+8] x)
-(RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+8] x)
-(RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+8] x)
-(RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+8] x)
-(RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+8] x)
(RoundWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x)
(RoundWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x)
(RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x)
(TruncFloat32x8 x) => (VROUNDPS256 [3] x)
(TruncFloat64x2 x) => (VROUNDPD128 [3] x)
(TruncFloat64x4 x) => (VROUNDPD256 [3] x)
-(TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+11] x)
-(TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+11] x)
-(TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+11] x)
-(TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+11] x)
-(TruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+11] x)
-(TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+11] x)
(TruncWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x)
(TruncWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x)
(TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x)
{name: "SaturatedSubUint8x64", argLength: 2, commutative: false},
{name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false},
{name: "SubUint8x64", argLength: 2, commutative: false},
- {name: "CeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
{name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
{name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
{name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
{name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
{name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
{name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
{name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
{name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
{name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedDiffWithTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
{name: "MaskedTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
{name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"},
{name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"},
OpSaturatedSubUint8x64
OpSaturatedUnsignedSignedPairDotProdUint8x64
OpSubUint8x64
- OpCeilSuppressExceptionWithPrecisionFloat32x16
OpCeilWithPrecisionFloat32x16
- OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16
OpDiffWithCeilWithPrecisionFloat32x16
- OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16
OpDiffWithFloorWithPrecisionFloat32x16
- OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16
OpDiffWithRoundWithPrecisionFloat32x16
- OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16
OpDiffWithTruncWithPrecisionFloat32x16
- OpFloorSuppressExceptionWithPrecisionFloat32x16
OpFloorWithPrecisionFloat32x16
- OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16
OpMaskedCeilWithPrecisionFloat32x16
- OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16
OpMaskedDiffWithCeilWithPrecisionFloat32x16
- OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16
OpMaskedDiffWithFloorWithPrecisionFloat32x16
- OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16
OpMaskedDiffWithRoundWithPrecisionFloat32x16
- OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16
OpMaskedDiffWithTruncWithPrecisionFloat32x16
- OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16
OpMaskedFloorWithPrecisionFloat32x16
- OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16
OpMaskedRoundWithPrecisionFloat32x16
- OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16
OpMaskedTruncWithPrecisionFloat32x16
- OpRoundSuppressExceptionWithPrecisionFloat32x16
OpRoundWithPrecisionFloat32x16
- OpTruncSuppressExceptionWithPrecisionFloat32x16
OpTruncWithPrecisionFloat32x16
- OpCeilSuppressExceptionWithPrecisionFloat32x4
OpCeilWithPrecisionFloat32x4
- OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4
OpDiffWithCeilWithPrecisionFloat32x4
- OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4
OpDiffWithFloorWithPrecisionFloat32x4
- OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4
OpDiffWithRoundWithPrecisionFloat32x4
- OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4
OpDiffWithTruncWithPrecisionFloat32x4
- OpFloorSuppressExceptionWithPrecisionFloat32x4
OpFloorWithPrecisionFloat32x4
- OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4
OpMaskedCeilWithPrecisionFloat32x4
- OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4
OpMaskedDiffWithCeilWithPrecisionFloat32x4
- OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4
OpMaskedDiffWithFloorWithPrecisionFloat32x4
- OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4
OpMaskedDiffWithRoundWithPrecisionFloat32x4
- OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4
OpMaskedDiffWithTruncWithPrecisionFloat32x4
- OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4
OpMaskedFloorWithPrecisionFloat32x4
- OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4
OpMaskedRoundWithPrecisionFloat32x4
- OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4
OpMaskedTruncWithPrecisionFloat32x4
- OpRoundSuppressExceptionWithPrecisionFloat32x4
OpRoundWithPrecisionFloat32x4
- OpTruncSuppressExceptionWithPrecisionFloat32x4
OpTruncWithPrecisionFloat32x4
- OpCeilSuppressExceptionWithPrecisionFloat32x8
OpCeilWithPrecisionFloat32x8
- OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8
OpDiffWithCeilWithPrecisionFloat32x8
- OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8
OpDiffWithFloorWithPrecisionFloat32x8
- OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8
OpDiffWithRoundWithPrecisionFloat32x8
- OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8
OpDiffWithTruncWithPrecisionFloat32x8
- OpFloorSuppressExceptionWithPrecisionFloat32x8
OpFloorWithPrecisionFloat32x8
- OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8
OpMaskedCeilWithPrecisionFloat32x8
- OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8
OpMaskedDiffWithCeilWithPrecisionFloat32x8
- OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8
OpMaskedDiffWithFloorWithPrecisionFloat32x8
- OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8
OpMaskedDiffWithRoundWithPrecisionFloat32x8
- OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8
OpMaskedDiffWithTruncWithPrecisionFloat32x8
- OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8
OpMaskedFloorWithPrecisionFloat32x8
- OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8
OpMaskedRoundWithPrecisionFloat32x8
- OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8
OpMaskedTruncWithPrecisionFloat32x8
- OpRoundSuppressExceptionWithPrecisionFloat32x8
OpRoundWithPrecisionFloat32x8
- OpTruncSuppressExceptionWithPrecisionFloat32x8
OpTruncWithPrecisionFloat32x8
- OpCeilSuppressExceptionWithPrecisionFloat64x2
OpCeilWithPrecisionFloat64x2
- OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2
OpDiffWithCeilWithPrecisionFloat64x2
- OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2
OpDiffWithFloorWithPrecisionFloat64x2
- OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2
OpDiffWithRoundWithPrecisionFloat64x2
- OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2
OpDiffWithTruncWithPrecisionFloat64x2
- OpFloorSuppressExceptionWithPrecisionFloat64x2
OpFloorWithPrecisionFloat64x2
- OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2
OpMaskedCeilWithPrecisionFloat64x2
- OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2
OpMaskedDiffWithCeilWithPrecisionFloat64x2
- OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2
OpMaskedDiffWithFloorWithPrecisionFloat64x2
- OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2
OpMaskedDiffWithRoundWithPrecisionFloat64x2
- OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2
OpMaskedDiffWithTruncWithPrecisionFloat64x2
- OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2
OpMaskedFloorWithPrecisionFloat64x2
- OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2
OpMaskedRoundWithPrecisionFloat64x2
- OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2
OpMaskedTruncWithPrecisionFloat64x2
- OpRoundSuppressExceptionWithPrecisionFloat64x2
OpRoundWithPrecisionFloat64x2
- OpTruncSuppressExceptionWithPrecisionFloat64x2
OpTruncWithPrecisionFloat64x2
- OpCeilSuppressExceptionWithPrecisionFloat64x4
OpCeilWithPrecisionFloat64x4
- OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4
OpDiffWithCeilWithPrecisionFloat64x4
- OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4
OpDiffWithFloorWithPrecisionFloat64x4
- OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4
OpDiffWithRoundWithPrecisionFloat64x4
- OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4
OpDiffWithTruncWithPrecisionFloat64x4
- OpFloorSuppressExceptionWithPrecisionFloat64x4
OpFloorWithPrecisionFloat64x4
- OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4
OpMaskedCeilWithPrecisionFloat64x4
- OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4
OpMaskedDiffWithCeilWithPrecisionFloat64x4
- OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4
OpMaskedDiffWithFloorWithPrecisionFloat64x4
- OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4
OpMaskedDiffWithRoundWithPrecisionFloat64x4
- OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4
OpMaskedDiffWithTruncWithPrecisionFloat64x4
- OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4
OpMaskedFloorWithPrecisionFloat64x4
- OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4
OpMaskedRoundWithPrecisionFloat64x4
- OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4
OpMaskedTruncWithPrecisionFloat64x4
- OpRoundSuppressExceptionWithPrecisionFloat64x4
OpRoundWithPrecisionFloat64x4
- OpTruncSuppressExceptionWithPrecisionFloat64x4
OpTruncWithPrecisionFloat64x4
- OpCeilSuppressExceptionWithPrecisionFloat64x8
OpCeilWithPrecisionFloat64x8
- OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8
OpDiffWithCeilWithPrecisionFloat64x8
- OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8
OpDiffWithFloorWithPrecisionFloat64x8
- OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8
OpDiffWithRoundWithPrecisionFloat64x8
- OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8
OpDiffWithTruncWithPrecisionFloat64x8
- OpFloorSuppressExceptionWithPrecisionFloat64x8
OpFloorWithPrecisionFloat64x8
- OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8
OpMaskedCeilWithPrecisionFloat64x8
- OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8
OpMaskedDiffWithCeilWithPrecisionFloat64x8
- OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8
OpMaskedDiffWithFloorWithPrecisionFloat64x8
- OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8
OpMaskedDiffWithRoundWithPrecisionFloat64x8
- OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8
OpMaskedDiffWithTruncWithPrecisionFloat64x8
- OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8
OpMaskedFloorWithPrecisionFloat64x8
- OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8
OpMaskedRoundWithPrecisionFloat64x8
- OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8
OpMaskedTruncWithPrecisionFloat64x8
- OpRoundSuppressExceptionWithPrecisionFloat64x8
OpRoundWithPrecisionFloat64x8
- OpTruncSuppressExceptionWithPrecisionFloat64x8
OpTruncWithPrecisionFloat64x8
OpMaskedShiftAllLeftAndFillUpperFromInt16x16
OpMaskedShiftAllRightAndFillUpperFromInt16x16
argLen: 2,
generic: true,
},
- {
- name: "CeilSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "CeilWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithCeilWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithFloorWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithRoundWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithTruncWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "FloorSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "FloorWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedCeilWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithCeilWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithFloorWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithRoundWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithTruncWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedFloorWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedRoundWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedTruncWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "RoundSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "RoundWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "TruncSuppressExceptionWithPrecisionFloat32x16",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "TruncWithPrecisionFloat32x16",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "CeilSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "CeilWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithCeilWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithFloorWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithRoundWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithTruncWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "FloorSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "FloorWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedCeilWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithCeilWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithFloorWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithRoundWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithTruncWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedFloorWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedRoundWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedTruncWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "RoundSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "RoundWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "TruncSuppressExceptionWithPrecisionFloat32x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "TruncWithPrecisionFloat32x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "CeilSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "CeilWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithCeilWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithFloorWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithRoundWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithTruncWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "FloorSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "FloorWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedCeilWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithCeilWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithFloorWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithRoundWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithTruncWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedFloorWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedRoundWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedTruncWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "RoundSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "RoundWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "TruncSuppressExceptionWithPrecisionFloat32x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "TruncWithPrecisionFloat32x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "CeilSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "CeilWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithCeilWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithFloorWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithRoundWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithTruncWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "FloorSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "FloorWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedCeilWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithCeilWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithFloorWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithRoundWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithTruncWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedFloorWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedRoundWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedTruncWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "RoundSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "RoundWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "TruncSuppressExceptionWithPrecisionFloat64x2",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "TruncWithPrecisionFloat64x2",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "CeilSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "CeilWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithCeilWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithFloorWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithRoundWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithTruncWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "FloorSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "FloorWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedCeilWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithCeilWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithFloorWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithRoundWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithTruncWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedFloorWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedRoundWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedTruncWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "RoundSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "RoundWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "TruncSuppressExceptionWithPrecisionFloat64x4",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "TruncWithPrecisionFloat64x4",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "CeilSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "CeilWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithCeilWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithFloorWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithRoundWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "DiffWithTruncWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "FloorSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "FloorWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedCeilWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithCeilWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithFloorWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithRoundWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedDiffWithTruncWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedFloorWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedRoundWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 2,
- generic: true,
- },
{
name: "MaskedTruncWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 2,
generic: true,
},
- {
- name: "RoundSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "RoundWithPrecisionFloat64x8",
auxType: auxInt8,
argLen: 1,
generic: true,
},
- {
- name: "TruncSuppressExceptionWithPrecisionFloat64x8",
- auxType: auxInt8,
- argLen: 1,
- generic: true,
- },
{
name: "TruncWithPrecisionFloat64x8",
auxType: auxInt8,
return rewriteValueAMD64_OpCeilFloat64x2(v)
case OpCeilFloat64x4:
return rewriteValueAMD64_OpCeilFloat64x4(v)
- case OpCeilSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x16(v)
- case OpCeilSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x4(v)
- case OpCeilSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x8(v)
- case OpCeilSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x2(v)
- case OpCeilSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x4(v)
- case OpCeilSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x8(v)
case OpCeilWithPrecisionFloat32x16:
return rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v)
case OpCeilWithPrecisionFloat32x4:
case OpCvtBoolToUint8:
v.Op = OpCopy
return true
- case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v)
- case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v)
- case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v)
- case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v)
- case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v)
- case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v)
case OpDiffWithCeilWithPrecisionFloat32x16:
return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v)
case OpDiffWithCeilWithPrecisionFloat32x4:
return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v)
case OpDiffWithCeilWithPrecisionFloat64x8:
return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v)
- case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v)
- case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v)
- case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v)
- case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v)
- case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v)
- case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v)
case OpDiffWithFloorWithPrecisionFloat32x16:
return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v)
case OpDiffWithFloorWithPrecisionFloat32x4:
return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v)
case OpDiffWithFloorWithPrecisionFloat64x8:
return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v)
- case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v)
- case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v)
- case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v)
- case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v)
- case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v)
- case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v)
case OpDiffWithRoundWithPrecisionFloat32x16:
return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v)
case OpDiffWithRoundWithPrecisionFloat32x4:
return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v)
case OpDiffWithRoundWithPrecisionFloat64x8:
return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v)
- case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v)
- case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v)
- case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v)
- case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v)
- case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v)
- case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v)
case OpDiffWithTruncWithPrecisionFloat32x16:
return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v)
case OpDiffWithTruncWithPrecisionFloat32x4:
return rewriteValueAMD64_OpFloorFloat64x2(v)
case OpFloorFloat64x4:
return rewriteValueAMD64_OpFloorFloat64x4(v)
- case OpFloorSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x16(v)
- case OpFloorSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x4(v)
- case OpFloorSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x8(v)
- case OpFloorSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x2(v)
- case OpFloorSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x4(v)
- case OpFloorSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x8(v)
case OpFloorWithPrecisionFloat32x16:
return rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v)
case OpFloorWithPrecisionFloat32x4:
return rewriteValueAMD64_OpMaskedAverageUint8x32(v)
case OpMaskedAverageUint8x64:
return rewriteValueAMD64_OpMaskedAverageUint8x64(v)
- case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16(v)
- case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4(v)
- case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8(v)
- case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2(v)
- case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4(v)
- case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8(v)
case OpMaskedCeilWithPrecisionFloat32x16:
return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v)
case OpMaskedCeilWithPrecisionFloat32x4:
return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v)
case OpMaskedCeilWithPrecisionFloat64x8:
return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v)
- case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v)
- case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v)
- case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v)
- case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v)
- case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v)
- case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v)
case OpMaskedDiffWithCeilWithPrecisionFloat32x16:
return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v)
case OpMaskedDiffWithCeilWithPrecisionFloat32x4:
return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v)
case OpMaskedDiffWithCeilWithPrecisionFloat64x8:
return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v)
- case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v)
- case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v)
- case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v)
- case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v)
- case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v)
- case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v)
case OpMaskedDiffWithFloorWithPrecisionFloat32x16:
return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v)
case OpMaskedDiffWithFloorWithPrecisionFloat32x4:
return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v)
case OpMaskedDiffWithFloorWithPrecisionFloat64x8:
return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v)
- case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v)
- case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v)
- case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v)
- case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v)
- case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v)
- case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v)
case OpMaskedDiffWithRoundWithPrecisionFloat32x16:
return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v)
case OpMaskedDiffWithRoundWithPrecisionFloat32x4:
return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v)
case OpMaskedDiffWithRoundWithPrecisionFloat64x8:
return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v)
- case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v)
- case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v)
- case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v)
- case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v)
- case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v)
- case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v)
case OpMaskedDiffWithTruncWithPrecisionFloat32x16:
return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v)
case OpMaskedDiffWithTruncWithPrecisionFloat32x4:
return rewriteValueAMD64_OpMaskedEqualUint8x32(v)
case OpMaskedEqualUint8x64:
return rewriteValueAMD64_OpMaskedEqualUint8x64(v)
- case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16(v)
- case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4(v)
- case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8(v)
- case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2(v)
- case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4(v)
- case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8(v)
case OpMaskedFloorWithPrecisionFloat32x16:
return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v)
case OpMaskedFloorWithPrecisionFloat32x4:
return rewriteValueAMD64_OpMaskedRotateRightUint64x4(v)
case OpMaskedRotateRightUint64x8:
return rewriteValueAMD64_OpMaskedRotateRightUint64x8(v)
- case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v)
- case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v)
- case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v)
- case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v)
- case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v)
- case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v)
case OpMaskedRoundWithPrecisionFloat32x16:
return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v)
case OpMaskedRoundWithPrecisionFloat32x4:
return rewriteValueAMD64_OpMaskedSubUint8x32(v)
case OpMaskedSubUint8x64:
return rewriteValueAMD64_OpMaskedSubUint8x64(v)
- case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16(v)
- case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4(v)
- case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8(v)
- case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2(v)
- case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4(v)
- case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8(v)
case OpMaskedTruncWithPrecisionFloat32x16:
return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v)
case OpMaskedTruncWithPrecisionFloat32x4:
return rewriteValueAMD64_OpRoundFloat64x2(v)
case OpRoundFloat64x4:
return rewriteValueAMD64_OpRoundFloat64x4(v)
- case OpRoundSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x16(v)
- case OpRoundSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x4(v)
- case OpRoundSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x8(v)
- case OpRoundSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x2(v)
- case OpRoundSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x4(v)
- case OpRoundSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x8(v)
case OpRoundToEven:
return rewriteValueAMD64_OpRoundToEven(v)
case OpRoundWithPrecisionFloat32x16:
return rewriteValueAMD64_OpTruncFloat64x2(v)
case OpTruncFloat64x4:
return rewriteValueAMD64_OpTruncFloat64x4(v)
- case OpTruncSuppressExceptionWithPrecisionFloat32x16:
- return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x16(v)
- case OpTruncSuppressExceptionWithPrecisionFloat32x4:
- return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x4(v)
- case OpTruncSuppressExceptionWithPrecisionFloat32x8:
- return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x8(v)
- case OpTruncSuppressExceptionWithPrecisionFloat64x2:
- return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x2(v)
- case OpTruncSuppressExceptionWithPrecisionFloat64x4:
- return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x4(v)
- case OpTruncSuppressExceptionWithPrecisionFloat64x8:
- return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x8(v)
case OpTruncWithPrecisionFloat32x16:
return rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v)
case OpTruncWithPrecisionFloat32x4:
return true
}
}
-func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_0 := v.Args[0]
- // match: (CeilSuppressExceptionWithPrecisionFloat32x16 [a] x)
- // result: (VRNDSCALEPS512 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS512)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (CeilSuppressExceptionWithPrecisionFloat32x4 [a] x)
- // result: (VRNDSCALEPS128 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS128)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (CeilSuppressExceptionWithPrecisionFloat32x8 [a] x)
- // result: (VRNDSCALEPS256 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS256)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_0 := v.Args[0]
- // match: (CeilSuppressExceptionWithPrecisionFloat64x2 [a] x)
- // result: (VRNDSCALEPD128 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD128)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (CeilSuppressExceptionWithPrecisionFloat64x4 [a] x)
- // result: (VRNDSCALEPD256 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD256)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (CeilSuppressExceptionWithPrecisionFloat64x8 [a] x)
- // result: (VRNDSCALEPD512 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD512)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool {
v_0 := v.Args[0]
// match: (CeilWithPrecisionFloat32x16 [a] x)
}
return false
}
-func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x)
- // result: (VREDUCEPS512 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS512)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x)
- // result: (VREDUCEPS128 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS128)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x)
- // result: (VREDUCEPS256 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS256)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x)
- // result: (VREDUCEPD128 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD128)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x)
- // result: (VREDUCEPD256 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD256)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x)
- // result: (VREDUCEPD512 [a+10] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD512)
- v.AuxInt = int8ToAuxInt(a + 10)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool {
v_0 := v.Args[0]
// match: (DiffWithCeilWithPrecisionFloat32x16 [a] x)
return true
}
}
-func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x)
- // result: (VREDUCEPS512 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS512)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x)
- // result: (VREDUCEPS128 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS128)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x)
- // result: (VREDUCEPS256 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS256)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x)
- // result: (VREDUCEPD128 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD128)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x)
- // result: (VREDUCEPD256 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD256)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x)
- // result: (VREDUCEPD512 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD512)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool {
v_0 := v.Args[0]
// match: (DiffWithFloorWithPrecisionFloat32x16 [a] x)
return true
}
}
-func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x)
- // result: (VREDUCEPS512 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS512)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x)
- // result: (VREDUCEPS128 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS128)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x)
- // result: (VREDUCEPS256 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS256)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x)
- // result: (VREDUCEPD128 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD128)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x)
- // result: (VREDUCEPD256 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD256)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x)
- // result: (VREDUCEPD512 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD512)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool {
v_0 := v.Args[0]
// match: (DiffWithRoundWithPrecisionFloat32x16 [a] x)
return true
}
}
-func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x)
- // result: (VREDUCEPS512 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS512)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x)
- // result: (VREDUCEPS128 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS128)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x)
- // result: (VREDUCEPS256 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPS256)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x)
- // result: (VREDUCEPD128 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD128)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x)
- // result: (VREDUCEPD256 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD256)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x)
- // result: (VREDUCEPD512 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VREDUCEPD512)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool {
v_0 := v.Args[0]
// match: (DiffWithTruncWithPrecisionFloat32x16 [a] x)
return true
}
}
-func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_0 := v.Args[0]
- // match: (FloorSuppressExceptionWithPrecisionFloat32x16 [a] x)
- // result: (VRNDSCALEPS512 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS512)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (FloorSuppressExceptionWithPrecisionFloat32x4 [a] x)
- // result: (VRNDSCALEPS128 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS128)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (FloorSuppressExceptionWithPrecisionFloat32x8 [a] x)
- // result: (VRNDSCALEPS256 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS256)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_0 := v.Args[0]
- // match: (FloorSuppressExceptionWithPrecisionFloat64x2 [a] x)
- // result: (VRNDSCALEPD128 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD128)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (FloorSuppressExceptionWithPrecisionFloat64x4 [a] x)
- // result: (VRNDSCALEPD256 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD256)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (FloorSuppressExceptionWithPrecisionFloat64x8 [a] x)
- // result: (VRNDSCALEPD512 [a+9] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD512)
- v.AuxInt = int8ToAuxInt(a + 9)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool {
v_0 := v.Args[0]
// match: (FloorWithPrecisionFloat32x16 [a] x)
return true
}
}
-func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask)
- // result: (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask)
- // result: (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask)
- // result: (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask)
- // result: (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask)
- // result: (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask)
- // result: (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
return true
}
}
-func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask)
- // result: (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask)
- // result: (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask)
- // result: (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask)
- // result: (VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask)
- // result: (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask)
- // result: (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 10)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
return true
}
}
-func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask)
- // result: (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask)
- // result: (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask)
- // result: (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask)
- // result: (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask)
- // result: (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask)
- // result: (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
return true
}
}
-func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask)
- // result: (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask)
- // result: (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask)
- // result: (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask)
- // result: (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask)
- // result: (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask)
- // result: (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
return true
}
}
-func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask)
- // result: (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask)
- // result: (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask)
- // result: (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask)
- // result: (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask)
- // result: (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask)
- // result: (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VREDUCEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
return true
}
}
-func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask)
- // result: (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask)
- // result: (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask)
- // result: (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask)
- // result: (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask)
- // result: (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask)
- // result: (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 9)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
return true
}
}
-func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask)
- // result: (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask)
- // result: (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask)
- // result: (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask)
- // result: (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask)
- // result: (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask)
- // result: (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 8)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
return true
}
}
-func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask)
- // result: (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask)
- // result: (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask)
- // result: (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask)
- // result: (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask)
- // result: (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask)
- // result: (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM <types.TypeMask> mask))
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- mask := v_1
- v.reset(OpAMD64VRNDSCALEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 11)
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
- v0.AddArg(mask)
- v.AddArg2(x, v0)
- return true
- }
-}
func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
return true
}
}
-func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_0 := v.Args[0]
- // match: (RoundSuppressExceptionWithPrecisionFloat32x16 [a] x)
- // result: (VRNDSCALEPS512 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS512)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (RoundSuppressExceptionWithPrecisionFloat32x4 [a] x)
- // result: (VRNDSCALEPS128 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS128)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (RoundSuppressExceptionWithPrecisionFloat32x8 [a] x)
- // result: (VRNDSCALEPS256 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS256)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_0 := v.Args[0]
- // match: (RoundSuppressExceptionWithPrecisionFloat64x2 [a] x)
- // result: (VRNDSCALEPD128 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD128)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (RoundSuppressExceptionWithPrecisionFloat64x4 [a] x)
- // result: (VRNDSCALEPD256 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD256)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (RoundSuppressExceptionWithPrecisionFloat64x8 [a] x)
- // result: (VRNDSCALEPD512 [a+8] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD512)
- v.AuxInt = int8ToAuxInt(a + 8)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
v_0 := v.Args[0]
// match: (RoundToEven x)
return true
}
}
-func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool {
- v_0 := v.Args[0]
- // match: (TruncSuppressExceptionWithPrecisionFloat32x16 [a] x)
- // result: (VRNDSCALEPS512 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS512)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (TruncSuppressExceptionWithPrecisionFloat32x4 [a] x)
- // result: (VRNDSCALEPS128 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS128)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (TruncSuppressExceptionWithPrecisionFloat32x8 [a] x)
- // result: (VRNDSCALEPS256 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPS256)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool {
- v_0 := v.Args[0]
- // match: (TruncSuppressExceptionWithPrecisionFloat64x2 [a] x)
- // result: (VRNDSCALEPD128 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD128)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool {
- v_0 := v.Args[0]
- // match: (TruncSuppressExceptionWithPrecisionFloat64x4 [a] x)
- // result: (VRNDSCALEPD256 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD256)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (TruncSuppressExceptionWithPrecisionFloat64x8 [a] x)
- // result: (VRNDSCALEPD512 [a+11] x)
- for {
- a := auxIntToInt8(v.AuxInt)
- x := v_0
- v.reset(OpAMD64VRNDSCALEPD512)
- v.AuxInt = int8ToAuxInt(a + 11)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool {
v_0 := v.Args[0]
// match: (TruncWithPrecisionFloat32x16 [a] x)
addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Uint64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Uint64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Uint64x8.MaskedSub", opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64)
- addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64)
- addF(simdPackage, "Float32x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float32x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
- addF(simdPackage, "Float64x2.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64)
- addF(simdPackage, "Float64x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64)
- addF(simdPackage, "Float64x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64)
addF(simdPackage, "Float32x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64)
addF(simdPackage, "Float32x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64)
addF(simdPackage, "Float32x16.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64)
/* The operations below cannot be tested via wrappers, please test them directly */
-// CeilSuppressExceptionWithPrecision
// CeilWithPrecision
-// DiffWithCeilSuppressExceptionWithPrecision
// DiffWithCeilWithPrecision
-// DiffWithFloorSuppressExceptionWithPrecision
// DiffWithFloorWithPrecision
-// DiffWithRoundSuppressExceptionWithPrecision
// DiffWithRoundWithPrecision
-// DiffWithTruncSuppressExceptionWithPrecision
// DiffWithTruncWithPrecision
-// FloorSuppressExceptionWithPrecision
// FloorWithPrecision
// GaloisFieldAffineTransform
// GaloisFieldAffineTransformInversed
// GetElem
-// MaskedCeilSuppressExceptionWithPrecision
// MaskedCeilWithPrecision
-// MaskedDiffWithCeilSuppressExceptionWithPrecision
// MaskedDiffWithCeilWithPrecision
-// MaskedDiffWithFloorSuppressExceptionWithPrecision
// MaskedDiffWithFloorWithPrecision
-// MaskedDiffWithRoundSuppressExceptionWithPrecision
// MaskedDiffWithRoundWithPrecision
-// MaskedDiffWithTruncSuppressExceptionWithPrecision
// MaskedDiffWithTruncWithPrecision
-// MaskedFloorSuppressExceptionWithPrecision
// MaskedFloorWithPrecision
// MaskedGaloisFieldAffineTransform
// MaskedGaloisFieldAffineTransformInversed
// MaskedRotateAllLeft
// MaskedRotateAllRight
-// MaskedRoundSuppressExceptionWithPrecision
// MaskedRoundWithPrecision
// MaskedShiftAllLeft
// MaskedShiftAllLeftAndFillUpperFrom
// MaskedShiftAllRight
// MaskedShiftAllRightAndFillUpperFrom
// MaskedShiftAllRightSignExtended
-// MaskedTruncSuppressExceptionWithPrecision
// MaskedTruncWithPrecision
// RotateAllLeft
// RotateAllRight
-// RoundSuppressExceptionWithPrecision
// RoundWithPrecision
// SetElem
// ShiftAllLeft
// ShiftAllRight
// ShiftAllRightAndFillUpperFrom
// ShiftAllRightSignExtended
-// TruncSuppressExceptionWithPrecision
// TruncWithPrecision
/* Ceil */
// Ceil rounds elements up to the nearest integer.
-// Const Immediate = 2.
//
// Asm: VROUNDPS, CPU Feature: AVX
func (x Float32x4) Ceil() Float32x4
// Ceil rounds elements up to the nearest integer.
-// Const Immediate = 2.
//
// Asm: VROUNDPS, CPU Feature: AVX
func (x Float32x8) Ceil() Float32x8
// Ceil rounds elements up to the nearest integer.
-// Const Immediate = 2.
//
// Asm: VROUNDPD, CPU Feature: AVX
func (x Float64x2) Ceil() Float64x2
// Ceil rounds elements up to the nearest integer.
-// Const Immediate = 2.
//
// Asm: VROUNDPD, CPU Feature: AVX
func (x Float64x4) Ceil() Float64x4
-/* CeilSuppressExceptionWithPrecision */
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) CeilSuppressExceptionWithPrecision(imm uint8) Float32x4
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) CeilSuppressExceptionWithPrecision(imm uint8) Float32x8
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) CeilSuppressExceptionWithPrecision(imm uint8) Float32x16
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) CeilSuppressExceptionWithPrecision(imm uint8) Float64x2
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) CeilSuppressExceptionWithPrecision(imm uint8) Float64x4
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) CeilSuppressExceptionWithPrecision(imm uint8) Float64x8
-
/* CeilWithPrecision */
// CeilWithPrecision rounds elements up with specified precision.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x4) CeilWithPrecision(imm uint8) Float32x4
// CeilWithPrecision rounds elements up with specified precision.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x8) CeilWithPrecision(imm uint8) Float32x8
// CeilWithPrecision rounds elements up with specified precision.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x16) CeilWithPrecision(imm uint8) Float32x16
// CeilWithPrecision rounds elements up with specified precision.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x2) CeilWithPrecision(imm uint8) Float64x2
// CeilWithPrecision rounds elements up with specified precision.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4
// CeilWithPrecision rounds elements up with specified precision.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8
-/* DiffWithCeilSuppressExceptionWithPrecision */
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x4
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x8
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x16
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x2
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x4
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x8
-
/* DiffWithCeilWithPrecision */
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x4) DiffWithCeilWithPrecision(imm uint8) Float32x4
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x8) DiffWithCeilWithPrecision(imm uint8) Float32x8
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x16) DiffWithCeilWithPrecision(imm uint8) Float32x16
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x2) DiffWithCeilWithPrecision(imm uint8) Float64x2
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8
-/* DiffWithFloorSuppressExceptionWithPrecision */
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x4
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x8
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x16
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x2
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x4
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x8
-
/* DiffWithFloorWithPrecision */
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x4) DiffWithFloorWithPrecision(imm uint8) Float32x4
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x8) DiffWithFloorWithPrecision(imm uint8) Float32x8
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x16) DiffWithFloorWithPrecision(imm uint8) Float32x16
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x2) DiffWithFloorWithPrecision(imm uint8) Float64x2
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8
-/* DiffWithRoundSuppressExceptionWithPrecision */
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x4
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x8
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x16
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x2
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x4
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x8
-
/* DiffWithRoundWithPrecision */
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x4) DiffWithRoundWithPrecision(imm uint8) Float32x4
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x8) DiffWithRoundWithPrecision(imm uint8) Float32x8
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x16) DiffWithRoundWithPrecision(imm uint8) Float32x16
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x2) DiffWithRoundWithPrecision(imm uint8) Float64x2
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8
-/* DiffWithTruncSuppressExceptionWithPrecision */
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x4
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x8
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x16
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x2
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x4
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x8
-
/* DiffWithTruncWithPrecision */
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x4) DiffWithTruncWithPrecision(imm uint8) Float32x4
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x8) DiffWithTruncWithPrecision(imm uint8) Float32x8
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x16) DiffWithTruncWithPrecision(imm uint8) Float32x16
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x2) DiffWithTruncWithPrecision(imm uint8) Float64x2
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8
/* DotProdBroadcast */
// DotProdBroadcast multiplies all elements and broadcasts the sum.
-// Const Immediate = 127.
//
// Asm: VDPPD, CPU Feature: AVX
func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2
/* Equal */
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPEQB, CPU Feature: AVX
func (x Int8x16) Equal(y Int8x16) Mask8x16
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPEQB, CPU Feature: AVX2
func (x Int8x32) Equal(y Int8x32) Mask8x32
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPEQW, CPU Feature: AVX
func (x Int16x8) Equal(y Int16x8) Mask16x8
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPEQW, CPU Feature: AVX2
func (x Int16x16) Equal(y Int16x16) Mask16x16
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPEQD, CPU Feature: AVX
func (x Int32x4) Equal(y Int32x4) Mask32x4
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPEQD, CPU Feature: AVX2
func (x Int32x8) Equal(y Int32x8) Mask32x8
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPEQQ, CPU Feature: AVX
func (x Int64x2) Equal(y Int64x2) Mask64x2
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPEQQ, CPU Feature: AVX2
func (x Int64x4) Equal(y Int64x4) Mask64x4
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x4) Equal(y Float32x4) Mask32x4
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x8) Equal(y Float32x8) Mask32x8
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) Equal(y Float32x16) Mask32x16
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x2) Equal(y Float64x2) Mask64x2
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x4) Equal(y Float64x4) Mask64x4
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) Equal(y Float64x8) Mask64x8
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) Equal(y Int8x64) Mask8x64
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) Equal(y Int16x32) Mask16x32
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) Equal(y Int32x16) Mask32x16
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) Equal(y Int64x8) Mask64x8
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) Equal(y Uint8x16) Mask8x16
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) Equal(y Uint8x32) Mask8x32
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) Equal(y Uint8x64) Mask8x64
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) Equal(y Uint16x8) Mask16x8
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) Equal(y Uint16x16) Mask16x16
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) Equal(y Uint16x32) Mask16x32
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) Equal(y Uint32x4) Mask32x4
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) Equal(y Uint32x8) Mask32x8
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) Equal(y Uint32x16) Mask32x16
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) Equal(y Uint64x2) Mask64x2
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) Equal(y Uint64x4) Mask64x4
// Equal compares for equality.
-// Const Immediate = 0.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) Equal(y Uint64x8) Mask64x8
/* Floor */
// Floor rounds elements down to the nearest integer.
-// Const Immediate = 1.
//
// Asm: VROUNDPS, CPU Feature: AVX
func (x Float32x4) Floor() Float32x4
// Floor rounds elements down to the nearest integer.
-// Const Immediate = 1.
//
// Asm: VROUNDPS, CPU Feature: AVX
func (x Float32x8) Floor() Float32x8
// Floor rounds elements down to the nearest integer.
-// Const Immediate = 1.
//
// Asm: VROUNDPD, CPU Feature: AVX
func (x Float64x2) Floor() Float64x2
// Floor rounds elements down to the nearest integer.
-// Const Immediate = 1.
//
// Asm: VROUNDPD, CPU Feature: AVX
func (x Float64x4) Floor() Float64x4
-/* FloorSuppressExceptionWithPrecision */
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) FloorSuppressExceptionWithPrecision(imm uint8) Float32x4
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) FloorSuppressExceptionWithPrecision(imm uint8) Float32x8
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) FloorSuppressExceptionWithPrecision(imm uint8) Float32x16
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) FloorSuppressExceptionWithPrecision(imm uint8) Float64x2
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) FloorSuppressExceptionWithPrecision(imm uint8) Float64x4
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) FloorSuppressExceptionWithPrecision(imm uint8) Float64x8
-
/* FloorWithPrecision */
// FloorWithPrecision rounds elements down with specified precision.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x4) FloorWithPrecision(imm uint8) Float32x4
// FloorWithPrecision rounds elements down with specified precision.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x8) FloorWithPrecision(imm uint8) Float32x8
// FloorWithPrecision rounds elements down with specified precision.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x16) FloorWithPrecision(imm uint8) Float32x16
// FloorWithPrecision rounds elements down with specified precision.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x2) FloorWithPrecision(imm uint8) Float64x2
// FloorWithPrecision rounds elements down with specified precision.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4
// FloorWithPrecision rounds elements down with specified precision.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8
/* Greater */
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPGTB, CPU Feature: AVX
func (x Int8x16) Greater(y Int8x16) Mask8x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPGTB, CPU Feature: AVX2
func (x Int8x32) Greater(y Int8x32) Mask8x32
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPGTW, CPU Feature: AVX
func (x Int16x8) Greater(y Int16x8) Mask16x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPGTW, CPU Feature: AVX2
func (x Int16x16) Greater(y Int16x16) Mask16x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPGTD, CPU Feature: AVX
func (x Int32x4) Greater(y Int32x4) Mask32x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPGTD, CPU Feature: AVX2
func (x Int32x8) Greater(y Int32x8) Mask32x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPGTQ, CPU Feature: AVX2
func (x Int64x4) Greater(y Int64x4) Mask64x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x4) Greater(y Float32x4) Mask32x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x8) Greater(y Float32x8) Mask32x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) Greater(y Float32x16) Mask32x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x2) Greater(y Float64x2) Mask64x2
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x4) Greater(y Float64x4) Mask64x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) Greater(y Float64x8) Mask64x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) Greater(y Int8x64) Mask8x64
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) Greater(y Int16x32) Mask16x32
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) Greater(y Int32x16) Mask32x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) Greater(y Int64x2) Mask64x2
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) Greater(y Int64x8) Mask64x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) Greater(y Uint8x16) Mask8x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) Greater(y Uint8x32) Mask8x32
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) Greater(y Uint8x64) Mask8x64
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) Greater(y Uint16x8) Mask16x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) Greater(y Uint16x16) Mask16x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) Greater(y Uint16x32) Mask16x32
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) Greater(y Uint32x4) Mask32x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) Greater(y Uint32x8) Mask32x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) Greater(y Uint32x16) Mask32x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) Greater(y Uint64x2) Mask64x2
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) Greater(y Uint64x4) Mask64x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) Greater(y Uint64x8) Mask64x8
/* GreaterEqual */
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8
/* IsNan */
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x4) IsNan(y Float32x4) Mask32x4
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x8) IsNan(y Float32x8) Mask32x8
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) IsNan(y Float32x16) Mask32x16
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x2) IsNan(y Float64x2) Mask64x2
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x4) IsNan(y Float64x4) Mask64x4
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) IsNan(y Float64x8) Mask64x8
/* Less */
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x4) Less(y Float32x4) Mask32x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x8) Less(y Float32x8) Mask32x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) Less(y Float32x16) Mask32x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x2) Less(y Float64x2) Mask64x2
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x4) Less(y Float64x4) Mask64x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) Less(y Float64x8) Mask64x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x16) Less(y Int8x16) Mask8x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x32) Less(y Int8x32) Mask8x32
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) Less(y Int8x64) Mask8x64
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x8) Less(y Int16x8) Mask16x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x16) Less(y Int16x16) Mask16x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) Less(y Int16x32) Mask16x32
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x4) Less(y Int32x4) Mask32x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x8) Less(y Int32x8) Mask32x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) Less(y Int32x16) Mask32x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) Less(y Int64x2) Mask64x2
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x4) Less(y Int64x4) Mask64x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) Less(y Int64x8) Mask64x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) Less(y Uint8x16) Mask8x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) Less(y Uint8x32) Mask8x32
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) Less(y Uint8x64) Mask8x64
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) Less(y Uint16x8) Mask16x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) Less(y Uint16x16) Mask16x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) Less(y Uint16x32) Mask16x32
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) Less(y Uint32x4) Mask32x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) Less(y Uint32x8) Mask32x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) Less(y Uint32x16) Mask32x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) Less(y Uint64x2) Mask64x2
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) Less(y Uint64x4) Mask64x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) Less(y Uint64x8) Mask64x8
/* LessEqual */
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x4) LessEqual(y Float32x4) Mask32x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x8) LessEqual(y Float32x8) Mask32x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) LessEqual(y Float32x16) Mask32x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x2) LessEqual(y Float64x2) Mask64x2
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x4) LessEqual(y Float64x4) Mask64x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) LessEqual(y Float64x8) Mask64x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x16) LessEqual(y Int8x16) Mask8x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x32) LessEqual(y Int8x32) Mask8x32
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) LessEqual(y Int8x64) Mask8x64
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x8) LessEqual(y Int16x8) Mask16x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x16) LessEqual(y Int16x16) Mask16x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) LessEqual(y Int16x32) Mask16x32
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x4) LessEqual(y Int32x4) Mask32x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x8) LessEqual(y Int32x8) Mask32x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) LessEqual(y Int32x16) Mask32x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) LessEqual(y Int64x2) Mask64x2
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x4) LessEqual(y Int64x4) Mask64x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) LessEqual(y Int64x8) Mask64x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8
// Asm: VPAVGW, CPU Feature: AVX512EVEX
func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32
-/* MaskedCeilSuppressExceptionWithPrecision */
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4
-
-// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8
-
/* MaskedCeilWithPrecision */
// CeilWithPrecision rounds elements up with specified precision, masked.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4
// CeilWithPrecision rounds elements up with specified precision, masked.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8
// CeilWithPrecision rounds elements up with specified precision, masked.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16
// CeilWithPrecision rounds elements up with specified precision, masked.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2
// CeilWithPrecision rounds elements up with specified precision, masked.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4
// CeilWithPrecision rounds elements up with specified precision, masked.
-// Const Immediate = 2.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8
-/* MaskedDiffWithCeilSuppressExceptionWithPrecision */
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4
-
-// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions.
-// Const Immediate = 10.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8
-
/* MaskedDiffWithCeilWithPrecision */
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
-// Const Immediate = 2.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8
-/* MaskedDiffWithFloorSuppressExceptionWithPrecision */
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4
-
-// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions.
-// Const Immediate = 9.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8
-
/* MaskedDiffWithFloorWithPrecision */
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
-// Const Immediate = 1.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8
-/* MaskedDiffWithRoundSuppressExceptionWithPrecision */
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4
-
-// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8
-
/* MaskedDiffWithRoundWithPrecision */
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
-// Const Immediate = 0.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8
-/* MaskedDiffWithTruncSuppressExceptionWithPrecision */
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4
-
-// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8
-
/* MaskedDiffWithTruncWithPrecision */
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
-// Const Immediate = 3.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8
/* MaskedEqual */
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4
// Equal compares for equality, masked.
-// Const Immediate = 0.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8
-/* MaskedFloorSuppressExceptionWithPrecision */
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4
-
-// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked.
-// Const Immediate = 9.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8
-
/* MaskedFloorWithPrecision */
// FloorWithPrecision rounds elements down with specified precision, masked.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4
// FloorWithPrecision rounds elements down with specified precision, masked.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8
// FloorWithPrecision rounds elements down with specified precision, masked.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16
// FloorWithPrecision rounds elements down with specified precision, masked.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2
// FloorWithPrecision rounds elements down with specified precision, masked.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4
// FloorWithPrecision rounds elements down with specified precision, masked.
-// Const Immediate = 1.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8
/* MaskedGreater */
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4
// Greater compares for greater than.
-// Const Immediate = 6.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8
/* MaskedGreaterEqual */
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4
// GreaterEqual compares for greater than or equal.
-// Const Immediate = 5.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8
/* MaskedIsNan */
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4
// IsNan checks if elements are NaN. Use as x.IsNan(x).
-// Const Immediate = 3.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8
/* MaskedLess */
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4
// Less compares for less than.
-// Const Immediate = 1.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8
/* MaskedLessEqual */
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4
// LessEqual compares for less than or equal.
-// Const Immediate = 2.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8
/* MaskedNotEqual */
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Uint64x8) MaskedRotateRight(y Uint64x8, z Mask64x8) Uint64x8
-/* MaskedRoundSuppressExceptionWithPrecision */
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8
-
/* MaskedRoundWithPrecision */
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8
// Asm: VPSUBQ, CPU Feature: AVX512EVEX
func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8
-/* MaskedTruncSuppressExceptionWithPrecision */
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8
-
/* MaskedTruncWithPrecision */
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8
/* NotEqual */
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x4) NotEqual(y Float32x4) Mask32x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPS, CPU Feature: AVX
func (x Float32x8) NotEqual(y Float32x8) Mask32x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func (x Float32x16) NotEqual(y Float32x16) Mask32x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x2) NotEqual(y Float64x2) Mask64x2
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPD, CPU Feature: AVX
func (x Float64x4) NotEqual(y Float64x4) Mask64x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func (x Float64x8) NotEqual(y Float64x8) Mask64x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x16) NotEqual(y Int8x16) Mask8x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x32) NotEqual(y Int8x32) Mask8x32
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func (x Int8x64) NotEqual(y Int8x64) Mask8x64
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x8) NotEqual(y Int16x8) Mask16x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x16) NotEqual(y Int16x16) Mask16x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func (x Int16x32) NotEqual(y Int16x32) Mask16x32
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x4) NotEqual(y Int32x4) Mask32x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x8) NotEqual(y Int32x8) Mask32x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func (x Int32x16) NotEqual(y Int32x16) Mask32x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x2) NotEqual(y Int64x2) Mask64x2
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x4) NotEqual(y Int64x4) Mask64x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func (x Int64x8) NotEqual(y Int64x8) Mask64x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4
// NotEqual compares for inequality.
-// Const Immediate = 4.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8
/* Round */
// Round rounds elements to the nearest integer.
-// Const Immediate = 0.
//
// Asm: VROUNDPS, CPU Feature: AVX
func (x Float32x4) Round() Float32x4
// Round rounds elements to the nearest integer.
-// Const Immediate = 0.
//
// Asm: VROUNDPS, CPU Feature: AVX
func (x Float32x8) Round() Float32x8
// Round rounds elements to the nearest integer.
-// Const Immediate = 0.
//
// Asm: VROUNDPD, CPU Feature: AVX
func (x Float64x2) Round() Float64x2
// Round rounds elements to the nearest integer.
-// Const Immediate = 0.
//
// Asm: VROUNDPD, CPU Feature: AVX
func (x Float64x4) Round() Float64x4
-/* RoundSuppressExceptionWithPrecision */
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) RoundSuppressExceptionWithPrecision(imm uint8) Float32x4
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) RoundSuppressExceptionWithPrecision(imm uint8) Float32x8
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) RoundSuppressExceptionWithPrecision(imm uint8) Float32x16
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) RoundSuppressExceptionWithPrecision(imm uint8) Float64x2
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) RoundSuppressExceptionWithPrecision(imm uint8) Float64x4
-
-// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions.
-// Const Immediate = 8.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) RoundSuppressExceptionWithPrecision(imm uint8) Float64x8
-
/* RoundWithPrecision */
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x4) RoundWithPrecision(imm uint8) Float32x4
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x8) RoundWithPrecision(imm uint8) Float32x8
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x16) RoundWithPrecision(imm uint8) Float32x16
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x2) RoundWithPrecision(imm uint8) Float64x2
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4
// RoundWithPrecision rounds elements with specified precision.
-// Const Immediate = 0.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8
/* Trunc */
// Trunc truncates elements towards zero.
-// Const Immediate = 3.
//
// Asm: VROUNDPS, CPU Feature: AVX
func (x Float32x4) Trunc() Float32x4
// Trunc truncates elements towards zero.
-// Const Immediate = 3.
//
// Asm: VROUNDPS, CPU Feature: AVX
func (x Float32x8) Trunc() Float32x8
// Trunc truncates elements towards zero.
-// Const Immediate = 3.
//
// Asm: VROUNDPD, CPU Feature: AVX
func (x Float64x2) Trunc() Float64x2
// Trunc truncates elements towards zero.
-// Const Immediate = 3.
//
// Asm: VROUNDPD, CPU Feature: AVX
func (x Float64x4) Trunc() Float64x4
-/* TruncSuppressExceptionWithPrecision */
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x4) TruncSuppressExceptionWithPrecision(imm uint8) Float32x4
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x8) TruncSuppressExceptionWithPrecision(imm uint8) Float32x8
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
-func (x Float32x16) TruncSuppressExceptionWithPrecision(imm uint8) Float32x16
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x2) TruncSuppressExceptionWithPrecision(imm uint8) Float64x2
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x4) TruncSuppressExceptionWithPrecision(imm uint8) Float64x4
-
-// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions.
-// Const Immediate = 11.
-//
-// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
-func (x Float64x8) TruncSuppressExceptionWithPrecision(imm uint8) Float64x8
-
/* TruncWithPrecision */
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x4) TruncWithPrecision(imm uint8) Float32x4
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x8) TruncWithPrecision(imm uint8) Float32x8
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func (x Float32x16) TruncWithPrecision(imm uint8) Float32x16
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x2) TruncWithPrecision(imm uint8) Float64x2
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4
// TruncWithPrecision truncates elements with specified precision.
-// Const Immediate = 3.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8