(GetElemUint16x8 [a] x) => (VPEXTRW128 [a] x)
(GetElemUint32x4 [a] x) => (VPEXTRD128 [a] x)
(GetElemUint64x2 [a] x) => (VPEXTRQ128 [a] x)
-(GreaterFloat32x4 x y) => (VCMPPS128 [6] x y)
-(GreaterFloat32x8 x y) => (VCMPPS256 [6] x y)
-(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y))
-(GreaterFloat64x2 x y) => (VCMPPD128 [6] x y)
-(GreaterFloat64x4 x y) => (VCMPPD256 [6] x y)
-(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] x y))
+(GreaterFloat32x4 x y) => (VCMPPS128 [14] x y)
+(GreaterFloat32x8 x y) => (VCMPPS256 [14] x y)
+(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [14] x y))
+(GreaterFloat64x2 x y) => (VCMPPD128 [14] x y)
+(GreaterFloat64x4 x y) => (VCMPPD256 [14] x y)
+(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [14] x y))
(GreaterInt8x16 ...) => (VPCMPGTB128 ...)
(GreaterInt8x32 ...) => (VPCMPGTB256 ...)
-(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] x y))
+(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [14] x y))
(GreaterInt16x8 ...) => (VPCMPGTW128 ...)
(GreaterInt16x16 ...) => (VPCMPGTW256 ...)
-(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [6] x y))
+(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [14] x y))
(GreaterInt32x4 ...) => (VPCMPGTD128 ...)
(GreaterInt32x8 ...) => (VPCMPGTD256 ...)
-(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] x y))
-(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [6] x y))
+(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [14] x y))
+(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [14] x y))
(GreaterInt64x4 ...) => (VPCMPGTQ256 ...)
-(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [6] x y))
-(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] x y))
-(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] x y))
-(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] x y))
-(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y))
-(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] x y))
-(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] x y))
-(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] x y))
-(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] x y))
-(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] x y))
-(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y))
-(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y))
-(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y))
-(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] x y)
-(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] x y)
-(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] x y))
-(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] x y)
-(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] x y)
-(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] x y))
-(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] x y))
-(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] x y))
-(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] x y))
-(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] x y))
-(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] x y))
-(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] x y))
-(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] x y))
-(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] x y))
-(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] x y))
-(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] x y))
-(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] x y))
-(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] x y))
-(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] x y))
-(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] x y))
-(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] x y))
-(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] x y))
-(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] x y))
-(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] x y))
-(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] x y))
-(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] x y))
-(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] x y))
-(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y))
-(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y))
-(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y))
+(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [14] x y))
+(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [14] x y))
+(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [14] x y))
+(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [14] x y))
+(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [14] x y))
+(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [14] x y))
+(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [14] x y))
+(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [14] x y))
+(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [14] x y))
+(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [14] x y))
+(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y))
+(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y))
+(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y))
+(GreaterEqualFloat32x4 x y) => (VCMPPS128 [13] x y)
+(GreaterEqualFloat32x8 x y) => (VCMPPS256 [13] x y)
+(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [13] x y))
+(GreaterEqualFloat64x2 x y) => (VCMPPD128 [13] x y)
+(GreaterEqualFloat64x4 x y) => (VCMPPD256 [13] x y)
+(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [13] x y))
+(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [13] x y))
+(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [13] x y))
+(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [13] x y))
+(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [13] x y))
+(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [13] x y))
+(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [13] x y))
+(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [13] x y))
+(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [13] x y))
+(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [13] x y))
+(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [13] x y))
+(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [13] x y))
+(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [13] x y))
+(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [13] x y))
+(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [13] x y))
+(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [13] x y))
+(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [13] x y))
+(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [13] x y))
+(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [13] x y))
+(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [13] x y))
+(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [13] x y))
+(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [13] x y))
+(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y))
+(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y))
+(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y))
(IsNanFloat32x4 x y) => (VCMPPS128 [3] x y)
(IsNanFloat32x8 x y) => (VCMPPS256 [3] x y)
(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y))
(MaskedGaloisFieldMulUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM <types.TypeMask> mask))
(MaskedGaloisFieldMulUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM <types.TypeMask> mask))
(MaskedGaloisFieldMulUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM <types.TypeMask> mask))
-(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
-(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
-(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
-(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
-(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
-(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
-(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
-(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
-(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
-(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
-(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
-(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
-(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
-(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
-(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
-(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
-(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
-(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
-(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
-(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
-(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
-(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
-(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
-(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
-(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
-(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
-(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
-(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
-(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
-(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
-(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
+(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
+(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
+(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
+(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
+(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
+(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
+(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
+(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
+(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
+(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
+(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
+(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
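For orientation, here is a minimal sketch (not part of the diff) of the full shape one of the regenerated rewrite functions takes; the hunks that follow show only the lines around the changed AuxInt. In the AVX compare-predicate encoding used by VCMPPS/VCMPPD, the old immediates 5 and 6 select NLT_US and NLE_US, while the new 13 and 14 select GE_OS and GT_OS, which differ in how unordered (NaN) operands are reported.

// Sketch only, assuming the standard layout of generated rewrite functions in
// cmd/compile/internal/ssa/rewriteAMD64.go; identifiers such as Value,
// OpAMD64VCMPPS128 and int8ToAuxInt come from that package.
func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (GreaterFloat32x4 x y)
	// result: (VCMPPS128 [14] x y)
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64VCMPPS128)
		v.AuxInt = int8ToAuxInt(14) // 14 = GT_OS (was 6 = NLE_US)
		v.AddArg2(x, y)
		return true
	}
}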
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualFloat32x16 x y)
- // result: (VPMOVMToVec32x16 (VCMPPS512 [5] x y))
+ // result: (VPMOVMToVec32x16 (VCMPPS512 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (GreaterEqualFloat32x4 x y)
- // result: (VCMPPS128 [5] x y)
+ // result: (VCMPPS128 [13] x y)
for {
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS128)
- v.AuxInt = int8ToAuxInt(5)
+ v.AuxInt = int8ToAuxInt(13)
v.AddArg2(x, y)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (GreaterEqualFloat32x8 x y)
- // result: (VCMPPS256 [5] x y)
+ // result: (VCMPPS256 [13] x y)
for {
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS256)
- v.AuxInt = int8ToAuxInt(5)
+ v.AuxInt = int8ToAuxInt(13)
v.AddArg2(x, y)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (GreaterEqualFloat64x2 x y)
- // result: (VCMPPD128 [5] x y)
+ // result: (VCMPPD128 [13] x y)
for {
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD128)
- v.AuxInt = int8ToAuxInt(5)
+ v.AuxInt = int8ToAuxInt(13)
v.AddArg2(x, y)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (GreaterEqualFloat64x4 x y)
- // result: (VCMPPD256 [5] x y)
+ // result: (VCMPPD256 [13] x y)
for {
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD256)
- v.AuxInt = int8ToAuxInt(5)
+ v.AuxInt = int8ToAuxInt(13)
v.AddArg2(x, y)
return true
}
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualFloat64x8 x y)
- // result: (VPMOVMToVec64x8 (VCMPPD512 [5] x y))
+ // result: (VPMOVMToVec64x8 (VCMPPD512 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt16x16 x y)
- // result: (VPMOVMToVec16x16 (VPCMPW256 [5] x y))
+ // result: (VPMOVMToVec16x16 (VPCMPW256 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt16x32 x y)
- // result: (VPMOVMToVec16x32 (VPCMPW512 [5] x y))
+ // result: (VPMOVMToVec16x32 (VPCMPW512 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt16x8 x y)
- // result: (VPMOVMToVec16x8 (VPCMPW128 [5] x y))
+ // result: (VPMOVMToVec16x8 (VPCMPW128 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt32x16 x y)
- // result: (VPMOVMToVec32x16 (VPCMPD512 [5] x y))
+ // result: (VPMOVMToVec32x16 (VPCMPD512 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt32x4 x y)
- // result: (VPMOVMToVec32x4 (VPCMPD128 [5] x y))
+ // result: (VPMOVMToVec32x4 (VPCMPD128 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt32x8 x y)
- // result: (VPMOVMToVec32x8 (VPCMPD256 [5] x y))
+ // result: (VPMOVMToVec32x8 (VPCMPD256 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt64x2 x y)
- // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] x y))
+ // result: (VPMOVMToVec64x2 (VPCMPQ128 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt64x4 x y)
- // result: (VPMOVMToVec64x4 (VPCMPQ256 [5] x y))
+ // result: (VPMOVMToVec64x4 (VPCMPQ256 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt64x8 x y)
- // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] x y))
+ // result: (VPMOVMToVec64x8 (VPCMPQ512 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt8x16 x y)
- // result: (VPMOVMToVec8x16 (VPCMPB128 [5] x y))
+ // result: (VPMOVMToVec8x16 (VPCMPB128 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt8x32 x y)
- // result: (VPMOVMToVec8x32 (VPCMPB256 [5] x y))
+ // result: (VPMOVMToVec8x32 (VPCMPB256 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualInt8x64 x y)
- // result: (VPMOVMToVec8x64 (VPCMPB512 [5] x y))
+ // result: (VPMOVMToVec8x64 (VPCMPB512 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint16x16 x y)
- // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] x y))
+ // result: (VPMOVMToVec16x16 (VPCMPUW256 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint16x32 x y)
- // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] x y))
+ // result: (VPMOVMToVec16x32 (VPCMPUW512 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint16x8 x y)
- // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] x y))
+ // result: (VPMOVMToVec16x8 (VPCMPUW128 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint32x16 x y)
- // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] x y))
+ // result: (VPMOVMToVec32x16 (VPCMPUD512 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint32x4 x y)
- // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] x y))
+ // result: (VPMOVMToVec32x4 (VPCMPUD128 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint32x8 x y)
- // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] x y))
+ // result: (VPMOVMToVec32x8 (VPCMPUD256 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint64x2 x y)
- // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y))
+ // result: (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint64x4 x y)
- // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y))
+ // result: (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint64x8 x y)
- // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y))
+ // result: (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint8x16 x y)
- // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] x y))
+ // result: (VPMOVMToVec8x16 (VPCMPUB128 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint8x32 x y)
- // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] x y))
+ // result: (VPMOVMToVec8x32 (VPCMPUB256 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterEqualUint8x64 x y)
- // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] x y))
+ // result: (VPMOVMToVec8x64 (VPCMPUB512 [13] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterFloat32x16 x y)
- // result: (VPMOVMToVec32x16 (VCMPPS512 [6] x y))
+ // result: (VPMOVMToVec32x16 (VCMPPS512 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (GreaterFloat32x4 x y)
- // result: (VCMPPS128 [6] x y)
+ // result: (VCMPPS128 [14] x y)
for {
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS128)
- v.AuxInt = int8ToAuxInt(6)
+ v.AuxInt = int8ToAuxInt(14)
v.AddArg2(x, y)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (GreaterFloat32x8 x y)
- // result: (VCMPPS256 [6] x y)
+ // result: (VCMPPS256 [14] x y)
for {
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS256)
- v.AuxInt = int8ToAuxInt(6)
+ v.AuxInt = int8ToAuxInt(14)
v.AddArg2(x, y)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (GreaterFloat64x2 x y)
- // result: (VCMPPD128 [6] x y)
+ // result: (VCMPPD128 [14] x y)
for {
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD128)
- v.AuxInt = int8ToAuxInt(6)
+ v.AuxInt = int8ToAuxInt(14)
v.AddArg2(x, y)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (GreaterFloat64x4 x y)
- // result: (VCMPPD256 [6] x y)
+ // result: (VCMPPD256 [14] x y)
for {
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD256)
- v.AuxInt = int8ToAuxInt(6)
+ v.AuxInt = int8ToAuxInt(14)
v.AddArg2(x, y)
return true
}
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterFloat64x8 x y)
- // result: (VPMOVMToVec64x8 (VCMPPD512 [6] x y))
+ // result: (VPMOVMToVec64x8 (VCMPPD512 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterInt16x32 x y)
- // result: (VPMOVMToVec16x32 (VPCMPW512 [6] x y))
+ // result: (VPMOVMToVec16x32 (VPCMPW512 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterInt32x16 x y)
- // result: (VPMOVMToVec32x16 (VPCMPD512 [6] x y))
+ // result: (VPMOVMToVec32x16 (VPCMPD512 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterInt64x2 x y)
- // result: (VPMOVMToVec64x2 (VPCMPQ128 [6] x y))
+ // result: (VPMOVMToVec64x2 (VPCMPQ128 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterInt64x8 x y)
- // result: (VPMOVMToVec64x8 (VPCMPQ512 [6] x y))
+ // result: (VPMOVMToVec64x8 (VPCMPQ512 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterInt8x64 x y)
- // result: (VPMOVMToVec8x64 (VPCMPB512 [6] x y))
+ // result: (VPMOVMToVec8x64 (VPCMPB512 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint16x16 x y)
- // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] x y))
+ // result: (VPMOVMToVec16x16 (VPCMPUW256 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint16x32 x y)
- // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] x y))
+ // result: (VPMOVMToVec16x32 (VPCMPUW512 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint16x8 x y)
- // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] x y))
+ // result: (VPMOVMToVec16x8 (VPCMPUW128 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint32x16 x y)
- // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] x y))
+ // result: (VPMOVMToVec32x16 (VPCMPUD512 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint32x4 x y)
- // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] x y))
+ // result: (VPMOVMToVec32x4 (VPCMPUD128 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint32x8 x y)
- // result: (VPMOVMToVec32x8 (VPCMPUD256 [6] x y))
+ // result: (VPMOVMToVec32x8 (VPCMPUD256 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint64x2 x y)
- // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y))
+ // result: (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint64x4 x y)
- // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y))
+ // result: (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint64x8 x y)
- // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y))
+ // result: (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint8x16 x y)
- // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] x y))
+ // result: (VPMOVMToVec8x16 (VPCMPUB128 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint8x32 x y)
- // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] x y))
+ // result: (VPMOVMToVec8x32 (VPCMPUB256 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (GreaterUint8x64 x y)
- // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] x y))
+ // result: (VPMOVMToVec8x64 (VPCMPUB512 [14] x y))
for {
x := v_0
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualFloat32x16 x y mask)
- // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualFloat32x4 x y mask)
- // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualFloat32x8 x y mask)
- // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualFloat64x2 x y mask)
- // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualFloat64x4 x y mask)
- // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualFloat64x8 x y mask)
- // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt16x16 x y mask)
- // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt16x32 x y mask)
- // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt16x8 x y mask)
- // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt32x16 x y mask)
- // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt32x4 x y mask)
- // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt32x8 x y mask)
- // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt64x2 x y mask)
- // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt64x4 x y mask)
- // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt64x8 x y mask)
- // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt8x16 x y mask)
- // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt8x32 x y mask)
- // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualInt8x64 x y mask)
- // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint16x16 x y mask)
- // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint16x32 x y mask)
- // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint16x8 x y mask)
- // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint32x16 x y mask)
- // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint32x4 x y mask)
- // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint32x8 x y mask)
- // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint64x2 x y mask)
- // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint64x4 x y mask)
- // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint64x8 x y mask)
- // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint8x16 x y mask)
- // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint8x32 x y mask)
- // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterEqualUint8x64 x y mask)
- // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(5)
+ v0.AuxInt = int8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterFloat32x16 x y mask)
- // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterFloat32x4 x y mask)
- // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterFloat32x8 x y mask)
- // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterFloat64x2 x y mask)
- // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterFloat64x4 x y mask)
- // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterFloat64x8 x y mask)
- // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt16x16 x y mask)
- // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt16x32 x y mask)
- // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt16x8 x y mask)
- // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt32x16 x y mask)
- // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt32x4 x y mask)
- // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt32x8 x y mask)
- // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt64x2 x y mask)
- // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt64x4 x y mask)
- // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt64x8 x y mask)
- // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt8x16 x y mask)
- // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt8x32 x y mask)
- // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterInt8x64 x y mask)
- // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint16x16 x y mask)
- // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint16x32 x y mask)
- // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint16x8 x y mask)
- // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint32x16 x y mask)
- // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint32x4 x y mask)
- // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint32x8 x y mask)
- // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint64x2 x y mask)
- // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint64x4 x y mask)
- // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint64x8 x y mask)
- // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint8x16 x y mask)
- // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint8x32 x y mask)
- // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (MaskedGreaterUint8x64 x y mask)
- // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
+ // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM <types.TypeMask> mask)))
for {
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(6)
+ v0.AuxInt = int8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)