// Example instruction: VROUNDPD $7, X2, X2
func simdV11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
p := s.Prog(v.Op.Asm())
- imm := v.AuxInt
- if imm < 0 || imm > 255 {
- v.Fatalf("Invalid source selection immediate")
- }
- p.From.Offset = imm
+ p.From.Offset = int64(v.AuxUInt8())
p.From.Type = obj.TYPE_CONST
p.AddRestSourceReg(simdReg(v.Args[0]))
p.To.Type = obj.TYPE_REG
// Example instruction: VREDUCEPD $126, X1, K3, X31
func simdVkvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
p := s.Prog(v.Op.Asm())
- imm := v.AuxInt
- if imm < 0 || imm > 255 {
- v.Fatalf("Invalid source selection immediate")
- }
- p.From.Offset = imm
+ p.From.Offset = int64(v.AuxUInt8())
p.From.Type = obj.TYPE_CONST
p.AddRestSourceReg(simdReg(v.Args[0]))
p.AddRestSourceReg(maskReg(v.Args[1]))
// Example instruction: VCMPPS $7, X2, X9, X2
func simdV21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
p := s.Prog(v.Op.Asm())
- imm := v.AuxInt
- if imm < 0 || imm > 255 {
- v.Fatalf("Invalid source selection immediate")
- }
- p.From.Offset = imm
+ p.From.Offset = int64(v.AuxUInt8())
p.From.Type = obj.TYPE_CONST
p.AddRestSourceReg(simdReg(v.Args[1]))
p.AddRestSourceReg(simdReg(v.Args[0]))
// Example instruction: VPINSRB $3, DX, X0, X0
func simdVgpvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
p := s.Prog(v.Op.Asm())
- imm := v.AuxInt
- if imm < 0 || imm > 255 {
- v.Fatalf("Invalid source selection immediate")
- }
- p.From.Offset = imm
+ p.From.Offset = int64(v.AuxUInt8())
p.From.Type = obj.TYPE_CONST
p.AddRestSourceReg(v.Args[1].Reg())
p.AddRestSourceReg(simdReg(v.Args[0]))
// Example instruction: VPCMPD $1, Z1, Z2, K1
func simdV2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
p := s.Prog(v.Op.Asm())
- imm := v.AuxInt
- if imm < 0 || imm > 255 {
- v.Fatalf("Invalid source selection immediate")
- }
- p.From.Offset = imm
+ p.From.Offset = int64(v.AuxUInt8())
p.From.Type = obj.TYPE_CONST
p.AddRestSourceReg(simdReg(v.Args[1]))
p.AddRestSourceReg(simdReg(v.Args[0]))
// Example instruction: VPCMPD $1, Z1, Z2, K2, K1
func simdV2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
p := s.Prog(v.Op.Asm())
- imm := v.AuxInt
- if imm < 0 || imm > 255 {
- v.Fatalf("Invalid source selection immediate")
- }
- p.From.Offset = imm
+ p.From.Offset = int64(v.AuxUInt8())
p.From.Type = obj.TYPE_CONST
p.AddRestSourceReg(simdReg(v.Args[1]))
p.AddRestSourceReg(simdReg(v.Args[0]))
}
func simdV2kvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
- return simdV2kkImm8(s, v)
+ p := s.Prog(v.Op.Asm())
+ p.From.Offset = int64(v.AuxUInt8())
+ p.From.Type = obj.TYPE_CONST
+ p.AddRestSourceReg(simdReg(v.Args[1]))
+ p.AddRestSourceReg(simdReg(v.Args[0]))
+ p.AddRestSourceReg(maskReg(v.Args[2]))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = simdReg(v)
+ return p
}
// Example instruction: VPEXTRB $3, X1, AX — NOTE(review): the previous example here,
// "VFMADD213PD Z2, Z1, Z0", takes no imm8 operand and does not match this Imm8 helper
// (imm8 const + vector source + register destination); confirm the intended example.
func simdVgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog {
p := s.Prog(v.Op.Asm())
- imm := v.AuxInt
- if imm < 0 || imm > 255 {
- v.Fatalf("Invalid source selection immediate")
- }
- p.From.Offset = imm
+ p.From.Offset = int64(v.AuxUInt8())
p.From.Type = obj.TYPE_CONST
p.AddRestSourceReg(simdReg(v.Args[0]))
p.To.Type = obj.TYPE_REG
(SetLoUint32x16 x y) => (VINSERTI64X4512 [0] x y)
(SetLoUint64x4 x y) => (VINSERTI128256 [0] x y)
(SetLoUint64x8 x y) => (VINSERTI64X4512 [0] x y)
-(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x)
+(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x)
(ShiftAllLeftInt16x8 x y) => (VPSLLW128 x y)
-(ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x)
+(ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x)
(ShiftAllLeftInt16x16 x y) => (VPSLLW256 x y)
-(ShiftAllLeftInt16x32 x (MOVQconst [c])) => (VPSLLW512const [int8(c)] x)
+(ShiftAllLeftInt16x32 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x)
(ShiftAllLeftInt16x32 x y) => (VPSLLW512 x y)
-(ShiftAllLeftInt32x4 x (MOVQconst [c])) => (VPSLLD128const [int8(c)] x)
+(ShiftAllLeftInt32x4 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x)
(ShiftAllLeftInt32x4 x y) => (VPSLLD128 x y)
-(ShiftAllLeftInt32x8 x (MOVQconst [c])) => (VPSLLD256const [int8(c)] x)
+(ShiftAllLeftInt32x8 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x)
(ShiftAllLeftInt32x8 x y) => (VPSLLD256 x y)
-(ShiftAllLeftInt32x16 x (MOVQconst [c])) => (VPSLLD512const [int8(c)] x)
+(ShiftAllLeftInt32x16 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x)
(ShiftAllLeftInt32x16 x y) => (VPSLLD512 x y)
-(ShiftAllLeftInt64x2 x (MOVQconst [c])) => (VPSLLQ128const [int8(c)] x)
+(ShiftAllLeftInt64x2 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x)
(ShiftAllLeftInt64x2 x y) => (VPSLLQ128 x y)
-(ShiftAllLeftInt64x4 x (MOVQconst [c])) => (VPSLLQ256const [int8(c)] x)
+(ShiftAllLeftInt64x4 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x)
(ShiftAllLeftInt64x4 x y) => (VPSLLQ256 x y)
-(ShiftAllLeftInt64x8 x (MOVQconst [c])) => (VPSLLQ512const [int8(c)] x)
+(ShiftAllLeftInt64x8 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x)
(ShiftAllLeftInt64x8 x y) => (VPSLLQ512 x y)
-(ShiftAllLeftUint16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x)
+(ShiftAllLeftUint16x8 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x)
(ShiftAllLeftUint16x8 x y) => (VPSLLW128 x y)
-(ShiftAllLeftUint16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x)
+(ShiftAllLeftUint16x16 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x)
(ShiftAllLeftUint16x16 x y) => (VPSLLW256 x y)
-(ShiftAllLeftUint16x32 x (MOVQconst [c])) => (VPSLLW512const [int8(c)] x)
+(ShiftAllLeftUint16x32 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x)
(ShiftAllLeftUint16x32 x y) => (VPSLLW512 x y)
-(ShiftAllLeftUint32x4 x (MOVQconst [c])) => (VPSLLD128const [int8(c)] x)
+(ShiftAllLeftUint32x4 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x)
(ShiftAllLeftUint32x4 x y) => (VPSLLD128 x y)
-(ShiftAllLeftUint32x8 x (MOVQconst [c])) => (VPSLLD256const [int8(c)] x)
+(ShiftAllLeftUint32x8 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x)
(ShiftAllLeftUint32x8 x y) => (VPSLLD256 x y)
-(ShiftAllLeftUint32x16 x (MOVQconst [c])) => (VPSLLD512const [int8(c)] x)
+(ShiftAllLeftUint32x16 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x)
(ShiftAllLeftUint32x16 x y) => (VPSLLD512 x y)
-(ShiftAllLeftUint64x2 x (MOVQconst [c])) => (VPSLLQ128const [int8(c)] x)
+(ShiftAllLeftUint64x2 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x)
(ShiftAllLeftUint64x2 x y) => (VPSLLQ128 x y)
-(ShiftAllLeftUint64x4 x (MOVQconst [c])) => (VPSLLQ256const [int8(c)] x)
+(ShiftAllLeftUint64x4 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x)
(ShiftAllLeftUint64x4 x y) => (VPSLLQ256 x y)
-(ShiftAllLeftUint64x8 x (MOVQconst [c])) => (VPSLLQ512const [int8(c)] x)
+(ShiftAllLeftUint64x8 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x)
(ShiftAllLeftUint64x8 x y) => (VPSLLQ512 x y)
(ShiftAllLeftConcatInt16x8 ...) => (VPSHLDW128 ...)
(ShiftAllLeftConcatInt16x16 ...) => (VPSHLDW256 ...)
(ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM <types.TypeMask> mask))
(ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM <types.TypeMask> mask))
(ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedInt32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedInt32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedInt32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedUint16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedUint16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedUint16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedUint32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedUint32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedUint32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
-(ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
-(ShiftAllRightInt16x8 x (MOVQconst [c])) => (VPSRAW128const [int8(c)] x)
+(ShiftAllRightInt16x8 x (MOVQconst [c])) => (VPSRAW128const [uint8(c)] x)
(ShiftAllRightInt16x8 x y) => (VPSRAW128 x y)
-(ShiftAllRightInt16x16 x (MOVQconst [c])) => (VPSRAW256const [int8(c)] x)
+(ShiftAllRightInt16x16 x (MOVQconst [c])) => (VPSRAW256const [uint8(c)] x)
(ShiftAllRightInt16x16 x y) => (VPSRAW256 x y)
-(ShiftAllRightInt16x32 x (MOVQconst [c])) => (VPSRAW512const [int8(c)] x)
+(ShiftAllRightInt16x32 x (MOVQconst [c])) => (VPSRAW512const [uint8(c)] x)
(ShiftAllRightInt16x32 x y) => (VPSRAW512 x y)
-(ShiftAllRightInt32x4 x (MOVQconst [c])) => (VPSRAD128const [int8(c)] x)
+(ShiftAllRightInt32x4 x (MOVQconst [c])) => (VPSRAD128const [uint8(c)] x)
(ShiftAllRightInt32x4 x y) => (VPSRAD128 x y)
-(ShiftAllRightInt32x8 x (MOVQconst [c])) => (VPSRAD256const [int8(c)] x)
+(ShiftAllRightInt32x8 x (MOVQconst [c])) => (VPSRAD256const [uint8(c)] x)
(ShiftAllRightInt32x8 x y) => (VPSRAD256 x y)
-(ShiftAllRightInt32x16 x (MOVQconst [c])) => (VPSRAD512const [int8(c)] x)
+(ShiftAllRightInt32x16 x (MOVQconst [c])) => (VPSRAD512const [uint8(c)] x)
(ShiftAllRightInt32x16 x y) => (VPSRAD512 x y)
-(ShiftAllRightInt64x2 x (MOVQconst [c])) => (VPSRAQ128const [int8(c)] x)
+(ShiftAllRightInt64x2 x (MOVQconst [c])) => (VPSRAQ128const [uint8(c)] x)
(ShiftAllRightInt64x2 x y) => (VPSRAQ128 x y)
-(ShiftAllRightInt64x4 x (MOVQconst [c])) => (VPSRAQ256const [int8(c)] x)
+(ShiftAllRightInt64x4 x (MOVQconst [c])) => (VPSRAQ256const [uint8(c)] x)
(ShiftAllRightInt64x4 x y) => (VPSRAQ256 x y)
-(ShiftAllRightInt64x8 x (MOVQconst [c])) => (VPSRAQ512const [int8(c)] x)
+(ShiftAllRightInt64x8 x (MOVQconst [c])) => (VPSRAQ512const [uint8(c)] x)
(ShiftAllRightInt64x8 x y) => (VPSRAQ512 x y)
-(ShiftAllRightUint16x8 x (MOVQconst [c])) => (VPSRLW128const [int8(c)] x)
+(ShiftAllRightUint16x8 x (MOVQconst [c])) => (VPSRLW128const [uint8(c)] x)
(ShiftAllRightUint16x8 x y) => (VPSRLW128 x y)
-(ShiftAllRightUint16x16 x (MOVQconst [c])) => (VPSRLW256const [int8(c)] x)
+(ShiftAllRightUint16x16 x (MOVQconst [c])) => (VPSRLW256const [uint8(c)] x)
(ShiftAllRightUint16x16 x y) => (VPSRLW256 x y)
-(ShiftAllRightUint16x32 x (MOVQconst [c])) => (VPSRLW512const [int8(c)] x)
+(ShiftAllRightUint16x32 x (MOVQconst [c])) => (VPSRLW512const [uint8(c)] x)
(ShiftAllRightUint16x32 x y) => (VPSRLW512 x y)
-(ShiftAllRightUint32x4 x (MOVQconst [c])) => (VPSRLD128const [int8(c)] x)
+(ShiftAllRightUint32x4 x (MOVQconst [c])) => (VPSRLD128const [uint8(c)] x)
(ShiftAllRightUint32x4 x y) => (VPSRLD128 x y)
-(ShiftAllRightUint32x8 x (MOVQconst [c])) => (VPSRLD256const [int8(c)] x)
+(ShiftAllRightUint32x8 x (MOVQconst [c])) => (VPSRLD256const [uint8(c)] x)
(ShiftAllRightUint32x8 x y) => (VPSRLD256 x y)
-(ShiftAllRightUint32x16 x (MOVQconst [c])) => (VPSRLD512const [int8(c)] x)
+(ShiftAllRightUint32x16 x (MOVQconst [c])) => (VPSRLD512const [uint8(c)] x)
(ShiftAllRightUint32x16 x y) => (VPSRLD512 x y)
-(ShiftAllRightUint64x2 x (MOVQconst [c])) => (VPSRLQ128const [int8(c)] x)
+(ShiftAllRightUint64x2 x (MOVQconst [c])) => (VPSRLQ128const [uint8(c)] x)
(ShiftAllRightUint64x2 x y) => (VPSRLQ128 x y)
-(ShiftAllRightUint64x4 x (MOVQconst [c])) => (VPSRLQ256const [int8(c)] x)
+(ShiftAllRightUint64x4 x (MOVQconst [c])) => (VPSRLQ256const [uint8(c)] x)
(ShiftAllRightUint64x4 x y) => (VPSRLQ256 x y)
-(ShiftAllRightUint64x8 x (MOVQconst [c])) => (VPSRLQ512const [int8(c)] x)
+(ShiftAllRightUint64x8 x (MOVQconst [c])) => (VPSRLQ512const [uint8(c)] x)
(ShiftAllRightUint64x8 x y) => (VPSRLQ512 x y)
(ShiftAllRightConcatInt16x8 ...) => (VPSHRDW128 ...)
(ShiftAllRightConcatInt16x16 ...) => (VPSHRDW256 ...)
(ShiftAllRightConcatMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM <types.TypeMask> mask))
(ShiftAllRightConcatMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM <types.TypeMask> mask))
(ShiftAllRightConcatMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [int8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [uint8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
(ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [int8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [uint8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
(ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [int8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [uint8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
(ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSRADMasked128const [int8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSRADMasked128const [uint8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(ShiftAllRightMaskedInt32x4 x y mask) => (VPSRADMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSRADMasked256const [int8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSRADMasked256const [uint8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(ShiftAllRightMaskedInt32x8 x y mask) => (VPSRADMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSRADMasked512const [int8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSRADMasked512const [uint8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(ShiftAllRightMaskedInt32x16 x y mask) => (VPSRADMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [int8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [uint8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(ShiftAllRightMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [int8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [uint8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(ShiftAllRightMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [int8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [uint8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(ShiftAllRightMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSRLWMasked128const [int8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSRLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
(ShiftAllRightMaskedUint16x8 x y mask) => (VPSRLWMasked128 x y (VPMOVVec16x8ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSRLWMasked256const [int8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSRLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
(ShiftAllRightMaskedUint16x16 x y mask) => (VPSRLWMasked256 x y (VPMOVVec16x16ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSRLWMasked512const [int8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSRLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
(ShiftAllRightMaskedUint16x32 x y mask) => (VPSRLWMasked512 x y (VPMOVVec16x32ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSRLDMasked128const [int8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSRLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
(ShiftAllRightMaskedUint32x4 x y mask) => (VPSRLDMasked128 x y (VPMOVVec32x4ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSRLDMasked256const [int8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSRLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
(ShiftAllRightMaskedUint32x8 x y mask) => (VPSRLDMasked256 x y (VPMOVVec32x8ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSRLDMasked512const [int8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSRLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
(ShiftAllRightMaskedUint32x16 x y mask) => (VPSRLDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSRLQMasked128const [int8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSRLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
(ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSRLQMasked256const [int8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSRLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
(ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM <types.TypeMask> mask))
-(ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSRLQMasked512const [int8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSRLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
(ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
(ShiftLeftInt16x8 ...) => (VPSLLVW128 ...)
(ShiftLeftInt16x16 ...) => (VPSLLVW256 ...)
{name: "VSUBPSMasked128", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false},
{name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false},
{name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false},
- {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false},
- {name: "VCMPPD512", argLength: 2, reg: w2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false},
- {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false},
- {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false},
- {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false},
- {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false},
- {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false},
- {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSLLD128const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSLLD256const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSLLD512const", argLength: 1, reg: w11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSLLQ128const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSLLQ256const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSLLQ512const", argLength: 1, reg: w11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSLLWMasked128const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSLLWMasked256const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSLLWMasked512const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSLLDMasked128const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSLLDMasked256const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSLLDMasked512const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSLLQMasked128const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSLLQMasked256const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSLLQMasked512const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRLW128const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRLW256const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRLW512const", argLength: 1, reg: w11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRLD128const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRLD256const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRLD512const", argLength: 1, reg: w11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRLQ128const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRLQ256const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRLQ512const", argLength: 1, reg: w11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRAW128const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRAW256const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRAW512const", argLength: 1, reg: w11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRAD128const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRAD256const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRAD512const", argLength: 1, reg: w11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRAQ128const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRAQ256const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRAQ512const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRLWMasked128const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRLWMasked256const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRLWMasked512const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRLDMasked128const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRLDMasked256const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRLDMasked512const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRLQMasked128const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRLQMasked256const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRLQMasked512const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRAWMasked128const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRAWMasked256const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRAWMasked512const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRADMasked128const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRADMasked256const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRADMasked512const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
- {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false},
- {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false},
- {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Vec128", resultInArg0: false},
+ {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Vec256", resultInArg0: false},
+ {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Vec128", resultInArg0: false},
+ {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Vec256", resultInArg0: false},
+ {name: "VCMPPD512", argLength: 2, reg: w2k, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false},
+ {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "UInt8", commutative: false, typ: "int8", resultInArg0: false},
+ {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "UInt8", commutative: false, typ: "int16", resultInArg0: false},
+ {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "UInt8", commutative: false, typ: "int32", resultInArg0: false},
+ {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "UInt8", commutative: false, typ: "int64", resultInArg0: false},
+ {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false},
+ {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSLLD128const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSLLD256const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSLLD512const", argLength: 1, reg: w11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSLLQ128const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSLLQ256const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSLLQ512const", argLength: 1, reg: w11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSLLWMasked128const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSLLWMasked256const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSLLWMasked512const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSLLDMasked128const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSLLDMasked256const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSLLDMasked512const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSLLQMasked128const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSLLQMasked256const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSLLQMasked512const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRLW128const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRLW256const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRLW512const", argLength: 1, reg: w11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRLD128const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRLD256const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRLD512const", argLength: 1, reg: w11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRLQ128const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRLQ256const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRLQ512const", argLength: 1, reg: w11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRAW128const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRAW256const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRAW512const", argLength: 1, reg: w11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRAD128const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRAD256const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRAD512const", argLength: 1, reg: w11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRAQ128const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRAQ256const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRAQ512const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRLWMasked128const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRLWMasked256const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRLWMasked512const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRLDMasked128const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRLDMasked256const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRLDMasked512const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRLQMasked128const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRLQMasked256const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRLQMasked512const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRAWMasked128const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRAWMasked256const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRAWMasked512const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRADMasked128const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRADMasked256const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRADMasked512const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
+ {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false},
+ {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false},
+ {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false},
}
}
{name: "blendMaskedInt16x32", argLength: 3, commutative: false},
{name: "blendMaskedInt32x16", argLength: 3, commutative: false},
{name: "blendMaskedInt64x8", argLength: 3, commutative: false},
- {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "CeilScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "FloorScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"},
- {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"},
- {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatInt16x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatInt16x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatInt16x32", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatInt32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatInt32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatInt32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatInt64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatInt64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatInt64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatUint16x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatUint16x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatUint16x32", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatUint32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatUint32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatUint32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllLeftConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatInt16x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatInt16x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatInt16x32", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatInt32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatInt32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatInt32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatInt64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatInt64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatInt64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatUint16x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatUint16x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatUint16x32", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatUint32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatUint32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatUint32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "ShiftAllRightConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"},
- {name: "TruncScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"},
+ {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "CeilScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "FloorScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatInt16x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatInt16x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatInt16x32", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatInt32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatInt32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatInt32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatInt64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatInt64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatInt64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatUint16x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatUint16x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatUint16x32", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatUint32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatUint32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatUint32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatUint64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatUint64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllLeftConcatUint64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatInt16x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatInt16x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatInt16x32", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatInt32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatInt32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatInt32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatInt64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatInt64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatInt64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatUint16x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatUint16x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatUint16x32", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatUint32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatUint32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatUint32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatUint64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatUint64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "ShiftAllRightConcatUint64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"},
+ {name: "TruncScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"},
}
}
case auxInt128:
// AuxInt must be zero, so leave canHaveAuxInt set to false.
case auxUInt8:
- if v.AuxInt != int64(uint8(v.AuxInt)) {
+ // Cast to int8 due to requirement of AuxInt, check its comment for details.
+ if v.AuxInt != int64(int8(v.AuxInt)) {
f.Fatalf("bad uint8 AuxInt value for %v", v)
}
canHaveAuxInt = true
},
{
name: "VROUNDPS128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVROUNDPS,
reg: regInfo{
},
{
name: "VROUNDPS256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVROUNDPS,
reg: regInfo{
},
{
name: "VROUNDPD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVROUNDPD,
reg: regInfo{
},
{
name: "VROUNDPD256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVROUNDPD,
reg: regInfo{
},
{
name: "VRNDSCALEPS128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVRNDSCALEPS,
reg: regInfo{
},
{
name: "VRNDSCALEPS256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVRNDSCALEPS,
reg: regInfo{
},
{
name: "VRNDSCALEPS512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVRNDSCALEPS,
reg: regInfo{
},
{
name: "VRNDSCALEPD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVRNDSCALEPD,
reg: regInfo{
},
{
name: "VRNDSCALEPD256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVRNDSCALEPD,
reg: regInfo{
},
{
name: "VRNDSCALEPD512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVRNDSCALEPD,
reg: regInfo{
},
{
name: "VRNDSCALEPSMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVRNDSCALEPS,
reg: regInfo{
},
{
name: "VRNDSCALEPSMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVRNDSCALEPS,
reg: regInfo{
},
{
name: "VRNDSCALEPSMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVRNDSCALEPS,
reg: regInfo{
},
{
name: "VRNDSCALEPDMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVRNDSCALEPD,
reg: regInfo{
},
{
name: "VRNDSCALEPDMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVRNDSCALEPD,
reg: regInfo{
},
{
name: "VRNDSCALEPDMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVRNDSCALEPD,
reg: regInfo{
},
{
name: "VREDUCEPS128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVREDUCEPS,
reg: regInfo{
},
{
name: "VREDUCEPS256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVREDUCEPS,
reg: regInfo{
},
{
name: "VREDUCEPS512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVREDUCEPS,
reg: regInfo{
},
{
name: "VREDUCEPD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVREDUCEPD,
reg: regInfo{
},
{
name: "VREDUCEPD256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVREDUCEPD,
reg: regInfo{
},
{
name: "VREDUCEPD512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVREDUCEPD,
reg: regInfo{
},
{
name: "VREDUCEPSMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVREDUCEPS,
reg: regInfo{
},
{
name: "VREDUCEPSMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVREDUCEPS,
reg: regInfo{
},
{
name: "VREDUCEPSMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVREDUCEPS,
reg: regInfo{
},
{
name: "VREDUCEPDMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVREDUCEPD,
reg: regInfo{
},
{
name: "VREDUCEPDMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVREDUCEPD,
reg: regInfo{
},
{
name: "VREDUCEPDMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVREDUCEPD,
reg: regInfo{
},
{
name: "VCMPPS128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
commutative: true,
asm: x86.AVCMPPS,
},
{
name: "VCMPPS256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
commutative: true,
asm: x86.AVCMPPS,
},
{
name: "VCMPPS512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
commutative: true,
asm: x86.AVCMPPS,
},
{
name: "VCMPPD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
commutative: true,
asm: x86.AVCMPPD,
},
{
name: "VCMPPD256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
commutative: true,
asm: x86.AVCMPPD,
},
{
name: "VCMPPD512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
commutative: true,
asm: x86.AVCMPPD,
},
{
name: "VCMPPSMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVCMPPS,
},
{
name: "VCMPPSMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVCMPPS,
},
{
name: "VCMPPSMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVCMPPS,
},
{
name: "VCMPPDMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVCMPPD,
},
{
name: "VCMPPDMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVCMPPD,
},
{
name: "VCMPPDMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVCMPPD,
},
{
name: "VPCMPBMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPB,
},
{
name: "VPCMPBMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPB,
},
{
name: "VPCMPBMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPB,
},
{
name: "VPCMPWMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPW,
},
{
name: "VPCMPWMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPW,
},
{
name: "VPCMPWMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPW,
},
{
name: "VPCMPDMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPD,
},
{
name: "VPCMPDMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPD,
},
{
name: "VPCMPDMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPD,
},
{
name: "VPCMPQMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPQ,
},
{
name: "VPCMPQMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPQ,
},
{
name: "VPCMPQMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPQ,
},
{
name: "VPCMPUBMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUB,
},
{
name: "VPCMPUBMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUB,
},
{
name: "VPCMPUBMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUB,
},
{
name: "VPCMPUWMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUW,
},
{
name: "VPCMPUWMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUW,
},
{
name: "VPCMPUWMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUW,
},
{
name: "VPCMPUDMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUD,
},
{
name: "VPCMPUDMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUD,
},
{
name: "VPCMPUDMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUD,
},
{
name: "VPCMPUQMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUQ,
},
{
name: "VPCMPUQMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUQ,
},
{
name: "VPCMPUQMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
commutative: true,
asm: x86.AVPCMPUQ,
},
{
name: "VGF2P8AFFINEQB128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVGF2P8AFFINEQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEQB256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVGF2P8AFFINEQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEQB512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVGF2P8AFFINEQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEINVQB128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVGF2P8AFFINEINVQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEINVQB256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVGF2P8AFFINEINVQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEINVQB512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVGF2P8AFFINEINVQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEINVQBMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVGF2P8AFFINEINVQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEINVQBMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVGF2P8AFFINEINVQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEINVQBMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVGF2P8AFFINEINVQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEQBMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVGF2P8AFFINEQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEQBMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVGF2P8AFFINEQB,
reg: regInfo{
},
{
name: "VGF2P8AFFINEQBMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVGF2P8AFFINEQB,
reg: regInfo{
},
{
name: "VPEXTRB128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPEXTRB,
reg: regInfo{
},
{
name: "VPEXTRW128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPEXTRW,
reg: regInfo{
},
{
name: "VPEXTRD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPEXTRD,
reg: regInfo{
},
{
name: "VPEXTRQ128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPEXTRQ,
reg: regInfo{
},
{
name: "VEXTRACTF128128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVEXTRACTF128,
reg: regInfo{
},
{
name: "VEXTRACTF64X4256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVEXTRACTF64X4,
reg: regInfo{
},
{
name: "VEXTRACTI128128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVEXTRACTI128,
reg: regInfo{
},
{
name: "VEXTRACTI64X4256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVEXTRACTI64X4,
reg: regInfo{
},
{
name: "VPCMPUB128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUB,
reg: regInfo{
},
{
name: "VPCMPUB256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUB,
reg: regInfo{
},
{
name: "VPCMPUB512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUB,
reg: regInfo{
},
{
name: "VPCMPUW128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUW,
reg: regInfo{
},
{
name: "VPCMPUW256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUW,
reg: regInfo{
},
{
name: "VPCMPUW512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUW,
reg: regInfo{
},
{
name: "VPCMPUD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUD,
reg: regInfo{
},
{
name: "VPCMPUD256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUD,
reg: regInfo{
},
{
name: "VPCMPUD512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUD,
reg: regInfo{
},
{
name: "VPCMPUQ128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUQ,
reg: regInfo{
},
{
name: "VPCMPUQ256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUQ,
reg: regInfo{
},
{
name: "VPCMPUQ512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPUQ,
reg: regInfo{
},
{
name: "VPCMPB128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPB,
reg: regInfo{
},
{
name: "VPCMPB256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPB,
reg: regInfo{
},
{
name: "VPCMPB512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPB,
reg: regInfo{
},
{
name: "VPCMPW128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPW,
reg: regInfo{
},
{
name: "VPCMPW256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPW,
reg: regInfo{
},
{
name: "VPCMPW512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPW,
reg: regInfo{
},
{
name: "VPCMPD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPD,
reg: regInfo{
},
{
name: "VPCMPD256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPD,
reg: regInfo{
},
{
name: "VPCMPD512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPD,
reg: regInfo{
},
{
name: "VPCMPQ128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPQ,
reg: regInfo{
},
{
name: "VPCMPQ256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPQ,
reg: regInfo{
},
{
name: "VPCMPQ512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPCMPQ,
reg: regInfo{
},
{
name: "VPROLD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPROLD,
reg: regInfo{
},
{
name: "VPROLD256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPROLD,
reg: regInfo{
},
{
name: "VPROLD512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPROLD,
reg: regInfo{
},
{
name: "VPROLQ128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPROLQ,
reg: regInfo{
},
{
name: "VPROLQ256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPROLQ,
reg: regInfo{
},
{
name: "VPROLQ512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPROLQ,
reg: regInfo{
},
{
name: "VPROLDMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPROLD,
reg: regInfo{
},
{
name: "VPROLDMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPROLD,
reg: regInfo{
},
{
name: "VPROLDMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPROLD,
reg: regInfo{
},
{
name: "VPROLQMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPROLQ,
reg: regInfo{
},
{
name: "VPROLQMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPROLQ,
reg: regInfo{
},
{
name: "VPROLQMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPROLQ,
reg: regInfo{
},
{
name: "VPRORD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPRORD,
reg: regInfo{
},
{
name: "VPRORD256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPRORD,
reg: regInfo{
},
{
name: "VPRORD512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPRORD,
reg: regInfo{
},
{
name: "VPRORQ128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPRORQ,
reg: regInfo{
},
{
name: "VPRORQ256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPRORQ,
reg: regInfo{
},
{
name: "VPRORQ512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPRORQ,
reg: regInfo{
},
{
name: "VPRORDMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPRORD,
reg: regInfo{
},
{
name: "VPRORDMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPRORD,
reg: regInfo{
},
{
name: "VPRORDMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPRORD,
reg: regInfo{
},
{
name: "VPRORQMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPRORQ,
reg: regInfo{
},
{
name: "VPRORQMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPRORQ,
reg: regInfo{
},
{
name: "VPRORQMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPRORQ,
reg: regInfo{
},
{
name: "VPINSRB128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPINSRB,
reg: regInfo{
},
{
name: "VPINSRW128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPINSRW,
reg: regInfo{
},
{
name: "VPINSRD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPINSRD,
reg: regInfo{
},
{
name: "VPINSRQ128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPINSRQ,
reg: regInfo{
},
{
name: "VINSERTF128256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVINSERTF128,
reg: regInfo{
},
{
name: "VINSERTF64X4512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVINSERTF64X4,
reg: regInfo{
},
{
name: "VINSERTI128256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVINSERTI128,
reg: regInfo{
},
{
name: "VINSERTI64X4512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVINSERTI64X4,
reg: regInfo{
},
{
name: "VPSHLDW128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHLDW,
reg: regInfo{
},
{
name: "VPSHLDW256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHLDW,
reg: regInfo{
},
{
name: "VPSHLDW512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHLDW,
reg: regInfo{
},
{
name: "VPSHLDD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHLDD,
reg: regInfo{
},
{
name: "VPSHLDD256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHLDD,
reg: regInfo{
},
{
name: "VPSHLDD512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHLDD,
reg: regInfo{
},
{
name: "VPSHLDQ128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHLDQ,
reg: regInfo{
},
{
name: "VPSHLDQ256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHLDQ,
reg: regInfo{
},
{
name: "VPSHLDQ512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHLDQ,
reg: regInfo{
},
{
name: "VPSHLDWMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHLDW,
reg: regInfo{
},
{
name: "VPSHLDWMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHLDW,
reg: regInfo{
},
{
name: "VPSHLDWMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHLDW,
reg: regInfo{
},
{
name: "VPSHLDDMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHLDD,
reg: regInfo{
},
{
name: "VPSHLDDMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHLDD,
reg: regInfo{
},
{
name: "VPSHLDDMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHLDD,
reg: regInfo{
},
{
name: "VPSHLDQMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHLDQ,
reg: regInfo{
},
{
name: "VPSHLDQMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHLDQ,
reg: regInfo{
},
{
name: "VPSHLDQMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHLDQ,
reg: regInfo{
},
{
name: "VPSHRDW128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHRDW,
reg: regInfo{
},
{
name: "VPSHRDW256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHRDW,
reg: regInfo{
},
{
name: "VPSHRDW512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHRDW,
reg: regInfo{
},
{
name: "VPSHRDD128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHRDD,
reg: regInfo{
},
{
name: "VPSHRDD256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHRDD,
reg: regInfo{
},
{
name: "VPSHRDD512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHRDD,
reg: regInfo{
},
{
name: "VPSHRDQ128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHRDQ,
reg: regInfo{
},
{
name: "VPSHRDQ256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHRDQ,
reg: regInfo{
},
{
name: "VPSHRDQ512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSHRDQ,
reg: regInfo{
},
{
name: "VPSHRDWMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHRDW,
reg: regInfo{
},
{
name: "VPSHRDWMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHRDW,
reg: regInfo{
},
{
name: "VPSHRDWMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHRDW,
reg: regInfo{
},
{
name: "VPSHRDDMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHRDD,
reg: regInfo{
},
{
name: "VPSHRDDMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHRDD,
reg: regInfo{
},
{
name: "VPSHRDDMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHRDD,
reg: regInfo{
},
{
name: "VPSHRDQMasked128",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHRDQ,
reg: regInfo{
},
{
name: "VPSHRDQMasked256",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHRDQ,
reg: regInfo{
},
{
name: "VPSHRDQMasked512",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
asm: x86.AVPSHRDQ,
reg: regInfo{
},
{
name: "VPSLLW128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSLLW,
reg: regInfo{
},
{
name: "VPSLLW256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSLLW,
reg: regInfo{
},
{
name: "VPSLLW512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSLLW,
reg: regInfo{
},
{
name: "VPSLLD128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSLLD,
reg: regInfo{
},
{
name: "VPSLLD256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSLLD,
reg: regInfo{
},
{
name: "VPSLLD512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSLLD,
reg: regInfo{
},
{
name: "VPSLLQ128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSLLQ,
reg: regInfo{
},
{
name: "VPSLLQ256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSLLQ,
reg: regInfo{
},
{
name: "VPSLLQ512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSLLQ,
reg: regInfo{
},
{
name: "VPSLLWMasked128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSLLW,
reg: regInfo{
},
{
name: "VPSLLWMasked256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSLLW,
reg: regInfo{
},
{
name: "VPSLLWMasked512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSLLW,
reg: regInfo{
},
{
name: "VPSLLDMasked128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSLLD,
reg: regInfo{
},
{
name: "VPSLLDMasked256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSLLD,
reg: regInfo{
},
{
name: "VPSLLDMasked512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSLLD,
reg: regInfo{
},
{
name: "VPSLLQMasked128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSLLQ,
reg: regInfo{
},
{
name: "VPSLLQMasked256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSLLQ,
reg: regInfo{
},
{
name: "VPSLLQMasked512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSLLQ,
reg: regInfo{
},
{
name: "VPSRLW128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRLW,
reg: regInfo{
},
{
name: "VPSRLW256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRLW,
reg: regInfo{
},
{
name: "VPSRLW512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRLW,
reg: regInfo{
},
{
name: "VPSRLD128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRLD,
reg: regInfo{
},
{
name: "VPSRLD256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRLD,
reg: regInfo{
},
{
name: "VPSRLD512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRLD,
reg: regInfo{
},
{
name: "VPSRLQ128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRLQ,
reg: regInfo{
},
{
name: "VPSRLQ256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRLQ,
reg: regInfo{
},
{
name: "VPSRLQ512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRLQ,
reg: regInfo{
},
{
name: "VPSRAW128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRAW,
reg: regInfo{
},
{
name: "VPSRAW256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRAW,
reg: regInfo{
},
{
name: "VPSRAW512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRAW,
reg: regInfo{
},
{
name: "VPSRAD128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRAD,
reg: regInfo{
},
{
name: "VPSRAD256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRAD,
reg: regInfo{
},
{
name: "VPSRAD512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRAD,
reg: regInfo{
},
{
name: "VPSRAQ128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRAQ,
reg: regInfo{
},
{
name: "VPSRAQ256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRAQ,
reg: regInfo{
},
{
name: "VPSRAQ512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
asm: x86.AVPSRAQ,
reg: regInfo{
},
{
name: "VPSRLWMasked128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRLW,
reg: regInfo{
},
{
name: "VPSRLWMasked256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRLW,
reg: regInfo{
},
{
name: "VPSRLWMasked512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRLW,
reg: regInfo{
},
{
name: "VPSRLDMasked128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRLD,
reg: regInfo{
},
{
name: "VPSRLDMasked256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRLD,
reg: regInfo{
},
{
name: "VPSRLDMasked512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRLD,
reg: regInfo{
},
{
name: "VPSRLQMasked128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRLQ,
reg: regInfo{
},
{
name: "VPSRLQMasked256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRLQ,
reg: regInfo{
},
{
name: "VPSRLQMasked512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRLQ,
reg: regInfo{
},
{
name: "VPSRAWMasked128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRAW,
reg: regInfo{
},
{
name: "VPSRAWMasked256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRAW,
reg: regInfo{
},
{
name: "VPSRAWMasked512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRAW,
reg: regInfo{
},
{
name: "VPSRADMasked128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRAD,
reg: regInfo{
},
{
name: "VPSRADMasked256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRAD,
reg: regInfo{
},
{
name: "VPSRADMasked512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRAD,
reg: regInfo{
},
{
name: "VPSRAQMasked128const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRAQ,
reg: regInfo{
},
{
name: "VPSRAQMasked256const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRAQ,
reg: regInfo{
},
{
name: "VPSRAQMasked512const",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
asm: x86.AVPSRAQ,
reg: regInfo{
},
{
name: "CeilScaledFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledMaskedFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledMaskedFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledMaskedFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledMaskedFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledMaskedFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledMaskedFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledResidueFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledResidueFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledResidueFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledResidueFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledResidueFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledResidueFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "CeilScaledResidueMaskedFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledResidueMaskedFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledResidueMaskedFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledResidueMaskedFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledResidueMaskedFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "CeilScaledResidueMaskedFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledMaskedFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledMaskedFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledMaskedFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledMaskedFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledMaskedFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledMaskedFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledResidueFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledResidueFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledResidueFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledResidueFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledResidueFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledResidueFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "FloorScaledResidueMaskedFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledResidueMaskedFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledResidueMaskedFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledResidueMaskedFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledResidueMaskedFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "FloorScaledResidueMaskedFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "GaloisFieldAffineTransformInverseMaskedUint8x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "GaloisFieldAffineTransformInverseMaskedUint8x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "GaloisFieldAffineTransformInverseMaskedUint8x64",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "GaloisFieldAffineTransformInverseUint8x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "GaloisFieldAffineTransformInverseUint8x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "GaloisFieldAffineTransformInverseUint8x64",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "GaloisFieldAffineTransformMaskedUint8x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "GaloisFieldAffineTransformMaskedUint8x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "GaloisFieldAffineTransformMaskedUint8x64",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "GaloisFieldAffineTransformUint8x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "GaloisFieldAffineTransformUint8x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "GaloisFieldAffineTransformUint8x64",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "GetElemInt8x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "GetElemInt16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "GetElemInt32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "GetElemInt64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "GetElemUint8x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "GetElemUint16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "GetElemUint32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "GetElemUint64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftInt32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftInt32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftInt32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftInt64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftInt64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftInt64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftMaskedInt32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedInt32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedInt32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedInt64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedInt64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedInt64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedUint32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedUint32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedUint32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedUint64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedUint64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftMaskedUint64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllLeftUint32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftUint32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftUint32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftUint64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftUint64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllLeftUint64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightInt32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightInt32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightInt32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightInt64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightInt64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightInt64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightMaskedInt32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedInt32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedInt32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedInt64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedInt64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedInt64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedUint32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedUint32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedUint32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedUint64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedUint64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightMaskedUint64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RotateAllRightUint32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightUint32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightUint32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightUint64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightUint64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RotateAllRightUint64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledMaskedFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledMaskedFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledMaskedFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledMaskedFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledMaskedFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledMaskedFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledResidueFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledResidueFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledResidueFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledResidueFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledResidueFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledResidueFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "RoundToEvenScaledResidueMaskedFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledResidueMaskedFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledResidueMaskedFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledResidueMaskedFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledResidueMaskedFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "RoundToEvenScaledResidueMaskedFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "SetElemInt8x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "SetElemInt16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "SetElemInt32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "SetElemInt64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "SetElemUint8x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "SetElemUint16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "SetElemUint32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "SetElemUint64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatInt16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatInt16x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatInt16x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatInt32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatInt32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatInt32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatInt64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatInt64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatInt64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedInt16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedInt16x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedInt16x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedInt32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedInt32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedInt32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedInt64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedInt64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedInt64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedUint16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedUint16x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedUint16x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedUint32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedUint32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedUint32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedUint64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedUint64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatMaskedUint64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllLeftConcatUint16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatUint16x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatUint16x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatUint32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatUint32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatUint32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatUint64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatUint64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllLeftConcatUint64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatInt16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatInt16x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatInt16x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatInt32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatInt32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatInt32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatInt64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatInt64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatInt64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedInt16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedInt16x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedInt16x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedInt32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedInt32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedInt32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedInt64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedInt64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedInt64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedUint16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedUint16x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedUint16x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedUint32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedUint32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedUint32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedUint64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedUint64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatMaskedUint64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 3,
generic: true,
},
{
name: "ShiftAllRightConcatUint16x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatUint16x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatUint16x32",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatUint32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatUint32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatUint32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatUint64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatUint64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "ShiftAllRightConcatUint64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledMaskedFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledMaskedFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledMaskedFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledMaskedFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledMaskedFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledMaskedFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledResidueFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledResidueFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledResidueFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledResidueFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledResidueFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledResidueFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 1,
generic: true,
},
{
name: "TruncScaledResidueMaskedFloat32x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledResidueMaskedFloat32x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledResidueMaskedFloat32x16",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledResidueMaskedFloat64x2",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledResidueMaskedFloat64x4",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
{
name: "TruncScaledResidueMaskedFloat64x8",
- auxType: auxInt8,
+ auxType: auxUInt8,
argLen: 2,
generic: true,
},
for {
x := v_0
v.reset(OpAMD64VROUNDPS128)
- v.AuxInt = int8ToAuxInt(2)
+ v.AuxInt = uint8ToAuxInt(2)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPS256)
- v.AuxInt = int8ToAuxInt(2)
+ v.AuxInt = uint8ToAuxInt(2)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPD128)
- v.AuxInt = int8ToAuxInt(2)
+ v.AuxInt = uint8ToAuxInt(2)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPD256)
- v.AuxInt = int8ToAuxInt(2)
+ v.AuxInt = uint8ToAuxInt(2)
v.AddArg(x)
return true
}
// match: (CeilScaledFloat32x16 [a] x)
// result: (VRNDSCALEPS512 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS512)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledFloat32x4 [a] x)
// result: (VRNDSCALEPS128 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS128)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledFloat32x8 [a] x)
// result: (VRNDSCALEPS256 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS256)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledFloat64x2 [a] x)
// result: (VRNDSCALEPD128 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD128)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledFloat64x4 [a] x)
// result: (VRNDSCALEPD256 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD256)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledFloat64x8 [a] x)
// result: (VRNDSCALEPD512 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD512)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledMaskedFloat32x16 [a] x mask)
// result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledMaskedFloat32x4 [a] x mask)
// result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledMaskedFloat32x8 [a] x mask)
// result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledMaskedFloat64x2 [a] x mask)
// result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledMaskedFloat64x4 [a] x mask)
// result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledMaskedFloat64x8 [a] x mask)
// result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledResidueFloat32x16 [a] x)
// result: (VREDUCEPS512 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS512)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledResidueFloat32x4 [a] x)
// result: (VREDUCEPS128 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS128)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledResidueFloat32x8 [a] x)
// result: (VREDUCEPS256 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS256)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledResidueFloat64x2 [a] x)
// result: (VREDUCEPD128 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD128)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledResidueFloat64x4 [a] x)
// result: (VREDUCEPD256 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD256)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledResidueFloat64x8 [a] x)
// result: (VREDUCEPD512 [a+2] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD512)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v.AddArg(x)
return true
}
// match: (CeilScaledResidueMaskedFloat32x16 [a] x mask)
// result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledResidueMaskedFloat32x4 [a] x mask)
// result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledResidueMaskedFloat32x8 [a] x mask)
// result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledResidueMaskedFloat64x2 [a] x mask)
// result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledResidueMaskedFloat64x4 [a] x mask)
// result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (CeilScaledResidueMaskedFloat64x8 [a] x mask)
// result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 2)
+ v.AuxInt = uint8ToAuxInt(a + 2)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(0)
+ v0.AuxInt = uint8ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
for {
x := v_0
v.reset(OpAMD64VROUNDPS128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPS256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPD128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPD256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
// match: (FloorScaledFloat32x16 [a] x)
// result: (VRNDSCALEPS512 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS512)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledFloat32x4 [a] x)
// result: (VRNDSCALEPS128 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS128)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledFloat32x8 [a] x)
// result: (VRNDSCALEPS256 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS256)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledFloat64x2 [a] x)
// result: (VRNDSCALEPD128 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD128)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledFloat64x4 [a] x)
// result: (VRNDSCALEPD256 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD256)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledFloat64x8 [a] x)
// result: (VRNDSCALEPD512 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD512)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledMaskedFloat32x16 [a] x mask)
// result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledMaskedFloat32x4 [a] x mask)
// result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledMaskedFloat32x8 [a] x mask)
// result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledMaskedFloat64x2 [a] x mask)
// result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledMaskedFloat64x4 [a] x mask)
// result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledMaskedFloat64x8 [a] x mask)
// result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledResidueFloat32x16 [a] x)
// result: (VREDUCEPS512 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS512)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledResidueFloat32x4 [a] x)
// result: (VREDUCEPS128 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS128)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledResidueFloat32x8 [a] x)
// result: (VREDUCEPS256 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS256)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledResidueFloat64x2 [a] x)
// result: (VREDUCEPD128 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD128)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledResidueFloat64x4 [a] x)
// result: (VREDUCEPD256 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD256)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledResidueFloat64x8 [a] x)
// result: (VREDUCEPD512 [a+1] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD512)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v.AddArg(x)
return true
}
// match: (FloorScaledResidueMaskedFloat32x16 [a] x mask)
// result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledResidueMaskedFloat32x4 [a] x mask)
// result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledResidueMaskedFloat32x8 [a] x mask)
// result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledResidueMaskedFloat64x2 [a] x mask)
// result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledResidueMaskedFloat64x4 [a] x mask)
// result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (FloorScaledResidueMaskedFloat64x8 [a] x mask)
// result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 1)
+ v.AuxInt = uint8ToAuxInt(a + 1)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (GaloisFieldAffineTransformInverseMaskedUint8x16 [a] x y mask)
// result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (GaloisFieldAffineTransformInverseMaskedUint8x32 [a] x y mask)
// result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (GaloisFieldAffineTransformInverseMaskedUint8x64 [a] x y mask)
// result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask)
// result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VGF2P8AFFINEQBMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask)
// result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VGF2P8AFFINEQBMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask)
// result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VGF2P8AFFINEQBMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
for {
x := v_0
v.reset(OpAMD64VEXTRACTF64X4256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTF128128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTF128128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTF64X4256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTF64X4256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTF128128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTF128128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTF64X4256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI128128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VEXTRACTI64X4256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS128)
- v.AuxInt = int8ToAuxInt(13)
+ v.AuxInt = uint8ToAuxInt(13)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS256)
- v.AuxInt = int8ToAuxInt(13)
+ v.AuxInt = uint8ToAuxInt(13)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD128)
- v.AuxInt = int8ToAuxInt(13)
+ v.AuxInt = uint8ToAuxInt(13)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD256)
- v.AuxInt = int8ToAuxInt(13)
+ v.AuxInt = uint8ToAuxInt(13)
v.AddArg2(x, y)
return true
}
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(13)
+ v0.AuxInt = uint8ToAuxInt(13)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS128)
- v.AuxInt = int8ToAuxInt(14)
+ v.AuxInt = uint8ToAuxInt(14)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS256)
- v.AuxInt = int8ToAuxInt(14)
+ v.AuxInt = uint8ToAuxInt(14)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD128)
- v.AuxInt = int8ToAuxInt(14)
+ v.AuxInt = uint8ToAuxInt(14)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD256)
- v.AuxInt = int8ToAuxInt(14)
+ v.AuxInt = uint8ToAuxInt(14)
v.AddArg2(x, y)
return true
}
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(14)
+ v0.AuxInt = uint8ToAuxInt(14)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(3)
+ v0.AuxInt = uint8ToAuxInt(3)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS128)
- v.AuxInt = int8ToAuxInt(3)
+ v.AuxInt = uint8ToAuxInt(3)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS256)
- v.AuxInt = int8ToAuxInt(3)
+ v.AuxInt = uint8ToAuxInt(3)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD128)
- v.AuxInt = int8ToAuxInt(3)
+ v.AuxInt = uint8ToAuxInt(3)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD256)
- v.AuxInt = int8ToAuxInt(3)
+ v.AuxInt = uint8ToAuxInt(3)
v.AddArg2(x, y)
return true
}
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(3)
+ v0.AuxInt = uint8ToAuxInt(3)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(3)
+ v0.AuxInt = uint8ToAuxInt(3)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(3)
+ v0.AuxInt = uint8ToAuxInt(3)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(3)
+ v0.AuxInt = uint8ToAuxInt(3)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(3)
+ v0.AuxInt = uint8ToAuxInt(3)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(3)
+ v0.AuxInt = uint8ToAuxInt(3)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(3)
+ v0.AuxInt = uint8ToAuxInt(3)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS128)
- v.AuxInt = int8ToAuxInt(2)
+ v.AuxInt = uint8ToAuxInt(2)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS256)
- v.AuxInt = int8ToAuxInt(2)
+ v.AuxInt = uint8ToAuxInt(2)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD128)
- v.AuxInt = int8ToAuxInt(2)
+ v.AuxInt = uint8ToAuxInt(2)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD256)
- v.AuxInt = int8ToAuxInt(2)
+ v.AuxInt = uint8ToAuxInt(2)
v.AddArg2(x, y)
return true
}
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(2)
+ v0.AuxInt = uint8ToAuxInt(2)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD128)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(1)
+ v0.AuxInt = uint8ToAuxInt(1)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS128)
- v.AuxInt = int8ToAuxInt(4)
+ v.AuxInt = uint8ToAuxInt(4)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPS256)
- v.AuxInt = int8ToAuxInt(4)
+ v.AuxInt = uint8ToAuxInt(4)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD128)
- v.AuxInt = int8ToAuxInt(4)
+ v.AuxInt = uint8ToAuxInt(4)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VCMPPD256)
- v.AuxInt = int8ToAuxInt(4)
+ v.AuxInt = uint8ToAuxInt(4)
v.AddArg2(x, y)
return true
}
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
mask := v_2
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
v1.AddArg(mask)
v0.AddArg3(x, y, v1)
y := v_1
v.reset(OpAMD64VPMOVMToVec16x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec16x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec32x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x2)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x4)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec64x8)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x16)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x32)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
y := v_1
v.reset(OpAMD64VPMOVMToVec8x64)
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask)
- v0.AuxInt = int8ToAuxInt(4)
+ v0.AuxInt = uint8ToAuxInt(4)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
// match: (RotateAllLeftMaskedInt32x16 [a] x mask)
// result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLDMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedInt32x4 [a] x mask)
// result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLDMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedInt32x8 [a] x mask)
// result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLDMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedInt64x2 [a] x mask)
// result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLQMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedInt64x4 [a] x mask)
// result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLQMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedInt64x8 [a] x mask)
// result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLQMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedUint32x16 [a] x mask)
// result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLDMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedUint32x4 [a] x mask)
// result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLDMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedUint32x8 [a] x mask)
// result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLDMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedUint64x2 [a] x mask)
// result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLQMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedUint64x4 [a] x mask)
// result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLQMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllLeftMaskedUint64x8 [a] x mask)
// result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPROLQMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedInt32x16 [a] x mask)
// result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORDMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedInt32x4 [a] x mask)
// result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORDMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedInt32x8 [a] x mask)
// result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORDMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedInt64x2 [a] x mask)
// result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORQMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedInt64x4 [a] x mask)
// result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORQMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedInt64x8 [a] x mask)
// result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORQMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedUint32x16 [a] x mask)
// result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORDMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedUint32x4 [a] x mask)
// result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORDMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedUint32x8 [a] x mask)
// result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORDMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedUint64x2 [a] x mask)
// result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORQMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedUint64x4 [a] x mask)
// result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORQMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RotateAllRightMaskedUint64x8 [a] x mask)
// result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VPRORQMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
for {
x := v_0
v.reset(OpAMD64VROUNDPS128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPS256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPD128)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPD256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledFloat32x16 [a] x)
// result: (VRNDSCALEPS512 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS512)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledFloat32x4 [a] x)
// result: (VRNDSCALEPS128 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS128)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledFloat32x8 [a] x)
// result: (VRNDSCALEPS256 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS256)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledFloat64x2 [a] x)
// result: (VRNDSCALEPD128 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD128)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledFloat64x4 [a] x)
// result: (VRNDSCALEPD256 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD256)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledFloat64x8 [a] x)
// result: (VRNDSCALEPD512 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD512)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledMaskedFloat32x16 [a] x mask)
// result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledMaskedFloat32x4 [a] x mask)
// result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledMaskedFloat32x8 [a] x mask)
// result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledMaskedFloat64x2 [a] x mask)
// result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledMaskedFloat64x4 [a] x mask)
// result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledMaskedFloat64x8 [a] x mask)
// result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledResidueFloat32x16 [a] x)
// result: (VREDUCEPS512 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS512)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledResidueFloat32x4 [a] x)
// result: (VREDUCEPS128 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS128)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledResidueFloat32x8 [a] x)
// result: (VREDUCEPS256 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS256)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledResidueFloat64x2 [a] x)
// result: (VREDUCEPD128 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD128)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledResidueFloat64x4 [a] x)
// result: (VREDUCEPD256 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD256)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledResidueFloat64x8 [a] x)
// result: (VREDUCEPD512 [a+0] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD512)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v.AddArg(x)
return true
}
// match: (RoundToEvenScaledResidueMaskedFloat32x16 [a] x mask)
// result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledResidueMaskedFloat32x4 [a] x mask)
// result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledResidueMaskedFloat32x8 [a] x mask)
// result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledResidueMaskedFloat64x2 [a] x mask)
// result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledResidueMaskedFloat64x4 [a] x mask)
// result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (RoundToEvenScaledResidueMaskedFloat64x8 [a] x mask)
// result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 0)
+ v.AuxInt = uint8ToAuxInt(a + 0)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
x := v_0
y := v_1
v.reset(OpAMD64VINSERTF64X4512)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTF128256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTF128256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTF64X4512)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(1)
+ v.AuxInt = uint8ToAuxInt(1)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTF64X4512)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTF128256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTF128256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTF64X4512)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI128256)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
x := v_0
y := v_1
v.reset(OpAMD64VINSERTI64X4512)
- v.AuxInt = int8ToAuxInt(0)
+ v.AuxInt = uint8ToAuxInt(0)
v.AddArg2(x, y)
return true
}
// match: (ShiftAllLeftConcatMaskedInt16x16 [a] x y mask)
// result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDWMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedInt16x32 [a] x y mask)
// result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDWMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedInt16x8 [a] x y mask)
// result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDWMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedInt32x16 [a] x y mask)
// result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDDMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedInt32x4 [a] x y mask)
// result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDDMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedInt32x8 [a] x y mask)
// result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDDMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedInt64x2 [a] x y mask)
// result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDQMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedInt64x4 [a] x y mask)
// result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDQMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedInt64x8 [a] x y mask)
// result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDQMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedUint16x16 [a] x y mask)
// result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDWMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedUint16x32 [a] x y mask)
// result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDWMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedUint16x8 [a] x y mask)
// result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDWMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedUint32x16 [a] x y mask)
// result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDDMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedUint32x4 [a] x y mask)
// result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDDMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedUint32x8 [a] x y mask)
// result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDDMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask)
// result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDQMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask)
// result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDQMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask)
// result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHLDQMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftInt16x16 x (MOVQconst [c]))
- // result: (VPSLLW256const [int8(c)] x)
+ // result: (VPSLLW256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLW256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftInt16x32 x (MOVQconst [c]))
- // result: (VPSLLW512const [int8(c)] x)
+ // result: (VPSLLW512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLW512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftInt16x8 x (MOVQconst [c]))
- // result: (VPSLLW128const [int8(c)] x)
+ // result: (VPSLLW128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLW128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftInt32x16 x (MOVQconst [c]))
- // result: (VPSLLD512const [int8(c)] x)
+ // result: (VPSLLD512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLD512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftInt32x4 x (MOVQconst [c]))
- // result: (VPSLLD128const [int8(c)] x)
+ // result: (VPSLLD128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLD128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftInt32x8 x (MOVQconst [c]))
- // result: (VPSLLD256const [int8(c)] x)
+ // result: (VPSLLD256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLD256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftInt64x2 x (MOVQconst [c]))
- // result: (VPSLLQ128const [int8(c)] x)
+ // result: (VPSLLQ128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLQ128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftInt64x4 x (MOVQconst [c]))
- // result: (VPSLLQ256const [int8(c)] x)
+ // result: (VPSLLQ256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLQ256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftInt64x8 x (MOVQconst [c]))
- // result: (VPSLLQ512const [int8(c)] x)
+ // result: (VPSLLQ512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLQ512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask)
- // result: (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
+ // result: (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLWMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask)
- // result: (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
+ // result: (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLWMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask)
- // result: (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
+ // result: (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLWMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask)
- // result: (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
+ // result: (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLDMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask)
- // result: (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
+ // result: (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLDMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask)
- // result: (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
+ // result: (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLDMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask)
- // result: (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
+ // result: (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLQMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask)
- // result: (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
+ // result: (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLQMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask)
- // result: (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
+ // result: (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLQMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask)
- // result: (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
+ // result: (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLWMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask)
- // result: (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
+ // result: (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLWMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask)
- // result: (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
+ // result: (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLWMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask)
- // result: (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
+ // result: (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLDMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask)
- // result: (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
+ // result: (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLDMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask)
- // result: (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
+ // result: (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLDMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask)
- // result: (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
+ // result: (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLQMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask)
- // result: (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
+ // result: (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLQMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask)
- // result: (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
+ // result: (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSLLQMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftUint16x16 x (MOVQconst [c]))
- // result: (VPSLLW256const [int8(c)] x)
+ // result: (VPSLLW256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLW256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftUint16x32 x (MOVQconst [c]))
- // result: (VPSLLW512const [int8(c)] x)
+ // result: (VPSLLW512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLW512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftUint16x8 x (MOVQconst [c]))
- // result: (VPSLLW128const [int8(c)] x)
+ // result: (VPSLLW128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLW128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftUint32x16 x (MOVQconst [c]))
- // result: (VPSLLD512const [int8(c)] x)
+ // result: (VPSLLD512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLD512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftUint32x4 x (MOVQconst [c]))
- // result: (VPSLLD128const [int8(c)] x)
+ // result: (VPSLLD128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLD128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftUint32x8 x (MOVQconst [c]))
- // result: (VPSLLD256const [int8(c)] x)
+ // result: (VPSLLD256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLD256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftUint64x2 x (MOVQconst [c]))
- // result: (VPSLLQ128const [int8(c)] x)
+ // result: (VPSLLQ128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLQ128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftUint64x4 x (MOVQconst [c]))
- // result: (VPSLLQ256const [int8(c)] x)
+ // result: (VPSLLQ256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLQ256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllLeftUint64x8 x (MOVQconst [c]))
- // result: (VPSLLQ512const [int8(c)] x)
+ // result: (VPSLLQ512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSLLQ512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
// match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask)
// result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDWMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask)
// result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDWMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask)
// result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDWMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedInt32x16 [a] x y mask)
// result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDDMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedInt32x4 [a] x y mask)
// result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDDMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedInt32x8 [a] x y mask)
// result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDDMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedInt64x2 [a] x y mask)
// result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDQMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedInt64x4 [a] x y mask)
// result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDQMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedInt64x8 [a] x y mask)
// result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDQMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedUint16x16 [a] x y mask)
// result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDWMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedUint16x32 [a] x y mask)
// result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDWMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedUint16x8 [a] x y mask)
// result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDWMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedUint32x16 [a] x y mask)
// result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDDMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedUint32x4 [a] x y mask)
// result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDDMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedUint32x8 [a] x y mask)
// result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDDMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedUint64x2 [a] x y mask)
// result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDQMasked128)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedUint64x4 [a] x y mask)
// result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDQMasked256)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
// match: (ShiftAllRightConcatMaskedUint64x8 [a] x y mask)
// result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
y := v_1
mask := v_2
v.reset(OpAMD64VPSHRDQMasked512)
- v.AuxInt = int8ToAuxInt(a)
+ v.AuxInt = uint8ToAuxInt(a)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(x, y, v0)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightInt16x16 x (MOVQconst [c]))
- // result: (VPSRAW256const [int8(c)] x)
+ // result: (VPSRAW256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRAW256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightInt16x32 x (MOVQconst [c]))
- // result: (VPSRAW512const [int8(c)] x)
+ // result: (VPSRAW512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRAW512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightInt16x8 x (MOVQconst [c]))
- // result: (VPSRAW128const [int8(c)] x)
+ // result: (VPSRAW128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRAW128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightInt32x16 x (MOVQconst [c]))
- // result: (VPSRAD512const [int8(c)] x)
+ // result: (VPSRAD512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRAD512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightInt32x4 x (MOVQconst [c]))
- // result: (VPSRAD128const [int8(c)] x)
+ // result: (VPSRAD128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRAD128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightInt32x8 x (MOVQconst [c]))
- // result: (VPSRAD256const [int8(c)] x)
+ // result: (VPSRAD256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRAD256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightInt64x2 x (MOVQconst [c]))
- // result: (VPSRAQ128const [int8(c)] x)
+ // result: (VPSRAQ128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRAQ128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightInt64x4 x (MOVQconst [c]))
- // result: (VPSRAQ256const [int8(c)] x)
+ // result: (VPSRAQ256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRAQ256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightInt64x8 x (MOVQconst [c]))
- // result: (VPSRAQ512const [int8(c)] x)
+ // result: (VPSRAQ512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRAQ512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask)
- // result: (VPSRAWMasked256const [int8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
+ // result: (VPSRAWMasked256const [uint8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRAWMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask)
- // result: (VPSRAWMasked512const [int8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
+ // result: (VPSRAWMasked512const [uint8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRAWMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask)
- // result: (VPSRAWMasked128const [int8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
+ // result: (VPSRAWMasked128const [uint8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRAWMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask)
- // result: (VPSRADMasked512const [int8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
+ // result: (VPSRADMasked512const [uint8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRADMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask)
- // result: (VPSRADMasked128const [int8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
+ // result: (VPSRADMasked128const [uint8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRADMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask)
- // result: (VPSRADMasked256const [int8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
+ // result: (VPSRADMasked256const [uint8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRADMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask)
- // result: (VPSRAQMasked128const [int8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
+ // result: (VPSRAQMasked128const [uint8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRAQMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask)
- // result: (VPSRAQMasked256const [int8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
+ // result: (VPSRAQMasked256const [uint8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRAQMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask)
- // result: (VPSRAQMasked512const [int8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
+ // result: (VPSRAQMasked512const [uint8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRAQMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask)
- // result: (VPSRLWMasked256const [int8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
+ // result: (VPSRLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRLWMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask)
- // result: (VPSRLWMasked512const [int8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
+ // result: (VPSRLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRLWMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask)
- // result: (VPSRLWMasked128const [int8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
+ // result: (VPSRLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRLWMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask)
- // result: (VPSRLDMasked512const [int8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
+ // result: (VPSRLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRLDMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask)
- // result: (VPSRLDMasked128const [int8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
+ // result: (VPSRLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRLDMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask)
- // result: (VPSRLDMasked256const [int8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
+ // result: (VPSRLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRLDMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask)
- // result: (VPSRLQMasked128const [int8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
+ // result: (VPSRLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRLQMasked128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask)
- // result: (VPSRLQMasked256const [int8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
+ // result: (VPSRLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRLQMasked256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_0 := v.Args[0]
b := v.Block
// match: (ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask)
- // result: (VPSRLQMasked512const [int8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
+ // result: (VPSRLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
c := auxIntToInt64(v_1.AuxInt)
mask := v_2
v.reset(OpAMD64VPSRLQMasked512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightUint16x16 x (MOVQconst [c]))
- // result: (VPSRLW256const [int8(c)] x)
+ // result: (VPSRLW256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRLW256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightUint16x32 x (MOVQconst [c]))
- // result: (VPSRLW512const [int8(c)] x)
+ // result: (VPSRLW512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRLW512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightUint16x8 x (MOVQconst [c]))
- // result: (VPSRLW128const [int8(c)] x)
+ // result: (VPSRLW128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRLW128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightUint32x16 x (MOVQconst [c]))
- // result: (VPSRLD512const [int8(c)] x)
+ // result: (VPSRLD512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRLD512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightUint32x4 x (MOVQconst [c]))
- // result: (VPSRLD128const [int8(c)] x)
+ // result: (VPSRLD128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRLD128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightUint32x8 x (MOVQconst [c]))
- // result: (VPSRLD256const [int8(c)] x)
+ // result: (VPSRLD256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRLD256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightUint64x2 x (MOVQconst [c]))
- // result: (VPSRLQ128const [int8(c)] x)
+ // result: (VPSRLQ128const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRLQ128const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightUint64x4 x (MOVQconst [c]))
- // result: (VPSRLQ256const [int8(c)] x)
+ // result: (VPSRLQ256const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRLQ256const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ShiftAllRightUint64x8 x (MOVQconst [c]))
- // result: (VPSRLQ512const [int8(c)] x)
+ // result: (VPSRLQ512const [uint8(c)] x)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpAMD64VPSRLQ512const)
- v.AuxInt = int8ToAuxInt(int8(c))
+ v.AuxInt = uint8ToAuxInt(uint8(c))
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPS128)
- v.AuxInt = int8ToAuxInt(3)
+ v.AuxInt = uint8ToAuxInt(3)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPS256)
- v.AuxInt = int8ToAuxInt(3)
+ v.AuxInt = uint8ToAuxInt(3)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPD128)
- v.AuxInt = int8ToAuxInt(3)
+ v.AuxInt = uint8ToAuxInt(3)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpAMD64VROUNDPD256)
- v.AuxInt = int8ToAuxInt(3)
+ v.AuxInt = uint8ToAuxInt(3)
v.AddArg(x)
return true
}
// match: (TruncScaledFloat32x16 [a] x)
// result: (VRNDSCALEPS512 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS512)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledFloat32x4 [a] x)
// result: (VRNDSCALEPS128 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS128)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledFloat32x8 [a] x)
// result: (VRNDSCALEPS256 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPS256)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledFloat64x2 [a] x)
// result: (VRNDSCALEPD128 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD128)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledFloat64x4 [a] x)
// result: (VRNDSCALEPD256 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD256)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledFloat64x8 [a] x)
// result: (VRNDSCALEPD512 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VRNDSCALEPD512)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledMaskedFloat32x16 [a] x mask)
// result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledMaskedFloat32x4 [a] x mask)
// result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledMaskedFloat32x8 [a] x mask)
// result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledMaskedFloat64x2 [a] x mask)
// result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledMaskedFloat64x4 [a] x mask)
// result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledMaskedFloat64x8 [a] x mask)
// result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VRNDSCALEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledResidueFloat32x16 [a] x)
// result: (VREDUCEPS512 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS512)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledResidueFloat32x4 [a] x)
// result: (VREDUCEPS128 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS128)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledResidueFloat32x8 [a] x)
// result: (VREDUCEPS256 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPS256)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledResidueFloat64x2 [a] x)
// result: (VREDUCEPD128 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD128)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledResidueFloat64x4 [a] x)
// result: (VREDUCEPD256 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD256)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledResidueFloat64x8 [a] x)
// result: (VREDUCEPD512 [a+3] x)
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
v.reset(OpAMD64VREDUCEPD512)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v.AddArg(x)
return true
}
// match: (TruncScaledResidueMaskedFloat32x16 [a] x mask)
// result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked512)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledResidueMaskedFloat32x4 [a] x mask)
// result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked128)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledResidueMaskedFloat32x8 [a] x mask)
// result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPSMasked256)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledResidueMaskedFloat64x2 [a] x mask)
// result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked128)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledResidueMaskedFloat64x4 [a] x mask)
// result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked256)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
// match: (TruncScaledResidueMaskedFloat64x8 [a] x mask)
// result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM <types.TypeMask> mask))
for {
- a := auxIntToInt8(v.AuxInt)
+ a := auxIntToUint8(v.AuxInt)
x := v_0
mask := v_1
v.reset(OpAMD64VREDUCEPDMasked512)
- v.AuxInt = int8ToAuxInt(a + 3)
+ v.AuxInt = uint8ToAuxInt(a + 3)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg2(x, v0)
}
}
-func plainPanicSimdImm(s *state) {
- cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL])
- cmp.AuxInt = 0
- // TODO: make this a standalone panic instead of reusing the overflow panic.
- // Or maybe after we implement the switch table this will be obsolete anyway.
- s.check(cmp, ir.Syms.Panicoverflow)
+// immJumpTable lowers a SIMD intrinsic whose 8-bit immediate is not a
+// compile-time constant: it turns the current block into a 256-way jump table
+// keyed on idx (one successor per possible uint8 value) and calls genOp in each
+// case block to emit the intrinsic with that constant immediate. Each genOp
+// callback records its result in s.vars[intrinsicCall] (see callers); the merged
+// result is read back at the join block and returned.
+func immJumpTable(s *state, idx *ssa.Value, intrinsicCall *ir.CallExpr, genOp func(*state, int)) *ssa.Value {
+ // Make blocks we'll need. bEnd is the join point every case block jumps to.
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ t := types.Types[types.TUINT8]
+ if !idx.Type.IsKind(types.TUINT8) {
+ panic("immJumpTable expects uint8 value")
+ }
+ // The table has an entry for every uint8 value (0-255), so no bounds check is needed.
+
+ b := s.curBlock
+ b.Kind = ssa.BlockJumpTable
+ b.Pos = intrinsicCall.Pos()
+ if base.Flag.Cfg.SpectreIndex {
+ // Under Spectre index hardening, mask idx so speculative execution
+ // cannot select an out-of-range table entry.
+ idx = s.newValue2(ssa.OpSpectreSliceIndex, t, idx, s.uintptrConstant(255))
+ }
+ b.SetControl(idx)
+ // Create one target block per immediate value and wire each up as a
+ // jump-table successor of the current block.
+ targets := [256]*ssa.Block{}
+ for i := range 256 {
+ t := s.f.NewBlock(ssa.BlockPlain)
+ targets[i] = t
+ b.AddEdgeTo(t)
+ }
+ s.endBlock()
+
+ // Fill each case block: genOp emits the intrinsic with immediate i, then
+ // the block falls through to the join block.
+ for i, t := range targets {
+ s.startBlock(t)
+ genOp(s, i)
+ t.AddEdgeTo(bEnd)
+ s.endBlock()
+ }
+
+ // Read back the value the case blocks recorded for intrinsicCall; the
+ // variable mechanism inserts the necessary phi at the join.
+ s.startBlock(bEnd)
+ ret := s.variable(intrinsicCall, intrinsicCall.Type())
+ return ret
 }
func opLen1Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if args[1].Op == ssa.OpConst8 {
return s.newValue1I(op, t, args[1].AuxInt<<int64(offset), args[0])
}
- plainPanicSimdImm(s)
- // Even though this default call is unreachable semantically,
- // it has to return something, otherwise the compiler will try to generate
- // default codes which might lead to a FwdRef being put at the entry block
- // triggering a compiler panic.
- return s.newValue1I(op, t, 0, args[0])
+ return immJumpTable(s, args[1], n, func(sNew *state, idx int) {
+ // Encode as int8, as AuxInt requires; see the AuxInt comment for details.
+ s.vars[n] = sNew.newValue1I(op, t, int64(int8(idx<<offset)), args[0])
+ })
}
}
if args[1].Op == ssa.OpConst8 {
return s.newValue2I(op, t, args[1].AuxInt<<int64(offset), args[0], args[2])
}
- plainPanicSimdImm(s)
- // Even though this default call is unreachable semantically,
- // it has to return something, otherwise the compiler will try to generate
- // default codes which might lead to a FwdRef being put at the entry block
- // triggering a compiler panic.
- return s.newValue2I(op, t, 0, args[0], args[2])
+ return immJumpTable(s, args[1], n, func(sNew *state, idx int) {
+ // Encode as int8, as AuxInt requires; see the AuxInt comment for details.
+ s.vars[n] = sNew.newValue2I(op, t, int64(int8(idx<<offset)), args[0], args[2])
+ })
}
}
if args[1].Op == ssa.OpConst8 {
return s.newValue3I(op, t, args[1].AuxInt<<int64(offset), args[0], args[2], args[3])
}
- plainPanicSimdImm(s)
- // Even though this default call is unreachable semantically,
- // it has to return something, otherwise the compiler will try to generate
- // default codes which might lead to a FwdRef being put at the entry block
- // triggering a compiler panic.
- return s.newValue3I(op, t, 0, args[0], args[2], args[3])
+ return immJumpTable(s, args[1], n, func(sNew *state, idx int) {
+ // Encode as int8, as AuxInt requires; see the AuxInt comment for details.
+ s.vars[n] = sNew.newValue3I(op, t, int64(int8(idx<<offset)), args[0], args[2], args[3])
+ })
}
}
if args[2].Op == ssa.OpConst8 {
return s.newValue2I(op, t, args[2].AuxInt<<int64(offset), args[0], args[1])
}
- plainPanicSimdImm(s)
- // Even though this default call is unreachable semantically,
- // it has to return something, otherwise the compiler will try to generate
- // default codes which might lead to a FwdRef being put at the entry block
- // triggering a compiler panic.
- return s.newValue2I(op, t, 0, args[0], args[1])
+ return immJumpTable(s, args[2], n, func(sNew *state, idx int) {
+ // Encode as int8, as AuxInt requires; see the AuxInt comment for details.
+ s.vars[n] = sNew.newValue2I(op, t, int64(int8(idx<<offset)), args[0], args[1])
+ })
}
}
if args[2].Op == ssa.OpConst8 {
return s.newValue3I(op, t, args[2].AuxInt<<int64(offset), args[0], args[1], args[3])
}
- plainPanicSimdImm(s)
- // Even though this default call is unreachable semantically,
- // it has to return something, otherwise the compiler will try to generate
- // default codes which might lead to a FwdRef being put at the entry block
- // triggering a compiler panic.
- return s.newValue3I(op, t, 0, args[0], args[1], args[3])
+ return immJumpTable(s, args[2], n, func(sNew *state, idx int) {
+ // Encode as int8, as AuxInt requires; see the AuxInt comment for details.
+ s.vars[n] = sNew.newValue3I(op, t, int64(int8(idx<<offset)), args[0], args[1], args[3])
+ })
}
}
if args[1].Op == ssa.OpConst8 {
return s.newValue4I(op, t, args[1].AuxInt<<int64(offset), args[0], args[2], args[3], args[4])
}
- plainPanicSimdImm(s)
- // Even though this default call is unreachable semantically,
- // it has to return something, otherwise the compiler will try to generate
- // default codes which might lead to a FwdRef being put at the entry block
- // triggering a compiler panic.
- return s.newValue4I(op, t, 0, args[0], args[2], args[3], args[4])
+ return immJumpTable(s, args[1], n, func(sNew *state, idx int) {
+ // Encode as int8, as AuxInt requires; see the AuxInt comment for details.
+ s.vars[n] = sNew.newValue4I(op, t, int64(int8(idx<<offset)), args[0], args[2], args[3], args[4])
+ })
}
}
// CeilScaled rounds elements up with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x4) CeilScaled(prec uint8) Float32x4
// CeilScaled rounds elements up with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x8) CeilScaled(prec uint8) Float32x8
// CeilScaled rounds elements up with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x16) CeilScaled(prec uint8) Float32x16
// CeilScaled rounds elements up with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x2) CeilScaled(prec uint8) Float64x2
// CeilScaled rounds elements up with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x4) CeilScaled(prec uint8) Float64x4
// CeilScaled rounds elements up with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x8) CeilScaled(prec uint8) Float64x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8
// CeilScaledResidue computes the difference after ceiling with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x4) CeilScaledResidue(prec uint8) Float32x4
// CeilScaledResidue computes the difference after ceiling with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x8) CeilScaledResidue(prec uint8) Float32x8
// CeilScaledResidue computes the difference after ceiling with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x16) CeilScaledResidue(prec uint8) Float32x16
// CeilScaledResidue computes the difference after ceiling with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x2) CeilScaledResidue(prec uint8) Float64x2
// CeilScaledResidue computes the difference after ceiling with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x4) CeilScaledResidue(prec uint8) Float64x4
// CeilScaledResidue computes the difference after ceiling with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x8) CeilScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8
// FloorScaled rounds elements down with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x4) FloorScaled(prec uint8) Float32x4
// FloorScaled rounds elements down with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x8) FloorScaled(prec uint8) Float32x8
// FloorScaled rounds elements down with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x16) FloorScaled(prec uint8) Float32x16
// FloorScaled rounds elements down with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x2) FloorScaled(prec uint8) Float64x2
// FloorScaled rounds elements down with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x4) FloorScaled(prec uint8) Float64x4
// FloorScaled rounds elements down with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x8) FloorScaled(prec uint8) Float64x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8
// FloorScaledResidue computes the difference after flooring with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x4) FloorScaledResidue(prec uint8) Float32x4
// FloorScaledResidue computes the difference after flooring with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x8) FloorScaledResidue(prec uint8) Float32x8
// FloorScaledResidue computes the difference after flooring with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x16) FloorScaledResidue(prec uint8) Float32x16
// FloorScaledResidue computes the difference after flooring with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x2) FloorScaledResidue(prec uint8) Float64x2
// FloorScaledResidue computes the difference after flooring with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x4) FloorScaledResidue(prec uint8) Float64x4
// FloorScaledResidue computes the difference after flooring with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI
func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI
func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI
func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI
func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x16
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI
func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x32
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI
func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x64
//
// This operation is applied selectively under a write mask.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI
func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16
//
// This operation is applied selectively under a write mask.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI
func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32
//
// This operation is applied selectively under a write mask.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI
func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64
//
// This operation is applied selectively under a write mask.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI
func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16
//
// This operation is applied selectively under a write mask.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI
func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32
//
// This operation is applied selectively under a write mask.
//
-// b is expected to be a constant, non-constant value will trigger a runtime panic.
+// b results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI
func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64
// GetElem retrieves a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPEXTRB, CPU Feature: AVX512BW
func (x Int8x16) GetElem(index uint8) int8
// GetElem retrieves a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPEXTRW, CPU Feature: AVX512BW
func (x Int16x8) GetElem(index uint8) int16
// GetElem retrieves a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPEXTRD, CPU Feature: AVX
func (x Int32x4) GetElem(index uint8) int32
// GetElem retrieves a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPEXTRQ, CPU Feature: AVX
func (x Int64x2) GetElem(index uint8) int64
// GetElem retrieves a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPEXTRB, CPU Feature: AVX512BW
func (x Uint8x16) GetElem(index uint8) uint8
// GetElem retrieves a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPEXTRW, CPU Feature: AVX512BW
func (x Uint16x8) GetElem(index uint8) uint16
// GetElem retrieves a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPEXTRD, CPU Feature: AVX
func (x Uint32x4) GetElem(index uint8) uint32
// GetElem retrieves a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPEXTRQ, CPU Feature: AVX
func (x Uint64x2) GetElem(index uint8) uint64
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Int32x4) RotateAllLeft(shift uint8) Int32x4
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Int32x8) RotateAllLeft(shift uint8) Int32x8
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Int32x16) RotateAllLeft(shift uint8) Int32x16
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Int64x2) RotateAllLeft(shift uint8) Int64x2
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Int64x4) RotateAllLeft(shift uint8) Int64x4
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Int64x8) RotateAllLeft(shift uint8) Int64x8
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Uint32x4) RotateAllLeft(shift uint8) Uint32x4
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Uint32x8) RotateAllLeft(shift uint8) Uint32x8
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Uint32x16) RotateAllLeft(shift uint8) Uint32x16
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Uint64x2) RotateAllLeft(shift uint8) Uint64x2
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Uint64x4) RotateAllLeft(shift uint8) Uint64x4
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLD, CPU Feature: AVX512F
func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPROLQ, CPU Feature: AVX512F
func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Int32x4) RotateAllRight(shift uint8) Int32x4
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Int32x8) RotateAllRight(shift uint8) Int32x8
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Int32x16) RotateAllRight(shift uint8) Int32x16
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Int64x2) RotateAllRight(shift uint8) Int64x2
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Int64x4) RotateAllRight(shift uint8) Int64x4
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Int64x8) RotateAllRight(shift uint8) Int64x8
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Uint32x4) RotateAllRight(shift uint8) Uint32x4
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Uint32x8) RotateAllRight(shift uint8) Uint32x8
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it is a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORD, CPU Feature: AVX512F
func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPRORQ, CPU Feature: AVX512F
func (x Uint64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Uint64x8
// RoundToEvenScaled rounds elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x4) RoundToEvenScaled(prec uint8) Float32x4
// RoundToEvenScaled rounds elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x8) RoundToEvenScaled(prec uint8) Float32x8
// RoundToEvenScaled rounds elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x16) RoundToEvenScaled(prec uint8) Float32x16
// RoundToEvenScaled rounds elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x2) RoundToEvenScaled(prec uint8) Float64x2
// RoundToEvenScaled rounds elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x4) RoundToEvenScaled(prec uint8) Float64x4
// RoundToEvenScaled rounds elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x16
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8
// RoundToEvenScaledResidue computes the difference after rounding with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x4) RoundToEvenScaledResidue(prec uint8) Float32x4
// RoundToEvenScaledResidue computes the difference after rounding with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x8) RoundToEvenScaledResidue(prec uint8) Float32x8
// RoundToEvenScaledResidue computes the difference after rounding with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x16) RoundToEvenScaledResidue(prec uint8) Float32x16
// RoundToEvenScaledResidue computes the difference after rounding with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x2) RoundToEvenScaledResidue(prec uint8) Float64x2
// RoundToEvenScaledResidue computes the difference after rounding with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4
// RoundToEvenScaledResidue computes the difference after rounding with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8
// SetElem sets a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPINSRB, CPU Feature: AVX
func (x Int8x16) SetElem(index uint8, y int8) Int8x16
// SetElem sets a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPINSRW, CPU Feature: AVX
func (x Int16x8) SetElem(index uint8, y int16) Int16x8
// SetElem sets a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPINSRD, CPU Feature: AVX
func (x Int32x4) SetElem(index uint8, y int32) Int32x4
// SetElem sets a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPINSRQ, CPU Feature: AVX
func (x Int64x2) SetElem(index uint8, y int64) Int64x2
// SetElem sets a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPINSRB, CPU Feature: AVX
func (x Uint8x16) SetElem(index uint8, y uint8) Uint8x16
// SetElem sets a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPINSRW, CPU Feature: AVX
func (x Uint16x8) SetElem(index uint8, y uint16) Uint16x8
// SetElem sets a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPINSRD, CPU Feature: AVX
func (x Uint32x4) SetElem(index uint8, y uint32) Uint32x4
// SetElem sets a single constant-indexed element's value.
//
-// index is expected to be a constant, non-constant value will trigger a runtime panic.
+// index results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPINSRQ, CPU Feature: AVX
func (x Uint64x2) SetElem(index uint8, y uint64) Uint64x2
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Int16x8) ShiftAllLeftConcat(shift uint8, y Int16x8) Int16x8
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Int16x16) ShiftAllLeftConcat(shift uint8, y Int16x16) Int16x16
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Int16x32) ShiftAllLeftConcat(shift uint8, y Int16x32) Int16x32
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Int32x4) ShiftAllLeftConcat(shift uint8, y Int32x4) Int32x4
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Int32x8) ShiftAllLeftConcat(shift uint8, y Int32x8) Int32x8
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Int32x16) ShiftAllLeftConcat(shift uint8, y Int32x16) Int32x16
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Int64x2) ShiftAllLeftConcat(shift uint8, y Int64x2) Int64x2
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Int64x4) ShiftAllLeftConcat(shift uint8, y Int64x4) Int64x4
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Int64x8) ShiftAllLeftConcat(shift uint8, y Int64x8) Int64x8
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Uint16x8) ShiftAllLeftConcat(shift uint8, y Uint16x8) Uint16x8
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Uint16x16) ShiftAllLeftConcat(shift uint8, y Uint16x16) Uint16x16
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Uint16x32) ShiftAllLeftConcat(shift uint8, y Uint16x32) Uint16x32
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Uint32x4) ShiftAllLeftConcat(shift uint8, y Uint32x4) Uint32x4
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Uint32x8) ShiftAllLeftConcat(shift uint8, y Uint32x8) Uint32x8
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Uint32x16) ShiftAllLeftConcat(shift uint8, y Uint32x16) Uint32x16
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Uint64x2) ShiftAllLeftConcat(shift uint8, y Uint64x2) Uint64x2
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Uint64x4) ShiftAllLeftConcat(shift uint8, y Uint64x4) Uint64x4
// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Uint64x8) ShiftAllLeftConcat(shift uint8, y Uint64x8) Uint64x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Int16x8) ShiftAllLeftConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Int16x16) ShiftAllLeftConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Int16x32) ShiftAllLeftConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Int32x4) ShiftAllLeftConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Int32x8) ShiftAllLeftConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Int32x16) ShiftAllLeftConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Int64x2) ShiftAllLeftConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Int64x4) ShiftAllLeftConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Int64x8) ShiftAllLeftConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant; a non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Uint16x8) ShiftAllLeftConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Uint16x16) ShiftAllLeftConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDW, CPU Feature: AVX512VBMI2
func (x Uint16x32) ShiftAllLeftConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Uint32x4) ShiftAllLeftConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Uint32x8) ShiftAllLeftConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDD, CPU Feature: AVX512VBMI2
func (x Uint32x16) ShiftAllLeftConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Uint64x2) ShiftAllLeftConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Uint64x4) ShiftAllLeftConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2
func (x Uint64x8) ShiftAllLeftConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Int16x8) ShiftAllRightConcat(shift uint8, y Int16x8) Int16x8
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Int16x16) ShiftAllRightConcat(shift uint8, y Int16x16) Int16x16
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Int16x32) ShiftAllRightConcat(shift uint8, y Int16x32) Int16x32
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Int32x4) ShiftAllRightConcat(shift uint8, y Int32x4) Int32x4
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Int32x8) ShiftAllRightConcat(shift uint8, y Int32x8) Int32x8
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Int32x16) ShiftAllRightConcat(shift uint8, y Int32x16) Int32x16
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Int64x2) ShiftAllRightConcat(shift uint8, y Int64x2) Int64x2
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Int64x4) ShiftAllRightConcat(shift uint8, y Int64x4) Int64x4
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Int64x8) ShiftAllRightConcat(shift uint8, y Int64x8) Int64x8
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Uint16x8) ShiftAllRightConcat(shift uint8, y Uint16x8) Uint16x8
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Uint16x16) ShiftAllRightConcat(shift uint8, y Uint16x16) Uint16x16
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Uint16x32) ShiftAllRightConcat(shift uint8, y Uint16x32) Uint16x32
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Uint32x4) ShiftAllRightConcat(shift uint8, y Uint32x4) Uint32x4
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Uint32x8) ShiftAllRightConcat(shift uint8, y Uint32x8) Uint32x8
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Uint32x16) ShiftAllRightConcat(shift uint8, y Uint32x16) Uint32x16
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Uint64x2) ShiftAllRightConcat(shift uint8, y Uint64x2) Uint64x2
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Uint64x4) ShiftAllRightConcat(shift uint8, y Uint64x4) Uint64x4
// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Uint64x8) ShiftAllRightConcat(shift uint8, y Uint64x8) Uint64x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Int16x8) ShiftAllRightConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Int16x16) ShiftAllRightConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Int16x32) ShiftAllRightConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Int32x4) ShiftAllRightConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Int32x8) ShiftAllRightConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Int32x16) ShiftAllRightConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Int64x2) ShiftAllRightConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Int64x4) ShiftAllRightConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Int64x8) ShiftAllRightConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Uint16x8) ShiftAllRightConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Uint16x16) ShiftAllRightConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDW, CPU Feature: AVX512VBMI2
func (x Uint16x32) ShiftAllRightConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Uint32x4) ShiftAllRightConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Uint32x8) ShiftAllRightConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDD, CPU Feature: AVX512VBMI2
func (x Uint32x16) ShiftAllRightConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Uint64x2) ShiftAllRightConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Uint64x4) ShiftAllRightConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4
//
// This operation is applied selectively under a write mask.
//
-// shift is expected to be a constant, non-constant value will trigger a runtime panic.
+// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2
func (x Uint64x8) ShiftAllRightConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8
// TruncScaled truncates elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x4) TruncScaled(prec uint8) Float32x4
// TruncScaled truncates elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x8) TruncScaled(prec uint8) Float32x8
// TruncScaled truncates elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x16) TruncScaled(prec uint8) Float32x16
// TruncScaled truncates elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x2) TruncScaled(prec uint8) Float64x2
// TruncScaled truncates elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x4) TruncScaled(prec uint8) Float64x4
// TruncScaled truncates elements with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x8) TruncScaled(prec uint8) Float64x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512F
func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512F
func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8
// TruncScaledResidue computes the difference after truncating with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x4) TruncScaledResidue(prec uint8) Float32x4
// TruncScaledResidue computes the difference after truncating with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x8) TruncScaledResidue(prec uint8) Float32x8
// TruncScaledResidue computes the difference after truncating with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x16) TruncScaledResidue(prec uint8) Float32x16
// TruncScaledResidue computes the difference after truncating with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x2) TruncScaledResidue(prec uint8) Float64x2
// TruncScaledResidue computes the difference after truncating with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x4) TruncScaledResidue(prec uint8) Float64x4
// TruncScaledResidue computes the difference after truncating with specified precision.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPS, CPU Feature: AVX512DQ
func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4
//
// This operation is applied selectively under a write mask.
//
-// prec is expected to be a constant, non-constant value will trigger a runtime panic.
+// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated.
//
// Asm: VREDUCEPD, CPU Feature: AVX512DQ
func (x Float64x8) TruncScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8
c.StoreSlice(s)
checkSlices[float64](t, s, []float64{4, 2, 3, 4})
}
+
+// ro is deliberately a package-level variable (not a constant) so the
+// compiler cannot constant-fold the rotate amount; this forces the
+// non-immediate code path (the generated jump table) for RotateAllLeft.
+var ro uint8 = 2
+
+// TestRotateAllVariable checks that RotateAllLeft produces correct results
+// when its amount argument is not a compile-time constant.
+func TestRotateAllVariable(t *testing.T) {
+	if !simd.HasAVX512() {
+		// Skip stops the test via runtime.Goexit; no return needed after it.
+		t.Skip("Test requires HasAVX512, not available on this hardware")
+	}
+	got := make([]int32, 4)
+	simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).RotateAllLeft(ro).StoreSlice(got)
+	for i, v := range got {
+		if v != 0b1100 {
+			t.Errorf("element %d: want 0b1100, got %b", i, v)
+		}
+	}
+}